/*
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <rdma/ib_cache.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"1.0"
#define DRV_RELDATE	"July 1, 2013"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
		   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool prefer_fr;
static bool register_always;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(prefer_fr, bool, 0444);
MODULE_PARM_DESC(prefer_fr,
		 "Whether to use fast registration if both FMR and fast registration are supported");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
		 "Use memory registration even for contiguous memory regions");

static struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");

static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
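
/*
 * Illustrative example (not part of the original source): the read-only
 * (mode 0444) parameters above can only be set at module load time, e.g.
 *
 *   modprobe ib_srp cmd_sg_entries=32 ch_count=4
 *
 * while reconnect_delay, fast_io_fail_tmo and dev_loss_tmo, registered via
 * module_param_cb() with mode S_IRUGO | S_IWUSR, can also be changed at
 * runtime through /sys/module/ib_srp/parameters/.
 */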

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr);
static void srp_send_completion(struct ib_cq *cq, void *ch_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sprintf(buffer, "%d", tmo);
	else
		return sprintf(buffer, "off");
}

static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	if (strncmp(val, "off", 3) != 0) {
		res = kstrtoint(val, 0, &tmo);
		if (res)
			goto out;
	} else {
		tmo = -1;
	}
	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}

static struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};
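
/*
 * Illustrative example (not part of the original source): writing "off" to
 * /sys/module/ib_srp/parameters/fast_io_fail_tmo makes srp_tmo_set() store
 * -1 in srp_fast_io_fail_tmo, provided srp_tmo_valid() accepts the resulting
 * (reconnect_delay, fast_io_fail_tmo, dev_loss_tmo) combination;
 * srp_tmo_get() then reports the value back as "off".
 */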

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

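/*
 * Allocate an information unit (IU): the struct srp_iu itself, a buffer of
 * @size bytes, and a DMA mapping of that buffer for @direction. Returns
 * NULL on failure; srp_free_iu() releases all three resources.
 */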
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %d\n", event->event);
}

static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, ch);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (ch->cm_id)
		ib_destroy_cm_id(ch->cm_id);
	ch->cm_id = new_cm_id;
	ch->path.sgid = target->sgid;
	ch->path.dgid = target->orig_dgid;
	ch->path.pkey = target->pkey;
	ch->path.service_id = target->service_id;

	return 0;
}

static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_fmr_pool_param fmr_param;

	memset(&fmr_param, 0, sizeof(fmr_param));
	fmr_param.pool_size	    = target->scsi_host->can_queue;
	fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
	fmr_param.cache		    = 1;
	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
	fmr_param.page_shift	    = ilog2(dev->mr_page_size);
	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_WRITE |
				       IB_ACCESS_REMOTE_READ);

	return ib_create_fmr_pool(dev->pd, &fmr_param);
}

/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
	int i;
	struct srp_fr_desc *d;

	if (!pool)
		return;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		if (d->frpl)
			ib_free_fast_reg_page_list(d->frpl);
		if (d->mr)
			ib_dereg_mr(d->mr);
	}
	kfree(pool);
}

/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
					      struct ib_pd *pd, int pool_size,
					      int max_page_list_len)
{
	struct srp_fr_pool *pool;
	struct srp_fr_desc *d;
	struct ib_mr *mr;
	struct ib_fast_reg_page_list *frpl;
	int i, ret = -EINVAL;

	if (pool_size <= 0)
		goto err;
	ret = -ENOMEM;
	pool = kzalloc(sizeof(struct srp_fr_pool) +
		       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
	if (!pool)
		goto err;
	pool->size = pool_size;
	pool->max_page_list_len = max_page_list_len;
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free_list);

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		mr = ib_alloc_fast_reg_mr(pd, max_page_list_len);
		if (IS_ERR(mr)) {
			ret = PTR_ERR(mr);
			goto destroy_pool;
		}
		d->mr = mr;
		frpl = ib_alloc_fast_reg_page_list(device, max_page_list_len);
		if (IS_ERR(frpl)) {
			ret = PTR_ERR(frpl);
			goto destroy_pool;
		}
		d->frpl = frpl;
		list_add_tail(&d->entry, &pool->free_list);
	}

out:
	return pool;

destroy_pool:
	srp_destroy_fr_pool(pool);

err:
	pool = ERR_PTR(ret);
	goto out;
}

/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
	struct srp_fr_desc *d = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->free_list)) {
		d = list_first_entry(&pool->free_list, typeof(*d), entry);
		list_del(&d->entry);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return d;
}

/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
			    int n)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < n; i++)
		list_add(&desc[i]->entry, &pool->free_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;

	return srp_create_fr_pool(dev->dev, dev->pd,
				  target->scsi_host->can_queue,
				  dev->max_pages_per_mr);
}

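/*
 * Typical (illustrative) use of the FR pool in this driver: a descriptor is
 * taken with srp_fr_pool_get() while mapping an S/G list (see
 * srp_map_finish_fr() below) and returned with srp_fr_pool_put() after a
 * local invalidate of its rkey has been queued (see srp_unmap_data()).
 */
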
/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @ch: SRP RDMA channel.
 *
 * Change a queue pair into the error state and wait until all receive
 * completions have been processed before destroying it. This prevents the
 * receive completion handler from accessing the queue pair while it is
 * being destroyed.
 */
static void srp_destroy_qp(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID };
	struct ib_recv_wr *bad_wr;
	int ret;

	/* Destroying a QP and reusing ch->done is only safe if not connected */
	WARN_ON_ONCE(target->connected);

	ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
	WARN_ONCE(ret, "ib_modify_qp() returned %d\n", ret);
	if (ret)
		goto out;

	init_completion(&ch->done);
	ret = ib_post_recv(ch->qp, &wr, &bad_wr);
	WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
	if (ret == 0)
		wait_for_completion(&ch->done);

out:
	ib_destroy_qp(ch->qp);
}

static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	struct ib_fmr_pool *fmr_pool = NULL;
	struct srp_fr_pool *fr_pool = NULL;
	const int m = 1 + dev->use_fast_reg;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	/* + 1 for SRP_LAST_WR_ID */
	recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
			       target->queue_size + 1, ch->comp_vector);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
			       m * target->queue_size, ch->comp_vector);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = m * target->queue_size;
	init_attr->cap.max_recv_wr     = target->queue_size + 1;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = 1;
	init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = send_cq;
	init_attr->recv_cq             = recv_cq;

	qp = ib_create_qp(dev->pd, init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, qp);
	if (ret)
		goto err_qp;

	if (dev->use_fast_reg && dev->has_fr) {
		fr_pool = srp_alloc_fr_pool(target);
		if (IS_ERR(fr_pool)) {
			ret = PTR_ERR(fr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
		ch->fr_pool = fr_pool;
	} else if (!dev->use_fast_reg && dev->has_fmr) {
		fmr_pool = srp_alloc_fmr_pool(target);
		if (IS_ERR(fmr_pool)) {
			ret = PTR_ERR(fmr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FMR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
		ch->fmr_pool = fmr_pool;
	}

	if (ch->qp)
		srp_destroy_qp(ch);
	if (ch->recv_cq)
		ib_destroy_cq(ch->recv_cq);
	if (ch->send_cq)
		ib_destroy_cq(ch->send_cq);

	ch->qp = qp;
	ch->recv_cq = recv_cq;
	ch->send_cq = send_cq;

	kfree(init_attr);
	return 0;

err_qp:
	ib_destroy_qp(qp);

err_send_cq:
	ib_destroy_cq(send_cq);

err_recv_cq:
	ib_destroy_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}

/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
			   struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	int i;

	if (!ch->target)
		return;

	if (ch->cm_id) {
		ib_destroy_cm_id(ch->cm_id);
		ch->cm_id = NULL;
	}

	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */
	if (!ch->qp)
		return;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
	} else {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
	}
	srp_destroy_qp(ch);
	ib_destroy_cq(ch->send_cq);
	ib_destroy_cq(ch->recv_cq);

	/*
	 * Prevent the SCSI error handler from using this channel after it
	 * has been freed. The SCSI error handler can continue trying to
	 * perform recovery actions after scsi_remove_host() has returned.
	 */
	ch->target = NULL;

	ch->qp = NULL;
	ch->send_cq = ch->recv_cq = NULL;

	if (ch->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->rx_ring[i]);
		kfree(ch->rx_ring);
		ch->rx_ring = NULL;
	}
	if (ch->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->tx_ring[i]);
		kfree(ch->tx_ring);
		ch->tx_ring = NULL;
	}
}

static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *ch_ptr)
{
	struct srp_rdma_ch *ch = ch_ptr;
	struct srp_target_port *target = ch->target;

	ch->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		ch->path = *pathrec;
	complete(&ch->done);
}

static int srp_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	ch->path.numb_path = 1;

	init_completion(&ch->done);

	ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
					       target->srp_host->srp_dev->dev,
					       target->srp_host->port,
					       &ch->path,
					       IB_SA_PATH_REC_SERVICE_ID |
					       IB_SA_PATH_REC_DGID	 |
					       IB_SA_PATH_REC_SGID	 |
					       IB_SA_PATH_REC_NUMB_PATH	 |
					       IB_SA_PATH_REC_PKEY,
					       SRP_PATH_REC_TIMEOUT_MS,
					       GFP_KERNEL,
					       srp_path_rec_completion,
					       ch, &ch->path_query);
	if (ch->path_query_id < 0)
		return ch->path_query_id;

	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		return ret;

	if (ch->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return ch->status;
}

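/*
 * Build and send an SRP_LOGIN_REQ: fill in the IB CM REQ parameters and the
 * SRP login private data, including the initiator and target port
 * identifiers, whose byte order depends on the I/O class the target reports
 * (see the comments below).
 */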
static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path		      = &ch->path;
	req->param.alternate_path	      = NULL;
	req->param.service_id		      = target->service_id;
	req->param.qp_num		      = ch->qp->qp_num;
	req->param.qp_type		      = ch->qp->qp_type;
	req->param.private_data		      = &req->priv;
	req->param.private_data_len	      = sizeof req->priv;
	req->param.flow_control		      = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn	against	     &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count		      = target->tl_retry_count;
	req->param.rnr_retry_count	      = 7;
	req->param.max_cm_retries	      = 15;

	req->priv.opcode	= SRP_LOGIN_REQ;
	req->priv.tag		= 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	req->priv.req_flags	= (multich ? SRP_MULTICHAN_MULTI :
				   SRP_MULTICHAN_SINGLE);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID. Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     (unsigned long long) be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(ch->cm_id, &req->param);

	kfree(req);

	return status;
}

static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(srp_remove_wq, &target->remove_work);

	return changed;
}

static bool srp_change_conn_state(struct srp_target_port *target,
				  bool connected)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->connected != connected) {
		target->connected = connected;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	return changed;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	if (srp_change_conn_state(target, false)) {
		/* XXX should send SRP_I_LOGOUT request */

		for (i = 0; i < target->ch_count; i++) {
			ch = &target->ch[i];
			if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
				shost_printk(KERN_DEBUG, target->scsi_host,
					     PFX "Sending CM DREQ failed\n");
			}
		}
	}
}

static void srp_free_req_data(struct srp_target_port *target,
			      struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct srp_request *req;
	int i;

	if (!ch->target || !ch->req_ring)
		return;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		if (dev->use_fast_reg)
			kfree(req->fr_list);
		else
			kfree(req->fmr_list);
		kfree(req->map_page);
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}

	kfree(ch->req_ring);
	ch->req_ring = NULL;
}

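/*
 * Allocate the per-channel request ring: for each request, the fast
 * registration or FMR bookkeeping array, the page-mapping array and an
 * indirect descriptor buffer that is DMA-mapped for transfers to the target.
 */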
static int srp_alloc_req_data(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *srp_dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	struct srp_request *req;
	void *mr_list;
	dma_addr_t dma_addr;
	int i, ret = -ENOMEM;

	ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
			       GFP_KERNEL);
	if (!ch->req_ring)
		goto out;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
				  GFP_KERNEL);
		if (!mr_list)
			goto out;
		if (srp_dev->use_fast_reg)
			req->fr_list = mr_list;
		else
			req->fmr_list = mr_list;
		req->map_page = kmalloc(srp_dev->max_pages_per_mr *
					sizeof(void *), GFP_KERNEL);
		if (!req->map_page)
			goto out;
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->indirect_desc)
			goto out;

		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr))
			goto out;

		req->indirect_dma_addr = dma_addr;
	}
	ret = 0;

out:
	return ret;
}

/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: any attributes defined in the host template that did not exist
 * before invocation of this function will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	struct device_attribute **attr;

	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
		device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_rport_get(target->rport);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_stop_rport_timers(target->rport);
	srp_disconnect_target(target);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
	}
	cancel_work_sync(&target->tl_err_work);
	srp_rport_put(target->rport);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_req_data(target, ch);
	}
	kfree(target->ch);
	target->ch = NULL;

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_host_put(target->scsi_host);
}

static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_remove_target(target);
}

static void srp_rport_delete(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
}

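/*
 * Connect one RDMA channel: resolve a path, then loop sending SRP login
 * requests until the CM handler reports success; SRP_PORT_REDIRECT and
 * SRP_DLID_REDIRECT statuses trigger another attempt, anything else fails.
 */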
static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	int ret;

	WARN_ON_ONCE(!multich && target->connected);

	target->qp_in_error = false;

	ret = srp_lookup_path(ch);
	if (ret)
		return ret;

	while (1) {
		init_completion(&ch->done);
		ret = srp_send_req(ch, multich);
		if (ret)
			return ret;
		ret = wait_for_completion_interruptible(&ch->done);
		if (ret < 0)
			return ret;

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (ch->status) {
		case 0:
			srp_change_conn_state(target, true);
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(ch);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "giving up on stale connection\n");
			ch->status = -ECONNRESET;
			return ch->status;

		default:
			return ch->status;
		}
	}
}

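/*
 * Post a work request that invalidates @rkey on the channel's queue pair;
 * its completion is reported with wr_id LOCAL_INV_WR_ID_MASK.
 */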
static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
{
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr = {
		.opcode		    = IB_WR_LOCAL_INV,
		.wr_id		    = LOCAL_INV_WR_ID_MASK,
		.next		    = NULL,
		.num_sge	    = 0,
		.send_flags	    = 0,
		.ex.invalidate_rkey = rkey,
	};

	return ib_post_send(ch->qp, &wr, &bad_wr);
}

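/*
 * Undo the memory registrations set up for @scmnd: with fast registration,
 * queue a local invalidate for each descriptor and return the descriptors
 * to the FR pool; with FMR, unmap each pool FMR. Finally unmap the S/G list.
 */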
static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_rdma_ch *ch,
			   struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	int i, res;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (dev->use_fast_reg) {
		struct srp_fr_desc **pfr;

		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
			res = srp_inv_rkey(ch, (*pfr)->mr->rkey);
			if (res < 0) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "Queueing INV WR for rkey %#x failed (%d)\n",
					     (*pfr)->mr->rkey, res);
				queue_work(system_long_wq,
					   &target->tl_err_work);
			}
		}
		if (req->nmdesc)
			srp_fr_pool_put(ch->fr_pool, req->fr_list,
					req->nmdesc);
	} else {
		struct ib_pool_fmr **pfmr;

		for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
			ib_fmr_pool_unmap(*pfmr);
	}

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}

/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @ch: SRP RDMA channel.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
				       struct srp_request *req,
				       struct scsi_device *sdev,
				       struct scsi_cmnd *scmnd)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	if (req->scmnd &&
	    (!sdev || req->scmnd->device == sdev) &&
	    (!scmnd || req->scmnd == scmnd)) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	} else {
		scmnd = NULL;
	}
	spin_unlock_irqrestore(&ch->lock, flags);

	return scmnd;
}

/**
 * srp_free_req() - Unmap data and add request to the free request list.
 * @ch: SRP RDMA channel.
 * @req: Request to be freed.
 * @scmnd: SCSI command associated with @req.
 * @req_lim_delta: Amount to be added to @target->req_lim.
 */
static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
			 struct scsi_cmnd *scmnd, s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(scmnd, ch, req);

	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_lim_delta;
	spin_unlock_irqrestore(&ch->lock, flags);
}

static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
			   struct scsi_device *sdev, int result)
{
	struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);

	if (scmnd) {
		srp_free_req(ch, req, scmnd, 0);
		scmnd->result = result;
		scmnd->scsi_done(scmnd);
	}
}

static void srp_terminate_io(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	struct Scsi_Host *shost = target->scsi_host;
	struct scsi_device *sdev;
	int i, j;

	/*
	 * Invoking srp_terminate_io() while srp_queuecommand() is running
	 * is not safe. Hence the warning statement below.
	 */
	shost_for_each_device(sdev, shost)
		WARN_ON_ONCE(sdev->request_queue->request_fn_active);

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];

		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL,
				       DID_TRANSPORT_FAILFAST << 16);
		}
	}
}

/*
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to ensure that is not to call this function
 * directly but to call srp_reconnect_rport() instead, since that function
 * serializes calls of this function via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
 */
static int srp_rport_reconnect(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	int i, j, ret = 0;
	bool multich = false;

	srp_disconnect_target(target);

	if (target->state == SRP_TARGET_SCANNING)
		return -ENODEV;

	/*
	 * Now get a new local CM ID so that we avoid confusing the target in
	 * case things are really fouled up. Doing so also ensures that all CM
	 * callbacks will have finished before a new QP is allocated.
	 */
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		if (!ch->target)
			break;
		ret += srp_new_cm_id(ch);
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		if (!ch->target)
			break;
		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL, DID_RESET << 16);
		}
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		if (!ch->target)
			break;
		/*
		 * Whether or not creating a new CM ID succeeded, create a new
		 * QP. This guarantees that all completion callback function
		 * invocations have finished before request resetting starts.
		 */
		ret += srp_create_ch_ib(ch);

		INIT_LIST_HEAD(&ch->free_tx);
		for (j = 0; j < target->queue_size; ++j)
			list_add(&ch->tx_ring[j]->list, &ch->free_tx);
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		if (ret || !ch->target) {
			if (i > 1)
				ret = 0;
			break;
		}
		ret = srp_connect_ch(ch, multich);
		multich = true;
	}

	if (ret == 0)
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "reconnect succeeded\n");

	return ret;
}

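/*
 * Append one srp_direct_buf descriptor (address, rkey, length) to the
 * mapping state and update the running totals.
 */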
David Dillow8f26c9f2011-01-14 19:45:50 -05001264static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
1265 unsigned int dma_len, u32 rkey)
Roland Dreierf5358a12006-06-17 20:37:29 -07001266{
David Dillow8f26c9f2011-01-14 19:45:50 -05001267 struct srp_direct_buf *desc = state->desc;
1268
1269 desc->va = cpu_to_be64(dma_addr);
1270 desc->key = cpu_to_be32(rkey);
1271 desc->len = cpu_to_be32(dma_len);
1272
1273 state->total_len += dma_len;
1274 state->desc++;
1275 state->ndesc++;
1276}
1277
1278static int srp_map_finish_fmr(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001279 struct srp_rdma_ch *ch)
David Dillow8f26c9f2011-01-14 19:45:50 -05001280{
David Dillow8f26c9f2011-01-14 19:45:50 -05001281 struct ib_pool_fmr *fmr;
Roland Dreierf5358a12006-06-17 20:37:29 -07001282 u64 io_addr = 0;
David Dillow8f26c9f2011-01-14 19:45:50 -05001283
Bart Van Assche509c07b2014-10-30 14:48:30 +01001284 fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
David Dillow8f26c9f2011-01-14 19:45:50 -05001285 state->npages, io_addr);
1286 if (IS_ERR(fmr))
1287 return PTR_ERR(fmr);
1288
1289 *state->next_fmr++ = fmr;
Bart Van Assche52ede082014-05-20 15:07:45 +02001290 state->nmdesc++;
David Dillow8f26c9f2011-01-14 19:45:50 -05001291
Bart Van Assche52ede082014-05-20 15:07:45 +02001292 srp_map_desc(state, 0, state->dma_len, fmr->fmr->rkey);
Bart Van Assche539dde62014-05-20 15:05:46 +02001293
David Dillow8f26c9f2011-01-14 19:45:50 -05001294 return 0;
1295}
1296
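/*
 * Register the pages collected in @state through a fast registration
 * (IB_WR_FAST_REG_MR) work request. ib_inc_rkey() bumps the key portion
 * of the rkey on every reuse of the descriptor, so a stale access
 * through a previously handed-out key is not silently honored.
 */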
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001297static int srp_map_finish_fr(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001298 struct srp_rdma_ch *ch)
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001299{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001300 struct srp_target_port *target = ch->target;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001301 struct srp_device *dev = target->srp_host->srp_dev;
1302 struct ib_send_wr *bad_wr;
1303 struct ib_send_wr wr;
1304 struct srp_fr_desc *desc;
1305 u32 rkey;
1306
Bart Van Assche509c07b2014-10-30 14:48:30 +01001307 desc = srp_fr_pool_get(ch->fr_pool);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001308 if (!desc)
1309 return -ENOMEM;
1310
1311 rkey = ib_inc_rkey(desc->mr->rkey);
1312 ib_update_fast_reg_key(desc->mr, rkey);
1313
1314 memcpy(desc->frpl->page_list, state->pages,
1315 sizeof(state->pages[0]) * state->npages);
1316
1317 memset(&wr, 0, sizeof(wr));
1318 wr.opcode = IB_WR_FAST_REG_MR;
1319 wr.wr_id = FAST_REG_WR_ID_MASK;
1320 wr.wr.fast_reg.iova_start = state->base_dma_addr;
1321 wr.wr.fast_reg.page_list = desc->frpl;
1322 wr.wr.fast_reg.page_list_len = state->npages;
1323 wr.wr.fast_reg.page_shift = ilog2(dev->mr_page_size);
1324 wr.wr.fast_reg.length = state->dma_len;
1325 wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
1326 IB_ACCESS_REMOTE_READ |
1327 IB_ACCESS_REMOTE_WRITE);
1328 wr.wr.fast_reg.rkey = desc->mr->lkey;
1329
1330 *state->next_fr++ = desc;
1331 state->nmdesc++;
1332
1333 srp_map_desc(state, state->base_dma_addr, state->dma_len,
1334 desc->mr->rkey);
1335
Bart Van Assche509c07b2014-10-30 14:48:30 +01001336 return ib_post_send(ch->qp, &wr, &bad_wr);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001337}
1338
Bart Van Assche539dde62014-05-20 15:05:46 +02001339static int srp_finish_mapping(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001340 struct srp_rdma_ch *ch)
Bart Van Assche539dde62014-05-20 15:05:46 +02001341{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001342 struct srp_target_port *target = ch->target;
Bart Van Assche539dde62014-05-20 15:05:46 +02001343 int ret = 0;
1344
1345 if (state->npages == 0)
1346 return 0;
1347
Bart Van Asscheb1b88542014-05-20 15:06:41 +02001348 if (state->npages == 1 && !register_always)
Bart Van Assche52ede082014-05-20 15:07:45 +02001349 srp_map_desc(state, state->base_dma_addr, state->dma_len,
Bart Van Assche539dde62014-05-20 15:05:46 +02001350 target->rkey);
1351 else
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001352 ret = target->srp_host->srp_dev->use_fast_reg ?
Bart Van Assche509c07b2014-10-30 14:48:30 +01001353 srp_map_finish_fr(state, ch) :
1354 srp_map_finish_fmr(state, ch);
Bart Van Assche539dde62014-05-20 15:05:46 +02001355
1356 if (ret == 0) {
1357 state->npages = 0;
Bart Van Assche52ede082014-05-20 15:07:45 +02001358 state->dma_len = 0;
Bart Van Assche539dde62014-05-20 15:05:46 +02001359 }
1360
1361 return ret;
1362}
1363
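/*
 * Record the first unmapped sg entry, its index and the first unmapped
 * address within that entry, so that srp_map_sg() can backtrack to this
 * position and continue in direct-mapping mode if registration fails.
 */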
David Dillow8f26c9f2011-01-14 19:45:50 -05001364static void srp_map_update_start(struct srp_map_state *state,
1365 struct scatterlist *sg, int sg_index,
1366 dma_addr_t dma_addr)
1367{
1368 state->unmapped_sg = sg;
1369 state->unmapped_index = sg_index;
1370 state->unmapped_addr = dma_addr;
1371}
1372
1373static int srp_map_sg_entry(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001374 struct srp_rdma_ch *ch,
David Dillow8f26c9f2011-01-14 19:45:50 -05001375 struct scatterlist *sg, int sg_index,
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001376 bool use_mr)
David Dillow8f26c9f2011-01-14 19:45:50 -05001377{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001378 struct srp_target_port *target = ch->target;
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01001379 struct srp_device *dev = target->srp_host->srp_dev;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001380 struct ib_device *ibdev = dev->dev;
David Dillow8f26c9f2011-01-14 19:45:50 -05001381 dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
1382 unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
1383 unsigned int len;
1384 int ret;
Roland Dreierf5358a12006-06-17 20:37:29 -07001385
David Dillow8f26c9f2011-01-14 19:45:50 -05001386 if (!dma_len)
1387 return 0;
Roland Dreierf5358a12006-06-17 20:37:29 -07001388
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001389 if (!use_mr) {
1390 /*
1391 * Once we're in direct map mode for a request, we don't
1392 * go back to FMR or FR mode, so no need to update anything
David Dillow8f26c9f2011-01-14 19:45:50 -05001393 * other than the descriptor.
1394 */
1395 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1396 return 0;
1397 }
Ishai Rabinovitz559ce8f2006-08-03 10:35:43 -07001398
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001399 /*
1400 * Since not all RDMA HW drivers support non-zero page offsets for
1401 * FMR, if we start at an offset into a page, don't merge into the
1402 * current FMR mapping. Finish it out, and use the kernel's MR for
1403 * this sg entry.
David Dillow8f26c9f2011-01-14 19:45:50 -05001404 */
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001405 if ((!dev->use_fast_reg && dma_addr & ~dev->mr_page_mask) ||
1406 dma_len > dev->mr_max_size) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001407 ret = srp_finish_mapping(state, ch);
David Dillow8f26c9f2011-01-14 19:45:50 -05001408 if (ret)
1409 return ret;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001410
David Dillow8f26c9f2011-01-14 19:45:50 -05001411 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1412 srp_map_update_start(state, NULL, 0, 0);
1413 return 0;
1414 }
1415
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001416 /*
1417 * If this is the first sg that will be mapped via FMR or via FR, save
1418 * our position. We need to know the first unmapped entry, its index,
1419 * and the first unmapped address within that entry to be able to
1420 * restart mapping after an error.
David Dillow8f26c9f2011-01-14 19:45:50 -05001421 */
1422 if (!state->unmapped_sg)
1423 srp_map_update_start(state, sg, sg_index, dma_addr);
1424
1425 while (dma_len) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001426 unsigned offset = dma_addr & ~dev->mr_page_mask;
1427 if (state->npages == dev->max_pages_per_mr || offset != 0) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001428 ret = srp_finish_mapping(state, ch);
David Dillow8f26c9f2011-01-14 19:45:50 -05001429 if (ret)
1430 return ret;
1431
1432 srp_map_update_start(state, sg, sg_index, dma_addr);
Roland Dreierf5358a12006-06-17 20:37:29 -07001433 }
1434
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001435 len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
David Dillow8f26c9f2011-01-14 19:45:50 -05001436
1437 if (!state->npages)
1438 state->base_dma_addr = dma_addr;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001439 state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
Bart Van Assche52ede082014-05-20 15:07:45 +02001440 state->dma_len += len;
David Dillow8f26c9f2011-01-14 19:45:50 -05001441 dma_addr += len;
1442 dma_len -= len;
Roland Dreierf5358a12006-06-17 20:37:29 -07001443 }
1444
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001445 /*
1446 * If the last entry of the MR wasn't a full page, then we need to
David Dillow8f26c9f2011-01-14 19:45:50 -05001447 * close it out and start a new one -- we can only merge at page
1448 * boundaries.
1449 */
Roland Dreierf5358a12006-06-17 20:37:29 -07001450 ret = 0;
Bart Van Assche52ede082014-05-20 15:07:45 +02001451 if (len != dev->mr_page_size) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001452 ret = srp_finish_mapping(state, ch);
David Dillow8f26c9f2011-01-14 19:45:50 -05001453 if (!ret)
1454 srp_map_update_start(state, NULL, 0, 0);
1455 }
Roland Dreierf5358a12006-06-17 20:37:29 -07001456 return ret;
1457}
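/*
 * Illustration of the page accounting above, assuming a 4 KiB
 * mr_page_size: an sg entry with dma_addr 0x10000 and dma_len 10240
 * contributes pages 0x10000, 0x11000 and 0x12000. Because the final
 * chunk is only 2 KiB (len != mr_page_size), the mapping is finished
 * out so that the next sg entry starts a fresh memory region.
 */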
1458
Bart Van Assche509c07b2014-10-30 14:48:30 +01001459static int srp_map_sg(struct srp_map_state *state, struct srp_rdma_ch *ch,
1460 struct srp_request *req, struct scatterlist *scat,
1461 int count)
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001462{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001463 struct srp_target_port *target = ch->target;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001464 struct srp_device *dev = target->srp_host->srp_dev;
1465 struct ib_device *ibdev = dev->dev;
1466 struct scatterlist *sg;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001467 int i;
1468 bool use_mr;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001469
1470 state->desc = req->indirect_desc;
1471 state->pages = req->map_page;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001472 if (dev->use_fast_reg) {
1473 state->next_fr = req->fr_list;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001474 use_mr = !!ch->fr_pool;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001475 } else {
1476 state->next_fmr = req->fmr_list;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001477 use_mr = !!ch->fmr_pool;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001478 }
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001479
1480 for_each_sg(scat, sg, count, i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001481 if (srp_map_sg_entry(state, ch, sg, i, use_mr)) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001482 /*
1483 * Memory registration failed, so backtrack to the
1484 * first unmapped entry and continue on without using
1485 * memory registration.
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001486 */
1487 dma_addr_t dma_addr;
1488 unsigned int dma_len;
1489
1490backtrack:
1491 sg = state->unmapped_sg;
1492 i = state->unmapped_index;
1493
1494 dma_addr = ib_sg_dma_address(ibdev, sg);
1495 dma_len = ib_sg_dma_len(ibdev, sg);
1496 dma_len -= (state->unmapped_addr - dma_addr);
1497 dma_addr = state->unmapped_addr;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001498 use_mr = false;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001499 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1500 }
1501 }
1502
Bart Van Assche509c07b2014-10-30 14:48:30 +01001503 if (use_mr && srp_finish_mapping(state, ch))
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001504 goto backtrack;
1505
Bart Van Assche52ede082014-05-20 15:07:45 +02001506 req->nmdesc = state->nmdesc;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001507
1508 return 0;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001509}
1510
Bart Van Assche509c07b2014-10-30 14:48:30 +01001511static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
Roland Dreieraef9ec32005-11-02 14:07:13 -08001512 struct srp_request *req)
1513{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001514 struct srp_target_port *target = ch->target;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001515 struct scatterlist *scat;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001516 struct srp_cmd *cmd = req->cmd->buf;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001517 int len, nents, count;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001518 struct srp_device *dev;
1519 struct ib_device *ibdev;
David Dillow8f26c9f2011-01-14 19:45:50 -05001520 struct srp_map_state state;
1521 struct srp_indirect_buf *indirect_hdr;
David Dillow8f26c9f2011-01-14 19:45:50 -05001522 u32 table_len;
1523 u8 fmt;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001524
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001525 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001526 return sizeof (struct srp_cmd);
1527
1528 if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1529 scmnd->sc_data_direction != DMA_TO_DEVICE) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001530 shost_printk(KERN_WARNING, target->scsi_host,
1531 PFX "Unhandled data direction %d\n",
1532 scmnd->sc_data_direction);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001533 return -EINVAL;
1534 }
1535
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001536 nents = scsi_sg_count(scmnd);
1537 scat = scsi_sglist(scmnd);
Roland Dreiercf368712006-03-24 15:47:26 -08001538
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01001539 dev = target->srp_host->srp_dev;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001540 ibdev = dev->dev;
1541
1542 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
David Dillow8f26c9f2011-01-14 19:45:50 -05001543 if (unlikely(count == 0))
1544 return -EIO;
Roland Dreierf5358a12006-06-17 20:37:29 -07001545
1546 fmt = SRP_DATA_DESC_DIRECT;
1547 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
Roland Dreiercf368712006-03-24 15:47:26 -08001548
Bart Van Asscheb1b88542014-05-20 15:06:41 +02001549 if (count == 1 && !register_always) {
Roland Dreierf5358a12006-06-17 20:37:29 -07001550 /*
1551 * The midlayer only generated a single gather/scatter
1552 * entry, or DMA mapping coalesced everything to a
1553 * single entry. So a direct descriptor along with
1554 * the DMA MR suffices.
1555 */
Roland Dreieraef9ec32005-11-02 14:07:13 -08001556 struct srp_direct_buf *buf = (void *) cmd->add_data;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001557
Ralph Campbell85507bc2006-12-12 14:30:55 -08001558 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
David Dillow9af76272010-11-26 15:34:46 -05001559 buf->key = cpu_to_be32(target->rkey);
Ralph Campbell85507bc2006-12-12 14:30:55 -08001560 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
Roland Dreiercf368712006-03-24 15:47:26 -08001561
Bart Van Assche52ede082014-05-20 15:07:45 +02001562 req->nmdesc = 0;
David Dillow8f26c9f2011-01-14 19:45:50 -05001563 goto map_complete;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001564 }
1565
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001566 /*
1567 * We have more than one scatter/gather entry, so build our indirect
1568 * descriptor table, trying to merge as many entries as we can.
David Dillow8f26c9f2011-01-14 19:45:50 -05001569 */
1570 indirect_hdr = (void *) cmd->add_data;
1571
David Dillowc07d4242011-01-16 13:57:10 -05001572 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1573 target->indirect_size, DMA_TO_DEVICE);
1574
David Dillow8f26c9f2011-01-14 19:45:50 -05001575 memset(&state, 0, sizeof(state));
Bart Van Assche509c07b2014-10-30 14:48:30 +01001576 srp_map_sg(&state, ch, req, scat, count);
David Dillow8f26c9f2011-01-14 19:45:50 -05001577
David Dillowc07d4242011-01-16 13:57:10 -05001578 /* We've mapped the request, now pull as much of the indirect
1579 * descriptor table as we can into the command buffer. If this
1580 * target is not using an external indirect table, we are
1581 * guaranteed to fit into the command, as the SCSI layer won't
1582 * give us more S/G entries than we allow.
David Dillow8f26c9f2011-01-14 19:45:50 -05001583 */
David Dillow8f26c9f2011-01-14 19:45:50 -05001584 if (state.ndesc == 1) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001585 /*
1586 * Memory registration collapsed the sg-list into one entry,
David Dillow8f26c9f2011-01-14 19:45:50 -05001587 * so use a direct descriptor.
1588 */
1589 struct srp_direct_buf *buf = (void *) cmd->add_data;
1590
David Dillowc07d4242011-01-16 13:57:10 -05001591 *buf = req->indirect_desc[0];
David Dillow8f26c9f2011-01-14 19:45:50 -05001592 goto map_complete;
1593 }
1594
David Dillowc07d4242011-01-16 13:57:10 -05001595 if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1596 !target->allow_ext_sg)) {
1597 shost_printk(KERN_ERR, target->scsi_host,
1598 "Could not fit S/G list into SRP_CMD\n");
1599 return -EIO;
1600 }
1601
1602 count = min(state.ndesc, target->cmd_sg_cnt);
David Dillow8f26c9f2011-01-14 19:45:50 -05001603 table_len = state.ndesc * sizeof (struct srp_direct_buf);
1604
1605 fmt = SRP_DATA_DESC_INDIRECT;
1606 len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
David Dillowc07d4242011-01-16 13:57:10 -05001607 len += count * sizeof (struct srp_direct_buf);
David Dillow8f26c9f2011-01-14 19:45:50 -05001608
David Dillowc07d4242011-01-16 13:57:10 -05001609 memcpy(indirect_hdr->desc_list, req->indirect_desc,
1610 count * sizeof (struct srp_direct_buf));
David Dillow8f26c9f2011-01-14 19:45:50 -05001611
David Dillowc07d4242011-01-16 13:57:10 -05001612 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
David Dillow8f26c9f2011-01-14 19:45:50 -05001613 indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
1614 indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1615 indirect_hdr->len = cpu_to_be32(state.total_len);
1616
1617 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
David Dillowc07d4242011-01-16 13:57:10 -05001618 cmd->data_out_desc_cnt = count;
David Dillow8f26c9f2011-01-14 19:45:50 -05001619 else
David Dillowc07d4242011-01-16 13:57:10 -05001620 cmd->data_in_desc_cnt = count;
1621
1622 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1623 DMA_TO_DEVICE);
David Dillow8f26c9f2011-01-14 19:45:50 -05001624
1625map_complete:
Roland Dreieraef9ec32005-11-02 14:07:13 -08001626 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1627 cmd->buf_fmt = fmt << 4;
1628 else
1629 cmd->buf_fmt = fmt;
1630
Roland Dreieraef9ec32005-11-02 14:07:13 -08001631 return len;
1632}
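/*
 * Example of the length returned above: a command that maps to an
 * indirect table with two descriptors occupies sizeof(struct srp_cmd) +
 * sizeof(struct srp_indirect_buf) + 2 * sizeof(struct srp_direct_buf)
 * bytes in the request IU.
 */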
1633
David Dillow05a1d752010-10-08 14:48:14 -04001634/*
Bart Van Assche76c75b22010-11-26 14:37:47 -05001635 * Return an IU and possibly a credit to the free pool
1636 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01001637static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
Bart Van Assche76c75b22010-11-26 14:37:47 -05001638 enum srp_iu_type iu_type)
1639{
1640 unsigned long flags;
1641
Bart Van Assche509c07b2014-10-30 14:48:30 +01001642 spin_lock_irqsave(&ch->lock, flags);
1643 list_add(&iu->list, &ch->free_tx);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001644 if (iu_type != SRP_IU_RSP)
Bart Van Assche509c07b2014-10-30 14:48:30 +01001645 ++ch->req_lim;
1646 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001647}
1648
1649/*
Bart Van Assche509c07b2014-10-30 14:48:30 +01001650 * Must be called with ch->lock held to protect req_lim and free_tx.
Bart Van Asschee9684672010-11-26 15:08:38 -05001651 * If IU is not sent, it must be returned using srp_put_tx_iu().
David Dillow05a1d752010-10-08 14:48:14 -04001652 *
1653 * Note:
1654 * An upper limit for the number of allocated information units for each
1655 * request type is:
1656 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1657 * more than Scsi_Host.can_queue requests.
1658 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1659 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1660 * one unanswered SRP request to an initiator.
1661 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01001662static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
David Dillow05a1d752010-10-08 14:48:14 -04001663 enum srp_iu_type iu_type)
1664{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001665 struct srp_target_port *target = ch->target;
David Dillow05a1d752010-10-08 14:48:14 -04001666 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1667 struct srp_iu *iu;
1668
Bart Van Assche509c07b2014-10-30 14:48:30 +01001669 srp_send_completion(ch->send_cq, ch);
David Dillow05a1d752010-10-08 14:48:14 -04001670
Bart Van Assche509c07b2014-10-30 14:48:30 +01001671 if (list_empty(&ch->free_tx))
David Dillow05a1d752010-10-08 14:48:14 -04001672 return NULL;
1673
1674 /* Initiator responses to target requests do not consume credits */
Bart Van Assche76c75b22010-11-26 14:37:47 -05001675 if (iu_type != SRP_IU_RSP) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001676 if (ch->req_lim <= rsv) {
Bart Van Assche76c75b22010-11-26 14:37:47 -05001677 ++target->zero_req_lim;
1678 return NULL;
1679 }
1680
Bart Van Assche509c07b2014-10-30 14:48:30 +01001681 --ch->req_lim;
David Dillow05a1d752010-10-08 14:48:14 -04001682 }
1683
Bart Van Assche509c07b2014-10-30 14:48:30 +01001684 iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001685 list_del(&iu->list);
David Dillow05a1d752010-10-08 14:48:14 -04001686 return iu;
1687}
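/*
 * Worked example of the credit accounting above, assuming
 * SRP_TSK_MGMT_SQ_SIZE == 1: with ch->req_lim == 2, one SRP_IU_CMD is
 * handed out (leaving req_lim == 1), after which further SRP_IU_CMD
 * requests get NULL while an SRP_IU_TSK_MGMT request (rsv == 0) can
 * still consume the last credit.
 */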
1688
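/*
 * Post @iu as a single-SGE signaled send on the channel QP. The IU
 * pointer is stashed in wr_id so that srp_send_completion() can return
 * the IU to ch->free_tx once the send completes.
 */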
Bart Van Assche509c07b2014-10-30 14:48:30 +01001689static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
David Dillow05a1d752010-10-08 14:48:14 -04001690{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001691 struct srp_target_port *target = ch->target;
David Dillow05a1d752010-10-08 14:48:14 -04001692 struct ib_sge list;
1693 struct ib_send_wr wr, *bad_wr;
David Dillow05a1d752010-10-08 14:48:14 -04001694
1695 list.addr = iu->dma;
1696 list.length = len;
David Dillow9af76272010-11-26 15:34:46 -05001697 list.lkey = target->lkey;
David Dillow05a1d752010-10-08 14:48:14 -04001698
1699 wr.next = NULL;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001700 wr.wr_id = (uintptr_t) iu;
David Dillow05a1d752010-10-08 14:48:14 -04001701 wr.sg_list = &list;
1702 wr.num_sge = 1;
1703 wr.opcode = IB_WR_SEND;
1704 wr.send_flags = IB_SEND_SIGNALED;
1705
Bart Van Assche509c07b2014-10-30 14:48:30 +01001706 return ib_post_send(ch->qp, &wr, &bad_wr);
David Dillow05a1d752010-10-08 14:48:14 -04001707}
1708
Bart Van Assche509c07b2014-10-30 14:48:30 +01001709static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
Bart Van Asschec996bb42010-07-30 10:59:05 +00001710{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001711 struct srp_target_port *target = ch->target;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001712 struct ib_recv_wr wr, *bad_wr;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001713 struct ib_sge list;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001714
1715 list.addr = iu->dma;
1716 list.length = iu->size;
David Dillow9af76272010-11-26 15:34:46 -05001717 list.lkey = target->lkey;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001718
1719 wr.next = NULL;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001720 wr.wr_id = (uintptr_t) iu;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001721 wr.sg_list = &list;
1722 wr.num_sge = 1;
1723
Bart Van Assche509c07b2014-10-30 14:48:30 +01001724 return ib_post_recv(ch->qp, &wr, &bad_wr);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001725}
1726
Bart Van Assche509c07b2014-10-30 14:48:30 +01001727static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001728{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001729 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001730 struct srp_request *req;
1731 struct scsi_cmnd *scmnd;
1732 unsigned long flags;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001733
Roland Dreieraef9ec32005-11-02 14:07:13 -08001734 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001735 spin_lock_irqsave(&ch->lock, flags);
1736 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1737 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche94a91742010-11-26 14:50:09 -05001738
Bart Van Assche509c07b2014-10-30 14:48:30 +01001739 ch->tsk_mgmt_status = -1;
David Dillowf8b6e312010-11-26 13:02:21 -05001740 if (be32_to_cpu(rsp->resp_data_len) >= 4)
Bart Van Assche509c07b2014-10-30 14:48:30 +01001741 ch->tsk_mgmt_status = rsp->data[3];
1742 complete(&ch->tsk_mgmt_done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001743 } else {
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02001744 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1745 if (scmnd) {
1746 req = (void *)scmnd->host_scribble;
1747 scmnd = srp_claim_req(ch, req, NULL, scmnd);
1748 }
Bart Van Assche22032992012-08-14 13:18:53 +00001749 if (!scmnd) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001750 shost_printk(KERN_ERR, target->scsi_host,
Bart Van Assched92c0da2014-10-06 17:14:36 +02001751 "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1752 rsp->tag, ch - target->ch, ch->qp->qp_num);
Bart Van Assche22032992012-08-14 13:18:53 +00001753
Bart Van Assche509c07b2014-10-30 14:48:30 +01001754 spin_lock_irqsave(&ch->lock, flags);
1755 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1756 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche22032992012-08-14 13:18:53 +00001757
1758 return;
1759 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001760 scmnd->result = rsp->status;
1761
1762 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1763 memcpy(scmnd->sense_buffer, rsp->data +
1764 be32_to_cpu(rsp->resp_data_len),
1765 min_t(int, be32_to_cpu(rsp->sense_data_len),
1766 SCSI_SENSE_BUFFERSIZE));
1767 }
1768
Bart Van Asschee7145312014-07-09 15:57:51 +02001769 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001770 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
Bart Van Asschee7145312014-07-09 15:57:51 +02001771 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1772 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1773 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1774 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1775 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1776 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
Roland Dreieraef9ec32005-11-02 14:07:13 -08001777
Bart Van Assche509c07b2014-10-30 14:48:30 +01001778 srp_free_req(ch, req, scmnd,
Bart Van Assche22032992012-08-14 13:18:53 +00001779 be32_to_cpu(rsp->req_lim_delta));
1780
David Dillowf8b6e312010-11-26 13:02:21 -05001781 scmnd->host_scribble = NULL;
1782 scmnd->scsi_done(scmnd);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001783 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001784}
1785
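/*
 * Send a response (SRP_CRED_RSP or SRP_AER_RSP) back to the target. The
 * req_lim delta granted by the target is applied before an IU is
 * claimed, and SRP_IU_RSP allocations do not consume a credit.
 */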
Bart Van Assche509c07b2014-10-30 14:48:30 +01001786static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
David Dillowbb125882010-10-08 14:40:47 -04001787 void *rsp, int len)
1788{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001789 struct srp_target_port *target = ch->target;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001790 struct ib_device *dev = target->srp_host->srp_dev->dev;
David Dillowbb125882010-10-08 14:40:47 -04001791 unsigned long flags;
1792 struct srp_iu *iu;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001793 int err;
David Dillowbb125882010-10-08 14:40:47 -04001794
Bart Van Assche509c07b2014-10-30 14:48:30 +01001795 spin_lock_irqsave(&ch->lock, flags);
1796 ch->req_lim += req_delta;
1797 iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1798 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001799
David Dillowbb125882010-10-08 14:40:47 -04001800 if (!iu) {
1801 shost_printk(KERN_ERR, target->scsi_host, PFX
1802 "no IU available to send response\n");
Bart Van Assche76c75b22010-11-26 14:37:47 -05001803 return 1;
David Dillowbb125882010-10-08 14:40:47 -04001804 }
1805
1806 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1807 memcpy(iu->buf, rsp, len);
1808 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1809
Bart Van Assche509c07b2014-10-30 14:48:30 +01001810 err = srp_post_send(ch, iu, len);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001811 if (err) {
David Dillowbb125882010-10-08 14:40:47 -04001812 shost_printk(KERN_ERR, target->scsi_host, PFX
1813 "unable to post response: %d\n", err);
Bart Van Assche509c07b2014-10-30 14:48:30 +01001814 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001815 }
David Dillowbb125882010-10-08 14:40:47 -04001816
David Dillowbb125882010-10-08 14:40:47 -04001817 return err;
1818}
1819
Bart Van Assche509c07b2014-10-30 14:48:30 +01001820static void srp_process_cred_req(struct srp_rdma_ch *ch,
David Dillowbb125882010-10-08 14:40:47 -04001821 struct srp_cred_req *req)
1822{
1823 struct srp_cred_rsp rsp = {
1824 .opcode = SRP_CRED_RSP,
1825 .tag = req->tag,
1826 };
1827 s32 delta = be32_to_cpu(req->req_lim_delta);
1828
Bart Van Assche509c07b2014-10-30 14:48:30 +01001829 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1830 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
David Dillowbb125882010-10-08 14:40:47 -04001831 "problems processing SRP_CRED_REQ\n");
1832}
1833
Bart Van Assche509c07b2014-10-30 14:48:30 +01001834static void srp_process_aer_req(struct srp_rdma_ch *ch,
David Dillowbb125882010-10-08 14:40:47 -04001835 struct srp_aer_req *req)
1836{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001837 struct srp_target_port *target = ch->target;
David Dillowbb125882010-10-08 14:40:47 -04001838 struct srp_aer_rsp rsp = {
1839 .opcode = SRP_AER_RSP,
1840 .tag = req->tag,
1841 };
1842 s32 delta = be32_to_cpu(req->req_lim_delta);
1843
1844 shost_printk(KERN_ERR, target->scsi_host, PFX
1845 "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));
1846
Bart Van Assche509c07b2014-10-30 14:48:30 +01001847 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
David Dillowbb125882010-10-08 14:40:47 -04001848 shost_printk(KERN_ERR, target->scsi_host, PFX
1849 "problems processing SRP_AER_REQ\n");
1850}
1851
Bart Van Assche509c07b2014-10-30 14:48:30 +01001852static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001853{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001854 struct srp_target_port *target = ch->target;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001855 struct ib_device *dev = target->srp_host->srp_dev->dev;
Roland Dreier737b94e2011-05-23 11:30:04 -07001856 struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001857 int res;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001858 u8 opcode;
1859
Bart Van Assche509c07b2014-10-30 14:48:30 +01001860 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08001861 DMA_FROM_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001862
1863 opcode = *(u8 *) iu->buf;
1864
1865 if (0) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001866 shost_printk(KERN_ERR, target->scsi_host,
1867 PFX "recv completion, opcode 0x%02x\n", opcode);
Bart Van Assche7a700812010-07-29 15:56:37 +00001868 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
1869 iu->buf, wc->byte_len, true);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001870 }
1871
1872 switch (opcode) {
1873 case SRP_RSP:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001874 srp_process_rsp(ch, iu->buf);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001875 break;
1876
David Dillowbb125882010-10-08 14:40:47 -04001877 case SRP_CRED_REQ:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001878 srp_process_cred_req(ch, iu->buf);
David Dillowbb125882010-10-08 14:40:47 -04001879 break;
1880
1881 case SRP_AER_REQ:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001882 srp_process_aer_req(ch, iu->buf);
David Dillowbb125882010-10-08 14:40:47 -04001883 break;
1884
Roland Dreieraef9ec32005-11-02 14:07:13 -08001885 case SRP_T_LOGOUT:
1886 /* XXX Handle target logout */
David Dillow7aa54bd2008-01-07 18:23:41 -05001887 shost_printk(KERN_WARNING, target->scsi_host,
1888 PFX "Got target logout request\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001889 break;
1890
1891 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05001892 shost_printk(KERN_WARNING, target->scsi_host,
1893 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001894 break;
1895 }
1896
Bart Van Assche509c07b2014-10-30 14:48:30 +01001897 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08001898 DMA_FROM_DEVICE);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001899
Bart Van Assche509c07b2014-10-30 14:48:30 +01001900 res = srp_post_recv(ch, iu);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001901 if (res != 0)
1902 shost_printk(KERN_ERR, target->scsi_host,
1903 PFX "Recv failed with error code %d\n", res);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001904}
1905
Bart Van Asschec1120f82013-10-26 14:35:08 +02001906/**
1907 * srp_tl_err_work() - handle a transport layer error
Bart Van Asscheaf246632014-05-20 15:04:21 +02001908 * @work: Work structure embedded in an SRP target port.
Bart Van Asschec1120f82013-10-26 14:35:08 +02001909 *
1910 * Note: This function may get invoked before the rport has been created,
1911 * hence the target->rport test.
1912 */
1913static void srp_tl_err_work(struct work_struct *work)
1914{
1915 struct srp_target_port *target;
1916
1917 target = container_of(work, struct srp_target_port, tl_err_work);
1918 if (target->rport)
1919 srp_start_tl_fail_timers(target->rport);
1920}
1921
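/*
 * Handle a failed work completion. LOCAL_INV and FAST_REG work requests
 * are recognized by dedicated wr_id bits, SRP_LAST_WR_ID completes
 * ch->done for a waiter draining the QP, and everything else is a plain
 * send or receive IU. The transport-layer failure timers are started
 * from a workqueue since this runs in completion context.
 */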
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001922static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
Bart Van Assche7dad6b22014-10-21 18:00:35 +02001923 bool send_err, struct srp_rdma_ch *ch)
Bart Van Assche948d1e82011-09-03 09:25:42 +02001924{
Bart Van Assche7dad6b22014-10-21 18:00:35 +02001925 struct srp_target_port *target = ch->target;
1926
1927 if (wr_id == SRP_LAST_WR_ID) {
1928 complete(&ch->done);
1929 return;
1930 }
1931
Bart Van Assche294c8752011-12-25 12:18:12 +00001932 if (target->connected && !target->qp_in_error) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001933 if (wr_id & LOCAL_INV_WR_ID_MASK) {
1934 shost_printk(KERN_ERR, target->scsi_host, PFX
1935 "LOCAL_INV failed with status %d\n",
1936 wc_status);
1937 } else if (wr_id & FAST_REG_WR_ID_MASK) {
1938 shost_printk(KERN_ERR, target->scsi_host, PFX
1939 "FAST_REG_MR failed status %d\n",
1940 wc_status);
1941 } else {
1942 shost_printk(KERN_ERR, target->scsi_host,
1943 PFX "failed %s status %d for iu %p\n",
1944 send_err ? "send" : "receive",
1945 wc_status, (void *)(uintptr_t)wr_id);
1946 }
Bart Van Asschec1120f82013-10-26 14:35:08 +02001947 queue_work(system_long_wq, &target->tl_err_work);
Bart Van Assche4f0af692012-11-26 11:16:40 +01001948 }
Bart Van Assche948d1e82011-09-03 09:25:42 +02001949 target->qp_in_error = true;
1950}
1951
Bart Van Assche509c07b2014-10-30 14:48:30 +01001952static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001953{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001954 struct srp_rdma_ch *ch = ch_ptr;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001955 struct ib_wc wc;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001956
1957 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
1958 while (ib_poll_cq(cq, 1, &wc) > 0) {
Bart Van Assche948d1e82011-09-03 09:25:42 +02001959 if (likely(wc.status == IB_WC_SUCCESS)) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001960 srp_handle_recv(ch, &wc);
Bart Van Assche948d1e82011-09-03 09:25:42 +02001961 } else {
Bart Van Assche7dad6b22014-10-21 18:00:35 +02001962 srp_handle_qp_err(wc.wr_id, wc.status, false, ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001963 }
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001964 }
1965}
1966
Bart Van Assche509c07b2014-10-30 14:48:30 +01001967static void srp_send_completion(struct ib_cq *cq, void *ch_ptr)
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001968{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001969 struct srp_rdma_ch *ch = ch_ptr;
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001970 struct ib_wc wc;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001971 struct srp_iu *iu;
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001972
1973 while (ib_poll_cq(cq, 1, &wc) > 0) {
Bart Van Assche948d1e82011-09-03 09:25:42 +02001974 if (likely(wc.status == IB_WC_SUCCESS)) {
1975 iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001976 list_add(&iu->list, &ch->free_tx);
Bart Van Assche948d1e82011-09-03 09:25:42 +02001977 } else {
Bart Van Assche7dad6b22014-10-21 18:00:35 +02001978 srp_handle_qp_err(wc.wr_id, wc.status, true, ch);
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001979 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001980 }
1981}
1982
Bart Van Assche76c75b22010-11-26 14:37:47 -05001983static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001984{
Bart Van Assche76c75b22010-11-26 14:37:47 -05001985 struct srp_target_port *target = host_to_target(shost);
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001986 struct srp_rport *rport = target->rport;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001987 struct srp_rdma_ch *ch;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001988 struct srp_request *req;
1989 struct srp_iu *iu;
1990 struct srp_cmd *cmd;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001991 struct ib_device *dev;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001992 unsigned long flags;
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02001993 u32 tag;
1994 u16 idx;
Bart Van Assched1b42892014-05-20 15:07:20 +02001995 int len, ret;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001996 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
1997
1998 /*
1999 * The SCSI EH thread is the only context from which srp_queuecommand()
2000 * can get invoked for blocked devices (SDEV_BLOCK /
2001 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
2002 * locking the rport mutex if invoked from inside the SCSI EH.
2003 */
2004 if (in_scsi_eh)
2005 mutex_lock(&rport->mutex);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002006
Bart Van Assched1b42892014-05-20 15:07:20 +02002007 scmnd->result = srp_chkready(target->rport);
2008 if (unlikely(scmnd->result))
2009 goto err;
Bart Van Assche2ce19e72013-02-21 17:20:00 +00002010
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002011 WARN_ON_ONCE(scmnd->request->tag < 0);
2012 tag = blk_mq_unique_tag(scmnd->request);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002013 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002014 idx = blk_mq_unique_tag_to_tag(tag);
2015 WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
2016 dev_name(&shost->shost_gendev), tag, idx,
2017 target->req_ring_size);
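	/*
	 * Example: a unique tag of 0x10003 selects hardware queue 1 (upper
	 * 16 bits), i.e. target->ch[1], and request slot 3 (lower 16 bits)
	 * within that channel's req_ring.
	 */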
Bart Van Assche509c07b2014-10-30 14:48:30 +01002018
2019 spin_lock_irqsave(&ch->lock, flags);
2020 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002021 spin_unlock_irqrestore(&ch->lock, flags);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002022
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002023 if (!iu)
2024 goto err;
2025
2026 req = &ch->req_ring[idx];
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002027 dev = target->srp_host->srp_dev->dev;
David Dillow49248642011-01-14 18:23:24 -05002028 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08002029 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002030
David Dillowf8b6e312010-11-26 13:02:21 -05002031 scmnd->host_scribble = (void *) req;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002032
2033 cmd = iu->buf;
2034 memset(cmd, 0, sizeof *cmd);
2035
2036 cmd->opcode = SRP_CMD;
2037 cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002038 cmd->tag = tag;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002039 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2040
Roland Dreieraef9ec32005-11-02 14:07:13 -08002041 req->scmnd = scmnd;
2042 req->cmd = iu;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002043
Bart Van Assche509c07b2014-10-30 14:48:30 +01002044 len = srp_map_data(scmnd, ch, req);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002045 if (len < 0) {
David Dillow7aa54bd2008-01-07 18:23:41 -05002046 shost_printk(KERN_ERR, target->scsi_host,
Bart Van Assched1b42892014-05-20 15:07:20 +02002047 PFX "Failed to map data (%d)\n", len);
2048 /*
2049 * If we ran out of memory descriptors (-ENOMEM) because an
2050 * application is queuing many requests with more than
Bart Van Assche52ede082014-05-20 15:07:45 +02002051 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
Bart Van Assched1b42892014-05-20 15:07:20 +02002052 * to reduce queue depth temporarily.
2053 */
2054 scmnd->result = len == -ENOMEM ?
2055 DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
Bart Van Assche76c75b22010-11-26 14:37:47 -05002056 goto err_iu;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002057 }
2058
David Dillow49248642011-01-14 18:23:24 -05002059 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08002060 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002061
Bart Van Assche509c07b2014-10-30 14:48:30 +01002062 if (srp_post_send(ch, iu, len)) {
David Dillow7aa54bd2008-01-07 18:23:41 -05002063 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002064 goto err_unmap;
2065 }
2066
Bart Van Assched1b42892014-05-20 15:07:20 +02002067 ret = 0;
2068
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002069unlock_rport:
2070 if (in_scsi_eh)
2071 mutex_unlock(&rport->mutex);
2072
Bart Van Assched1b42892014-05-20 15:07:20 +02002073 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002074
2075err_unmap:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002076 srp_unmap_data(scmnd, ch, req);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002077
Bart Van Assche76c75b22010-11-26 14:37:47 -05002078err_iu:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002079 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
Bart Van Assche76c75b22010-11-26 14:37:47 -05002080
Bart Van Assche024ca902014-05-20 15:03:49 +02002081 /*
 2082 * Prevent the loops that iterate over the request ring from
 2083 * encountering a dangling SCSI command pointer.
2084 */
2085 req->scmnd = NULL;
2086
Bart Van Assched1b42892014-05-20 15:07:20 +02002087err:
2088 if (scmnd->result) {
2089 scmnd->scsi_done(scmnd);
2090 ret = 0;
2091 } else {
2092 ret = SCSI_MLQUEUE_HOST_BUSY;
2093 }
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002094
Bart Van Assched1b42892014-05-20 15:07:20 +02002095 goto unlock_rport;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002096}
2097
Bart Van Assche4d73f952013-10-26 14:40:37 +02002098/*
2099 * Note: the resources allocated in this function are freed in
Bart Van Assche509c07b2014-10-30 14:48:30 +01002100 * srp_free_ch_ib().
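 *
 * Both rings hold target->queue_size IUs: rx IUs sized for the largest
 * target-to-initiator IU (ch->max_ti_iu_len, learned from the login
 * response) and tx IUs sized for target->max_iu_len.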
Bart Van Assche4d73f952013-10-26 14:40:37 +02002101 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01002102static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002103{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002104 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002105 int i;
2106
Bart Van Assche509c07b2014-10-30 14:48:30 +01002107 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2108 GFP_KERNEL);
2109 if (!ch->rx_ring)
Bart Van Assche4d73f952013-10-26 14:40:37 +02002110 goto err_no_ring;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002111 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2112 GFP_KERNEL);
2113 if (!ch->tx_ring)
Bart Van Assche4d73f952013-10-26 14:40:37 +02002114 goto err_no_ring;
2115
2116 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002117 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2118 ch->max_ti_iu_len,
2119 GFP_KERNEL, DMA_FROM_DEVICE);
2120 if (!ch->rx_ring[i])
Roland Dreieraef9ec32005-11-02 14:07:13 -08002121 goto err;
2122 }
2123
Bart Van Assche4d73f952013-10-26 14:40:37 +02002124 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002125 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2126 target->max_iu_len,
2127 GFP_KERNEL, DMA_TO_DEVICE);
2128 if (!ch->tx_ring[i])
Roland Dreieraef9ec32005-11-02 14:07:13 -08002129 goto err;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05002130
Bart Van Assche509c07b2014-10-30 14:48:30 +01002131 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002132 }
2133
2134 return 0;
2135
2136err:
Bart Van Assche4d73f952013-10-26 14:40:37 +02002137 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002138 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2139 srp_free_iu(target->srp_host, ch->tx_ring[i]);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002140 }
2141
2143err_no_ring:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002144 kfree(ch->tx_ring);
2145 ch->tx_ring = NULL;
2146 kfree(ch->rx_ring);
2147 ch->rx_ring = NULL;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002148
2149 return -ENOMEM;
2150}
2151
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002152static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2153{
2154 uint64_t T_tr_ns, max_compl_time_ms;
2155 uint32_t rq_tmo_jiffies;
2156
2157 /*
2158 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2159 * table 91), both the QP timeout and the retry count have to be set
2160 * for RC QPs during the RTR to RTS transition.
2161 */
2162 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2163 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2164
2165 /*
2166 * Set target->rq_tmo_jiffies to one second more than the largest time
2167 * it can take before an error completion is generated. See also
2168 * C9-140..142 in the IBTA spec for more information about how to
2169 * convert the QP Local ACK Timeout value to nanoseconds.
2170 */
2171 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2172 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2173 do_div(max_compl_time_ms, NSEC_PER_MSEC);
2174 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2175
2176 return rq_tmo_jiffies;
2177}
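/*
 * Worked example with illustrative values: for qp_attr->timeout == 19
 * and qp_attr->retry_cnt == 7, T_tr_ns = 4096 * 2^19 ns ~= 2.15 s and
 * max_compl_time_ms = 7 * 4 * T_tr_ns / NSEC_PER_MSEC ~= 60129 ms, so
 * the function returns msecs_to_jiffies(61129), i.e. about one minute.
 */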
2178
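/*
 * Process a login response: adopt the target's maximum IU length and
 * initial credit limit, allocate the IU rings on the first login,
 * transition the QP to RTR, post all receive buffers, move the QP to
 * RTS and finally send the RTU.
 */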
David Dillow961e0be2011-01-14 17:32:07 -05002179static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2180 struct srp_login_rsp *lrsp,
Bart Van Assche509c07b2014-10-30 14:48:30 +01002181 struct srp_rdma_ch *ch)
David Dillow961e0be2011-01-14 17:32:07 -05002182{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002183 struct srp_target_port *target = ch->target;
David Dillow961e0be2011-01-14 17:32:07 -05002184 struct ib_qp_attr *qp_attr = NULL;
2185 int attr_mask = 0;
2186 int ret;
2187 int i;
2188
2189 if (lrsp->opcode == SRP_LOGIN_RSP) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002190 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2191 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
David Dillow961e0be2011-01-14 17:32:07 -05002192
2193 /*
2194 * Reserve credits for task management so we don't
2195 * bounce requests back to the SCSI mid-layer.
2196 */
2197 target->scsi_host->can_queue
Bart Van Assche509c07b2014-10-30 14:48:30 +01002198 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
David Dillow961e0be2011-01-14 17:32:07 -05002199 target->scsi_host->can_queue);
Bart Van Assche4d73f952013-10-26 14:40:37 +02002200 target->scsi_host->cmd_per_lun
2201 = min_t(int, target->scsi_host->can_queue,
2202 target->scsi_host->cmd_per_lun);
David Dillow961e0be2011-01-14 17:32:07 -05002203 } else {
2204 shost_printk(KERN_WARNING, target->scsi_host,
2205 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2206 ret = -ECONNRESET;
2207 goto error;
2208 }
2209
Bart Van Assche509c07b2014-10-30 14:48:30 +01002210 if (!ch->rx_ring) {
2211 ret = srp_alloc_iu_bufs(ch);
David Dillow961e0be2011-01-14 17:32:07 -05002212 if (ret)
2213 goto error;
2214 }
2215
2216 ret = -ENOMEM;
2217 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
2218 if (!qp_attr)
2219 goto error;
2220
2221 qp_attr->qp_state = IB_QPS_RTR;
2222 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2223 if (ret)
2224 goto error_free;
2225
Bart Van Assche509c07b2014-10-30 14:48:30 +01002226 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
David Dillow961e0be2011-01-14 17:32:07 -05002227 if (ret)
2228 goto error_free;
2229
Bart Van Assche4d73f952013-10-26 14:40:37 +02002230 for (i = 0; i < target->queue_size; i++) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002231 struct srp_iu *iu = ch->rx_ring[i];
2232
2233 ret = srp_post_recv(ch, iu);
David Dillow961e0be2011-01-14 17:32:07 -05002234 if (ret)
2235 goto error_free;
2236 }
2237
2238 qp_attr->qp_state = IB_QPS_RTS;
2239 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2240 if (ret)
2241 goto error_free;
2242
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002243 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2244
Bart Van Assche509c07b2014-10-30 14:48:30 +01002245 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
David Dillow961e0be2011-01-14 17:32:07 -05002246 if (ret)
2247 goto error_free;
2248
2249 ret = ib_send_cm_rtu(cm_id, NULL, 0);
2250
2251error_free:
2252 kfree(qp_attr);
2253
2254error:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002255 ch->status = ret;
David Dillow961e0be2011-01-14 17:32:07 -05002256}
2257
Roland Dreieraef9ec32005-11-02 14:07:13 -08002258static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2259 struct ib_cm_event *event,
Bart Van Assche509c07b2014-10-30 14:48:30 +01002260 struct srp_rdma_ch *ch)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002261{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002262 struct srp_target_port *target = ch->target;
David Dillow7aa54bd2008-01-07 18:23:41 -05002263 struct Scsi_Host *shost = target->scsi_host;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002264 struct ib_class_port_info *cpi;
2265 int opcode;
2266
2267 switch (event->param.rej_rcvd.reason) {
2268 case IB_CM_REJ_PORT_CM_REDIRECT:
2269 cpi = event->param.rej_rcvd.ari;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002270 ch->path.dlid = cpi->redirect_lid;
2271 ch->path.pkey = cpi->redirect_pkey;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002272 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002273 memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002274
Bart Van Assche509c07b2014-10-30 14:48:30 +01002275 ch->status = ch->path.dlid ?
Roland Dreieraef9ec32005-11-02 14:07:13 -08002276 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2277 break;
2278
2279 case IB_CM_REJ_PORT_REDIRECT:
Roland Dreier5d7cbfd2007-08-03 10:45:18 -07002280 if (srp_target_is_topspin(target)) {
Roland Dreieraef9ec32005-11-02 14:07:13 -08002281 /*
2282 * Topspin/Cisco SRP gateways incorrectly send
2283 * reject reason code 25 when they mean 24
2284 * (port redirect).
2285 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01002286 memcpy(ch->path.dgid.raw,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002287 event->param.rej_rcvd.ari, 16);
2288
David Dillow7aa54bd2008-01-07 18:23:41 -05002289 shost_printk(KERN_DEBUG, shost,
2290 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
Bart Van Assche509c07b2014-10-30 14:48:30 +01002291 be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2292 be64_to_cpu(ch->path.dgid.global.interface_id));
Roland Dreieraef9ec32005-11-02 14:07:13 -08002293
Bart Van Assche509c07b2014-10-30 14:48:30 +01002294 ch->status = SRP_PORT_REDIRECT;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002295 } else {
David Dillow7aa54bd2008-01-07 18:23:41 -05002296 shost_printk(KERN_WARNING, shost,
2297 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002298 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002299 }
2300 break;
2301
2302 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
David Dillow7aa54bd2008-01-07 18:23:41 -05002303 shost_printk(KERN_WARNING, shost,
2304 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002305 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002306 break;
2307
2308 case IB_CM_REJ_CONSUMER_DEFINED:
2309 opcode = *(u8 *) event->private_data;
2310 if (opcode == SRP_LOGIN_REJ) {
2311 struct srp_login_rej *rej = event->private_data;
2312 u32 reason = be32_to_cpu(rej->reason);
2313
2314 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
David Dillow7aa54bd2008-01-07 18:23:41 -05002315 shost_printk(KERN_WARNING, shost,
2316 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002317 else
Bart Van Asschee7ffde02014-03-14 13:52:21 +01002318 shost_printk(KERN_WARNING, shost, PFX
2319 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
Bart Van Assche747fe002014-10-30 14:48:05 +01002320 target->sgid.raw,
2321 target->orig_dgid.raw, reason);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002322 } else
David Dillow7aa54bd2008-01-07 18:23:41 -05002323 shost_printk(KERN_WARNING, shost,
2324 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2325 " opcode 0x%02x\n", opcode);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002326 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002327 break;
2328
David Dillow9fe4bcf2008-01-08 17:08:52 -05002329 case IB_CM_REJ_STALE_CONN:
2330 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002331 ch->status = SRP_STALE_CONN;
David Dillow9fe4bcf2008-01-08 17:08:52 -05002332 break;
2333
Roland Dreieraef9ec32005-11-02 14:07:13 -08002334 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05002335 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2336 event->param.rej_rcvd.reason);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002337 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002338 }
2339}
2340
2341static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2342{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002343 struct srp_rdma_ch *ch = cm_id->context;
2344 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002345 int comp = 0;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002346
2347 switch (event->event) {
2348 case IB_CM_REQ_ERROR:
David Dillow7aa54bd2008-01-07 18:23:41 -05002349 shost_printk(KERN_DEBUG, target->scsi_host,
2350 PFX "Sending CM REQ failed\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002351 comp = 1;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002352 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002353 break;
2354
2355 case IB_CM_REP_RECEIVED:
2356 comp = 1;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002357 srp_cm_rep_handler(cm_id, event->private_data, ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002358 break;
2359
2360 case IB_CM_REJ_RECEIVED:
David Dillow7aa54bd2008-01-07 18:23:41 -05002361 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002362 comp = 1;
2363
Bart Van Assche509c07b2014-10-30 14:48:30 +01002364 srp_cm_rej_handler(cm_id, event, ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002365 break;
2366
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07002367 case IB_CM_DREQ_RECEIVED:
David Dillow7aa54bd2008-01-07 18:23:41 -05002368 shost_printk(KERN_WARNING, target->scsi_host,
2369 PFX "DREQ received - connection closed\n");
Bart Van Assche294c8752011-12-25 12:18:12 +00002370 srp_change_conn_state(target, false);
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07002371 if (ib_send_cm_drep(cm_id, NULL, 0))
David Dillow7aa54bd2008-01-07 18:23:41 -05002372 shost_printk(KERN_ERR, target->scsi_host,
2373 PFX "Sending CM DREP failed\n");
Bart Van Asschec1120f82013-10-26 14:35:08 +02002374 queue_work(system_long_wq, &target->tl_err_work);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002375 break;
2376
2377 case IB_CM_TIMEWAIT_EXIT:
David Dillow7aa54bd2008-01-07 18:23:41 -05002378 shost_printk(KERN_ERR, target->scsi_host,
2379 PFX "connection closed\n");
Bart Van Asscheac72d762014-03-14 13:53:40 +01002380 comp = 1;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002381
Bart Van Assche509c07b2014-10-30 14:48:30 +01002382 ch->status = 0;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002383 break;
2384
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07002385 case IB_CM_MRA_RECEIVED:
2386 case IB_CM_DREQ_ERROR:
2387 case IB_CM_DREP_RECEIVED:
2388 break;
2389
Roland Dreieraef9ec32005-11-02 14:07:13 -08002390 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05002391 shost_printk(KERN_WARNING, target->scsi_host,
2392 PFX "Unhandled CM event %d\n", event->event);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002393 break;
2394 }
2395
2396 if (comp)
Bart Van Assche509c07b2014-10-30 14:48:30 +01002397 complete(&ch->done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002398
Roland Dreieraef9ec32005-11-02 14:07:13 -08002399 return 0;
2400}
2401
Jack Wang71444b92013-11-07 11:37:37 +01002402/**
Jack Wang71444b92013-11-07 11:37:37 +01002403 * srp_change_queue_depth - set the device queue depth
2404 * @sdev: scsi device struct
2405 * @qdepth: requested queue depth
Jack Wang71444b92013-11-07 11:37:37 +01002406 *
2407 * Returns queue depth.
2408 */
2409static int
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01002410srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
Jack Wang71444b92013-11-07 11:37:37 +01002411{
Christoph Hellwigc40ecc12014-11-13 14:25:11 +01002412 if (!sdev->tagged_supported)
Christoph Hellwig1e6f2412014-11-13 14:27:41 +01002413 qdepth = 1;
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01002414 return scsi_change_queue_depth(sdev, qdepth);
Jack Wang71444b92013-11-07 11:37:37 +01002415}
2416
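/*
 * Synchronously send a task management function (e.g. abort task or LUN
 * reset) and wait up to SRP_ABORT_TIMEOUT_MS for the response, which
 * srp_process_rsp() signals through ch->tsk_mgmt_done.
 */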
Bart Van Assche509c07b2014-10-30 14:48:30 +01002417static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag,
2418 unsigned int lun, u8 func)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002419{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002420 struct srp_target_port *target = ch->target;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002421 struct srp_rport *rport = target->rport;
David Dillow19081f32010-10-18 08:54:49 -04002422 struct ib_device *dev = target->srp_host->srp_dev->dev;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002423 struct srp_iu *iu;
2424 struct srp_tsk_mgmt *tsk_mgmt;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002425
Bart Van Assche3780d1f2013-02-21 17:18:00 +00002426 if (!target->connected || target->qp_in_error)
2427 return -1;
2428
Bart Van Assche509c07b2014-10-30 14:48:30 +01002429 init_completion(&ch->tsk_mgmt_done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002430
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002431 /*
Bart Van Assche509c07b2014-10-30 14:48:30 +01002432 * Lock the rport mutex to prevent srp_create_ch_ib() from being
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002433 * invoked while a task management function is being sent.
2434 */
2435 mutex_lock(&rport->mutex);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002436 spin_lock_irq(&ch->lock);
2437 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2438 spin_unlock_irq(&ch->lock);
Bart Van Assche76c75b22010-11-26 14:37:47 -05002439
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002440 if (!iu) {
2441 mutex_unlock(&rport->mutex);
2442
Bart Van Assche76c75b22010-11-26 14:37:47 -05002443 return -1;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002444 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002445
David Dillow19081f32010-10-18 08:54:49 -04002446 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2447 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002448 tsk_mgmt = iu->buf;
2449 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2450
2451 tsk_mgmt->opcode = SRP_TSK_MGMT;
David Dillowf8b6e312010-11-26 13:02:21 -05002452 tsk_mgmt->lun = cpu_to_be64((u64) lun << 48);
2453 tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002454 tsk_mgmt->tsk_mgmt_func = func;
David Dillowf8b6e312010-11-26 13:02:21 -05002455 tsk_mgmt->task_tag = req_tag;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002456
David Dillow19081f32010-10-18 08:54:49 -04002457 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2458 DMA_TO_DEVICE);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002459 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2460 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002461 mutex_unlock(&rport->mutex);
2462
Bart Van Assche76c75b22010-11-26 14:37:47 -05002463 return -1;
2464 }
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002465 mutex_unlock(&rport->mutex);
Roland Dreierd945e1d2006-05-09 10:50:28 -07002466
Bart Van Assche509c07b2014-10-30 14:48:30 +01002467 if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002468 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
Roland Dreierd945e1d2006-05-09 10:50:28 -07002469 return -1;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002470
Roland Dreierd945e1d2006-05-09 10:50:28 -07002471 return 0;
Roland Dreierd945e1d2006-05-09 10:50:28 -07002472}
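/*
 * The '(u64) lun << 48' above places the LUN in the first level of the
 * eight-byte SCSI LUN structure: lun = 5, for example, produces the
 * big-endian byte sequence 00 05 00 00 00 00 00 00, i.e. peripheral
 * device addressing of LUN 5, which is valid for LUNs below 256.
 */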
2473
Roland Dreieraef9ec32005-11-02 14:07:13 -08002474static int srp_abort(struct scsi_cmnd *scmnd)
2475{
Roland Dreierd945e1d2006-05-09 10:50:28 -07002476 struct srp_target_port *target = host_to_target(scmnd->device->host);
David Dillowf8b6e312010-11-26 13:02:21 -05002477 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002478 u32 tag;
Bart Van Assched92c0da2014-10-06 17:14:36 +02002479 u16 ch_idx;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002480 struct srp_rdma_ch *ch;
Bart Van Assche086f44f2013-06-12 15:23:04 +02002481 int ret;
Roland Dreierd945e1d2006-05-09 10:50:28 -07002482
David Dillow7aa54bd2008-01-07 18:23:41 -05002483 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002484
Bart Van Assched92c0da2014-10-06 17:14:36 +02002485 if (!req)
Bart Van Assche99b66972013-10-10 13:52:33 +02002486 return SUCCESS;
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002487 tag = blk_mq_unique_tag(scmnd->request);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002488 ch_idx = blk_mq_unique_tag_to_hwq(tag);
2489 if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2490 return SUCCESS;
2491 ch = &target->ch[ch_idx];
2492 if (!srp_claim_req(ch, req, NULL, scmnd))
2493 return SUCCESS;
2494 shost_printk(KERN_ERR, target->scsi_host,
2495 "Sending SRP abort for tag %#x\n", tag);
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002496 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
Bart Van Assche80d5e8a2013-07-10 17:36:35 +02002497 SRP_TSK_ABORT_TASK) == 0)
Bart Van Assche086f44f2013-06-12 15:23:04 +02002498 ret = SUCCESS;
Bart Van Asscheed9b2262013-10-26 14:34:27 +02002499 else if (target->rport->state == SRP_RPORT_LOST)
Bart Van Assche99e1c132013-06-28 14:49:58 +02002500 ret = FAST_IO_FAIL;
Bart Van Assche086f44f2013-06-12 15:23:04 +02002501 else
2502 ret = FAILED;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002503 srp_free_req(ch, req, scmnd, 0);
Bart Van Assche22032992012-08-14 13:18:53 +00002504 scmnd->result = DID_ABORT << 16;
Bart Van Assched8536672012-08-24 10:29:11 +00002505 scmnd->scsi_done(scmnd);
Roland Dreierd945e1d2006-05-09 10:50:28 -07002506
Bart Van Assche086f44f2013-06-12 15:23:04 +02002507 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002508}
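/*
 * blk_mq_unique_tag() encodes the hardware queue number in the upper
 * 16 bits and the per-queue tag in the lower 16 bits, so a unique tag
 * of, say, 0x0002000a identifies request 10 on hardware queue (and
 * hence RDMA channel) 2.  That is how srp_abort() recovers the channel
 * a command was issued on.
 */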
2509
2510static int srp_reset_device(struct scsi_cmnd *scmnd)
2511{
Roland Dreierd945e1d2006-05-09 10:50:28 -07002512 struct srp_target_port *target = host_to_target(scmnd->device->host);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002513 struct srp_rdma_ch *ch;
Bart Van Assche536ae142010-11-26 13:58:27 -05002514 int i, j;
Roland Dreierd945e1d2006-05-09 10:50:28 -07002515
David Dillow7aa54bd2008-01-07 18:23:41 -05002516 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002517
Bart Van Assched92c0da2014-10-06 17:14:36 +02002518 ch = &target->ch[0];
Bart Van Assche509c07b2014-10-30 14:48:30 +01002519 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
David Dillowf8b6e312010-11-26 13:02:21 -05002520 SRP_TSK_LUN_RESET))
Roland Dreierd945e1d2006-05-09 10:50:28 -07002521 return FAILED;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002522 if (ch->tsk_mgmt_status)
Roland Dreierd945e1d2006-05-09 10:50:28 -07002523 return FAILED;
2524
Bart Van Assched92c0da2014-10-06 17:14:36 +02002525 for (i = 0; i < target->ch_count; i++) {
2526 ch = &target->ch[i];
 2527 for (j = 0; j < target->req_ring_size; ++j) {
 2528 struct srp_request *req = &ch->req_ring[j];
Bart Van Assche509c07b2014-10-30 14:48:30 +01002529
Bart Van Assched92c0da2014-10-06 17:14:36 +02002530 srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2531 }
Bart Van Assche536ae142010-11-26 13:58:27 -05002532 }
Roland Dreierd945e1d2006-05-09 10:50:28 -07002533
Roland Dreierd945e1d2006-05-09 10:50:28 -07002534 return SUCCESS;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002535}
2536
2537static int srp_reset_host(struct scsi_cmnd *scmnd)
2538{
2539 struct srp_target_port *target = host_to_target(scmnd->device->host);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002540
David Dillow7aa54bd2008-01-07 18:23:41 -05002541 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002542
Bart Van Asscheed9b2262013-10-26 14:34:27 +02002543 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002544}
2545
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002546static int srp_slave_configure(struct scsi_device *sdev)
2547{
2548 struct Scsi_Host *shost = sdev->host;
2549 struct srp_target_port *target = host_to_target(shost);
2550 struct request_queue *q = sdev->request_queue;
2551 unsigned long timeout;
2552
2553 if (sdev->type == TYPE_DISK) {
2554 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2555 blk_queue_rq_timeout(q, timeout);
2556 }
2557
2558 return 0;
2559}
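/*
 * Raising the block-layer request timeout to at least 30 seconds (or
 * rq_tmo_jiffies, if larger) gives the transport layer a chance to
 * reconnect before queued disk commands time out in the block layer.
 */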
2560
Tony Jonesee959b02008-02-22 00:13:36 +01002561static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2562 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002563{
Tony Jonesee959b02008-02-22 00:13:36 +01002564 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002565
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002566 return sprintf(buf, "0x%016llx\n",
2567 (unsigned long long) be64_to_cpu(target->id_ext));
2568}
2569
Tony Jonesee959b02008-02-22 00:13:36 +01002570static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2571 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002572{
Tony Jonesee959b02008-02-22 00:13:36 +01002573 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002574
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002575 return sprintf(buf, "0x%016llx\n",
2576 (unsigned long long) be64_to_cpu(target->ioc_guid));
2577}
2578
Tony Jonesee959b02008-02-22 00:13:36 +01002579static ssize_t show_service_id(struct device *dev,
2580 struct device_attribute *attr, char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002581{
Tony Jonesee959b02008-02-22 00:13:36 +01002582 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002583
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002584 return sprintf(buf, "0x%016llx\n",
2585 (unsigned long long) be64_to_cpu(target->service_id));
2586}
2587
Tony Jonesee959b02008-02-22 00:13:36 +01002588static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2589 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002590{
Tony Jonesee959b02008-02-22 00:13:36 +01002591 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002592
Bart Van Assche747fe002014-10-30 14:48:05 +01002593 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002594}
2595
Bart Van Assche848b3082013-10-26 14:38:12 +02002596static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2597 char *buf)
2598{
2599 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2600
Bart Van Assche747fe002014-10-30 14:48:05 +01002601 return sprintf(buf, "%pI6\n", target->sgid.raw);
Bart Van Assche848b3082013-10-26 14:38:12 +02002602}
2603
Tony Jonesee959b02008-02-22 00:13:36 +01002604static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2605 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002606{
Tony Jonesee959b02008-02-22 00:13:36 +01002607 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Bart Van Assched92c0da2014-10-06 17:14:36 +02002608 struct srp_rdma_ch *ch = &target->ch[0];
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002609
Bart Van Assche509c07b2014-10-30 14:48:30 +01002610 return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002611}
2612
Tony Jonesee959b02008-02-22 00:13:36 +01002613static ssize_t show_orig_dgid(struct device *dev,
2614 struct device_attribute *attr, char *buf)
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002615{
Tony Jonesee959b02008-02-22 00:13:36 +01002616 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002617
Bart Van Assche747fe002014-10-30 14:48:05 +01002618 return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002619}
2620
Bart Van Assche89de7482010-08-03 14:08:45 +00002621static ssize_t show_req_lim(struct device *dev,
2622 struct device_attribute *attr, char *buf)
2623{
2624 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Bart Van Assched92c0da2014-10-06 17:14:36 +02002625 struct srp_rdma_ch *ch;
2626 int i, req_lim = INT_MAX;
Bart Van Assche89de7482010-08-03 14:08:45 +00002627
Bart Van Assched92c0da2014-10-06 17:14:36 +02002628 for (i = 0; i < target->ch_count; i++) {
2629 ch = &target->ch[i];
2630 req_lim = min(req_lim, ch->req_lim);
2631 }
2632 return sprintf(buf, "%d\n", req_lim);
Bart Van Assche89de7482010-08-03 14:08:45 +00002633}
2634
Tony Jonesee959b02008-02-22 00:13:36 +01002635static ssize_t show_zero_req_lim(struct device *dev,
2636 struct device_attribute *attr, char *buf)
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002637{
Tony Jonesee959b02008-02-22 00:13:36 +01002638 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002639
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002640 return sprintf(buf, "%d\n", target->zero_req_lim);
2641}
2642
Tony Jonesee959b02008-02-22 00:13:36 +01002643static ssize_t show_local_ib_port(struct device *dev,
2644 struct device_attribute *attr, char *buf)
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002645{
Tony Jonesee959b02008-02-22 00:13:36 +01002646 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002647
2648 return sprintf(buf, "%d\n", target->srp_host->port);
2649}
2650
Tony Jonesee959b02008-02-22 00:13:36 +01002651static ssize_t show_local_ib_device(struct device *dev,
2652 struct device_attribute *attr, char *buf)
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002653{
Tony Jonesee959b02008-02-22 00:13:36 +01002654 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002655
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002656 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002657}
2658
Bart Van Assched92c0da2014-10-06 17:14:36 +02002659static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2660 char *buf)
2661{
2662 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2663
2664 return sprintf(buf, "%d\n", target->ch_count);
2665}
2666
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002667static ssize_t show_comp_vector(struct device *dev,
2668 struct device_attribute *attr, char *buf)
2669{
2670 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2671
2672 return sprintf(buf, "%d\n", target->comp_vector);
2673}
2674
Vu Pham7bb312e2013-10-26 14:31:27 +02002675static ssize_t show_tl_retry_count(struct device *dev,
2676 struct device_attribute *attr, char *buf)
2677{
2678 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2679
2680 return sprintf(buf, "%d\n", target->tl_retry_count);
2681}
2682
David Dillow49248642011-01-14 18:23:24 -05002683static ssize_t show_cmd_sg_entries(struct device *dev,
2684 struct device_attribute *attr, char *buf)
2685{
2686 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2687
2688 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2689}
2690
David Dillowc07d4242011-01-16 13:57:10 -05002691static ssize_t show_allow_ext_sg(struct device *dev,
2692 struct device_attribute *attr, char *buf)
2693{
2694 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2695
2696 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2697}
2698
Tony Jonesee959b02008-02-22 00:13:36 +01002699static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
2700static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
2701static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
2702static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
Bart Van Assche848b3082013-10-26 14:38:12 +02002703static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
Tony Jonesee959b02008-02-22 00:13:36 +01002704static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
2705static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
Bart Van Assche89de7482010-08-03 14:08:45 +00002706static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
Tony Jonesee959b02008-02-22 00:13:36 +01002707static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
2708static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
2709static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002710static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002711static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
Vu Pham7bb312e2013-10-26 14:31:27 +02002712static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
David Dillow49248642011-01-14 18:23:24 -05002713static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
David Dillowc07d4242011-01-16 13:57:10 -05002714static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002715
Tony Jonesee959b02008-02-22 00:13:36 +01002716static struct device_attribute *srp_host_attrs[] = {
2717 &dev_attr_id_ext,
2718 &dev_attr_ioc_guid,
2719 &dev_attr_service_id,
2720 &dev_attr_pkey,
Bart Van Assche848b3082013-10-26 14:38:12 +02002721 &dev_attr_sgid,
Tony Jonesee959b02008-02-22 00:13:36 +01002722 &dev_attr_dgid,
2723 &dev_attr_orig_dgid,
Bart Van Assche89de7482010-08-03 14:08:45 +00002724 &dev_attr_req_lim,
Tony Jonesee959b02008-02-22 00:13:36 +01002725 &dev_attr_zero_req_lim,
2726 &dev_attr_local_ib_port,
2727 &dev_attr_local_ib_device,
Bart Van Assched92c0da2014-10-06 17:14:36 +02002728 &dev_attr_ch_count,
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002729 &dev_attr_comp_vector,
Vu Pham7bb312e2013-10-26 14:31:27 +02002730 &dev_attr_tl_retry_count,
David Dillow49248642011-01-14 18:23:24 -05002731 &dev_attr_cmd_sg_entries,
David Dillowc07d4242011-01-16 13:57:10 -05002732 &dev_attr_allow_ext_sg,
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002733 NULL
2734};
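/*
 * The attributes above appear under the SCSI host sysfs directory,
 * e.g. (with a hypothetical host number):
 *
 *   /sys/class/scsi_host/host7/id_ext
 *   /sys/class/scsi_host/host7/ch_count
 *   /sys/class/scsi_host/host7/req_lim
 */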
2735
Roland Dreieraef9ec32005-11-02 14:07:13 -08002736static struct scsi_host_template srp_template = {
2737 .module = THIS_MODULE,
Roland Dreierb7f008f2007-05-06 21:18:11 -07002738 .name = "InfiniBand SRP initiator",
2739 .proc_name = DRV_NAME,
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002740 .slave_configure = srp_slave_configure,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002741 .info = srp_target_info,
2742 .queuecommand = srp_queuecommand,
Jack Wang71444b92013-11-07 11:37:37 +01002743 .change_queue_depth = srp_change_queue_depth,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002744 .eh_abort_handler = srp_abort,
2745 .eh_device_reset_handler = srp_reset_device,
2746 .eh_host_reset_handler = srp_reset_host,
Bart Van Assche2742c1d2013-06-12 15:24:25 +02002747 .skip_settle_delay = true,
David Dillow49248642011-01-14 18:23:24 -05002748 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
Bart Van Assche4d73f952013-10-26 14:40:37 +02002749 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002750 .this_id = -1,
Bart Van Assche4d73f952013-10-26 14:40:37 +02002751 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002752 .use_clustering = ENABLE_CLUSTERING,
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002753 .shost_attrs = srp_host_attrs,
2754 .use_blk_tags = 1,
Christoph Hellwigc40ecc12014-11-13 14:25:11 +01002755 .track_queue_depth = 1,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002756};
2757
Bart Van Assche34aa6542014-10-30 14:47:22 +01002758static int srp_sdev_count(struct Scsi_Host *host)
2759{
2760 struct scsi_device *sdev;
2761 int c = 0;
2762
2763 shost_for_each_device(sdev, host)
2764 c++;
2765
2766 return c;
2767}
2768
Roland Dreieraef9ec32005-11-02 14:07:13 -08002769static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2770{
FUJITA Tomonori32368222007-06-27 16:33:12 +09002771 struct srp_rport_identifiers ids;
2772 struct srp_rport *rport;
2773
Bart Van Assche34aa6542014-10-30 14:47:22 +01002774 target->state = SRP_TARGET_SCANNING;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002775 sprintf(target->target_name, "SRP.T10:%016llX",
2776 (unsigned long long) be64_to_cpu(target->id_ext));
2777
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002778 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
Roland Dreieraef9ec32005-11-02 14:07:13 -08002779 return -ENODEV;
2780
FUJITA Tomonori32368222007-06-27 16:33:12 +09002781 memcpy(ids.port_id, &target->id_ext, 8);
2782 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
FUJITA Tomonoriaebd5e42007-07-11 15:08:15 +09002783 ids.roles = SRP_RPORT_ROLE_TARGET;
FUJITA Tomonori32368222007-06-27 16:33:12 +09002784 rport = srp_rport_add(target->scsi_host, &ids);
2785 if (IS_ERR(rport)) {
2786 scsi_remove_host(target->scsi_host);
2787 return PTR_ERR(rport);
2788 }
2789
Bart Van Asschedc1bdbd2011-09-16 20:41:13 +02002790 rport->lld_data = target;
Bart Van Assche9dd69a62013-10-26 14:32:30 +02002791 target->rport = rport;
Bart Van Asschedc1bdbd2011-09-16 20:41:13 +02002792
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07002793 spin_lock(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002794 list_add_tail(&target->list, &host->target_list);
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07002795 spin_unlock(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002796
Roland Dreieraef9ec32005-11-02 14:07:13 -08002797 scsi_scan_target(&target->scsi_host->shost_gendev,
Matthew Wilcox1962a4a2006-06-17 20:37:30 -07002798 0, target->scsi_id, SCAN_WILD_CARD, 0);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002799
Bart Van Assche34aa6542014-10-30 14:47:22 +01002800 if (!target->connected || target->qp_in_error) {
2801 shost_printk(KERN_INFO, target->scsi_host,
2802 PFX "SCSI scan failed - removing SCSI host\n");
2803 srp_queue_remove_work(target);
2804 goto out;
2805 }
2806
2807 pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n",
2808 dev_name(&target->scsi_host->shost_gendev),
2809 srp_sdev_count(target->scsi_host));
2810
2811 spin_lock_irq(&target->lock);
2812 if (target->state == SRP_TARGET_SCANNING)
2813 target->state = SRP_TARGET_LIVE;
2814 spin_unlock_irq(&target->lock);
2815
2816out:
Roland Dreieraef9ec32005-11-02 14:07:13 -08002817 return 0;
2818}
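/*
 * Note that srp_add_target() keeps the port in SRP_TARGET_SCANNING
 * while the initial SCSI scan runs, so a transport failure during the
 * scan removes the SCSI host instead of marking the port live.
 */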
2819
Tony Jonesee959b02008-02-22 00:13:36 +01002820static void srp_release_dev(struct device *dev)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002821{
2822 struct srp_host *host =
Tony Jonesee959b02008-02-22 00:13:36 +01002823 container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002824
2825 complete(&host->released);
2826}
2827
2828static struct class srp_class = {
2829 .name = "infiniband_srp",
Tony Jonesee959b02008-02-22 00:13:36 +01002830 .dev_release = srp_release_dev
Roland Dreieraef9ec32005-11-02 14:07:13 -08002831};
2832
Bart Van Assche96fc2482013-06-28 14:51:26 +02002833/**
2834 * srp_conn_unique() - check whether the connection to a target is unique
Bart Van Asscheaf246632014-05-20 15:04:21 +02002835 * @host: SRP host.
2836 * @target: SRP target port.
Bart Van Assche96fc2482013-06-28 14:51:26 +02002837 */
2838static bool srp_conn_unique(struct srp_host *host,
2839 struct srp_target_port *target)
2840{
2841 struct srp_target_port *t;
2842 bool ret = false;
2843
2844 if (target->state == SRP_TARGET_REMOVED)
2845 goto out;
2846
2847 ret = true;
2848
2849 spin_lock(&host->target_lock);
2850 list_for_each_entry(t, &host->target_list, list) {
2851 if (t != target &&
2852 target->id_ext == t->id_ext &&
2853 target->ioc_guid == t->ioc_guid &&
2854 target->initiator_ext == t->initiator_ext) {
2855 ret = false;
2856 break;
2857 }
2858 }
2859 spin_unlock(&host->target_lock);
2860
2861out:
2862 return ret;
2863}
2864
Roland Dreieraef9ec32005-11-02 14:07:13 -08002865/*
2866 * Target ports are added by writing
2867 *
2868 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2869 * pkey=<P_Key>,service_id=<service ID>
2870 *
2871 * to the add_target sysfs attribute.
2872 */
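/*
 * For example (all identifiers hypothetical), writing the following,
 * as a single line, to /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 * (port 1 of an HCA named mlx4_0) logs in to that target port:
 *
 *   id_ext=200100a0b8020456,ioc_guid=00a0b8020456,
 *   dgid=fe800000000000000002c90200402bd5,pkey=ffff,
 *   service_id=200100a0b8020456
 */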
2873enum {
2874 SRP_OPT_ERR = 0,
2875 SRP_OPT_ID_EXT = 1 << 0,
2876 SRP_OPT_IOC_GUID = 1 << 1,
2877 SRP_OPT_DGID = 1 << 2,
2878 SRP_OPT_PKEY = 1 << 3,
2879 SRP_OPT_SERVICE_ID = 1 << 4,
2880 SRP_OPT_MAX_SECT = 1 << 5,
Vu Pham52fb2b502006-06-17 20:37:31 -07002881 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
Ramachandra K0c0450db2006-06-17 20:37:38 -07002882 SRP_OPT_IO_CLASS = 1 << 7,
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02002883 SRP_OPT_INITIATOR_EXT = 1 << 8,
David Dillow49248642011-01-14 18:23:24 -05002884 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
David Dillowc07d4242011-01-16 13:57:10 -05002885 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
2886 SRP_OPT_SG_TABLESIZE = 1 << 11,
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002887 SRP_OPT_COMP_VECTOR = 1 << 12,
Vu Pham7bb312e2013-10-26 14:31:27 +02002888 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
Bart Van Assche4d73f952013-10-26 14:40:37 +02002889 SRP_OPT_QUEUE_SIZE = 1 << 14,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002890 SRP_OPT_ALL = (SRP_OPT_ID_EXT |
2891 SRP_OPT_IOC_GUID |
2892 SRP_OPT_DGID |
2893 SRP_OPT_PKEY |
2894 SRP_OPT_SERVICE_ID),
2895};
2896
Steven Whitehousea447c092008-10-13 10:46:57 +01002897static const match_table_t srp_opt_tokens = {
Vu Pham52fb2b502006-06-17 20:37:31 -07002898 { SRP_OPT_ID_EXT, "id_ext=%s" },
2899 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
2900 { SRP_OPT_DGID, "dgid=%s" },
2901 { SRP_OPT_PKEY, "pkey=%x" },
2902 { SRP_OPT_SERVICE_ID, "service_id=%s" },
2903 { SRP_OPT_MAX_SECT, "max_sect=%d" },
2904 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
Ramachandra K0c0450db2006-06-17 20:37:38 -07002905 { SRP_OPT_IO_CLASS, "io_class=%x" },
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02002906 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
David Dillow49248642011-01-14 18:23:24 -05002907 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
David Dillowc07d4242011-01-16 13:57:10 -05002908 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
2909 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002910 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
Vu Pham7bb312e2013-10-26 14:31:27 +02002911 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
Bart Van Assche4d73f952013-10-26 14:40:37 +02002912 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
Vu Pham52fb2b502006-06-17 20:37:31 -07002913 { SRP_OPT_ERR, NULL }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002914};
2915
2916static int srp_parse_options(const char *buf, struct srp_target_port *target)
2917{
2918 char *options, *sep_opt;
2919 char *p;
2920 char dgid[3];
2921 substring_t args[MAX_OPT_ARGS];
2922 int opt_mask = 0;
2923 int token;
2924 int ret = -EINVAL;
2925 int i;
2926
2927 options = kstrdup(buf, GFP_KERNEL);
2928 if (!options)
2929 return -ENOMEM;
2930
2931 sep_opt = options;
Sagi Grimberg7dcf9c12014-10-19 18:19:02 +03002932 while ((p = strsep(&sep_opt, ",\n")) != NULL) {
Roland Dreieraef9ec32005-11-02 14:07:13 -08002933 if (!*p)
2934 continue;
2935
2936 token = match_token(p, srp_opt_tokens, args);
2937 opt_mask |= token;
2938
2939 switch (token) {
2940 case SRP_OPT_ID_EXT:
2941 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02002942 if (!p) {
2943 ret = -ENOMEM;
2944 goto out;
2945 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002946 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2947 kfree(p);
2948 break;
2949
2950 case SRP_OPT_IOC_GUID:
2951 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02002952 if (!p) {
2953 ret = -ENOMEM;
2954 goto out;
2955 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002956 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
2957 kfree(p);
2958 break;
2959
2960 case SRP_OPT_DGID:
2961 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02002962 if (!p) {
2963 ret = -ENOMEM;
2964 goto out;
2965 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002966 if (strlen(p) != 32) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002967 pr_warn("bad dest GID parameter '%s'\n", p);
Roland Dreierce1823f2006-04-03 09:31:04 -07002968 kfree(p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002969 goto out;
2970 }
2971
2972 for (i = 0; i < 16; ++i) {
Bart Van Assche747fe002014-10-30 14:48:05 +01002973 strlcpy(dgid, p + i * 2, sizeof(dgid));
2974 if (sscanf(dgid, "%hhx",
2975 &target->orig_dgid.raw[i]) < 1) {
2976 ret = -EINVAL;
2977 kfree(p);
2978 goto out;
2979 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002980 }
Roland Dreierbf17c1c2006-03-20 10:08:25 -08002981 kfree(p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002982 break;
2983
2984 case SRP_OPT_PKEY:
2985 if (match_hex(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002986 pr_warn("bad P_Key parameter '%s'\n", p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002987 goto out;
2988 }
Bart Van Assche747fe002014-10-30 14:48:05 +01002989 target->pkey = cpu_to_be16(token);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002990 break;
2991
2992 case SRP_OPT_SERVICE_ID:
2993 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02002994 if (!p) {
2995 ret = -ENOMEM;
2996 goto out;
2997 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002998 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
2999 kfree(p);
3000 break;
3001
3002 case SRP_OPT_MAX_SECT:
3003 if (match_int(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003004 pr_warn("bad max sect parameter '%s'\n", p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003005 goto out;
3006 }
3007 target->scsi_host->max_sectors = token;
3008 break;
3009
Bart Van Assche4d73f952013-10-26 14:40:37 +02003010 case SRP_OPT_QUEUE_SIZE:
3011 if (match_int(args, &token) || token < 1) {
3012 pr_warn("bad queue_size parameter '%s'\n", p);
3013 goto out;
3014 }
3015 target->scsi_host->can_queue = token;
3016 target->queue_size = token + SRP_RSP_SQ_SIZE +
3017 SRP_TSK_MGMT_SQ_SIZE;
3018 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3019 target->scsi_host->cmd_per_lun = token;
3020 break;
3021
Vu Pham52fb2b502006-06-17 20:37:31 -07003022 case SRP_OPT_MAX_CMD_PER_LUN:
Bart Van Assche4d73f952013-10-26 14:40:37 +02003023 if (match_int(args, &token) || token < 1) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003024 pr_warn("bad max cmd_per_lun parameter '%s'\n",
3025 p);
Vu Pham52fb2b502006-06-17 20:37:31 -07003026 goto out;
3027 }
Bart Van Assche4d73f952013-10-26 14:40:37 +02003028 target->scsi_host->cmd_per_lun = token;
Vu Pham52fb2b502006-06-17 20:37:31 -07003029 break;
3030
Ramachandra K0c0450db2006-06-17 20:37:38 -07003031 case SRP_OPT_IO_CLASS:
3032 if (match_hex(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003033 pr_warn("bad IO class parameter '%s'\n", p);
Ramachandra K0c0450db2006-06-17 20:37:38 -07003034 goto out;
3035 }
3036 if (token != SRP_REV10_IB_IO_CLASS &&
3037 token != SRP_REV16A_IB_IO_CLASS) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003038 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3039 token, SRP_REV10_IB_IO_CLASS,
3040 SRP_REV16A_IB_IO_CLASS);
Ramachandra K0c0450db2006-06-17 20:37:38 -07003041 goto out;
3042 }
3043 target->io_class = token;
3044 break;
3045
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02003046 case SRP_OPT_INITIATOR_EXT:
3047 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02003048 if (!p) {
3049 ret = -ENOMEM;
3050 goto out;
3051 }
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02003052 target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3053 kfree(p);
3054 break;
3055
David Dillow49248642011-01-14 18:23:24 -05003056 case SRP_OPT_CMD_SG_ENTRIES:
3057 if (match_int(args, &token) || token < 1 || token > 255) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003058 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3059 p);
David Dillow49248642011-01-14 18:23:24 -05003060 goto out;
3061 }
3062 target->cmd_sg_cnt = token;
3063 break;
3064
David Dillowc07d4242011-01-16 13:57:10 -05003065 case SRP_OPT_ALLOW_EXT_SG:
3066 if (match_int(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003067 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
David Dillowc07d4242011-01-16 13:57:10 -05003068 goto out;
3069 }
3070 target->allow_ext_sg = !!token;
3071 break;
3072
3073 case SRP_OPT_SG_TABLESIZE:
3074 if (match_int(args, &token) || token < 1 ||
3075 token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003076 pr_warn("bad max sg_tablesize parameter '%s'\n",
3077 p);
David Dillowc07d4242011-01-16 13:57:10 -05003078 goto out;
3079 }
3080 target->sg_tablesize = token;
3081 break;
3082
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02003083 case SRP_OPT_COMP_VECTOR:
3084 if (match_int(args, &token) || token < 0) {
3085 pr_warn("bad comp_vector parameter '%s'\n", p);
3086 goto out;
3087 }
3088 target->comp_vector = token;
3089 break;
3090
Vu Pham7bb312e2013-10-26 14:31:27 +02003091 case SRP_OPT_TL_RETRY_COUNT:
3092 if (match_int(args, &token) || token < 2 || token > 7) {
3093 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3094 p);
3095 goto out;
3096 }
3097 target->tl_retry_count = token;
3098 break;
3099
Roland Dreieraef9ec32005-11-02 14:07:13 -08003100 default:
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003101 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3102 p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003103 goto out;
3104 }
3105 }
3106
3107 if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
3108 ret = 0;
3109 else
3110 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
3111 if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
3112 !(srp_opt_tokens[i].token & opt_mask))
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003113 pr_warn("target creation request is missing parameter '%s'\n",
3114 srp_opt_tokens[i].pattern);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003115
Bart Van Assche4d73f952013-10-26 14:40:37 +02003116 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3117 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3118 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3119 target->scsi_host->cmd_per_lun,
3120 target->scsi_host->can_queue);
3121
Roland Dreieraef9ec32005-11-02 14:07:13 -08003122out:
3123 kfree(options);
3124 return ret;
3125}
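/*
 * Worked example: "queue_size=64" sets the SCSI host's can_queue to 64
 * and target->queue_size to 64 + SRP_RSP_SQ_SIZE + SRP_TSK_MGMT_SQ_SIZE,
 * reserving send queue slots for SRP_RSP replies and for a task
 * management request on top of the regular commands.
 */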
3126
Tony Jonesee959b02008-02-22 00:13:36 +01003127static ssize_t srp_create_target(struct device *dev,
3128 struct device_attribute *attr,
Roland Dreieraef9ec32005-11-02 14:07:13 -08003129 const char *buf, size_t count)
3130{
3131 struct srp_host *host =
Tony Jonesee959b02008-02-22 00:13:36 +01003132 container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003133 struct Scsi_Host *target_host;
3134 struct srp_target_port *target;
Bart Van Assche509c07b2014-10-30 14:48:30 +01003135 struct srp_rdma_ch *ch;
Bart Van Assched1b42892014-05-20 15:07:20 +02003136 struct srp_device *srp_dev = host->srp_dev;
3137 struct ib_device *ibdev = srp_dev->dev;
Bart Van Assched92c0da2014-10-06 17:14:36 +02003138 int ret, node_idx, node, cpu, i;
3139 bool multich = false;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003140
3141 target_host = scsi_host_alloc(&srp_template,
3142 sizeof (struct srp_target_port));
3143 if (!target_host)
3144 return -ENOMEM;
3145
David Dillow49248642011-01-14 18:23:24 -05003146 target_host->transportt = ib_srp_transport_template;
Bart Van Asschefd1b6c42011-07-13 09:19:16 -07003147 target_host->max_channel = 0;
3148 target_host->max_id = 1;
Arne Redlich3c8edf02006-11-15 12:43:00 +01003149 target_host->max_lun = SRP_MAX_LUN;
3150 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
Roland Dreier5f068992005-11-11 14:06:01 -08003151
Roland Dreieraef9ec32005-11-02 14:07:13 -08003152 target = host_to_target(target_host);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003153
David Dillow49248642011-01-14 18:23:24 -05003154 target->io_class = SRP_REV16A_IB_IO_CLASS;
3155 target->scsi_host = target_host;
3156 target->srp_host = host;
3157 target->lkey = host->srp_dev->mr->lkey;
3158 target->rkey = host->srp_dev->mr->rkey;
3159 target->cmd_sg_cnt = cmd_sg_entries;
David Dillowc07d4242011-01-16 13:57:10 -05003160 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
3161 target->allow_ext_sg = allow_ext_sg;
Vu Pham7bb312e2013-10-26 14:31:27 +02003162 target->tl_retry_count = 7;
Bart Van Assche4d73f952013-10-26 14:40:37 +02003163 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003164
Bart Van Assche34aa6542014-10-30 14:47:22 +01003165 /*
 3166 * Prevent the SCSI host from being removed by srp_remove_target()
3167 * before this function returns.
3168 */
3169 scsi_host_get(target->scsi_host);
3170
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003171 mutex_lock(&host->add_target_mutex);
3172
Roland Dreieraef9ec32005-11-02 14:07:13 -08003173 ret = srp_parse_options(buf, target);
3174 if (ret)
3175 goto err;
3176
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02003177 ret = scsi_init_shared_tag_map(target_host, target_host->can_queue);
3178 if (ret)
3179 goto err;
3180
Bart Van Assche4d73f952013-10-26 14:40:37 +02003181 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3182
Bart Van Assche96fc2482013-06-28 14:51:26 +02003183 if (!srp_conn_unique(target->srp_host, target)) {
3184 shost_printk(KERN_INFO, target->scsi_host,
3185 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3186 be64_to_cpu(target->id_ext),
3187 be64_to_cpu(target->ioc_guid),
3188 be64_to_cpu(target->initiator_ext));
3189 ret = -EEXIST;
3190 goto err;
3191 }
3192
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003193 if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
Bart Van Assched1b42892014-05-20 15:07:20 +02003194 target->cmd_sg_cnt < target->sg_tablesize) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003195 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
David Dillowc07d4242011-01-16 13:57:10 -05003196 target->sg_tablesize = target->cmd_sg_cnt;
3197 }
3198
3199 target_host->sg_tablesize = target->sg_tablesize;
3200 target->indirect_size = target->sg_tablesize *
3201 sizeof (struct srp_direct_buf);
David Dillow49248642011-01-14 18:23:24 -05003202 target->max_iu_len = sizeof (struct srp_cmd) +
3203 sizeof (struct srp_indirect_buf) +
3204 target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
3205
Bart Van Asschec1120f82013-10-26 14:35:08 +02003206 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
Bart Van Asscheef6c49d2011-12-26 16:49:18 +00003207 INIT_WORK(&target->remove_work, srp_remove_work);
David Dillow8f26c9f2011-01-14 19:45:50 -05003208 spin_lock_init(&target->lock);
Bart Van Assche747fe002014-10-30 14:48:05 +01003209 ret = ib_query_gid(ibdev, host->port, 0, &target->sgid);
Sagi Grimberg2088ca62014-03-14 13:51:58 +01003210 if (ret)
Bart Van Assched92c0da2014-10-06 17:14:36 +02003211 goto err;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003212
Bart Van Assched92c0da2014-10-06 17:14:36 +02003213 ret = -ENOMEM;
3214 target->ch_count = max_t(unsigned, num_online_nodes(),
3215 min(ch_count ? :
3216 min(4 * num_online_nodes(),
3217 ibdev->num_comp_vectors),
3218 num_online_cpus()));
3219 target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3220 GFP_KERNEL);
3221 if (!target->ch)
3222 goto err;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003223
Bart Van Assched92c0da2014-10-06 17:14:36 +02003224 node_idx = 0;
3225 for_each_online_node(node) {
3226 const int ch_start = (node_idx * target->ch_count /
3227 num_online_nodes());
3228 const int ch_end = ((node_idx + 1) * target->ch_count /
3229 num_online_nodes());
3230 const int cv_start = (node_idx * ibdev->num_comp_vectors /
3231 num_online_nodes() + target->comp_vector)
3232 % ibdev->num_comp_vectors;
3233 const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
3234 num_online_nodes() + target->comp_vector)
3235 % ibdev->num_comp_vectors;
3236 int cpu_idx = 0;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003237
Bart Van Assched92c0da2014-10-06 17:14:36 +02003238 for_each_online_cpu(cpu) {
3239 if (cpu_to_node(cpu) != node)
3240 continue;
3241 if (ch_start + cpu_idx >= ch_end)
3242 continue;
3243 ch = &target->ch[ch_start + cpu_idx];
3244 ch->target = target;
3245 ch->comp_vector = cv_start == cv_end ? cv_start :
3246 cv_start + cpu_idx % (cv_end - cv_start);
3247 spin_lock_init(&ch->lock);
3248 INIT_LIST_HEAD(&ch->free_tx);
3249 ret = srp_new_cm_id(ch);
3250 if (ret)
3251 goto err_disconnect;
3252
3253 ret = srp_create_ch_ib(ch);
3254 if (ret)
3255 goto err_disconnect;
3256
3257 ret = srp_alloc_req_data(ch);
3258 if (ret)
3259 goto err_disconnect;
3260
3261 ret = srp_connect_ch(ch, multich);
3262 if (ret) {
3263 shost_printk(KERN_ERR, target->scsi_host,
3264 PFX "Connection %d/%d failed\n",
3265 ch_start + cpu_idx,
3266 target->ch_count);
3267 if (node_idx == 0 && cpu_idx == 0) {
3268 goto err_disconnect;
3269 } else {
3270 srp_free_ch_ib(target, ch);
3271 srp_free_req_data(target, ch);
3272 target->ch_count = ch - target->ch;
3273 break;
3274 }
3275 }
3276
3277 multich = true;
3278 cpu_idx++;
3279 }
3280 node_idx++;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003281 }
3282
Bart Van Assched92c0da2014-10-06 17:14:36 +02003283 target->scsi_host->nr_hw_queues = target->ch_count;
3284
Roland Dreieraef9ec32005-11-02 14:07:13 -08003285 ret = srp_add_target(host, target);
3286 if (ret)
3287 goto err_disconnect;
3288
Bart Van Assche34aa6542014-10-30 14:47:22 +01003289 if (target->state != SRP_TARGET_REMOVED) {
3290 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3291 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3292 be64_to_cpu(target->id_ext),
3293 be64_to_cpu(target->ioc_guid),
Bart Van Assche747fe002014-10-30 14:48:05 +01003294 be16_to_cpu(target->pkey),
Bart Van Assche34aa6542014-10-30 14:47:22 +01003295 be64_to_cpu(target->service_id),
Bart Van Assche747fe002014-10-30 14:48:05 +01003296 target->sgid.raw, target->orig_dgid.raw);
Bart Van Assche34aa6542014-10-30 14:47:22 +01003297 }
Bart Van Asschee7ffde02014-03-14 13:52:21 +01003298
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003299 ret = count;
3300
3301out:
3302 mutex_unlock(&host->add_target_mutex);
Bart Van Assche34aa6542014-10-30 14:47:22 +01003303
3304 scsi_host_put(target->scsi_host);
3305
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003306 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003307
3308err_disconnect:
3309 srp_disconnect_target(target);
3310
Bart Van Assched92c0da2014-10-06 17:14:36 +02003311 for (i = 0; i < target->ch_count; i++) {
3312 ch = &target->ch[i];
3313 srp_free_ch_ib(target, ch);
3314 srp_free_req_data(target, ch);
3315 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08003316
Bart Van Assched92c0da2014-10-06 17:14:36 +02003317 kfree(target->ch);
David Dillow8f26c9f2011-01-14 19:45:50 -05003318
Roland Dreieraef9ec32005-11-02 14:07:13 -08003319err:
3320 scsi_host_put(target_host);
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003321 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003322}
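/*
 * Example of the channel count heuristic in srp_create_target(): on a
 * machine with two online NUMA nodes, 16 online CPUs and an HCA that
 * exposes 8 completion vectors, and with the ch_count module parameter
 * left at zero, target->ch_count = max(2, min(min(4 * 2, 8), 16)) = 8,
 * and those 8 channels are spread over both nodes and over the
 * available completion vectors.
 */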
3323
Tony Jonesee959b02008-02-22 00:13:36 +01003324static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003325
Tony Jonesee959b02008-02-22 00:13:36 +01003326static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3327 char *buf)
Roland Dreieraef9ec32005-11-02 14:07:13 -08003328{
Tony Jonesee959b02008-02-22 00:13:36 +01003329 struct srp_host *host = container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003330
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01003331 return sprintf(buf, "%s\n", host->srp_dev->dev->name);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003332}
3333
Tony Jonesee959b02008-02-22 00:13:36 +01003334static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003335
Tony Jonesee959b02008-02-22 00:13:36 +01003336static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3337 char *buf)
Roland Dreieraef9ec32005-11-02 14:07:13 -08003338{
Tony Jonesee959b02008-02-22 00:13:36 +01003339 struct srp_host *host = container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003340
3341 return sprintf(buf, "%d\n", host->port);
3342}
3343
Tony Jonesee959b02008-02-22 00:13:36 +01003344static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003345
Roland Dreierf5358a12006-06-17 20:37:29 -07003346static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
Roland Dreieraef9ec32005-11-02 14:07:13 -08003347{
3348 struct srp_host *host;
3349
3350 host = kzalloc(sizeof *host, GFP_KERNEL);
3351 if (!host)
3352 return NULL;
3353
3354 INIT_LIST_HEAD(&host->target_list);
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07003355 spin_lock_init(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003356 init_completion(&host->released);
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003357 mutex_init(&host->add_target_mutex);
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01003358 host->srp_dev = device;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003359 host->port = port;
3360
Tony Jonesee959b02008-02-22 00:13:36 +01003361 host->dev.class = &srp_class;
3362 host->dev.parent = device->dev->dma_device;
Kay Sieversd927e382009-01-06 10:44:39 -08003363 dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003364
Tony Jonesee959b02008-02-22 00:13:36 +01003365 if (device_register(&host->dev))
Roland Dreierf5358a12006-06-17 20:37:29 -07003366 goto free_host;
Tony Jonesee959b02008-02-22 00:13:36 +01003367 if (device_create_file(&host->dev, &dev_attr_add_target))
Roland Dreieraef9ec32005-11-02 14:07:13 -08003368 goto err_class;
Tony Jonesee959b02008-02-22 00:13:36 +01003369 if (device_create_file(&host->dev, &dev_attr_ibdev))
Roland Dreieraef9ec32005-11-02 14:07:13 -08003370 goto err_class;
Tony Jonesee959b02008-02-22 00:13:36 +01003371 if (device_create_file(&host->dev, &dev_attr_port))
Roland Dreieraef9ec32005-11-02 14:07:13 -08003372 goto err_class;
3373
3374 return host;
3375
3376err_class:
Tony Jonesee959b02008-02-22 00:13:36 +01003377 device_unregister(&host->dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003378
Roland Dreierf5358a12006-06-17 20:37:29 -07003379free_host:
Roland Dreieraef9ec32005-11-02 14:07:13 -08003380 kfree(host);
3381
3382 return NULL;
3383}
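/*
 * The class device created here shows up as, e.g.,
 * /sys/class/infiniband_srp/srp-mlx4_0-1 for port 1 of a hypothetical
 * HCA named mlx4_0, carrying the add_target, ibdev and port attributes
 * registered above.
 */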
3384
3385static void srp_add_one(struct ib_device *device)
3386{
Roland Dreierf5358a12006-06-17 20:37:29 -07003387 struct srp_device *srp_dev;
3388 struct ib_device_attr *dev_attr;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003389 struct srp_host *host;
Bart Van Assche52ede082014-05-20 15:07:45 +02003390 int mr_page_shift, s, e, p;
3391 u64 max_pages_per_mr;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003392
Roland Dreierf5358a12006-06-17 20:37:29 -07003393 dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
3394 if (!dev_attr)
Sean Heftycf311cd2006-01-10 07:39:34 -08003395 return;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003396
Roland Dreierf5358a12006-06-17 20:37:29 -07003397 if (ib_query_device(device, dev_attr)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003398 pr_warn("Query device failed for %s\n", device->name);
Roland Dreierf5358a12006-06-17 20:37:29 -07003399 goto free_attr;
3400 }
3401
3402 srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
3403 if (!srp_dev)
3404 goto free_attr;
3405
Bart Van Assched1b42892014-05-20 15:07:20 +02003406 srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
3407 device->map_phys_fmr && device->unmap_fmr);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003408 srp_dev->has_fr = (dev_attr->device_cap_flags &
3409 IB_DEVICE_MEM_MGT_EXTENSIONS);
3410 if (!srp_dev->has_fmr && !srp_dev->has_fr)
3411 dev_warn(&device->dev, "neither FMR nor FR is supported\n");
3412
3413 srp_dev->use_fast_reg = (srp_dev->has_fr &&
3414 (!srp_dev->has_fmr || prefer_fr));
Bart Van Assched1b42892014-05-20 15:07:20 +02003415
Roland Dreierf5358a12006-06-17 20:37:29 -07003416 /*
3417 * Use the smallest page size supported by the HCA, down to a
David Dillow8f26c9f2011-01-14 19:45:50 -05003418 * minimum of 4096 bytes. We're unlikely to build large sglists
3419 * out of smaller entries.
Roland Dreierf5358a12006-06-17 20:37:29 -07003420 */
Bart Van Assche52ede082014-05-20 15:07:45 +02003421 mr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1);
3422 srp_dev->mr_page_size = 1 << mr_page_shift;
3423 srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
3424 max_pages_per_mr = dev_attr->max_mr_size;
3425 do_div(max_pages_per_mr, srp_dev->mr_page_size);
3426 srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
3427 max_pages_per_mr);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003428 if (srp_dev->use_fast_reg) {
3429 srp_dev->max_pages_per_mr =
3430 min_t(u32, srp_dev->max_pages_per_mr,
3431 dev_attr->max_fast_reg_page_list_len);
3432 }
Bart Van Assche52ede082014-05-20 15:07:45 +02003433 srp_dev->mr_max_size = srp_dev->mr_page_size *
3434 srp_dev->max_pages_per_mr;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003435 pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
Bart Van Assche52ede082014-05-20 15:07:45 +02003436 device->name, mr_page_shift, dev_attr->max_mr_size,
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003437 dev_attr->max_fast_reg_page_list_len,
Bart Van Assche52ede082014-05-20 15:07:45 +02003438 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
Roland Dreierf5358a12006-06-17 20:37:29 -07003439
3440 INIT_LIST_HEAD(&srp_dev->dev_list);
3441
3442 srp_dev->dev = device;
3443 srp_dev->pd = ib_alloc_pd(device);
3444 if (IS_ERR(srp_dev->pd))
3445 goto free_dev;
3446
3447 srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
3448 IB_ACCESS_LOCAL_WRITE |
3449 IB_ACCESS_REMOTE_READ |
3450 IB_ACCESS_REMOTE_WRITE);
3451 if (IS_ERR(srp_dev->mr))
3452 goto err_pd;
3453
Tom Tucker07ebafb2006-08-03 16:02:42 -05003454 if (device->node_type == RDMA_NODE_IB_SWITCH) {
Roland Dreieraef9ec32005-11-02 14:07:13 -08003455 s = 0;
3456 e = 0;
3457 } else {
3458 s = 1;
3459 e = device->phys_port_cnt;
3460 }
3461
3462 for (p = s; p <= e; ++p) {
Roland Dreierf5358a12006-06-17 20:37:29 -07003463 host = srp_add_port(srp_dev, p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003464 if (host)
Roland Dreierf5358a12006-06-17 20:37:29 -07003465 list_add_tail(&host->list, &srp_dev->dev_list);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003466 }
3467
Roland Dreierf5358a12006-06-17 20:37:29 -07003468 ib_set_client_data(device, &srp_client, srp_dev);
3469
3470 goto free_attr;
3471
3472err_pd:
3473 ib_dealloc_pd(srp_dev->pd);
3474
3475free_dev:
3476 kfree(srp_dev);
3477
3478free_attr:
3479 kfree(dev_attr);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003480}
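/*
 * Worked example for the limits computed above: with a hypothetical
 * page_size_cap of 0xfffff000, ffs() returns 13, so mr_page_shift =
 * max(12, 12) = 12 and mr_page_size = 4096 bytes.  A max_mr_size of
 * 4 GiB then allows 1048576 pages per MR, before the clamp to
 * SRP_MAX_PAGES_PER_MR and, when fast registration is used, to
 * max_fast_reg_page_list_len.
 */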
3481
3482static void srp_remove_one(struct ib_device *device)
3483{
Roland Dreierf5358a12006-06-17 20:37:29 -07003484 struct srp_device *srp_dev;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003485 struct srp_host *host, *tmp_host;
Bart Van Asscheef6c49d2011-12-26 16:49:18 +00003486 struct srp_target_port *target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003487
Roland Dreierf5358a12006-06-17 20:37:29 -07003488 srp_dev = ib_get_client_data(device, &srp_client);
Dotan Barak1fe0cb82013-06-12 15:20:36 +02003489 if (!srp_dev)
3490 return;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003491
Roland Dreierf5358a12006-06-17 20:37:29 -07003492 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
Tony Jonesee959b02008-02-22 00:13:36 +01003493 device_unregister(&host->dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003494 /*
3495 * Wait for the sysfs entry to go away, so that no new
3496 * target ports can be created.
3497 */
3498 wait_for_completion(&host->released);
3499
3500 /*
Bart Van Asscheef6c49d2011-12-26 16:49:18 +00003501 * Remove all target ports.
Roland Dreieraef9ec32005-11-02 14:07:13 -08003502 */
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07003503 spin_lock(&host->target_lock);
Bart Van Asscheef6c49d2011-12-26 16:49:18 +00003504 list_for_each_entry(target, &host->target_list, list)
3505 srp_queue_remove_work(target);
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07003506 spin_unlock(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003507
3508 /*
Bart Van Asschebcc05912014-07-09 15:57:26 +02003509 * Wait for tl_err and target port removal tasks.
Roland Dreieraef9ec32005-11-02 14:07:13 -08003510 */
Bart Van Asscheef6c49d2011-12-26 16:49:18 +00003511 flush_workqueue(system_long_wq);
Bart Van Asschebcc05912014-07-09 15:57:26 +02003512 flush_workqueue(srp_remove_wq);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003513
Roland Dreieraef9ec32005-11-02 14:07:13 -08003514 kfree(host);
3515 }
3516
Roland Dreierf5358a12006-06-17 20:37:29 -07003517 ib_dereg_mr(srp_dev->mr);
3518 ib_dealloc_pd(srp_dev->pd);
3519
3520 kfree(srp_dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003521}
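/*
 * The teardown order above matters: unregistering the class device and
 * waiting for host->released guarantees that no new targets can be
 * added, the per-target remove work is queued next, and both
 * workqueues are flushed before the host structures are freed.
 */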
3522
FUJITA Tomonori32368222007-06-27 16:33:12 +09003523static struct srp_function_template ib_srp_transport_functions = {
Bart Van Asscheed9b2262013-10-26 14:34:27 +02003524 .has_rport_state = true,
3525 .reset_timer_if_blocked = true,
Bart Van Asschea95cadb2013-10-26 14:37:17 +02003526 .reconnect_delay = &srp_reconnect_delay,
Bart Van Asscheed9b2262013-10-26 14:34:27 +02003527 .fast_io_fail_tmo = &srp_fast_io_fail_tmo,
3528 .dev_loss_tmo = &srp_dev_loss_tmo,
3529 .reconnect = srp_rport_reconnect,
Bart Van Asschedc1bdbd2011-09-16 20:41:13 +02003530 .rport_delete = srp_rport_delete,
Bart Van Asscheed9b2262013-10-26 14:34:27 +02003531 .terminate_rport_io = srp_terminate_io,
FUJITA Tomonori32368222007-06-27 16:33:12 +09003532};
3533
Roland Dreieraef9ec32005-11-02 14:07:13 -08003534static int __init srp_init_module(void)
3535{
3536 int ret;
3537
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05003538 BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
Bart Van Asschedd5e6e32010-08-30 19:27:20 +00003539
David Dillow49248642011-01-14 18:23:24 -05003540 if (srp_sg_tablesize) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003541 pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
David Dillow49248642011-01-14 18:23:24 -05003542 if (!cmd_sg_entries)
3543 cmd_sg_entries = srp_sg_tablesize;
3544 }
3545
3546 if (!cmd_sg_entries)
3547 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
3548
3549 if (cmd_sg_entries > 255) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003550 pr_warn("Clamping cmd_sg_entries to 255\n");
David Dillow49248642011-01-14 18:23:24 -05003551 cmd_sg_entries = 255;
David Dillow1e89a192008-04-16 21:01:12 -07003552 }
3553
David Dillowc07d4242011-01-16 13:57:10 -05003554 if (!indirect_sg_entries)
3555 indirect_sg_entries = cmd_sg_entries;
3556 else if (indirect_sg_entries < cmd_sg_entries) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003557 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
3558 cmd_sg_entries);
David Dillowc07d4242011-01-16 13:57:10 -05003559 indirect_sg_entries = cmd_sg_entries;
3560 }
3561
Bart Van Asschebcc05912014-07-09 15:57:26 +02003562 srp_remove_wq = create_workqueue("srp_remove");
Wei Yongjunda05be22014-08-14 08:56:22 +08003563 if (!srp_remove_wq) {
3564 ret = -ENOMEM;
Bart Van Asschebcc05912014-07-09 15:57:26 +02003565 goto out;
3566 }
3567
3568 ret = -ENOMEM;
FUJITA Tomonori32368222007-06-27 16:33:12 +09003569 ib_srp_transport_template =
3570 srp_attach_transport(&ib_srp_transport_functions);
3571 if (!ib_srp_transport_template)
Bart Van Asschebcc05912014-07-09 15:57:26 +02003572 goto destroy_wq;
FUJITA Tomonori32368222007-06-27 16:33:12 +09003573
Roland Dreieraef9ec32005-11-02 14:07:13 -08003574 ret = class_register(&srp_class);
3575 if (ret) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003576 pr_err("couldn't register class infiniband_srp\n");
Bart Van Asschebcc05912014-07-09 15:57:26 +02003577 goto release_tr;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003578 }
3579
Michael S. Tsirkinc1a0b232006-08-21 16:40:12 -07003580 ib_sa_register_client(&srp_sa_client);
3581
Roland Dreieraef9ec32005-11-02 14:07:13 -08003582 ret = ib_register_client(&srp_client);
3583 if (ret) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003584 pr_err("couldn't register IB client\n");
Bart Van Asschebcc05912014-07-09 15:57:26 +02003585 goto unreg_sa;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003586 }
3587
Bart Van Asschebcc05912014-07-09 15:57:26 +02003588out:
3589 return ret;
3590
3591unreg_sa:
3592 ib_sa_unregister_client(&srp_sa_client);
3593 class_unregister(&srp_class);
3594
3595release_tr:
3596 srp_release_transport(ib_srp_transport_template);
3597
3598destroy_wq:
3599 destroy_workqueue(srp_remove_wq);
3600 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003601}
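/*
 * The parameter sanity checks above take effect at load time, e.g.
 * (values hypothetical):
 *
 *   modprobe ib_srp cmd_sg_entries=64 indirect_sg_entries=512
 *
 * cmd_sg_entries is clamped to 255, and indirect_sg_entries is bumped
 * up to cmd_sg_entries when a smaller value is given.
 */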
3602
3603static void __exit srp_cleanup_module(void)
3604{
3605 ib_unregister_client(&srp_client);
Michael S. Tsirkinc1a0b232006-08-21 16:40:12 -07003606 ib_sa_unregister_client(&srp_sa_client);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003607 class_unregister(&srp_class);
FUJITA Tomonori32368222007-06-27 16:33:12 +09003608 srp_release_transport(ib_srp_transport_template);
Bart Van Asschebcc05912014-07-09 15:57:26 +02003609 destroy_workqueue(srp_remove_wq);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003610}
3611
3612module_init(srp_init_module);
3613module_exit(srp_cleanup_module);