/*
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <rdma/ib_cache.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"1.0"
#define DRV_RELDATE	"July 1, 2013"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
		   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool prefer_fr;
static bool register_always;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(prefer_fr, bool, 0444);
MODULE_PARM_DESC(prefer_fr,
"Whether to use fast registration if both FMR and fast registration are supported");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
		 "Use memory registration even for contiguous memory regions");

static struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");

static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");

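/*
 * Example (added for illustration; the parameter values below are made up):
 * all of the above can be set at module load time, e.g.
 *
 *   modprobe ib_srp cmd_sg_entries=32 prefer_fr=1 ch_count=2
 */
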
static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr);
static void srp_send_completion(struct ib_cq *cq, void *ch_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sprintf(buffer, "%d", tmo);
	else
		return sprintf(buffer, "off");
}

static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	if (strncmp(val, "off", 3) != 0) {
		res = kstrtoint(val, 0, &tmo);
		if (res)
			goto out;
	} else {
		tmo = -1;
	}
	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}

static struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};

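/*
 * Illustration (added comment; not part of the original source): because
 * the three timeout parameters are wired up through srp_tmo_ops, they can
 * also be changed at runtime via sysfs, with "off" mapping to -1:
 *
 *   echo 5   > /sys/module/ib_srp/parameters/fast_io_fail_tmo
 *   echo off > /sys/module/ib_srp/parameters/dev_loss_tmo
 *
 * srp_tmo_set() rejects any combination that srp_tmo_valid() considers
 * inconsistent before storing the new value.
 */
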
static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

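/*
 * Note (added comment): an information unit (IU) pairs a kernel buffer with
 * its DMA mapping. srp_alloc_iu() and srp_free_iu() are used to populate
 * and tear down the per-channel receive and transmit rings (see
 * srp_free_ch_ib() below).
 */
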
static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %d\n", event->event);
}

static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, ch);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (ch->cm_id)
		ib_destroy_cm_id(ch->cm_id);
	ch->cm_id = new_cm_id;
	ch->path.sgid = target->sgid;
	ch->path.dgid = target->orig_dgid;
	ch->path.pkey = target->pkey;
	ch->path.service_id = target->service_id;

	return 0;
}

static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_fmr_pool_param fmr_param;

	memset(&fmr_param, 0, sizeof(fmr_param));
	fmr_param.pool_size	    = target->scsi_host->can_queue;
	fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
	fmr_param.cache		    = 1;
	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
	fmr_param.page_shift	    = ilog2(dev->mr_page_size);
	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_WRITE |
				       IB_ACCESS_REMOTE_READ);

	return ib_create_fmr_pool(dev->pd, &fmr_param);
}

/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
	int i;
	struct srp_fr_desc *d;

	if (!pool)
		return;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		if (d->frpl)
			ib_free_fast_reg_page_list(d->frpl);
		if (d->mr)
			ib_dereg_mr(d->mr);
	}
	kfree(pool);
}

/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
					      struct ib_pd *pd, int pool_size,
					      int max_page_list_len)
{
	struct srp_fr_pool *pool;
	struct srp_fr_desc *d;
	struct ib_mr *mr;
	struct ib_fast_reg_page_list *frpl;
	int i, ret = -EINVAL;

	if (pool_size <= 0)
		goto err;
	ret = -ENOMEM;
	pool = kzalloc(sizeof(struct srp_fr_pool) +
		       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
	if (!pool)
		goto err;
	pool->size = pool_size;
	pool->max_page_list_len = max_page_list_len;
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free_list);

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		mr = ib_alloc_fast_reg_mr(pd, max_page_list_len);
		if (IS_ERR(mr)) {
			ret = PTR_ERR(mr);
			goto destroy_pool;
		}
		d->mr = mr;
		frpl = ib_alloc_fast_reg_page_list(device, max_page_list_len);
		if (IS_ERR(frpl)) {
			ret = PTR_ERR(frpl);
			goto destroy_pool;
		}
		d->frpl = frpl;
		list_add_tail(&d->entry, &pool->free_list);
	}

out:
	return pool;

destroy_pool:
	srp_destroy_fr_pool(pool);

err:
	pool = ERR_PTR(ret);
	goto out;
}

/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
	struct srp_fr_desc *d = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->free_list)) {
		d = list_first_entry(&pool->free_list, typeof(*d), entry);
		list_del(&d->entry);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return d;
}

/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
			    int n)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < n; i++)
		list_add(&desc[i]->entry, &pool->free_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}

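/*
 * Usage sketch (added for illustration; not part of the original source):
 *
 *	struct srp_fr_desc *d = srp_fr_pool_get(ch->fr_pool);
 *
 *	if (d) {
 *		... post an IB_WR_FAST_REG_MR work request for d->mr ...
 *		... queue an IB_WR_LOCAL_INV for d->mr->rkey when done ...
 *		srp_fr_pool_put(ch->fr_pool, &d, 1);
 *	}
 *
 * srp_map_finish_fr() and srp_unmap_data() below follow this pattern.
 */
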
448static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
449{
450 struct srp_device *dev = target->srp_host->srp_dev;
451
452 return srp_create_fr_pool(dev->dev, dev->pd,
453 target->scsi_host->can_queue,
454 dev->max_pages_per_mr);
455}
456
Bart Van Assche7dad6b22014-10-21 18:00:35 +0200457/**
458 * srp_destroy_qp() - destroy an RDMA queue pair
459 * @ch: SRP RDMA channel.
460 *
461 * Change a queue pair into the error state and wait until all receive
462 * completions have been processed before destroying it. This avoids that
463 * the receive completion handler can access the queue pair while it is
464 * being destroyed.
465 */
466static void srp_destroy_qp(struct srp_rdma_ch *ch)
467{
Bart Van Assche7dad6b22014-10-21 18:00:35 +0200468 static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
469 static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID };
470 struct ib_recv_wr *bad_wr;
471 int ret;
472
473 /* Destroying a QP and reusing ch->done is only safe if not connected */
Bart Van Asschec014c8c2015-05-18 13:23:57 +0200474 WARN_ON_ONCE(ch->connected);
Bart Van Assche7dad6b22014-10-21 18:00:35 +0200475
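	/*
	 * Drain protocol (added comment): once the QP has been moved to the
	 * error state, a newly posted receive WR completes immediately with
	 * a flush error, so the reserved wr_id SRP_LAST_WR_ID posted below
	 * marks the last completion the receive CQ can deliver for this QP.
	 */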
	ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
	WARN_ONCE(ret, "ib_modify_qp() returned %d\n", ret);
	if (ret)
		goto out;

	init_completion(&ch->done);
	ret = ib_post_recv(ch->qp, &wr, &bad_wr);
	WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
	if (ret == 0)
		wait_for_completion(&ch->done);

out:
	ib_destroy_qp(ch->qp);
}

static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	struct ib_fmr_pool *fmr_pool = NULL;
	struct srp_fr_pool *fr_pool = NULL;
	const int m = 1 + dev->use_fast_reg;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	/* + 1 for SRP_LAST_WR_ID */
	recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
			       target->queue_size + 1, ch->comp_vector);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
			       m * target->queue_size, ch->comp_vector);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = m * target->queue_size;
	init_attr->cap.max_recv_wr     = target->queue_size + 1;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = 1;
	init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = send_cq;
	init_attr->recv_cq             = recv_cq;

	qp = ib_create_qp(dev->pd, init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, qp);
	if (ret)
		goto err_qp;

	if (dev->use_fast_reg && dev->has_fr) {
		fr_pool = srp_alloc_fr_pool(target);
		if (IS_ERR(fr_pool)) {
			ret = PTR_ERR(fr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
		ch->fr_pool = fr_pool;
	} else if (!dev->use_fast_reg && dev->has_fmr) {
		fmr_pool = srp_alloc_fmr_pool(target);
		if (IS_ERR(fmr_pool)) {
			ret = PTR_ERR(fmr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FMR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
		ch->fmr_pool = fmr_pool;
	}

	if (ch->qp)
		srp_destroy_qp(ch);
	if (ch->recv_cq)
		ib_destroy_cq(ch->recv_cq);
	if (ch->send_cq)
		ib_destroy_cq(ch->send_cq);

	ch->qp = qp;
	ch->recv_cq = recv_cq;
	ch->send_cq = send_cq;

	kfree(init_attr);
	return 0;

err_qp:
	ib_destroy_qp(qp);

err_send_cq:
	ib_destroy_cq(send_cq);

err_recv_cq:
	ib_destroy_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}

/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
			   struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	int i;

	if (!ch->target)
		return;

	if (ch->cm_id) {
		ib_destroy_cm_id(ch->cm_id);
		ch->cm_id = NULL;
	}

	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */
	if (!ch->qp)
		return;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
	} else {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
	}
	srp_destroy_qp(ch);
	ib_destroy_cq(ch->send_cq);
	ib_destroy_cq(ch->recv_cq);

	/*
	 * Avoid that the SCSI error handler tries to use this channel after
	 * it has been freed: the SCSI error handler may continue trying to
	 * perform recovery actions after scsi_remove_host() has returned.
	 */
	ch->target = NULL;

	ch->qp = NULL;
	ch->send_cq = ch->recv_cq = NULL;

	if (ch->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->rx_ring[i]);
		kfree(ch->rx_ring);
		ch->rx_ring = NULL;
	}
	if (ch->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->tx_ring[i]);
		kfree(ch->tx_ring);
		ch->tx_ring = NULL;
	}
}

static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *ch_ptr)
{
	struct srp_rdma_ch *ch = ch_ptr;
	struct srp_target_port *target = ch->target;

	ch->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		ch->path = *pathrec;
	complete(&ch->done);
}

static int srp_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	ch->path.numb_path = 1;

	init_completion(&ch->done);

	ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
					       target->srp_host->srp_dev->dev,
					       target->srp_host->port,
					       &ch->path,
					       IB_SA_PATH_REC_SERVICE_ID |
					       IB_SA_PATH_REC_DGID	 |
					       IB_SA_PATH_REC_SGID	 |
					       IB_SA_PATH_REC_NUMB_PATH	 |
					       IB_SA_PATH_REC_PKEY,
					       SRP_PATH_REC_TIMEOUT_MS,
					       GFP_KERNEL,
					       srp_path_rec_completion,
					       ch, &ch->path_query);
	if (ch->path_query_id < 0)
		return ch->path_query_id;

	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		return ret;

	if (ch->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return ch->status;
}

static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path	      = &ch->path;
	req->param.alternate_path     = NULL;
	req->param.service_id	      = target->service_id;
	req->param.qp_num	      = ch->qp->qp_num;
	req->param.qp_type	      = ch->qp->qp_type;
	req->param.private_data	      = &req->priv;
	req->param.private_data_len   = sizeof req->priv;
	req->param.flow_control	      = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn      &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count		      = target->tl_retry_count;
	req->param.rnr_retry_count	      = 7;
	req->param.max_cm_retries	      = 15;

	req->priv.opcode	= SRP_LOGIN_REQ;
	req->priv.tag		= 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	req->priv.req_flags	= (multich ? SRP_MULTICHAN_MULTI :
				   SRP_MULTICHAN_SINGLE);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID. Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
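	/*
	 * Port ID byte layout implied by the two branches below (added
	 * comment):
	 *
	 *   rev. 16a:    bytes 0-7 = ID extension, bytes 8-15 = GUID
	 *   old drafts:  bytes 0-7 = GUID,         bytes 8-15 = ID extension
	 */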
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     (unsigned long long) be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(ch->cm_id, &req->param);

	kfree(req);

	return status;
}

static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(srp_remove_wq, &target->remove_work);

	return changed;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	/* XXX should send SRP_I_LOGOUT request */

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ch->connected = false;
		if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "Sending CM DREQ failed\n");
		}
	}
}

static void srp_free_req_data(struct srp_target_port *target,
			      struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct srp_request *req;
	int i;

	if (!ch->target || !ch->req_ring)
		return;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		if (dev->use_fast_reg)
			kfree(req->fr_list);
		else
			kfree(req->fmr_list);
		kfree(req->map_page);
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}

	kfree(ch->req_ring);
	ch->req_ring = NULL;
}

static int srp_alloc_req_data(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *srp_dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	struct srp_request *req;
	void *mr_list;
	dma_addr_t dma_addr;
	int i, ret = -ENOMEM;

	ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
			       GFP_KERNEL);
	if (!ch->req_ring)
		goto out;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
				  GFP_KERNEL);
		if (!mr_list)
			goto out;
		if (srp_dev->use_fast_reg)
			req->fr_list = mr_list;
		else
			req->fmr_list = mr_list;
		req->map_page = kmalloc(srp_dev->max_pages_per_mr *
					sizeof(void *), GFP_KERNEL);
		if (!req->map_page)
			goto out;
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->indirect_desc)
			goto out;

		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr))
			goto out;

		req->indirect_dma_addr = dma_addr;
	}
	ret = 0;

out:
	return ret;
}

/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template and that did not exist
 * before invocation of this function will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	struct device_attribute **attr;

	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
		device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_rport_get(target->rport);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_stop_rport_timers(target->rport);
	srp_disconnect_target(target);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
	}
	cancel_work_sync(&target->tl_err_work);
	srp_rport_put(target->rport);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_req_data(target, ch);
	}
	kfree(target->ch);
	target->ch = NULL;

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_host_put(target->scsi_host);
}

static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_remove_target(target);
}

static void srp_rport_delete(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
}

/**
 * srp_connected_ch() - number of connected channels
 * @target: SRP target port.
 */
static int srp_connected_ch(struct srp_target_port *target)
{
	int i, c = 0;

	for (i = 0; i < target->ch_count; i++)
		c += target->ch[i].connected;

	return c;
}

static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	int ret;

	WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);

	ret = srp_lookup_path(ch);
	if (ret)
		return ret;

	while (1) {
		init_completion(&ch->done);
		ret = srp_send_req(ch, multich);
		if (ret)
			return ret;
		ret = wait_for_completion_interruptible(&ch->done);
		if (ret < 0)
			return ret;

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (ch->status) {
		case 0:
			ch->connected = true;
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(ch);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "giving up on stale connection\n");
			ch->status = -ECONNRESET;
			return ch->status;

		default:
			return ch->status;
		}
	}
}

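/*
 * Added comment: srp_inv_rkey() posts an IB_WR_LOCAL_INV work request that
 * invalidates @rkey, so that the HCA stops honoring accesses through that
 * key before the corresponding FR descriptor is returned to the pool.
 */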
static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
{
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr = {
		.opcode		    = IB_WR_LOCAL_INV,
		.wr_id		    = LOCAL_INV_WR_ID_MASK,
		.next		    = NULL,
		.num_sge	    = 0,
		.send_flags	    = 0,
		.ex.invalidate_rkey = rkey,
	};

	return ib_post_send(ch->qp, &wr, &bad_wr);
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_rdma_ch *ch,
			   struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	int i, res;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (dev->use_fast_reg) {
		struct srp_fr_desc **pfr;

		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
			res = srp_inv_rkey(ch, (*pfr)->mr->rkey);
			if (res < 0) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "Queueing INV WR for rkey %#x failed (%d)\n",
					     (*pfr)->mr->rkey, res);
				queue_work(system_long_wq,
					   &target->tl_err_work);
			}
		}
		if (req->nmdesc)
			srp_fr_pool_put(ch->fr_pool, req->fr_list,
					req->nmdesc);
	} else {
		struct ib_pool_fmr **pfmr;

		for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
			ib_fmr_pool_unmap(*pfmr);
	}

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}

/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @ch: SRP RDMA channel.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
				       struct srp_request *req,
				       struct scsi_device *sdev,
				       struct scsi_cmnd *scmnd)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	if (req->scmnd &&
	    (!sdev || req->scmnd->device == sdev) &&
	    (!scmnd || req->scmnd == scmnd)) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	} else {
		scmnd = NULL;
	}
	spin_unlock_irqrestore(&ch->lock, flags);

	return scmnd;
}

/**
 * srp_free_req() - Unmap data and add request to the free request list.
 * @ch: SRP RDMA channel.
 * @req: Request to be freed.
 * @scmnd: SCSI command associated with @req.
 * @req_lim_delta: Amount to be added to @target->req_lim.
 */
static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
			 struct scsi_cmnd *scmnd, s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(scmnd, ch, req);

	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_lim_delta;
	spin_unlock_irqrestore(&ch->lock, flags);
}

static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
			   struct scsi_device *sdev, int result)
{
	struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);

	if (scmnd) {
		srp_free_req(ch, req, scmnd, 0);
		scmnd->result = result;
		scmnd->scsi_done(scmnd);
	}
}

static void srp_terminate_io(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	struct Scsi_Host *shost = target->scsi_host;
	struct scsi_device *sdev;
	int i, j;

	/*
	 * Invoking srp_terminate_io() while srp_queuecommand() is running
	 * is not safe. Hence the warning statement below.
	 */
	shost_for_each_device(sdev, shost)
		WARN_ON_ONCE(sdev->request_queue->request_fn_active);

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];

		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL,
				       DID_TRANSPORT_FAILFAST << 16);
		}
	}
}

/*
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to realize that is not to call this function
 * directly but to call srp_reconnect_rport() instead since that last function
 * serializes calls of this function via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
 */
static int srp_rport_reconnect(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	int i, j, ret = 0;
	bool multich = false;

	srp_disconnect_target(target);

	if (target->state == SRP_TARGET_SCANNING)
		return -ENODEV;

	/*
	 * Now get a new local CM ID so that we avoid confusing the target in
	 * case things are really fouled up. Doing so also ensures that all CM
	 * callbacks will have finished before a new QP is allocated.
	 */
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		if (!ch->target)
			break;
		ret += srp_new_cm_id(ch);
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		if (!ch->target)
			break;
		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL, DID_RESET << 16);
		}
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		if (!ch->target)
			break;
		/*
		 * Whether or not creating a new CM ID succeeded, create a new
		 * QP. This guarantees that all completion callback function
		 * invocations have finished before request resetting starts.
		 */
		ret += srp_create_ch_ib(ch);

		INIT_LIST_HEAD(&ch->free_tx);
		for (j = 0; j < target->queue_size; ++j)
			list_add(&ch->tx_ring[j]->list, &ch->free_tx);
	}

	target->qp_in_error = false;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		if (ret || !ch->target) {
			if (i > 1)
				ret = 0;
			break;
		}
		ret = srp_connect_ch(ch, multich);
		multich = true;
	}

	if (ret == 0)
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "reconnect succeeded\n");

	return ret;
}

static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
			 unsigned int dma_len, u32 rkey)
{
	struct srp_direct_buf *desc = state->desc;

	desc->va = cpu_to_be64(dma_addr);
	desc->key = cpu_to_be32(rkey);
	desc->len = cpu_to_be32(dma_len);

	state->total_len += dma_len;
	state->desc++;
	state->ndesc++;
}

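/*
 * Added comment: each srp_direct_buf filled in by srp_map_desc() is an SRP
 * memory descriptor (virtual address, rkey and length, all big-endian) that
 * tells the target where it may issue RDMA reads or writes.
 */
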
static int srp_map_finish_fmr(struct srp_map_state *state,
			      struct srp_rdma_ch *ch)
{
	struct ib_pool_fmr *fmr;
	u64 io_addr = 0;

	fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
				   state->npages, io_addr);
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

	*state->next_fmr++ = fmr;
	state->nmdesc++;

	srp_map_desc(state, 0, state->dma_len, fmr->fmr->rkey);

	return 0;
}

static int srp_map_finish_fr(struct srp_map_state *state,
			     struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr;
	struct srp_fr_desc *desc;
	u32 rkey;

	desc = srp_fr_pool_get(ch->fr_pool);
	if (!desc)
		return -ENOMEM;

	rkey = ib_inc_rkey(desc->mr->rkey);
	ib_update_fast_reg_key(desc->mr, rkey);

	memcpy(desc->frpl->page_list, state->pages,
	       sizeof(state->pages[0]) * state->npages);

	memset(&wr, 0, sizeof(wr));
	wr.opcode = IB_WR_FAST_REG_MR;
	wr.wr_id = FAST_REG_WR_ID_MASK;
	wr.wr.fast_reg.iova_start = state->base_dma_addr;
	wr.wr.fast_reg.page_list = desc->frpl;
	wr.wr.fast_reg.page_list_len = state->npages;
	wr.wr.fast_reg.page_shift = ilog2(dev->mr_page_size);
	wr.wr.fast_reg.length = state->dma_len;
	wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_READ |
				       IB_ACCESS_REMOTE_WRITE);
	wr.wr.fast_reg.rkey = desc->mr->lkey;

	*state->next_fr++ = desc;
	state->nmdesc++;

	srp_map_desc(state, state->base_dma_addr, state->dma_len,
		     desc->mr->rkey);

	return ib_post_send(ch->qp, &wr, &bad_wr);
}

static int srp_finish_mapping(struct srp_map_state *state,
			      struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret = 0;

	if (state->npages == 0)
		return 0;

	if (state->npages == 1 && !register_always)
		srp_map_desc(state, state->base_dma_addr, state->dma_len,
			     target->rkey);
	else
		ret = target->srp_host->srp_dev->use_fast_reg ?
			srp_map_finish_fr(state, ch) :
			srp_map_finish_fmr(state, ch);

	if (ret == 0) {
		state->npages = 0;
		state->dma_len = 0;
	}

	return ret;
}

David Dillow8f26c9f2011-01-14 19:45:50 -05001362static void srp_map_update_start(struct srp_map_state *state,
1363 struct scatterlist *sg, int sg_index,
1364 dma_addr_t dma_addr)
1365{
1366 state->unmapped_sg = sg;
1367 state->unmapped_index = sg_index;
1368 state->unmapped_addr = dma_addr;
1369}
1370
1371static int srp_map_sg_entry(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001372 struct srp_rdma_ch *ch,
David Dillow8f26c9f2011-01-14 19:45:50 -05001373 struct scatterlist *sg, int sg_index,
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001374 bool use_mr)
David Dillow8f26c9f2011-01-14 19:45:50 -05001375{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001376 struct srp_target_port *target = ch->target;
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01001377 struct srp_device *dev = target->srp_host->srp_dev;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001378 struct ib_device *ibdev = dev->dev;
David Dillow8f26c9f2011-01-14 19:45:50 -05001379 dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
1380 unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
1381 unsigned int len;
1382 int ret;
Roland Dreierf5358a12006-06-17 20:37:29 -07001383
David Dillow8f26c9f2011-01-14 19:45:50 -05001384 if (!dma_len)
1385 return 0;
Roland Dreierf5358a12006-06-17 20:37:29 -07001386
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001387 if (!use_mr) {
1388 /*
1389 * Once we're in direct map mode for a request, we don't
1390 * go back to FMR or FR mode, so no need to update anything
David Dillow8f26c9f2011-01-14 19:45:50 -05001391 * other than the descriptor.
1392 */
1393 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1394 return 0;
1395 }
Ishai Rabinovitz559ce8f2006-08-03 10:35:43 -07001396
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001397 /*
1398 * Since not all RDMA HW drivers support non-zero page offsets for
1399 * FMR, if we start at an offset into a page, don't merge into the
1400 * current FMR mapping. Finish it out, and use the kernel's MR for
1401 * this sg entry.
David Dillow8f26c9f2011-01-14 19:45:50 -05001402 */
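	/*
	 * For example (illustrative values, assuming a 4 KiB mr_page_size):
	 * on the FMR path, an sg entry whose DMA address starts 0x200 bytes
	 * into a page fails the alignment check below, so the mapping built
	 * up so far is finished and this entry is described directly via
	 * target->rkey instead of being merged into a new registration.
	 */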
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001403 if ((!dev->use_fast_reg && dma_addr & ~dev->mr_page_mask) ||
1404 dma_len > dev->mr_max_size) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001405 ret = srp_finish_mapping(state, ch);
David Dillow8f26c9f2011-01-14 19:45:50 -05001406 if (ret)
1407 return ret;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001408
David Dillow8f26c9f2011-01-14 19:45:50 -05001409 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1410 srp_map_update_start(state, NULL, 0, 0);
1411 return 0;
1412 }
1413
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001414 /*
1415 * If this is the first sg that will be mapped via FMR or via FR, save
1416 * our position. We need to know the first unmapped entry, its index,
1417 * and the first unmapped address within that entry to be able to
1418 * restart mapping after an error.
David Dillow8f26c9f2011-01-14 19:45:50 -05001419 */
1420 if (!state->unmapped_sg)
1421 srp_map_update_start(state, sg, sg_index, dma_addr);
1422
1423 while (dma_len) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001424 unsigned offset = dma_addr & ~dev->mr_page_mask;
1425 if (state->npages == dev->max_pages_per_mr || offset != 0) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001426 ret = srp_finish_mapping(state, ch);
David Dillow8f26c9f2011-01-14 19:45:50 -05001427 if (ret)
1428 return ret;
1429
1430 srp_map_update_start(state, sg, sg_index, dma_addr);
Roland Dreierf5358a12006-06-17 20:37:29 -07001431 }
1432
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001433 len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
David Dillow8f26c9f2011-01-14 19:45:50 -05001434
1435 if (!state->npages)
1436 state->base_dma_addr = dma_addr;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001437 state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
Bart Van Assche52ede082014-05-20 15:07:45 +02001438 state->dma_len += len;
David Dillow8f26c9f2011-01-14 19:45:50 -05001439 dma_addr += len;
1440 dma_len -= len;
Roland Dreierf5358a12006-06-17 20:37:29 -07001441 }
1442
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001443 /*
1444 * If the last entry of the MR wasn't a full page, then we need to
David Dillow8f26c9f2011-01-14 19:45:50 -05001445 * close it out and start a new one -- we can only merge at page
1446	 * boundaries.
1447 */
Roland Dreierf5358a12006-06-17 20:37:29 -07001448 ret = 0;
Bart Van Assche52ede082014-05-20 15:07:45 +02001449 if (len != dev->mr_page_size) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001450 ret = srp_finish_mapping(state, ch);
David Dillow8f26c9f2011-01-14 19:45:50 -05001451 if (!ret)
1452 srp_map_update_start(state, NULL, 0, 0);
1453 }
Roland Dreierf5358a12006-06-17 20:37:29 -07001454 return ret;
1455}
1456
Bart Van Assche509c07b2014-10-30 14:48:30 +01001457static int srp_map_sg(struct srp_map_state *state, struct srp_rdma_ch *ch,
1458 struct srp_request *req, struct scatterlist *scat,
1459 int count)
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001460{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001461 struct srp_target_port *target = ch->target;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001462 struct srp_device *dev = target->srp_host->srp_dev;
1463 struct ib_device *ibdev = dev->dev;
1464 struct scatterlist *sg;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001465 int i;
1466 bool use_mr;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001467
1468 state->desc = req->indirect_desc;
1469 state->pages = req->map_page;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001470 if (dev->use_fast_reg) {
1471 state->next_fr = req->fr_list;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001472 use_mr = !!ch->fr_pool;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001473 } else {
1474 state->next_fmr = req->fmr_list;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001475 use_mr = !!ch->fmr_pool;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001476 }
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001477
1478 for_each_sg(scat, sg, count, i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001479 if (srp_map_sg_entry(state, ch, sg, i, use_mr)) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001480 /*
1481 * Memory registration failed, so backtrack to the
1482 * first unmapped entry and continue on without using
1483 * memory registration.
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001484 */
1485 dma_addr_t dma_addr;
1486 unsigned int dma_len;
1487
1488backtrack:
1489 sg = state->unmapped_sg;
1490 i = state->unmapped_index;
1491
1492 dma_addr = ib_sg_dma_address(ibdev, sg);
1493 dma_len = ib_sg_dma_len(ibdev, sg);
1494 dma_len -= (state->unmapped_addr - dma_addr);
1495 dma_addr = state->unmapped_addr;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001496 use_mr = false;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001497 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1498 }
1499 }
1500
Bart Van Assche509c07b2014-10-30 14:48:30 +01001501 if (use_mr && srp_finish_mapping(state, ch))
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001502 goto backtrack;
1503
Bart Van Assche52ede082014-05-20 15:07:45 +02001504 req->nmdesc = state->nmdesc;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001505
1506 return 0;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001507}
1508
Bart Van Assche509c07b2014-10-30 14:48:30 +01001509static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
Roland Dreieraef9ec32005-11-02 14:07:13 -08001510 struct srp_request *req)
1511{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001512 struct srp_target_port *target = ch->target;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001513 struct scatterlist *scat;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001514 struct srp_cmd *cmd = req->cmd->buf;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001515 int len, nents, count;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001516 struct srp_device *dev;
1517 struct ib_device *ibdev;
David Dillow8f26c9f2011-01-14 19:45:50 -05001518 struct srp_map_state state;
1519 struct srp_indirect_buf *indirect_hdr;
David Dillow8f26c9f2011-01-14 19:45:50 -05001520 u32 table_len;
1521 u8 fmt;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001522
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001523 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001524 return sizeof (struct srp_cmd);
1525
1526 if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1527 scmnd->sc_data_direction != DMA_TO_DEVICE) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001528 shost_printk(KERN_WARNING, target->scsi_host,
1529 PFX "Unhandled data direction %d\n",
1530 scmnd->sc_data_direction);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001531 return -EINVAL;
1532 }
1533
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001534 nents = scsi_sg_count(scmnd);
1535 scat = scsi_sglist(scmnd);
Roland Dreiercf368712006-03-24 15:47:26 -08001536
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01001537 dev = target->srp_host->srp_dev;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001538 ibdev = dev->dev;
1539
1540 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
David Dillow8f26c9f2011-01-14 19:45:50 -05001541 if (unlikely(count == 0))
1542 return -EIO;
Roland Dreierf5358a12006-06-17 20:37:29 -07001543
1544 fmt = SRP_DATA_DESC_DIRECT;
1545 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
Roland Dreiercf368712006-03-24 15:47:26 -08001546
Bart Van Asscheb1b88542014-05-20 15:06:41 +02001547 if (count == 1 && !register_always) {
Roland Dreierf5358a12006-06-17 20:37:29 -07001548 /*
1549 * The midlayer only generated a single gather/scatter
1550 * entry, or DMA mapping coalesced everything to a
1551 * single entry. So a direct descriptor along with
1552 * the DMA MR suffices.
1553 */
Roland Dreieraef9ec32005-11-02 14:07:13 -08001554 struct srp_direct_buf *buf = (void *) cmd->add_data;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001555
Ralph Campbell85507bc2006-12-12 14:30:55 -08001556 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
David Dillow9af76272010-11-26 15:34:46 -05001557 buf->key = cpu_to_be32(target->rkey);
Ralph Campbell85507bc2006-12-12 14:30:55 -08001558 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
Roland Dreiercf368712006-03-24 15:47:26 -08001559
Bart Van Assche52ede082014-05-20 15:07:45 +02001560 req->nmdesc = 0;
David Dillow8f26c9f2011-01-14 19:45:50 -05001561 goto map_complete;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001562 }
1563
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001564 /*
1565 * We have more than one scatter/gather entry, so build our indirect
1566 * descriptor table, trying to merge as many entries as we can.
David Dillow8f26c9f2011-01-14 19:45:50 -05001567 */
1568 indirect_hdr = (void *) cmd->add_data;
1569
David Dillowc07d4242011-01-16 13:57:10 -05001570 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1571 target->indirect_size, DMA_TO_DEVICE);
1572
David Dillow8f26c9f2011-01-14 19:45:50 -05001573 memset(&state, 0, sizeof(state));
Bart Van Assche509c07b2014-10-30 14:48:30 +01001574 srp_map_sg(&state, ch, req, scat, count);
David Dillow8f26c9f2011-01-14 19:45:50 -05001575
David Dillowc07d4242011-01-16 13:57:10 -05001576	/* We've mapped the request; now pull as much of the indirect
1577 * descriptor table as we can into the command buffer. If this
1578 * target is not using an external indirect table, we are
1579 * guaranteed to fit into the command, as the SCSI layer won't
1580 * give us more S/G entries than we allow.
David Dillow8f26c9f2011-01-14 19:45:50 -05001581 */
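	/*
	 * If state.ndesc exceeds target->cmd_sg_cnt and external indirect
	 * tables are allowed, only the first cmd_sg_cnt descriptors are
	 * copied inline below; table_desc still describes the complete
	 * req->indirect_desc buffer, so the target can fetch the remaining
	 * entries with an RDMA read.
	 */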
David Dillow8f26c9f2011-01-14 19:45:50 -05001582 if (state.ndesc == 1) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001583 /*
1584 * Memory registration collapsed the sg-list into one entry,
David Dillow8f26c9f2011-01-14 19:45:50 -05001585 * so use a direct descriptor.
1586 */
1587 struct srp_direct_buf *buf = (void *) cmd->add_data;
1588
David Dillowc07d4242011-01-16 13:57:10 -05001589 *buf = req->indirect_desc[0];
David Dillow8f26c9f2011-01-14 19:45:50 -05001590 goto map_complete;
1591 }
1592
David Dillowc07d4242011-01-16 13:57:10 -05001593 if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1594 !target->allow_ext_sg)) {
1595 shost_printk(KERN_ERR, target->scsi_host,
1596 "Could not fit S/G list into SRP_CMD\n");
1597 return -EIO;
1598 }
1599
1600 count = min(state.ndesc, target->cmd_sg_cnt);
David Dillow8f26c9f2011-01-14 19:45:50 -05001601 table_len = state.ndesc * sizeof (struct srp_direct_buf);
1602
1603 fmt = SRP_DATA_DESC_INDIRECT;
1604 len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
David Dillowc07d4242011-01-16 13:57:10 -05001605 len += count * sizeof (struct srp_direct_buf);
David Dillow8f26c9f2011-01-14 19:45:50 -05001606
David Dillowc07d4242011-01-16 13:57:10 -05001607 memcpy(indirect_hdr->desc_list, req->indirect_desc,
1608 count * sizeof (struct srp_direct_buf));
David Dillow8f26c9f2011-01-14 19:45:50 -05001609
David Dillowc07d4242011-01-16 13:57:10 -05001610 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
David Dillow8f26c9f2011-01-14 19:45:50 -05001611 indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
1612 indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1613 indirect_hdr->len = cpu_to_be32(state.total_len);
1614
1615 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
David Dillowc07d4242011-01-16 13:57:10 -05001616 cmd->data_out_desc_cnt = count;
David Dillow8f26c9f2011-01-14 19:45:50 -05001617 else
David Dillowc07d4242011-01-16 13:57:10 -05001618 cmd->data_in_desc_cnt = count;
1619
1620 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1621 DMA_TO_DEVICE);
David Dillow8f26c9f2011-01-14 19:45:50 -05001622
1623map_complete:
Roland Dreieraef9ec32005-11-02 14:07:13 -08001624 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1625 cmd->buf_fmt = fmt << 4;
1626 else
1627 cmd->buf_fmt = fmt;
1628
Roland Dreieraef9ec32005-11-02 14:07:13 -08001629 return len;
1630}
1631
David Dillow05a1d752010-10-08 14:48:14 -04001632/*
Bart Van Assche76c75b22010-11-26 14:37:47 -05001633 * Return an IU and possibly a credit to the free pool
1634 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01001635static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
Bart Van Assche76c75b22010-11-26 14:37:47 -05001636 enum srp_iu_type iu_type)
1637{
1638 unsigned long flags;
1639
Bart Van Assche509c07b2014-10-30 14:48:30 +01001640 spin_lock_irqsave(&ch->lock, flags);
1641 list_add(&iu->list, &ch->free_tx);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001642 if (iu_type != SRP_IU_RSP)
Bart Van Assche509c07b2014-10-30 14:48:30 +01001643 ++ch->req_lim;
1644 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001645}
1646
1647/*
Bart Van Assche509c07b2014-10-30 14:48:30 +01001648 * Must be called with ch->lock held to protect req_lim and free_tx.
Bart Van Asschee9684672010-11-26 15:08:38 -05001649 * If IU is not sent, it must be returned using srp_put_tx_iu().
David Dillow05a1d752010-10-08 14:48:14 -04001650 *
1651 * Note:
1652 * An upper limit for the number of allocated information units for each
1653 * request type is:
1654 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1655 * more than Scsi_Host.can_queue requests.
1656 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1657 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1658 * one unanswered SRP request to an initiator.
1659 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01001660static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
David Dillow05a1d752010-10-08 14:48:14 -04001661 enum srp_iu_type iu_type)
1662{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001663 struct srp_target_port *target = ch->target;
David Dillow05a1d752010-10-08 14:48:14 -04001664 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1665 struct srp_iu *iu;
1666
Bart Van Assche509c07b2014-10-30 14:48:30 +01001667 srp_send_completion(ch->send_cq, ch);
David Dillow05a1d752010-10-08 14:48:14 -04001668
Bart Van Assche509c07b2014-10-30 14:48:30 +01001669 if (list_empty(&ch->free_tx))
David Dillow05a1d752010-10-08 14:48:14 -04001670 return NULL;
1671
1672 /* Initiator responses to target requests do not consume credits */
Bart Van Assche76c75b22010-11-26 14:37:47 -05001673 if (iu_type != SRP_IU_RSP) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001674 if (ch->req_lim <= rsv) {
Bart Van Assche76c75b22010-11-26 14:37:47 -05001675 ++target->zero_req_lim;
1676 return NULL;
1677 }
1678
Bart Van Assche509c07b2014-10-30 14:48:30 +01001679 --ch->req_lim;
David Dillow05a1d752010-10-08 14:48:14 -04001680 }
1681
Bart Van Assche509c07b2014-10-30 14:48:30 +01001682 iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001683 list_del(&iu->list);
David Dillow05a1d752010-10-08 14:48:14 -04001684 return iu;
1685}
1686
Bart Van Assche509c07b2014-10-30 14:48:30 +01001687static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
David Dillow05a1d752010-10-08 14:48:14 -04001688{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001689 struct srp_target_port *target = ch->target;
David Dillow05a1d752010-10-08 14:48:14 -04001690 struct ib_sge list;
1691 struct ib_send_wr wr, *bad_wr;
David Dillow05a1d752010-10-08 14:48:14 -04001692
1693 list.addr = iu->dma;
1694 list.length = len;
David Dillow9af76272010-11-26 15:34:46 -05001695 list.lkey = target->lkey;
David Dillow05a1d752010-10-08 14:48:14 -04001696
1697 wr.next = NULL;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001698 wr.wr_id = (uintptr_t) iu;
David Dillow05a1d752010-10-08 14:48:14 -04001699 wr.sg_list = &list;
1700 wr.num_sge = 1;
1701 wr.opcode = IB_WR_SEND;
1702 wr.send_flags = IB_SEND_SIGNALED;
1703
Bart Van Assche509c07b2014-10-30 14:48:30 +01001704 return ib_post_send(ch->qp, &wr, &bad_wr);
David Dillow05a1d752010-10-08 14:48:14 -04001705}
1706
Bart Van Assche509c07b2014-10-30 14:48:30 +01001707static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
Bart Van Asschec996bb42010-07-30 10:59:05 +00001708{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001709 struct srp_target_port *target = ch->target;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001710 struct ib_recv_wr wr, *bad_wr;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001711 struct ib_sge list;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001712
1713 list.addr = iu->dma;
1714 list.length = iu->size;
David Dillow9af76272010-11-26 15:34:46 -05001715 list.lkey = target->lkey;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001716
1717 wr.next = NULL;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001718 wr.wr_id = (uintptr_t) iu;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001719 wr.sg_list = &list;
1720 wr.num_sge = 1;
1721
Bart Van Assche509c07b2014-10-30 14:48:30 +01001722 return ib_post_recv(ch->qp, &wr, &bad_wr);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001723}
1724
Bart Van Assche509c07b2014-10-30 14:48:30 +01001725static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001726{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001727 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001728 struct srp_request *req;
1729 struct scsi_cmnd *scmnd;
1730 unsigned long flags;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001731
Roland Dreieraef9ec32005-11-02 14:07:13 -08001732 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001733 spin_lock_irqsave(&ch->lock, flags);
1734 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1735 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche94a91742010-11-26 14:50:09 -05001736
Bart Van Assche509c07b2014-10-30 14:48:30 +01001737 ch->tsk_mgmt_status = -1;
David Dillowf8b6e312010-11-26 13:02:21 -05001738 if (be32_to_cpu(rsp->resp_data_len) >= 4)
Bart Van Assche509c07b2014-10-30 14:48:30 +01001739 ch->tsk_mgmt_status = rsp->data[3];
1740 complete(&ch->tsk_mgmt_done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001741 } else {
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02001742 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1743 if (scmnd) {
1744 req = (void *)scmnd->host_scribble;
1745 scmnd = srp_claim_req(ch, req, NULL, scmnd);
1746 }
Bart Van Assche22032992012-08-14 13:18:53 +00001747 if (!scmnd) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001748 shost_printk(KERN_ERR, target->scsi_host,
Bart Van Assched92c0da2014-10-06 17:14:36 +02001749 "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1750 rsp->tag, ch - target->ch, ch->qp->qp_num);
Bart Van Assche22032992012-08-14 13:18:53 +00001751
Bart Van Assche509c07b2014-10-30 14:48:30 +01001752 spin_lock_irqsave(&ch->lock, flags);
1753 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1754 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche22032992012-08-14 13:18:53 +00001755
1756 return;
1757 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001758 scmnd->result = rsp->status;
1759
1760 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1761 memcpy(scmnd->sense_buffer, rsp->data +
1762 be32_to_cpu(rsp->resp_data_len),
1763 min_t(int, be32_to_cpu(rsp->sense_data_len),
1764 SCSI_SENSE_BUFFERSIZE));
1765 }
1766
Bart Van Asschee7145312014-07-09 15:57:51 +02001767 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001768 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
Bart Van Asschee7145312014-07-09 15:57:51 +02001769 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1770 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1771 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1772 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1773 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1774 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
Roland Dreieraef9ec32005-11-02 14:07:13 -08001775
Bart Van Assche509c07b2014-10-30 14:48:30 +01001776 srp_free_req(ch, req, scmnd,
Bart Van Assche22032992012-08-14 13:18:53 +00001777 be32_to_cpu(rsp->req_lim_delta));
1778
David Dillowf8b6e312010-11-26 13:02:21 -05001779 scmnd->host_scribble = NULL;
1780 scmnd->scsi_done(scmnd);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001781 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001782}
1783
Bart Van Assche509c07b2014-10-30 14:48:30 +01001784static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
David Dillowbb125882010-10-08 14:40:47 -04001785 void *rsp, int len)
1786{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001787 struct srp_target_port *target = ch->target;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001788 struct ib_device *dev = target->srp_host->srp_dev->dev;
David Dillowbb125882010-10-08 14:40:47 -04001789 unsigned long flags;
1790 struct srp_iu *iu;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001791 int err;
David Dillowbb125882010-10-08 14:40:47 -04001792
Bart Van Assche509c07b2014-10-30 14:48:30 +01001793 spin_lock_irqsave(&ch->lock, flags);
1794 ch->req_lim += req_delta;
1795 iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1796 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001797
David Dillowbb125882010-10-08 14:40:47 -04001798 if (!iu) {
1799 shost_printk(KERN_ERR, target->scsi_host, PFX
1800 "no IU available to send response\n");
Bart Van Assche76c75b22010-11-26 14:37:47 -05001801 return 1;
David Dillowbb125882010-10-08 14:40:47 -04001802 }
1803
1804 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1805 memcpy(iu->buf, rsp, len);
1806 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1807
Bart Van Assche509c07b2014-10-30 14:48:30 +01001808 err = srp_post_send(ch, iu, len);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001809 if (err) {
David Dillowbb125882010-10-08 14:40:47 -04001810 shost_printk(KERN_ERR, target->scsi_host, PFX
1811 "unable to post response: %d\n", err);
Bart Van Assche509c07b2014-10-30 14:48:30 +01001812 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001813 }
David Dillowbb125882010-10-08 14:40:47 -04001814
David Dillowbb125882010-10-08 14:40:47 -04001815 return err;
1816}
1817
Bart Van Assche509c07b2014-10-30 14:48:30 +01001818static void srp_process_cred_req(struct srp_rdma_ch *ch,
David Dillowbb125882010-10-08 14:40:47 -04001819 struct srp_cred_req *req)
1820{
1821 struct srp_cred_rsp rsp = {
1822 .opcode = SRP_CRED_RSP,
1823 .tag = req->tag,
1824 };
1825 s32 delta = be32_to_cpu(req->req_lim_delta);
1826
Bart Van Assche509c07b2014-10-30 14:48:30 +01001827 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1828 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
David Dillowbb125882010-10-08 14:40:47 -04001829 "problems processing SRP_CRED_REQ\n");
1830}
1831
Bart Van Assche509c07b2014-10-30 14:48:30 +01001832static void srp_process_aer_req(struct srp_rdma_ch *ch,
David Dillowbb125882010-10-08 14:40:47 -04001833 struct srp_aer_req *req)
1834{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001835 struct srp_target_port *target = ch->target;
David Dillowbb125882010-10-08 14:40:47 -04001836 struct srp_aer_rsp rsp = {
1837 .opcode = SRP_AER_RSP,
1838 .tag = req->tag,
1839 };
1840 s32 delta = be32_to_cpu(req->req_lim_delta);
1841
1842 shost_printk(KERN_ERR, target->scsi_host, PFX
1843 "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));
1844
Bart Van Assche509c07b2014-10-30 14:48:30 +01001845 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
David Dillowbb125882010-10-08 14:40:47 -04001846 shost_printk(KERN_ERR, target->scsi_host, PFX
1847 "problems processing SRP_AER_REQ\n");
1848}
1849
Bart Van Assche509c07b2014-10-30 14:48:30 +01001850static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001851{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001852 struct srp_target_port *target = ch->target;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001853 struct ib_device *dev = target->srp_host->srp_dev->dev;
Roland Dreier737b94e2011-05-23 11:30:04 -07001854 struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001855 int res;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001856 u8 opcode;
1857
Bart Van Assche509c07b2014-10-30 14:48:30 +01001858 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08001859 DMA_FROM_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001860
1861 opcode = *(u8 *) iu->buf;
1862
1863 if (0) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001864 shost_printk(KERN_ERR, target->scsi_host,
1865 PFX "recv completion, opcode 0x%02x\n", opcode);
Bart Van Assche7a700812010-07-29 15:56:37 +00001866 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
1867 iu->buf, wc->byte_len, true);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001868 }
1869
1870 switch (opcode) {
1871 case SRP_RSP:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001872 srp_process_rsp(ch, iu->buf);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001873 break;
1874
David Dillowbb125882010-10-08 14:40:47 -04001875 case SRP_CRED_REQ:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001876 srp_process_cred_req(ch, iu->buf);
David Dillowbb125882010-10-08 14:40:47 -04001877 break;
1878
1879 case SRP_AER_REQ:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001880 srp_process_aer_req(ch, iu->buf);
David Dillowbb125882010-10-08 14:40:47 -04001881 break;
1882
Roland Dreieraef9ec32005-11-02 14:07:13 -08001883 case SRP_T_LOGOUT:
1884 /* XXX Handle target logout */
David Dillow7aa54bd2008-01-07 18:23:41 -05001885 shost_printk(KERN_WARNING, target->scsi_host,
1886 PFX "Got target logout request\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001887 break;
1888
1889 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05001890 shost_printk(KERN_WARNING, target->scsi_host,
1891 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001892 break;
1893 }
1894
Bart Van Assche509c07b2014-10-30 14:48:30 +01001895 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08001896 DMA_FROM_DEVICE);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001897
Bart Van Assche509c07b2014-10-30 14:48:30 +01001898 res = srp_post_recv(ch, iu);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001899 if (res != 0)
1900 shost_printk(KERN_ERR, target->scsi_host,
1901 PFX "Recv failed with error code %d\n", res);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001902}
1903
Bart Van Asschec1120f82013-10-26 14:35:08 +02001904/**
1905 * srp_tl_err_work() - handle a transport layer error
Bart Van Asscheaf246632014-05-20 15:04:21 +02001906 * @work: Work structure embedded in an SRP target port.
Bart Van Asschec1120f82013-10-26 14:35:08 +02001907 *
1908 * Note: This function may get invoked before the rport has been created,
1909 * hence the target->rport test.
1910 */
1911static void srp_tl_err_work(struct work_struct *work)
1912{
1913 struct srp_target_port *target;
1914
1915 target = container_of(work, struct srp_target_port, tl_err_work);
1916 if (target->rport)
1917 srp_start_tl_fail_timers(target->rport);
1918}
1919
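/*
 * srp_handle_qp_err() - handle an error completion on the send or receive CQ
 *
 * A work request ID equal to SRP_LAST_WR_ID marks the final work request
 * posted while a queue pair is drained; its completion only signals ch->done.
 * For any other work request the error is logged and, on the first error for
 * a still-connected channel, srp_tl_err_work() is scheduled to start the
 * transport layer failure timers.
 */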
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001920static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
Bart Van Assche7dad6b22014-10-21 18:00:35 +02001921 bool send_err, struct srp_rdma_ch *ch)
Bart Van Assche948d1e82011-09-03 09:25:42 +02001922{
Bart Van Assche7dad6b22014-10-21 18:00:35 +02001923 struct srp_target_port *target = ch->target;
1924
1925 if (wr_id == SRP_LAST_WR_ID) {
1926 complete(&ch->done);
1927 return;
1928 }
1929
Bart Van Asschec014c8c2015-05-18 13:23:57 +02001930 if (ch->connected && !target->qp_in_error) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001931 if (wr_id & LOCAL_INV_WR_ID_MASK) {
1932 shost_printk(KERN_ERR, target->scsi_host, PFX
1933 "LOCAL_INV failed with status %d\n",
1934 wc_status);
1935 } else if (wr_id & FAST_REG_WR_ID_MASK) {
1936 shost_printk(KERN_ERR, target->scsi_host, PFX
1937 "FAST_REG_MR failed status %d\n",
1938 wc_status);
1939 } else {
1940 shost_printk(KERN_ERR, target->scsi_host,
1941 PFX "failed %s status %d for iu %p\n",
1942 send_err ? "send" : "receive",
1943 wc_status, (void *)(uintptr_t)wr_id);
1944 }
Bart Van Asschec1120f82013-10-26 14:35:08 +02001945 queue_work(system_long_wq, &target->tl_err_work);
Bart Van Assche4f0af692012-11-26 11:16:40 +01001946 }
Bart Van Assche948d1e82011-09-03 09:25:42 +02001947 target->qp_in_error = true;
1948}
1949
Bart Van Assche509c07b2014-10-30 14:48:30 +01001950static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001951{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001952 struct srp_rdma_ch *ch = ch_ptr;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001953 struct ib_wc wc;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001954
1955 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
1956 while (ib_poll_cq(cq, 1, &wc) > 0) {
Bart Van Assche948d1e82011-09-03 09:25:42 +02001957 if (likely(wc.status == IB_WC_SUCCESS)) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001958 srp_handle_recv(ch, &wc);
Bart Van Assche948d1e82011-09-03 09:25:42 +02001959 } else {
Bart Van Assche7dad6b22014-10-21 18:00:35 +02001960 srp_handle_qp_err(wc.wr_id, wc.status, false, ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001961 }
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001962 }
1963}
1964
Bart Van Assche509c07b2014-10-30 14:48:30 +01001965static void srp_send_completion(struct ib_cq *cq, void *ch_ptr)
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001966{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001967 struct srp_rdma_ch *ch = ch_ptr;
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001968 struct ib_wc wc;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001969 struct srp_iu *iu;
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001970
1971 while (ib_poll_cq(cq, 1, &wc) > 0) {
Bart Van Assche948d1e82011-09-03 09:25:42 +02001972 if (likely(wc.status == IB_WC_SUCCESS)) {
1973 iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001974 list_add(&iu->list, &ch->free_tx);
Bart Van Assche948d1e82011-09-03 09:25:42 +02001975 } else {
Bart Van Assche7dad6b22014-10-21 18:00:35 +02001976 srp_handle_qp_err(wc.wr_id, wc.status, true, ch);
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001977 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001978 }
1979}
1980
Bart Van Assche76c75b22010-11-26 14:37:47 -05001981static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001982{
Bart Van Assche76c75b22010-11-26 14:37:47 -05001983 struct srp_target_port *target = host_to_target(shost);
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001984 struct srp_rport *rport = target->rport;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001985 struct srp_rdma_ch *ch;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001986 struct srp_request *req;
1987 struct srp_iu *iu;
1988 struct srp_cmd *cmd;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001989 struct ib_device *dev;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001990 unsigned long flags;
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02001991 u32 tag;
1992 u16 idx;
Bart Van Assched1b42892014-05-20 15:07:20 +02001993 int len, ret;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001994 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
1995
1996 /*
1997 * The SCSI EH thread is the only context from which srp_queuecommand()
1998 * can get invoked for blocked devices (SDEV_BLOCK /
1999 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
2000 * locking the rport mutex if invoked from inside the SCSI EH.
2001 */
2002 if (in_scsi_eh)
2003 mutex_lock(&rport->mutex);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002004
Bart Van Assched1b42892014-05-20 15:07:20 +02002005 scmnd->result = srp_chkready(target->rport);
2006 if (unlikely(scmnd->result))
2007 goto err;
Bart Van Assche2ce19e72013-02-21 17:20:00 +00002008
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002009 WARN_ON_ONCE(scmnd->request->tag < 0);
2010 tag = blk_mq_unique_tag(scmnd->request);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002011 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002012 idx = blk_mq_unique_tag_to_tag(tag);
2013 WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
2014 dev_name(&shost->shost_gendev), tag, idx,
2015 target->req_ring_size);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002016
2017 spin_lock_irqsave(&ch->lock, flags);
2018 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002019 spin_unlock_irqrestore(&ch->lock, flags);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002020
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002021 if (!iu)
2022 goto err;
2023
2024 req = &ch->req_ring[idx];
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002025 dev = target->srp_host->srp_dev->dev;
David Dillow49248642011-01-14 18:23:24 -05002026 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08002027 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002028
David Dillowf8b6e312010-11-26 13:02:21 -05002029 scmnd->host_scribble = (void *) req;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002030
2031 cmd = iu->buf;
2032 memset(cmd, 0, sizeof *cmd);
2033
2034 cmd->opcode = SRP_CMD;
2035 cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002036 cmd->tag = tag;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002037 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2038
Roland Dreieraef9ec32005-11-02 14:07:13 -08002039 req->scmnd = scmnd;
2040 req->cmd = iu;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002041
Bart Van Assche509c07b2014-10-30 14:48:30 +01002042 len = srp_map_data(scmnd, ch, req);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002043 if (len < 0) {
David Dillow7aa54bd2008-01-07 18:23:41 -05002044 shost_printk(KERN_ERR, target->scsi_host,
Bart Van Assched1b42892014-05-20 15:07:20 +02002045 PFX "Failed to map data (%d)\n", len);
2046 /*
2047 * If we ran out of memory descriptors (-ENOMEM) because an
2048 * application is queuing many requests with more than
Bart Van Assche52ede082014-05-20 15:07:45 +02002049 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
Bart Van Assched1b42892014-05-20 15:07:20 +02002050 * to reduce queue depth temporarily.
2051 */
2052 scmnd->result = len == -ENOMEM ?
2053 DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
Bart Van Assche76c75b22010-11-26 14:37:47 -05002054 goto err_iu;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002055 }
2056
David Dillow49248642011-01-14 18:23:24 -05002057 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08002058 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002059
Bart Van Assche509c07b2014-10-30 14:48:30 +01002060 if (srp_post_send(ch, iu, len)) {
David Dillow7aa54bd2008-01-07 18:23:41 -05002061 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002062 goto err_unmap;
2063 }
2064
Bart Van Assched1b42892014-05-20 15:07:20 +02002065 ret = 0;
2066
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002067unlock_rport:
2068 if (in_scsi_eh)
2069 mutex_unlock(&rport->mutex);
2070
Bart Van Assched1b42892014-05-20 15:07:20 +02002071 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002072
2073err_unmap:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002074 srp_unmap_data(scmnd, ch, req);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002075
Bart Van Assche76c75b22010-11-26 14:37:47 -05002076err_iu:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002077 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
Bart Van Assche76c75b22010-11-26 14:37:47 -05002078
Bart Van Assche024ca902014-05-20 15:03:49 +02002079 /*
2080	 * Make sure that the loops that iterate over the request ring cannot
2081	 * encounter a dangling SCSI command pointer.
2082 */
2083 req->scmnd = NULL;
2084
Bart Van Assched1b42892014-05-20 15:07:20 +02002085err:
2086 if (scmnd->result) {
2087 scmnd->scsi_done(scmnd);
2088 ret = 0;
2089 } else {
2090 ret = SCSI_MLQUEUE_HOST_BUSY;
2091 }
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002092
Bart Van Assched1b42892014-05-20 15:07:20 +02002093 goto unlock_rport;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002094}
2095
Bart Van Assche4d73f952013-10-26 14:40:37 +02002096/*
2097 * Note: the resources allocated in this function are freed in
Bart Van Assche509c07b2014-10-30 14:48:30 +01002098 * srp_free_ch_ib().
Bart Van Assche4d73f952013-10-26 14:40:37 +02002099 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01002100static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002101{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002102 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002103 int i;
2104
Bart Van Assche509c07b2014-10-30 14:48:30 +01002105 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2106 GFP_KERNEL);
2107 if (!ch->rx_ring)
Bart Van Assche4d73f952013-10-26 14:40:37 +02002108 goto err_no_ring;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002109 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2110 GFP_KERNEL);
2111 if (!ch->tx_ring)
Bart Van Assche4d73f952013-10-26 14:40:37 +02002112 goto err_no_ring;
2113
2114 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002115 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2116 ch->max_ti_iu_len,
2117 GFP_KERNEL, DMA_FROM_DEVICE);
2118 if (!ch->rx_ring[i])
Roland Dreieraef9ec32005-11-02 14:07:13 -08002119 goto err;
2120 }
2121
Bart Van Assche4d73f952013-10-26 14:40:37 +02002122 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002123 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2124 target->max_iu_len,
2125 GFP_KERNEL, DMA_TO_DEVICE);
2126 if (!ch->tx_ring[i])
Roland Dreieraef9ec32005-11-02 14:07:13 -08002127 goto err;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05002128
Bart Van Assche509c07b2014-10-30 14:48:30 +01002129 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002130 }
2131
2132 return 0;
2133
2134err:
Bart Van Assche4d73f952013-10-26 14:40:37 +02002135 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002136 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2137 srp_free_iu(target->srp_host, ch->tx_ring[i]);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002138 }
2139
Bart Van Assche4d73f952013-10-26 14:40:37 +02002140
2141err_no_ring:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002142 kfree(ch->tx_ring);
2143 ch->tx_ring = NULL;
2144 kfree(ch->rx_ring);
2145 ch->rx_ring = NULL;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002146
2147 return -ENOMEM;
2148}
2149
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002150static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2151{
2152 uint64_t T_tr_ns, max_compl_time_ms;
2153 uint32_t rq_tmo_jiffies;
2154
2155 /*
2156 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2157	 * for RC QPs during the RTR to RTS transition.
2158 * for RC QP's during the RTR to RTS transition.
2159 */
2160 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2161 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2162
2163 /*
2164 * Set target->rq_tmo_jiffies to one second more than the largest time
2165 * it can take before an error completion is generated. See also
2166 * C9-140..142 in the IBTA spec for more information about how to
2167 * convert the QP Local ACK Timeout value to nanoseconds.
2168 */
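	/*
	 * Worked example (illustrative values): with qp_attr->timeout == 14
	 * and qp_attr->retry_cnt == 7, T_tr_ns = 4096 * 2^14 ns ~= 67 ms,
	 * max_compl_time_ms ~= 7 * 4 * 67 ms ~= 1879 ms, and rq_tmo_jiffies
	 * therefore corresponds to roughly 2.9 seconds.
	 */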
2169 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2170 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2171 do_div(max_compl_time_ms, NSEC_PER_MSEC);
2172 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2173
2174 return rq_tmo_jiffies;
2175}
2176
David Dillow961e0be2011-01-14 17:32:07 -05002177static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2178 struct srp_login_rsp *lrsp,
Bart Van Assche509c07b2014-10-30 14:48:30 +01002179 struct srp_rdma_ch *ch)
David Dillow961e0be2011-01-14 17:32:07 -05002180{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002181 struct srp_target_port *target = ch->target;
David Dillow961e0be2011-01-14 17:32:07 -05002182 struct ib_qp_attr *qp_attr = NULL;
2183 int attr_mask = 0;
2184 int ret;
2185 int i;
2186
2187 if (lrsp->opcode == SRP_LOGIN_RSP) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002188 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2189 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
David Dillow961e0be2011-01-14 17:32:07 -05002190
2191 /*
2192 * Reserve credits for task management so we don't
2193 * bounce requests back to the SCSI mid-layer.
2194 */
2195 target->scsi_host->can_queue
Bart Van Assche509c07b2014-10-30 14:48:30 +01002196 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
David Dillow961e0be2011-01-14 17:32:07 -05002197 target->scsi_host->can_queue);
Bart Van Assche4d73f952013-10-26 14:40:37 +02002198 target->scsi_host->cmd_per_lun
2199 = min_t(int, target->scsi_host->can_queue,
2200 target->scsi_host->cmd_per_lun);
David Dillow961e0be2011-01-14 17:32:07 -05002201 } else {
2202 shost_printk(KERN_WARNING, target->scsi_host,
2203 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2204 ret = -ECONNRESET;
2205 goto error;
2206 }
2207
Bart Van Assche509c07b2014-10-30 14:48:30 +01002208 if (!ch->rx_ring) {
2209 ret = srp_alloc_iu_bufs(ch);
David Dillow961e0be2011-01-14 17:32:07 -05002210 if (ret)
2211 goto error;
2212 }
2213
2214 ret = -ENOMEM;
2215 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
2216 if (!qp_attr)
2217 goto error;
2218
2219 qp_attr->qp_state = IB_QPS_RTR;
2220 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2221 if (ret)
2222 goto error_free;
2223
Bart Van Assche509c07b2014-10-30 14:48:30 +01002224 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
David Dillow961e0be2011-01-14 17:32:07 -05002225 if (ret)
2226 goto error_free;
2227
Bart Van Assche4d73f952013-10-26 14:40:37 +02002228 for (i = 0; i < target->queue_size; i++) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002229 struct srp_iu *iu = ch->rx_ring[i];
2230
2231 ret = srp_post_recv(ch, iu);
David Dillow961e0be2011-01-14 17:32:07 -05002232 if (ret)
2233 goto error_free;
2234 }
2235
2236 qp_attr->qp_state = IB_QPS_RTS;
2237 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2238 if (ret)
2239 goto error_free;
2240
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002241 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2242
Bart Van Assche509c07b2014-10-30 14:48:30 +01002243 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
David Dillow961e0be2011-01-14 17:32:07 -05002244 if (ret)
2245 goto error_free;
2246
2247 ret = ib_send_cm_rtu(cm_id, NULL, 0);
2248
2249error_free:
2250 kfree(qp_attr);
2251
2252error:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002253 ch->status = ret;
David Dillow961e0be2011-01-14 17:32:07 -05002254}
2255
Roland Dreieraef9ec32005-11-02 14:07:13 -08002256static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2257 struct ib_cm_event *event,
Bart Van Assche509c07b2014-10-30 14:48:30 +01002258 struct srp_rdma_ch *ch)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002259{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002260 struct srp_target_port *target = ch->target;
David Dillow7aa54bd2008-01-07 18:23:41 -05002261 struct Scsi_Host *shost = target->scsi_host;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002262 struct ib_class_port_info *cpi;
2263 int opcode;
2264
2265 switch (event->param.rej_rcvd.reason) {
2266 case IB_CM_REJ_PORT_CM_REDIRECT:
2267 cpi = event->param.rej_rcvd.ari;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002268 ch->path.dlid = cpi->redirect_lid;
2269 ch->path.pkey = cpi->redirect_pkey;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002270 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002271 memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002272
Bart Van Assche509c07b2014-10-30 14:48:30 +01002273 ch->status = ch->path.dlid ?
Roland Dreieraef9ec32005-11-02 14:07:13 -08002274 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2275 break;
2276
2277 case IB_CM_REJ_PORT_REDIRECT:
Roland Dreier5d7cbfd2007-08-03 10:45:18 -07002278 if (srp_target_is_topspin(target)) {
Roland Dreieraef9ec32005-11-02 14:07:13 -08002279 /*
2280 * Topspin/Cisco SRP gateways incorrectly send
2281 * reject reason code 25 when they mean 24
2282 * (port redirect).
2283 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01002284 memcpy(ch->path.dgid.raw,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002285 event->param.rej_rcvd.ari, 16);
2286
David Dillow7aa54bd2008-01-07 18:23:41 -05002287 shost_printk(KERN_DEBUG, shost,
2288 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
Bart Van Assche509c07b2014-10-30 14:48:30 +01002289 be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2290 be64_to_cpu(ch->path.dgid.global.interface_id));
Roland Dreieraef9ec32005-11-02 14:07:13 -08002291
Bart Van Assche509c07b2014-10-30 14:48:30 +01002292 ch->status = SRP_PORT_REDIRECT;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002293 } else {
David Dillow7aa54bd2008-01-07 18:23:41 -05002294 shost_printk(KERN_WARNING, shost,
2295 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002296 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002297 }
2298 break;
2299
2300 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
David Dillow7aa54bd2008-01-07 18:23:41 -05002301 shost_printk(KERN_WARNING, shost,
2302 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002303 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002304 break;
2305
2306 case IB_CM_REJ_CONSUMER_DEFINED:
2307 opcode = *(u8 *) event->private_data;
2308 if (opcode == SRP_LOGIN_REJ) {
2309 struct srp_login_rej *rej = event->private_data;
2310 u32 reason = be32_to_cpu(rej->reason);
2311
2312 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
David Dillow7aa54bd2008-01-07 18:23:41 -05002313 shost_printk(KERN_WARNING, shost,
2314 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002315 else
Bart Van Asschee7ffde02014-03-14 13:52:21 +01002316 shost_printk(KERN_WARNING, shost, PFX
2317 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
Bart Van Assche747fe002014-10-30 14:48:05 +01002318 target->sgid.raw,
2319 target->orig_dgid.raw, reason);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002320 } else
David Dillow7aa54bd2008-01-07 18:23:41 -05002321 shost_printk(KERN_WARNING, shost,
2322 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2323 " opcode 0x%02x\n", opcode);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002324 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002325 break;
2326
David Dillow9fe4bcf2008-01-08 17:08:52 -05002327 case IB_CM_REJ_STALE_CONN:
2328 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002329 ch->status = SRP_STALE_CONN;
David Dillow9fe4bcf2008-01-08 17:08:52 -05002330 break;
2331
Roland Dreieraef9ec32005-11-02 14:07:13 -08002332 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05002333 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2334 event->param.rej_rcvd.reason);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002335 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002336 }
2337}
2338
2339static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2340{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002341 struct srp_rdma_ch *ch = cm_id->context;
2342 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002343 int comp = 0;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002344
2345 switch (event->event) {
2346 case IB_CM_REQ_ERROR:
David Dillow7aa54bd2008-01-07 18:23:41 -05002347 shost_printk(KERN_DEBUG, target->scsi_host,
2348 PFX "Sending CM REQ failed\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002349 comp = 1;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002350 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002351 break;
2352
2353 case IB_CM_REP_RECEIVED:
2354 comp = 1;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002355 srp_cm_rep_handler(cm_id, event->private_data, ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002356 break;
2357
2358 case IB_CM_REJ_RECEIVED:
David Dillow7aa54bd2008-01-07 18:23:41 -05002359 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002360 comp = 1;
2361
Bart Van Assche509c07b2014-10-30 14:48:30 +01002362 srp_cm_rej_handler(cm_id, event, ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002363 break;
2364
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07002365 case IB_CM_DREQ_RECEIVED:
David Dillow7aa54bd2008-01-07 18:23:41 -05002366 shost_printk(KERN_WARNING, target->scsi_host,
2367 PFX "DREQ received - connection closed\n");
Bart Van Asschec014c8c2015-05-18 13:23:57 +02002368 ch->connected = false;
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07002369 if (ib_send_cm_drep(cm_id, NULL, 0))
David Dillow7aa54bd2008-01-07 18:23:41 -05002370 shost_printk(KERN_ERR, target->scsi_host,
2371 PFX "Sending CM DREP failed\n");
Bart Van Asschec1120f82013-10-26 14:35:08 +02002372 queue_work(system_long_wq, &target->tl_err_work);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002373 break;
2374
2375 case IB_CM_TIMEWAIT_EXIT:
David Dillow7aa54bd2008-01-07 18:23:41 -05002376 shost_printk(KERN_ERR, target->scsi_host,
2377 PFX "connection closed\n");
Bart Van Asscheac72d762014-03-14 13:53:40 +01002378 comp = 1;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002379
Bart Van Assche509c07b2014-10-30 14:48:30 +01002380 ch->status = 0;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002381 break;
2382
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07002383 case IB_CM_MRA_RECEIVED:
2384 case IB_CM_DREQ_ERROR:
2385 case IB_CM_DREP_RECEIVED:
2386 break;
2387
Roland Dreieraef9ec32005-11-02 14:07:13 -08002388 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05002389 shost_printk(KERN_WARNING, target->scsi_host,
2390 PFX "Unhandled CM event %d\n", event->event);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002391 break;
2392 }
2393
2394 if (comp)
Bart Van Assche509c07b2014-10-30 14:48:30 +01002395 complete(&ch->done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002396
Roland Dreieraef9ec32005-11-02 14:07:13 -08002397 return 0;
2398}
2399
Jack Wang71444b92013-11-07 11:37:37 +01002400/**
Jack Wang71444b92013-11-07 11:37:37 +01002401 * srp_change_queue_depth - setting device queue depth
2402 * @sdev: scsi device struct
2403 * @qdepth: requested queue depth
Jack Wang71444b92013-11-07 11:37:37 +01002404 *
2405 * Returns queue depth.
2406 */
2407static int
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01002408srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
Jack Wang71444b92013-11-07 11:37:37 +01002409{
Christoph Hellwigc40ecc12014-11-13 14:25:11 +01002410 if (!sdev->tagged_supported)
Christoph Hellwig1e6f2412014-11-13 14:27:41 +01002411 qdepth = 1;
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01002412 return scsi_change_queue_depth(sdev, qdepth);
Jack Wang71444b92013-11-07 11:37:37 +01002413}
2414
Bart Van Assche509c07b2014-10-30 14:48:30 +01002415static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag,
2416 unsigned int lun, u8 func)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002417{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002418 struct srp_target_port *target = ch->target;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002419 struct srp_rport *rport = target->rport;
David Dillow19081f32010-10-18 08:54:49 -04002420 struct ib_device *dev = target->srp_host->srp_dev->dev;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002421 struct srp_iu *iu;
2422 struct srp_tsk_mgmt *tsk_mgmt;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002423
Bart Van Asschec014c8c2015-05-18 13:23:57 +02002424 if (!ch->connected || target->qp_in_error)
Bart Van Assche3780d1f2013-02-21 17:18:00 +00002425 return -1;
2426
Bart Van Assche509c07b2014-10-30 14:48:30 +01002427 init_completion(&ch->tsk_mgmt_done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002428
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002429 /*
Bart Van Assche509c07b2014-10-30 14:48:30 +01002430	 * Lock the rport mutex to prevent srp_create_ch_ib() from being
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002431 * invoked while a task management function is being sent.
2432 */
2433 mutex_lock(&rport->mutex);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002434 spin_lock_irq(&ch->lock);
2435 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2436 spin_unlock_irq(&ch->lock);
Bart Van Assche76c75b22010-11-26 14:37:47 -05002437
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002438 if (!iu) {
2439 mutex_unlock(&rport->mutex);
2440
Bart Van Assche76c75b22010-11-26 14:37:47 -05002441 return -1;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002442 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002443
David Dillow19081f32010-10-18 08:54:49 -04002444 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2445 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002446 tsk_mgmt = iu->buf;
2447 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2448
2449 tsk_mgmt->opcode = SRP_TSK_MGMT;
David Dillowf8b6e312010-11-26 13:02:21 -05002450 tsk_mgmt->lun = cpu_to_be64((u64) lun << 48);
2451 tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002452 tsk_mgmt->tsk_mgmt_func = func;
David Dillowf8b6e312010-11-26 13:02:21 -05002453 tsk_mgmt->task_tag = req_tag;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002454
David Dillow19081f32010-10-18 08:54:49 -04002455 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2456 DMA_TO_DEVICE);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002457 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2458 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002459 mutex_unlock(&rport->mutex);
2460
Bart Van Assche76c75b22010-11-26 14:37:47 -05002461 return -1;
2462 }
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002463 mutex_unlock(&rport->mutex);
Roland Dreierd945e1d2006-05-09 10:50:28 -07002464
Bart Van Assche509c07b2014-10-30 14:48:30 +01002465 if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002466 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
Roland Dreierd945e1d2006-05-09 10:50:28 -07002467 return -1;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002468
Roland Dreierd945e1d2006-05-09 10:50:28 -07002469 return 0;
Roland Dreierd945e1d2006-05-09 10:50:28 -07002470}
2471
Roland Dreieraef9ec32005-11-02 14:07:13 -08002472static int srp_abort(struct scsi_cmnd *scmnd)
2473{
Roland Dreierd945e1d2006-05-09 10:50:28 -07002474 struct srp_target_port *target = host_to_target(scmnd->device->host);
David Dillowf8b6e312010-11-26 13:02:21 -05002475 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002476 u32 tag;
Bart Van Assched92c0da2014-10-06 17:14:36 +02002477 u16 ch_idx;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002478 struct srp_rdma_ch *ch;
Bart Van Assche086f44f2013-06-12 15:23:04 +02002479 int ret;
Roland Dreierd945e1d2006-05-09 10:50:28 -07002480
David Dillow7aa54bd2008-01-07 18:23:41 -05002481 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002482
Bart Van Assched92c0da2014-10-06 17:14:36 +02002483 if (!req)
Bart Van Assche99b66972013-10-10 13:52:33 +02002484 return SUCCESS;
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002485 tag = blk_mq_unique_tag(scmnd->request);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002486 ch_idx = blk_mq_unique_tag_to_hwq(tag);
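	/*
	 * Note: blk_mq_unique_tag() packs the hardware queue index into the
	 * upper 16 bits of the tag and the per-queue tag into the lower 16
	 * bits, which is why the channel that issued the command can be
	 * recovered here from the tag alone.
	 */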
2487 if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2488 return SUCCESS;
2489 ch = &target->ch[ch_idx];
2490 if (!srp_claim_req(ch, req, NULL, scmnd))
2491 return SUCCESS;
2492 shost_printk(KERN_ERR, target->scsi_host,
2493 "Sending SRP abort for tag %#x\n", tag);
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002494 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
Bart Van Assche80d5e8a2013-07-10 17:36:35 +02002495 SRP_TSK_ABORT_TASK) == 0)
Bart Van Assche086f44f2013-06-12 15:23:04 +02002496 ret = SUCCESS;
Bart Van Asscheed9b2262013-10-26 14:34:27 +02002497 else if (target->rport->state == SRP_RPORT_LOST)
Bart Van Assche99e1c132013-06-28 14:49:58 +02002498 ret = FAST_IO_FAIL;
Bart Van Assche086f44f2013-06-12 15:23:04 +02002499 else
2500 ret = FAILED;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002501 srp_free_req(ch, req, scmnd, 0);
Bart Van Assche22032992012-08-14 13:18:53 +00002502 scmnd->result = DID_ABORT << 16;
Bart Van Assched8536672012-08-24 10:29:11 +00002503 scmnd->scsi_done(scmnd);
Roland Dreierd945e1d2006-05-09 10:50:28 -07002504
Bart Van Assche086f44f2013-06-12 15:23:04 +02002505 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002506}
2507
2508static int srp_reset_device(struct scsi_cmnd *scmnd)
2509{
Roland Dreierd945e1d2006-05-09 10:50:28 -07002510 struct srp_target_port *target = host_to_target(scmnd->device->host);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002511 struct srp_rdma_ch *ch;
Bart Van Assche536ae142010-11-26 13:58:27 -05002512	int i, j;
Roland Dreierd945e1d2006-05-09 10:50:28 -07002513
David Dillow7aa54bd2008-01-07 18:23:41 -05002514 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002515
Bart Van Assched92c0da2014-10-06 17:14:36 +02002516 ch = &target->ch[0];
Bart Van Assche509c07b2014-10-30 14:48:30 +01002517 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
David Dillowf8b6e312010-11-26 13:02:21 -05002518 SRP_TSK_LUN_RESET))
Roland Dreierd945e1d2006-05-09 10:50:28 -07002519 return FAILED;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002520 if (ch->tsk_mgmt_status)
Roland Dreierd945e1d2006-05-09 10:50:28 -07002521 return FAILED;
2522
Bart Van Assched92c0da2014-10-06 17:14:36 +02002523 for (i = 0; i < target->ch_count; i++) {
2524 ch = &target->ch[i];
	2525		for (j = 0; j < target->req_ring_size; ++j) {
	2526			struct srp_request *req = &ch->req_ring[j];
Bart Van Assche509c07b2014-10-30 14:48:30 +01002527
Bart Van Assched92c0da2014-10-06 17:14:36 +02002528 srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2529 }
Bart Van Assche536ae142010-11-26 13:58:27 -05002530 }
Roland Dreierd945e1d2006-05-09 10:50:28 -07002531
Roland Dreierd945e1d2006-05-09 10:50:28 -07002532 return SUCCESS;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002533}
2534
2535static int srp_reset_host(struct scsi_cmnd *scmnd)
2536{
2537 struct srp_target_port *target = host_to_target(scmnd->device->host);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002538
David Dillow7aa54bd2008-01-07 18:23:41 -05002539 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002540
Bart Van Asscheed9b2262013-10-26 14:34:27 +02002541 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002542}
2543
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002544static int srp_slave_configure(struct scsi_device *sdev)
2545{
2546 struct Scsi_Host *shost = sdev->host;
2547 struct srp_target_port *target = host_to_target(shost);
2548 struct request_queue *q = sdev->request_queue;
2549 unsigned long timeout;
2550
2551 if (sdev->type == TYPE_DISK) {
2552 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2553 blk_queue_rq_timeout(q, timeout);
2554 }
2555
2556 return 0;
2557}
2558
Tony Jonesee959b02008-02-22 00:13:36 +01002559static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2560 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002561{
Tony Jonesee959b02008-02-22 00:13:36 +01002562 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002563
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002564 return sprintf(buf, "0x%016llx\n",
2565 (unsigned long long) be64_to_cpu(target->id_ext));
2566}
2567
Tony Jonesee959b02008-02-22 00:13:36 +01002568static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2569 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002570{
Tony Jonesee959b02008-02-22 00:13:36 +01002571 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002572
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002573 return sprintf(buf, "0x%016llx\n",
2574 (unsigned long long) be64_to_cpu(target->ioc_guid));
2575}
2576
Tony Jonesee959b02008-02-22 00:13:36 +01002577static ssize_t show_service_id(struct device *dev,
2578 struct device_attribute *attr, char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002579{
Tony Jonesee959b02008-02-22 00:13:36 +01002580 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002581
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002582 return sprintf(buf, "0x%016llx\n",
2583 (unsigned long long) be64_to_cpu(target->service_id));
2584}
2585
Tony Jonesee959b02008-02-22 00:13:36 +01002586static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2587 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002588{
Tony Jonesee959b02008-02-22 00:13:36 +01002589 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002590
Bart Van Assche747fe002014-10-30 14:48:05 +01002591 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002592}
2593
Bart Van Assche848b3082013-10-26 14:38:12 +02002594static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2595 char *buf)
2596{
2597 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2598
Bart Van Assche747fe002014-10-30 14:48:05 +01002599 return sprintf(buf, "%pI6\n", target->sgid.raw);
Bart Van Assche848b3082013-10-26 14:38:12 +02002600}
2601
Tony Jonesee959b02008-02-22 00:13:36 +01002602static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2603 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002604{
Tony Jonesee959b02008-02-22 00:13:36 +01002605 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Bart Van Assched92c0da2014-10-06 17:14:36 +02002606 struct srp_rdma_ch *ch = &target->ch[0];
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002607
Bart Van Assche509c07b2014-10-30 14:48:30 +01002608 return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002609}
2610
Tony Jonesee959b02008-02-22 00:13:36 +01002611static ssize_t show_orig_dgid(struct device *dev,
2612 struct device_attribute *attr, char *buf)
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002613{
Tony Jonesee959b02008-02-22 00:13:36 +01002614 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002615
Bart Van Assche747fe002014-10-30 14:48:05 +01002616 return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002617}
2618
Bart Van Assche89de7482010-08-03 14:08:45 +00002619static ssize_t show_req_lim(struct device *dev,
2620 struct device_attribute *attr, char *buf)
2621{
2622 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Bart Van Assched92c0da2014-10-06 17:14:36 +02002623 struct srp_rdma_ch *ch;
2624 int i, req_lim = INT_MAX;
Bart Van Assche89de7482010-08-03 14:08:45 +00002625
Bart Van Assched92c0da2014-10-06 17:14:36 +02002626 for (i = 0; i < target->ch_count; i++) {
2627 ch = &target->ch[i];
2628 req_lim = min(req_lim, ch->req_lim);
2629 }
2630 return sprintf(buf, "%d\n", req_lim);
Bart Van Assche89de7482010-08-03 14:08:45 +00002631}
2632
Tony Jonesee959b02008-02-22 00:13:36 +01002633static ssize_t show_zero_req_lim(struct device *dev,
2634 struct device_attribute *attr, char *buf)
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002635{
Tony Jonesee959b02008-02-22 00:13:36 +01002636 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002637
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002638 return sprintf(buf, "%d\n", target->zero_req_lim);
2639}
2640
Tony Jonesee959b02008-02-22 00:13:36 +01002641static ssize_t show_local_ib_port(struct device *dev,
2642 struct device_attribute *attr, char *buf)
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002643{
Tony Jonesee959b02008-02-22 00:13:36 +01002644 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002645
2646 return sprintf(buf, "%d\n", target->srp_host->port);
2647}
2648
Tony Jonesee959b02008-02-22 00:13:36 +01002649static ssize_t show_local_ib_device(struct device *dev,
2650 struct device_attribute *attr, char *buf)
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002651{
Tony Jonesee959b02008-02-22 00:13:36 +01002652 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002653
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002654 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002655}
2656
Bart Van Assched92c0da2014-10-06 17:14:36 +02002657static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2658 char *buf)
2659{
2660 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2661
2662 return sprintf(buf, "%d\n", target->ch_count);
2663}
2664
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002665static ssize_t show_comp_vector(struct device *dev,
2666 struct device_attribute *attr, char *buf)
2667{
2668 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2669
2670 return sprintf(buf, "%d\n", target->comp_vector);
2671}
2672
Vu Pham7bb312e2013-10-26 14:31:27 +02002673static ssize_t show_tl_retry_count(struct device *dev,
2674 struct device_attribute *attr, char *buf)
2675{
2676 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2677
2678 return sprintf(buf, "%d\n", target->tl_retry_count);
2679}
2680
David Dillow49248642011-01-14 18:23:24 -05002681static ssize_t show_cmd_sg_entries(struct device *dev,
2682 struct device_attribute *attr, char *buf)
2683{
2684 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2685
2686 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2687}
2688
David Dillowc07d4242011-01-16 13:57:10 -05002689static ssize_t show_allow_ext_sg(struct device *dev,
2690 struct device_attribute *attr, char *buf)
2691{
2692 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2693
2694 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2695}
2696
Tony Jonesee959b02008-02-22 00:13:36 +01002697static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
2698static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
2699static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
2700static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
Bart Van Assche848b3082013-10-26 14:38:12 +02002701static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
Tony Jonesee959b02008-02-22 00:13:36 +01002702static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
2703static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
Bart Van Assche89de7482010-08-03 14:08:45 +00002704static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
Tony Jonesee959b02008-02-22 00:13:36 +01002705static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
2706static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
2707static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002708static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002709static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
Vu Pham7bb312e2013-10-26 14:31:27 +02002710static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
David Dillow49248642011-01-14 18:23:24 -05002711static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
David Dillowc07d4242011-01-16 13:57:10 -05002712static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002713
Tony Jonesee959b02008-02-22 00:13:36 +01002714static struct device_attribute *srp_host_attrs[] = {
2715 &dev_attr_id_ext,
2716 &dev_attr_ioc_guid,
2717 &dev_attr_service_id,
2718 &dev_attr_pkey,
Bart Van Assche848b3082013-10-26 14:38:12 +02002719 &dev_attr_sgid,
Tony Jonesee959b02008-02-22 00:13:36 +01002720 &dev_attr_dgid,
2721 &dev_attr_orig_dgid,
Bart Van Assche89de7482010-08-03 14:08:45 +00002722 &dev_attr_req_lim,
Tony Jonesee959b02008-02-22 00:13:36 +01002723 &dev_attr_zero_req_lim,
2724 &dev_attr_local_ib_port,
2725 &dev_attr_local_ib_device,
Bart Van Assched92c0da2014-10-06 17:14:36 +02002726 &dev_attr_ch_count,
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002727 &dev_attr_comp_vector,
Vu Pham7bb312e2013-10-26 14:31:27 +02002728 &dev_attr_tl_retry_count,
David Dillow49248642011-01-14 18:23:24 -05002729 &dev_attr_cmd_sg_entries,
David Dillowc07d4242011-01-16 13:57:10 -05002730 &dev_attr_allow_ext_sg,
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002731 NULL
2732};
2733
Roland Dreieraef9ec32005-11-02 14:07:13 -08002734static struct scsi_host_template srp_template = {
2735 .module = THIS_MODULE,
Roland Dreierb7f008f2007-05-06 21:18:11 -07002736 .name = "InfiniBand SRP initiator",
2737 .proc_name = DRV_NAME,
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002738 .slave_configure = srp_slave_configure,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002739 .info = srp_target_info,
2740 .queuecommand = srp_queuecommand,
Jack Wang71444b92013-11-07 11:37:37 +01002741 .change_queue_depth = srp_change_queue_depth,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002742 .eh_abort_handler = srp_abort,
2743 .eh_device_reset_handler = srp_reset_device,
2744 .eh_host_reset_handler = srp_reset_host,
Bart Van Assche2742c1d2013-06-12 15:24:25 +02002745 .skip_settle_delay = true,
David Dillow49248642011-01-14 18:23:24 -05002746 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
Bart Van Assche4d73f952013-10-26 14:40:37 +02002747 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002748 .this_id = -1,
Bart Van Assche4d73f952013-10-26 14:40:37 +02002749 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002750 .use_clustering = ENABLE_CLUSTERING,
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002751 .shost_attrs = srp_host_attrs,
2752 .use_blk_tags = 1,
Christoph Hellwigc40ecc12014-11-13 14:25:11 +01002753 .track_queue_depth = 1,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002754};
2755
Bart Van Assche34aa6542014-10-30 14:47:22 +01002756static int srp_sdev_count(struct Scsi_Host *host)
2757{
2758 struct scsi_device *sdev;
2759 int c = 0;
2760
2761 shost_for_each_device(sdev, host)
2762 c++;
2763
2764 return c;
2765}
2766
Roland Dreieraef9ec32005-11-02 14:07:13 -08002767static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2768{
FUJITA Tomonori32368222007-06-27 16:33:12 +09002769 struct srp_rport_identifiers ids;
2770 struct srp_rport *rport;
2771
Bart Van Assche34aa6542014-10-30 14:47:22 +01002772 target->state = SRP_TARGET_SCANNING;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002773 sprintf(target->target_name, "SRP.T10:%016llX",
2774 (unsigned long long) be64_to_cpu(target->id_ext));
2775
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002776 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
Roland Dreieraef9ec32005-11-02 14:07:13 -08002777 return -ENODEV;
2778
FUJITA Tomonori32368222007-06-27 16:33:12 +09002779 memcpy(ids.port_id, &target->id_ext, 8);
2780 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
FUJITA Tomonoriaebd5e42007-07-11 15:08:15 +09002781 ids.roles = SRP_RPORT_ROLE_TARGET;
FUJITA Tomonori32368222007-06-27 16:33:12 +09002782 rport = srp_rport_add(target->scsi_host, &ids);
2783 if (IS_ERR(rport)) {
2784 scsi_remove_host(target->scsi_host);
2785 return PTR_ERR(rport);
2786 }
2787
Bart Van Asschedc1bdbd2011-09-16 20:41:13 +02002788 rport->lld_data = target;
Bart Van Assche9dd69a62013-10-26 14:32:30 +02002789 target->rport = rport;
Bart Van Asschedc1bdbd2011-09-16 20:41:13 +02002790
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07002791 spin_lock(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002792 list_add_tail(&target->list, &host->target_list);
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07002793 spin_unlock(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002794
Roland Dreieraef9ec32005-11-02 14:07:13 -08002795 scsi_scan_target(&target->scsi_host->shost_gendev,
Matthew Wilcox1962a4a2006-06-17 20:37:30 -07002796 0, target->scsi_id, SCAN_WILD_CARD, 0);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002797
Bart Van Asschec014c8c2015-05-18 13:23:57 +02002798 if (srp_connected_ch(target) < target->ch_count ||
2799 target->qp_in_error) {
Bart Van Assche34aa6542014-10-30 14:47:22 +01002800 shost_printk(KERN_INFO, target->scsi_host,
2801 PFX "SCSI scan failed - removing SCSI host\n");
2802 srp_queue_remove_work(target);
2803 goto out;
2804 }
2805
2806 pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n",
2807 dev_name(&target->scsi_host->shost_gendev),
2808 srp_sdev_count(target->scsi_host));
2809
2810 spin_lock_irq(&target->lock);
2811 if (target->state == SRP_TARGET_SCANNING)
2812 target->state = SRP_TARGET_LIVE;
2813 spin_unlock_irq(&target->lock);
2814
2815out:
Roland Dreieraef9ec32005-11-02 14:07:13 -08002816 return 0;
2817}
2818
Tony Jonesee959b02008-02-22 00:13:36 +01002819static void srp_release_dev(struct device *dev)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002820{
2821 struct srp_host *host =
Tony Jonesee959b02008-02-22 00:13:36 +01002822 container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002823
2824 complete(&host->released);
2825}
2826
2827static struct class srp_class = {
2828 .name = "infiniband_srp",
Tony Jonesee959b02008-02-22 00:13:36 +01002829 .dev_release = srp_release_dev
Roland Dreieraef9ec32005-11-02 14:07:13 -08002830};
2831
Bart Van Assche96fc2482013-06-28 14:51:26 +02002832/**
2833 * srp_conn_unique() - check whether the connection to a target is unique
Bart Van Asscheaf246632014-05-20 15:04:21 +02002834 * @host: SRP host.
2835 * @target: SRP target port.
Bart Van Assche96fc2482013-06-28 14:51:26 +02002836 */
2837static bool srp_conn_unique(struct srp_host *host,
2838 struct srp_target_port *target)
2839{
2840 struct srp_target_port *t;
2841 bool ret = false;
2842
2843 if (target->state == SRP_TARGET_REMOVED)
2844 goto out;
2845
2846 ret = true;
2847
2848 spin_lock(&host->target_lock);
2849 list_for_each_entry(t, &host->target_list, list) {
2850 if (t != target &&
2851 target->id_ext == t->id_ext &&
2852 target->ioc_guid == t->ioc_guid &&
2853 target->initiator_ext == t->initiator_ext) {
2854 ret = false;
2855 break;
2856 }
2857 }
2858 spin_unlock(&host->target_lock);
2859
2860out:
2861 return ret;
2862}
2863
Roland Dreieraef9ec32005-11-02 14:07:13 -08002864/*
2865 * Target ports are added by writing
2866 *
2867 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2868 * pkey=<P_Key>,service_id=<service ID>
2869 *
2870 * to the add_target sysfs attribute.
2871 */
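/*
 * Example (illustrative sketch only: the HCA name "mlx4_0", the port number
 * and all identifiers below are placeholder values, not real hardware):
 *
 *   echo "id_ext=0002c90300a11111,ioc_guid=0002c90300a11111,dgid=fe800000000000000002c90300a11112,pkey=ffff,service_id=0002c90300a11111" > \
 *       /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 *
 * The sysfs directory name follows the "srp-<device>-<port>" pattern used by
 * srp_add_port() below, and the dgid value must be exactly 32 hex digits.
 */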
2872enum {
2873 SRP_OPT_ERR = 0,
2874 SRP_OPT_ID_EXT = 1 << 0,
2875 SRP_OPT_IOC_GUID = 1 << 1,
2876 SRP_OPT_DGID = 1 << 2,
2877 SRP_OPT_PKEY = 1 << 3,
2878 SRP_OPT_SERVICE_ID = 1 << 4,
2879 SRP_OPT_MAX_SECT = 1 << 5,
Vu Pham52fb2b502006-06-17 20:37:31 -07002880 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
Ramachandra K0c0450db2006-06-17 20:37:38 -07002881 SRP_OPT_IO_CLASS = 1 << 7,
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02002882 SRP_OPT_INITIATOR_EXT = 1 << 8,
David Dillow49248642011-01-14 18:23:24 -05002883 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
David Dillowc07d4242011-01-16 13:57:10 -05002884 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
2885 SRP_OPT_SG_TABLESIZE = 1 << 11,
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002886 SRP_OPT_COMP_VECTOR = 1 << 12,
Vu Pham7bb312e2013-10-26 14:31:27 +02002887 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
Bart Van Assche4d73f952013-10-26 14:40:37 +02002888 SRP_OPT_QUEUE_SIZE = 1 << 14,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002889 SRP_OPT_ALL = (SRP_OPT_ID_EXT |
2890 SRP_OPT_IOC_GUID |
2891 SRP_OPT_DGID |
2892 SRP_OPT_PKEY |
2893 SRP_OPT_SERVICE_ID),
2894};
2895
Steven Whitehousea447c092008-10-13 10:46:57 +01002896static const match_table_t srp_opt_tokens = {
Vu Pham52fb2b502006-06-17 20:37:31 -07002897 { SRP_OPT_ID_EXT, "id_ext=%s" },
2898 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
2899 { SRP_OPT_DGID, "dgid=%s" },
2900 { SRP_OPT_PKEY, "pkey=%x" },
2901 { SRP_OPT_SERVICE_ID, "service_id=%s" },
2902 { SRP_OPT_MAX_SECT, "max_sect=%d" },
2903 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
Ramachandra K0c0450db2006-06-17 20:37:38 -07002904 { SRP_OPT_IO_CLASS, "io_class=%x" },
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02002905 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
David Dillow49248642011-01-14 18:23:24 -05002906 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
David Dillowc07d4242011-01-16 13:57:10 -05002907 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
2908 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002909 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
Vu Pham7bb312e2013-10-26 14:31:27 +02002910 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
Bart Van Assche4d73f952013-10-26 14:40:37 +02002911 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
Vu Pham52fb2b502006-06-17 20:37:31 -07002912 { SRP_OPT_ERR, NULL }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002913};
2914
2915static int srp_parse_options(const char *buf, struct srp_target_port *target)
2916{
2917 char *options, *sep_opt;
2918 char *p;
2919 char dgid[3];
2920 substring_t args[MAX_OPT_ARGS];
2921 int opt_mask = 0;
2922 int token;
2923 int ret = -EINVAL;
2924 int i;
2925
2926 options = kstrdup(buf, GFP_KERNEL);
2927 if (!options)
2928 return -ENOMEM;
2929
2930 sep_opt = options;
Sagi Grimberg7dcf9c12014-10-19 18:19:02 +03002931 while ((p = strsep(&sep_opt, ",\n")) != NULL) {
Roland Dreieraef9ec32005-11-02 14:07:13 -08002932 if (!*p)
2933 continue;
2934
2935 token = match_token(p, srp_opt_tokens, args);
2936 opt_mask |= token;
2937
2938 switch (token) {
2939 case SRP_OPT_ID_EXT:
2940 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02002941 if (!p) {
2942 ret = -ENOMEM;
2943 goto out;
2944 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002945 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2946 kfree(p);
2947 break;
2948
2949 case SRP_OPT_IOC_GUID:
2950 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02002951 if (!p) {
2952 ret = -ENOMEM;
2953 goto out;
2954 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002955 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
2956 kfree(p);
2957 break;
2958
2959 case SRP_OPT_DGID:
2960 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02002961 if (!p) {
2962 ret = -ENOMEM;
2963 goto out;
2964 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002965 if (strlen(p) != 32) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002966 pr_warn("bad dest GID parameter '%s'\n", p);
Roland Dreierce1823f2006-04-03 09:31:04 -07002967 kfree(p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002968 goto out;
2969 }
2970
2971 for (i = 0; i < 16; ++i) {
Bart Van Assche747fe002014-10-30 14:48:05 +01002972 strlcpy(dgid, p + i * 2, sizeof(dgid));
2973 if (sscanf(dgid, "%hhx",
2974 &target->orig_dgid.raw[i]) < 1) {
2975 ret = -EINVAL;
2976 kfree(p);
2977 goto out;
2978 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002979 }
Roland Dreierbf17c1c2006-03-20 10:08:25 -08002980 kfree(p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002981 break;
2982
2983 case SRP_OPT_PKEY:
2984 if (match_hex(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002985 pr_warn("bad P_Key parameter '%s'\n", p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002986 goto out;
2987 }
Bart Van Assche747fe002014-10-30 14:48:05 +01002988 target->pkey = cpu_to_be16(token);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002989 break;
2990
2991 case SRP_OPT_SERVICE_ID:
2992 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02002993 if (!p) {
2994 ret = -ENOMEM;
2995 goto out;
2996 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002997 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
2998 kfree(p);
2999 break;
3000
3001 case SRP_OPT_MAX_SECT:
3002 if (match_int(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003003 pr_warn("bad max sect parameter '%s'\n", p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003004 goto out;
3005 }
3006 target->scsi_host->max_sectors = token;
3007 break;
3008
Bart Van Assche4d73f952013-10-26 14:40:37 +02003009 case SRP_OPT_QUEUE_SIZE:
3010 if (match_int(args, &token) || token < 1) {
3011 pr_warn("bad queue_size parameter '%s'\n", p);
3012 goto out;
3013 }
3014 target->scsi_host->can_queue = token;
3015 target->queue_size = token + SRP_RSP_SQ_SIZE +
3016 SRP_TSK_MGMT_SQ_SIZE;
3017 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3018 target->scsi_host->cmd_per_lun = token;
3019 break;
3020
Vu Pham52fb2b502006-06-17 20:37:31 -07003021 case SRP_OPT_MAX_CMD_PER_LUN:
Bart Van Assche4d73f952013-10-26 14:40:37 +02003022 if (match_int(args, &token) || token < 1) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003023 pr_warn("bad max cmd_per_lun parameter '%s'\n",
3024 p);
Vu Pham52fb2b502006-06-17 20:37:31 -07003025 goto out;
3026 }
Bart Van Assche4d73f952013-10-26 14:40:37 +02003027 target->scsi_host->cmd_per_lun = token;
Vu Pham52fb2b502006-06-17 20:37:31 -07003028 break;
3029
Ramachandra K0c0450db2006-06-17 20:37:38 -07003030 case SRP_OPT_IO_CLASS:
3031 if (match_hex(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003032 pr_warn("bad IO class parameter '%s'\n", p);
Ramachandra K0c0450db2006-06-17 20:37:38 -07003033 goto out;
3034 }
3035 if (token != SRP_REV10_IB_IO_CLASS &&
3036 token != SRP_REV16A_IB_IO_CLASS) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003037 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3038 token, SRP_REV10_IB_IO_CLASS,
3039 SRP_REV16A_IB_IO_CLASS);
Ramachandra K0c0450db2006-06-17 20:37:38 -07003040 goto out;
3041 }
3042 target->io_class = token;
3043 break;
3044
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02003045 case SRP_OPT_INITIATOR_EXT:
3046 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02003047 if (!p) {
3048 ret = -ENOMEM;
3049 goto out;
3050 }
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02003051 target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3052 kfree(p);
3053 break;
3054
David Dillow49248642011-01-14 18:23:24 -05003055 case SRP_OPT_CMD_SG_ENTRIES:
3056 if (match_int(args, &token) || token < 1 || token > 255) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003057 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3058 p);
David Dillow49248642011-01-14 18:23:24 -05003059 goto out;
3060 }
3061 target->cmd_sg_cnt = token;
3062 break;
3063
David Dillowc07d4242011-01-16 13:57:10 -05003064 case SRP_OPT_ALLOW_EXT_SG:
3065 if (match_int(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003066 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
David Dillowc07d4242011-01-16 13:57:10 -05003067 goto out;
3068 }
3069 target->allow_ext_sg = !!token;
3070 break;
3071
3072 case SRP_OPT_SG_TABLESIZE:
3073 if (match_int(args, &token) || token < 1 ||
3074 token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003075 pr_warn("bad max sg_tablesize parameter '%s'\n",
3076 p);
David Dillowc07d4242011-01-16 13:57:10 -05003077 goto out;
3078 }
3079 target->sg_tablesize = token;
3080 break;
3081
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02003082 case SRP_OPT_COMP_VECTOR:
3083 if (match_int(args, &token) || token < 0) {
3084 pr_warn("bad comp_vector parameter '%s'\n", p);
3085 goto out;
3086 }
3087 target->comp_vector = token;
3088 break;
3089
Vu Pham7bb312e2013-10-26 14:31:27 +02003090 case SRP_OPT_TL_RETRY_COUNT:
3091 if (match_int(args, &token) || token < 2 || token > 7) {
3092 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3093 p);
3094 goto out;
3095 }
3096 target->tl_retry_count = token;
3097 break;
3098
Roland Dreieraef9ec32005-11-02 14:07:13 -08003099 default:
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003100 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3101 p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003102 goto out;
3103 }
3104 }
3105
3106 if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
3107 ret = 0;
3108 else
3109 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
3110 if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
3111 !(srp_opt_tokens[i].token & opt_mask))
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003112 pr_warn("target creation request is missing parameter '%s'\n",
3113 srp_opt_tokens[i].pattern);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003114
Bart Van Assche4d73f952013-10-26 14:40:37 +02003115 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3116 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3117 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3118 target->scsi_host->cmd_per_lun,
3119 target->scsi_host->can_queue);
3120
Roland Dreieraef9ec32005-11-02 14:07:13 -08003121out:
3122 kfree(options);
3123 return ret;
3124}
3125
Tony Jonesee959b02008-02-22 00:13:36 +01003126static ssize_t srp_create_target(struct device *dev,
3127 struct device_attribute *attr,
Roland Dreieraef9ec32005-11-02 14:07:13 -08003128 const char *buf, size_t count)
3129{
3130 struct srp_host *host =
Tony Jonesee959b02008-02-22 00:13:36 +01003131 container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003132 struct Scsi_Host *target_host;
3133 struct srp_target_port *target;
Bart Van Assche509c07b2014-10-30 14:48:30 +01003134 struct srp_rdma_ch *ch;
Bart Van Assched1b42892014-05-20 15:07:20 +02003135 struct srp_device *srp_dev = host->srp_dev;
3136 struct ib_device *ibdev = srp_dev->dev;
Bart Van Assched92c0da2014-10-06 17:14:36 +02003137 int ret, node_idx, node, cpu, i;
3138 bool multich = false;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003139
3140 target_host = scsi_host_alloc(&srp_template,
3141 sizeof (struct srp_target_port));
3142 if (!target_host)
3143 return -ENOMEM;
3144
David Dillow49248642011-01-14 18:23:24 -05003145 target_host->transportt = ib_srp_transport_template;
Bart Van Asschefd1b6c42011-07-13 09:19:16 -07003146 target_host->max_channel = 0;
3147 target_host->max_id = 1;
Arne Redlich3c8edf02006-11-15 12:43:00 +01003148 target_host->max_lun = SRP_MAX_LUN;
3149 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
Roland Dreier5f068992005-11-11 14:06:01 -08003150
Roland Dreieraef9ec32005-11-02 14:07:13 -08003151 target = host_to_target(target_host);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003152
David Dillow49248642011-01-14 18:23:24 -05003153 target->io_class = SRP_REV16A_IB_IO_CLASS;
3154 target->scsi_host = target_host;
3155 target->srp_host = host;
3156 target->lkey = host->srp_dev->mr->lkey;
3157 target->rkey = host->srp_dev->mr->rkey;
3158 target->cmd_sg_cnt = cmd_sg_entries;
David Dillowc07d4242011-01-16 13:57:10 -05003159 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
3160 target->allow_ext_sg = allow_ext_sg;
Vu Pham7bb312e2013-10-26 14:31:27 +02003161 target->tl_retry_count = 7;
Bart Van Assche4d73f952013-10-26 14:40:37 +02003162 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003163
Bart Van Assche34aa6542014-10-30 14:47:22 +01003164 /*
	3165	 * Prevent the SCSI host from being removed by srp_remove_target()
3166 * before this function returns.
3167 */
3168 scsi_host_get(target->scsi_host);
3169
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003170 mutex_lock(&host->add_target_mutex);
3171
Roland Dreieraef9ec32005-11-02 14:07:13 -08003172 ret = srp_parse_options(buf, target);
3173 if (ret)
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003174 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003175
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02003176 ret = scsi_init_shared_tag_map(target_host, target_host->can_queue);
3177 if (ret)
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003178 goto out;
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02003179
Bart Van Assche4d73f952013-10-26 14:40:37 +02003180 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3181
Bart Van Assche96fc2482013-06-28 14:51:26 +02003182 if (!srp_conn_unique(target->srp_host, target)) {
3183 shost_printk(KERN_INFO, target->scsi_host,
3184 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3185 be64_to_cpu(target->id_ext),
3186 be64_to_cpu(target->ioc_guid),
3187 be64_to_cpu(target->initiator_ext));
3188 ret = -EEXIST;
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003189 goto out;
Bart Van Assche96fc2482013-06-28 14:51:26 +02003190 }
3191
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003192 if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
Bart Van Assched1b42892014-05-20 15:07:20 +02003193 target->cmd_sg_cnt < target->sg_tablesize) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003194 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
David Dillowc07d4242011-01-16 13:57:10 -05003195 target->sg_tablesize = target->cmd_sg_cnt;
3196 }
3197
3198 target_host->sg_tablesize = target->sg_tablesize;
3199 target->indirect_size = target->sg_tablesize *
3200 sizeof (struct srp_direct_buf);
David Dillow49248642011-01-14 18:23:24 -05003201 target->max_iu_len = sizeof (struct srp_cmd) +
3202 sizeof (struct srp_indirect_buf) +
3203 target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
3204
Bart Van Asschec1120f82013-10-26 14:35:08 +02003205 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
Bart Van Asscheef6c49d2011-12-26 16:49:18 +00003206 INIT_WORK(&target->remove_work, srp_remove_work);
David Dillow8f26c9f2011-01-14 19:45:50 -05003207 spin_lock_init(&target->lock);
Bart Van Assche747fe002014-10-30 14:48:05 +01003208 ret = ib_query_gid(ibdev, host->port, 0, &target->sgid);
Sagi Grimberg2088ca62014-03-14 13:51:58 +01003209 if (ret)
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003210 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003211
Bart Van Assched92c0da2014-10-06 17:14:36 +02003212 ret = -ENOMEM;
3213 target->ch_count = max_t(unsigned, num_online_nodes(),
3214 min(ch_count ? :
3215 min(4 * num_online_nodes(),
3216 ibdev->num_comp_vectors),
3217 num_online_cpus()));
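	/*
	 * Worked example (hypothetical topology): with the ch_count module
	 * parameter left at zero, two online NUMA nodes, 16 online CPUs and
	 * an HCA that exposes 8 completion vectors, this evaluates to
	 * max(2, min(min(4 * 2, 8), 16)) = 8 RDMA channels.
	 */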
3218 target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3219 GFP_KERNEL);
3220 if (!target->ch)
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003221 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003222
Bart Van Assched92c0da2014-10-06 17:14:36 +02003223 node_idx = 0;
3224 for_each_online_node(node) {
3225 const int ch_start = (node_idx * target->ch_count /
3226 num_online_nodes());
3227 const int ch_end = ((node_idx + 1) * target->ch_count /
3228 num_online_nodes());
3229 const int cv_start = (node_idx * ibdev->num_comp_vectors /
3230 num_online_nodes() + target->comp_vector)
3231 % ibdev->num_comp_vectors;
3232 const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
3233 num_online_nodes() + target->comp_vector)
3234 % ibdev->num_comp_vectors;
3235 int cpu_idx = 0;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003236
Bart Van Assched92c0da2014-10-06 17:14:36 +02003237 for_each_online_cpu(cpu) {
3238 if (cpu_to_node(cpu) != node)
3239 continue;
3240 if (ch_start + cpu_idx >= ch_end)
3241 continue;
3242 ch = &target->ch[ch_start + cpu_idx];
3243 ch->target = target;
3244 ch->comp_vector = cv_start == cv_end ? cv_start :
3245 cv_start + cpu_idx % (cv_end - cv_start);
3246 spin_lock_init(&ch->lock);
3247 INIT_LIST_HEAD(&ch->free_tx);
3248 ret = srp_new_cm_id(ch);
3249 if (ret)
3250 goto err_disconnect;
3251
3252 ret = srp_create_ch_ib(ch);
3253 if (ret)
3254 goto err_disconnect;
3255
3256 ret = srp_alloc_req_data(ch);
3257 if (ret)
3258 goto err_disconnect;
3259
3260 ret = srp_connect_ch(ch, multich);
3261 if (ret) {
3262 shost_printk(KERN_ERR, target->scsi_host,
3263 PFX "Connection %d/%d failed\n",
3264 ch_start + cpu_idx,
3265 target->ch_count);
3266 if (node_idx == 0 && cpu_idx == 0) {
3267 goto err_disconnect;
3268 } else {
3269 srp_free_ch_ib(target, ch);
3270 srp_free_req_data(target, ch);
3271 target->ch_count = ch - target->ch;
3272 break;
3273 }
3274 }
3275
3276 multich = true;
3277 cpu_idx++;
3278 }
3279 node_idx++;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003280 }
3281
Bart Van Assched92c0da2014-10-06 17:14:36 +02003282 target->scsi_host->nr_hw_queues = target->ch_count;
3283
Roland Dreieraef9ec32005-11-02 14:07:13 -08003284 ret = srp_add_target(host, target);
3285 if (ret)
3286 goto err_disconnect;
3287
Bart Van Assche34aa6542014-10-30 14:47:22 +01003288 if (target->state != SRP_TARGET_REMOVED) {
3289 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3290 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3291 be64_to_cpu(target->id_ext),
3292 be64_to_cpu(target->ioc_guid),
Bart Van Assche747fe002014-10-30 14:48:05 +01003293 be16_to_cpu(target->pkey),
Bart Van Assche34aa6542014-10-30 14:47:22 +01003294 be64_to_cpu(target->service_id),
Bart Van Assche747fe002014-10-30 14:48:05 +01003295 target->sgid.raw, target->orig_dgid.raw);
Bart Van Assche34aa6542014-10-30 14:47:22 +01003296 }
Bart Van Asschee7ffde02014-03-14 13:52:21 +01003297
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003298 ret = count;
3299
3300out:
3301 mutex_unlock(&host->add_target_mutex);
Bart Van Assche34aa6542014-10-30 14:47:22 +01003302
3303 scsi_host_put(target->scsi_host);
3304
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003305 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003306
3307err_disconnect:
3308 srp_disconnect_target(target);
3309
Bart Van Assched92c0da2014-10-06 17:14:36 +02003310 for (i = 0; i < target->ch_count; i++) {
3311 ch = &target->ch[i];
3312 srp_free_ch_ib(target, ch);
3313 srp_free_req_data(target, ch);
3314 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08003315
Bart Van Assched92c0da2014-10-06 17:14:36 +02003316 kfree(target->ch);
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003317 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003318}
3319
Tony Jonesee959b02008-02-22 00:13:36 +01003320static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003321
Tony Jonesee959b02008-02-22 00:13:36 +01003322static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3323 char *buf)
Roland Dreieraef9ec32005-11-02 14:07:13 -08003324{
Tony Jonesee959b02008-02-22 00:13:36 +01003325 struct srp_host *host = container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003326
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01003327 return sprintf(buf, "%s\n", host->srp_dev->dev->name);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003328}
3329
Tony Jonesee959b02008-02-22 00:13:36 +01003330static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003331
Tony Jonesee959b02008-02-22 00:13:36 +01003332static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3333 char *buf)
Roland Dreieraef9ec32005-11-02 14:07:13 -08003334{
Tony Jonesee959b02008-02-22 00:13:36 +01003335 struct srp_host *host = container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003336
3337 return sprintf(buf, "%d\n", host->port);
3338}
3339
Tony Jonesee959b02008-02-22 00:13:36 +01003340static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003341
Roland Dreierf5358a12006-06-17 20:37:29 -07003342static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
Roland Dreieraef9ec32005-11-02 14:07:13 -08003343{
3344 struct srp_host *host;
3345
3346 host = kzalloc(sizeof *host, GFP_KERNEL);
3347 if (!host)
3348 return NULL;
3349
3350 INIT_LIST_HEAD(&host->target_list);
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07003351 spin_lock_init(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003352 init_completion(&host->released);
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003353 mutex_init(&host->add_target_mutex);
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01003354 host->srp_dev = device;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003355 host->port = port;
3356
Tony Jonesee959b02008-02-22 00:13:36 +01003357 host->dev.class = &srp_class;
3358 host->dev.parent = device->dev->dma_device;
Kay Sieversd927e382009-01-06 10:44:39 -08003359 dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003360
Tony Jonesee959b02008-02-22 00:13:36 +01003361 if (device_register(&host->dev))
Roland Dreierf5358a12006-06-17 20:37:29 -07003362 goto free_host;
Tony Jonesee959b02008-02-22 00:13:36 +01003363 if (device_create_file(&host->dev, &dev_attr_add_target))
Roland Dreieraef9ec32005-11-02 14:07:13 -08003364 goto err_class;
Tony Jonesee959b02008-02-22 00:13:36 +01003365 if (device_create_file(&host->dev, &dev_attr_ibdev))
Roland Dreieraef9ec32005-11-02 14:07:13 -08003366 goto err_class;
Tony Jonesee959b02008-02-22 00:13:36 +01003367 if (device_create_file(&host->dev, &dev_attr_port))
Roland Dreieraef9ec32005-11-02 14:07:13 -08003368 goto err_class;
3369
3370 return host;
3371
3372err_class:
Tony Jonesee959b02008-02-22 00:13:36 +01003373 device_unregister(&host->dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003374
Roland Dreierf5358a12006-06-17 20:37:29 -07003375free_host:
Roland Dreieraef9ec32005-11-02 14:07:13 -08003376 kfree(host);
3377
3378 return NULL;
3379}
3380
3381static void srp_add_one(struct ib_device *device)
3382{
Roland Dreierf5358a12006-06-17 20:37:29 -07003383 struct srp_device *srp_dev;
3384 struct ib_device_attr *dev_attr;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003385 struct srp_host *host;
Bart Van Assche52ede082014-05-20 15:07:45 +02003386 int mr_page_shift, s, e, p;
3387 u64 max_pages_per_mr;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003388
Roland Dreierf5358a12006-06-17 20:37:29 -07003389 dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
3390 if (!dev_attr)
Sean Heftycf311cd2006-01-10 07:39:34 -08003391 return;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003392
Roland Dreierf5358a12006-06-17 20:37:29 -07003393 if (ib_query_device(device, dev_attr)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003394 pr_warn("Query device failed for %s\n", device->name);
Roland Dreierf5358a12006-06-17 20:37:29 -07003395 goto free_attr;
3396 }
3397
3398 srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
3399 if (!srp_dev)
3400 goto free_attr;
3401
Bart Van Assched1b42892014-05-20 15:07:20 +02003402 srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
3403 device->map_phys_fmr && device->unmap_fmr);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003404 srp_dev->has_fr = (dev_attr->device_cap_flags &
3405 IB_DEVICE_MEM_MGT_EXTENSIONS);
3406 if (!srp_dev->has_fmr && !srp_dev->has_fr)
3407 dev_warn(&device->dev, "neither FMR nor FR is supported\n");
3408
3409 srp_dev->use_fast_reg = (srp_dev->has_fr &&
3410 (!srp_dev->has_fmr || prefer_fr));
Bart Van Assched1b42892014-05-20 15:07:20 +02003411
Roland Dreierf5358a12006-06-17 20:37:29 -07003412 /*
3413 * Use the smallest page size supported by the HCA, down to a
David Dillow8f26c9f2011-01-14 19:45:50 -05003414 * minimum of 4096 bytes. We're unlikely to build large sglists
3415 * out of smaller entries.
Roland Dreierf5358a12006-06-17 20:37:29 -07003416 */
Bart Van Assche52ede082014-05-20 15:07:45 +02003417 mr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1);
3418 srp_dev->mr_page_size = 1 << mr_page_shift;
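	/*
	 * Example (assumed capability value): if page_size_cap advertises
	 * 4 KiB as the smallest supported page size, e.g. 0xfffff000, then
	 * ffs() returns 13, mr_page_shift becomes max(12, 12) = 12 and
	 * mr_page_size ends up as 4096 bytes.
	 */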
3419 srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
3420 max_pages_per_mr = dev_attr->max_mr_size;
3421 do_div(max_pages_per_mr, srp_dev->mr_page_size);
3422 srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
3423 max_pages_per_mr);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003424 if (srp_dev->use_fast_reg) {
3425 srp_dev->max_pages_per_mr =
3426 min_t(u32, srp_dev->max_pages_per_mr,
3427 dev_attr->max_fast_reg_page_list_len);
3428 }
Bart Van Assche52ede082014-05-20 15:07:45 +02003429 srp_dev->mr_max_size = srp_dev->mr_page_size *
3430 srp_dev->max_pages_per_mr;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003431 pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
Bart Van Assche52ede082014-05-20 15:07:45 +02003432 device->name, mr_page_shift, dev_attr->max_mr_size,
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003433 dev_attr->max_fast_reg_page_list_len,
Bart Van Assche52ede082014-05-20 15:07:45 +02003434 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
Roland Dreierf5358a12006-06-17 20:37:29 -07003435
3436 INIT_LIST_HEAD(&srp_dev->dev_list);
3437
3438 srp_dev->dev = device;
3439 srp_dev->pd = ib_alloc_pd(device);
3440 if (IS_ERR(srp_dev->pd))
3441 goto free_dev;
3442
3443 srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
3444 IB_ACCESS_LOCAL_WRITE |
3445 IB_ACCESS_REMOTE_READ |
3446 IB_ACCESS_REMOTE_WRITE);
3447 if (IS_ERR(srp_dev->mr))
3448 goto err_pd;
3449
Tom Tucker07ebafb2006-08-03 16:02:42 -05003450 if (device->node_type == RDMA_NODE_IB_SWITCH) {
Roland Dreieraef9ec32005-11-02 14:07:13 -08003451 s = 0;
3452 e = 0;
3453 } else {
3454 s = 1;
3455 e = device->phys_port_cnt;
3456 }
3457
3458 for (p = s; p <= e; ++p) {
Roland Dreierf5358a12006-06-17 20:37:29 -07003459 host = srp_add_port(srp_dev, p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003460 if (host)
Roland Dreierf5358a12006-06-17 20:37:29 -07003461 list_add_tail(&host->list, &srp_dev->dev_list);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003462 }
3463
Roland Dreierf5358a12006-06-17 20:37:29 -07003464 ib_set_client_data(device, &srp_client, srp_dev);
3465
3466 goto free_attr;
3467
3468err_pd:
3469 ib_dealloc_pd(srp_dev->pd);
3470
3471free_dev:
3472 kfree(srp_dev);
3473
3474free_attr:
3475 kfree(dev_attr);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003476}
3477
3478static void srp_remove_one(struct ib_device *device)
3479{
Roland Dreierf5358a12006-06-17 20:37:29 -07003480 struct srp_device *srp_dev;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003481 struct srp_host *host, *tmp_host;
Bart Van Asscheef6c49d2011-12-26 16:49:18 +00003482 struct srp_target_port *target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003483
Roland Dreierf5358a12006-06-17 20:37:29 -07003484 srp_dev = ib_get_client_data(device, &srp_client);
Dotan Barak1fe0cb82013-06-12 15:20:36 +02003485 if (!srp_dev)
3486 return;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003487
Roland Dreierf5358a12006-06-17 20:37:29 -07003488 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
Tony Jonesee959b02008-02-22 00:13:36 +01003489 device_unregister(&host->dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003490 /*
3491 * Wait for the sysfs entry to go away, so that no new
3492 * target ports can be created.
3493 */
3494 wait_for_completion(&host->released);
3495
3496 /*
Bart Van Asscheef6c49d2011-12-26 16:49:18 +00003497 * Remove all target ports.
Roland Dreieraef9ec32005-11-02 14:07:13 -08003498 */
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07003499 spin_lock(&host->target_lock);
Bart Van Asscheef6c49d2011-12-26 16:49:18 +00003500 list_for_each_entry(target, &host->target_list, list)
3501 srp_queue_remove_work(target);
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07003502 spin_unlock(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003503
3504 /*
Bart Van Asschebcc05912014-07-09 15:57:26 +02003505 * Wait for tl_err and target port removal tasks.
Roland Dreieraef9ec32005-11-02 14:07:13 -08003506 */
Bart Van Asscheef6c49d2011-12-26 16:49:18 +00003507 flush_workqueue(system_long_wq);
Bart Van Asschebcc05912014-07-09 15:57:26 +02003508 flush_workqueue(srp_remove_wq);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003509
Roland Dreieraef9ec32005-11-02 14:07:13 -08003510 kfree(host);
3511 }
3512
Roland Dreierf5358a12006-06-17 20:37:29 -07003513 ib_dereg_mr(srp_dev->mr);
3514 ib_dealloc_pd(srp_dev->pd);
3515
3516 kfree(srp_dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003517}
3518
FUJITA Tomonori32368222007-06-27 16:33:12 +09003519static struct srp_function_template ib_srp_transport_functions = {
Bart Van Asscheed9b2262013-10-26 14:34:27 +02003520 .has_rport_state = true,
3521 .reset_timer_if_blocked = true,
Bart Van Asschea95cadb2013-10-26 14:37:17 +02003522 .reconnect_delay = &srp_reconnect_delay,
Bart Van Asscheed9b2262013-10-26 14:34:27 +02003523 .fast_io_fail_tmo = &srp_fast_io_fail_tmo,
3524 .dev_loss_tmo = &srp_dev_loss_tmo,
3525 .reconnect = srp_rport_reconnect,
Bart Van Asschedc1bdbd2011-09-16 20:41:13 +02003526 .rport_delete = srp_rport_delete,
Bart Van Asscheed9b2262013-10-26 14:34:27 +02003527 .terminate_rport_io = srp_terminate_io,
FUJITA Tomonori32368222007-06-27 16:33:12 +09003528};
3529
Roland Dreieraef9ec32005-11-02 14:07:13 -08003530static int __init srp_init_module(void)
3531{
3532 int ret;
3533
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05003534 BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
Bart Van Asschedd5e6e32010-08-30 19:27:20 +00003535
David Dillow49248642011-01-14 18:23:24 -05003536 if (srp_sg_tablesize) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003537 pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
David Dillow49248642011-01-14 18:23:24 -05003538 if (!cmd_sg_entries)
3539 cmd_sg_entries = srp_sg_tablesize;
3540 }
3541
3542 if (!cmd_sg_entries)
3543 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
3544
3545 if (cmd_sg_entries > 255) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003546 pr_warn("Clamping cmd_sg_entries to 255\n");
David Dillow49248642011-01-14 18:23:24 -05003547 cmd_sg_entries = 255;
David Dillow1e89a192008-04-16 21:01:12 -07003548 }
3549
David Dillowc07d4242011-01-16 13:57:10 -05003550 if (!indirect_sg_entries)
3551 indirect_sg_entries = cmd_sg_entries;
3552 else if (indirect_sg_entries < cmd_sg_entries) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003553 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
3554 cmd_sg_entries);
David Dillowc07d4242011-01-16 13:57:10 -05003555 indirect_sg_entries = cmd_sg_entries;
3556 }
3557
Bart Van Asschebcc05912014-07-09 15:57:26 +02003558 srp_remove_wq = create_workqueue("srp_remove");
Wei Yongjunda05be22014-08-14 08:56:22 +08003559 if (!srp_remove_wq) {
3560 ret = -ENOMEM;
Bart Van Asschebcc05912014-07-09 15:57:26 +02003561 goto out;
3562 }
3563
3564 ret = -ENOMEM;
FUJITA Tomonori32368222007-06-27 16:33:12 +09003565 ib_srp_transport_template =
3566 srp_attach_transport(&ib_srp_transport_functions);
3567 if (!ib_srp_transport_template)
Bart Van Asschebcc05912014-07-09 15:57:26 +02003568 goto destroy_wq;
FUJITA Tomonori32368222007-06-27 16:33:12 +09003569
Roland Dreieraef9ec32005-11-02 14:07:13 -08003570 ret = class_register(&srp_class);
3571 if (ret) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003572 pr_err("couldn't register class infiniband_srp\n");
Bart Van Asschebcc05912014-07-09 15:57:26 +02003573 goto release_tr;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003574 }
3575
Michael S. Tsirkinc1a0b232006-08-21 16:40:12 -07003576 ib_sa_register_client(&srp_sa_client);
3577
Roland Dreieraef9ec32005-11-02 14:07:13 -08003578 ret = ib_register_client(&srp_client);
3579 if (ret) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003580 pr_err("couldn't register IB client\n");
Bart Van Asschebcc05912014-07-09 15:57:26 +02003581 goto unreg_sa;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003582 }
3583
Bart Van Asschebcc05912014-07-09 15:57:26 +02003584out:
3585 return ret;
3586
3587unreg_sa:
3588 ib_sa_unregister_client(&srp_sa_client);
3589 class_unregister(&srp_class);
3590
3591release_tr:
3592 srp_release_transport(ib_srp_transport_template);
3593
3594destroy_wq:
3595 destroy_workqueue(srp_remove_wq);
3596 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003597}
3598
3599static void __exit srp_cleanup_module(void)
3600{
3601 ib_unregister_client(&srp_client);
Michael S. Tsirkinc1a0b232006-08-21 16:40:12 -07003602 ib_sa_unregister_client(&srp_sa_client);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003603 class_unregister(&srp_class);
FUJITA Tomonori32368222007-06-27 16:33:12 +09003604 srp_release_transport(ib_srp_transport_template);
Bart Van Asschebcc05912014-07-09 15:57:26 +02003605 destroy_workqueue(srp_remove_wq);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003606}
3607
3608module_init(srp_init_module);
3609module_exit(srp_cleanup_module);