/*
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <rdma/ib_cache.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"1.0"
#define DRV_RELDATE	"July 1, 2013"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
		   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool prefer_fr;
static bool register_always;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(prefer_fr, bool, 0444);
MODULE_PARM_DESC(prefer_fr,
		 "Whether to use fast registration if both FMR and fast registration are supported");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
		 "Use memory registration even for contiguous memory regions");

static struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");

static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr);
static void srp_send_completion(struct ib_cq *cq, void *ch_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sprintf(buffer, "%d", tmo);
	else
		return sprintf(buffer, "off");
}

static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	if (strncmp(val, "off", 3) != 0) {
		res = kstrtoint(val, 0, &tmo);
		if (res)
			goto out;
	} else {
		tmo = -1;
	}
	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}

static struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};
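
/*
 * Informational note (not in the original source): because the three
 * timeout parameters above are registered with module_param_cb() and
 * S_IRUGO | S_IWUSR, they can be inspected and tuned at runtime through
 * sysfs, e.g. (example values, assuming the module is loaded as ib_srp):
 *
 *	cat /sys/module/ib_srp/parameters/dev_loss_tmo
 *	echo 20  > /sys/module/ib_srp/parameters/fast_io_fail_tmo
 *	echo off > /sys/module/ib_srp/parameters/fast_io_fail_tmo
 *
 * Every write is routed through srp_tmo_set(), which only commits the new
 * value if srp_tmo_valid() accepts the resulting combination of
 * reconnect_delay, fast_io_fail_tmo and dev_loss_tmo.
 */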

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}
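
/*
 * Usage sketch (informational): srp_alloc_iu() and srp_free_iu() form a
 * pair; srp_free_iu() undoes both the DMA mapping and the two memory
 * allocations made by srp_alloc_iu(), so every successfully allocated IU
 * must eventually be freed against the same host, e.g.:
 *
 *	iu = srp_alloc_iu(host, size, GFP_KERNEL, DMA_TO_DEVICE);
 *	if (!iu)
 *		return -ENOMEM;
 *	...
 *	srp_free_iu(host, iu);
 */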

static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %d\n", event->event);
}

static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}
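
/*
 * Note (informational): srp_init_qp() performs the RESET -> INIT
 * transition of the RC queue pair and resolves the P_Key index from the
 * cached P_Key table, so that the QP uses the partition the target was
 * configured with. The subsequent INIT -> RTR -> RTS transitions are
 * driven by the IB CM during connection establishment.
 */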

static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, ch);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (ch->cm_id)
		ib_destroy_cm_id(ch->cm_id);
	ch->cm_id = new_cm_id;
	ch->path.sgid = target->sgid;
	ch->path.dgid = target->orig_dgid;
	ch->path.pkey = target->pkey;
	ch->path.service_id = target->service_id;

	return 0;
}

static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_fmr_pool_param fmr_param;

	memset(&fmr_param, 0, sizeof(fmr_param));
	fmr_param.pool_size	    = target->scsi_host->can_queue;
	fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
	fmr_param.cache		    = 1;
	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
	fmr_param.page_shift	    = ilog2(dev->mr_page_size);
	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_WRITE |
				       IB_ACCESS_REMOTE_READ);

	return ib_create_fmr_pool(dev->pd, &fmr_param);
}

/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
	int i;
	struct srp_fr_desc *d;

	if (!pool)
		return;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		if (d->frpl)
			ib_free_fast_reg_page_list(d->frpl);
		if (d->mr)
			ib_dereg_mr(d->mr);
	}
	kfree(pool);
}

/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
					      struct ib_pd *pd, int pool_size,
					      int max_page_list_len)
{
	struct srp_fr_pool *pool;
	struct srp_fr_desc *d;
	struct ib_mr *mr;
	struct ib_fast_reg_page_list *frpl;
	int i, ret = -EINVAL;

	if (pool_size <= 0)
		goto err;
	ret = -ENOMEM;
	pool = kzalloc(sizeof(struct srp_fr_pool) +
		       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
	if (!pool)
		goto err;
	pool->size = pool_size;
	pool->max_page_list_len = max_page_list_len;
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free_list);

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		mr = ib_alloc_fast_reg_mr(pd, max_page_list_len);
		if (IS_ERR(mr)) {
			ret = PTR_ERR(mr);
			goto destroy_pool;
		}
		d->mr = mr;
		frpl = ib_alloc_fast_reg_page_list(device, max_page_list_len);
		if (IS_ERR(frpl)) {
			ret = PTR_ERR(frpl);
			goto destroy_pool;
		}
		d->frpl = frpl;
		list_add_tail(&d->entry, &pool->free_list);
	}

out:
	return pool;

destroy_pool:
	srp_destroy_fr_pool(pool);

err:
	pool = ERR_PTR(ret);
	goto out;
}

/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
	struct srp_fr_desc *d = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->free_list)) {
		d = list_first_entry(&pool->free_list, typeof(*d), entry);
		list_del(&d->entry);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return d;
}

/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
			    int n)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < n; i++)
		list_add(&desc[i]->entry, &pool->free_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}
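
/*
 * Usage note (informational): srp_fr_pool_get() and srp_fr_pool_put()
 * form a pair. A descriptor taken from the free list must be returned
 * once a local invalidation work request for desc->mr->rkey has been
 * queued; otherwise the pool is eventually exhausted and
 * srp_map_finish_fr() below starts failing with -ENOMEM.
 */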

static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;

	return srp_create_fr_pool(dev->dev, dev->pd,
				  target->scsi_host->can_queue,
				  dev->max_pages_per_mr);
}
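
/*
 * Sizing note (informational): the fast registration pool is dimensioned
 * from the SCSI host queue depth (can_queue), with each descriptor able
 * to map up to dev->max_pages_per_mr pages; srp_alloc_fmr_pool() above
 * sizes the FMR pool from the same value.
 */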
456
Bart Van Assche7dad6b22014-10-21 18:00:35 +0200457/**
458 * srp_destroy_qp() - destroy an RDMA queue pair
459 * @ch: SRP RDMA channel.
460 *
461 * Change a queue pair into the error state and wait until all receive
462 * completions have been processed before destroying it. This avoids that
463 * the receive completion handler can access the queue pair while it is
464 * being destroyed.
465 */
466static void srp_destroy_qp(struct srp_rdma_ch *ch)
467{
468 struct srp_target_port *target = ch->target;
469 static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
470 static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID };
471 struct ib_recv_wr *bad_wr;
472 int ret;
473
474 /* Destroying a QP and reusing ch->done is only safe if not connected */
475 WARN_ON_ONCE(target->connected);
476
477 ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
478 WARN_ONCE(ret, "ib_cm_init_qp_attr() returned %d\n", ret);
479 if (ret)
480 goto out;
481
482 init_completion(&ch->done);
483 ret = ib_post_recv(ch->qp, &wr, &bad_wr);
484 WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
485 if (ret == 0)
486 wait_for_completion(&ch->done);
487
488out:
489 ib_destroy_qp(ch->qp);
490}
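
/*
 * Informational sketch of the drain technique used above: once the QP is
 * in the error state, all posted receive buffers complete with a flush
 * error. Posting one extra receive WR tagged with the reserved id
 * SRP_LAST_WR_ID and waiting for ch->done therefore guarantees that the
 * receive completion handler has processed every earlier completion
 * before ib_destroy_qp() is called.
 */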

static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	struct ib_fmr_pool *fmr_pool = NULL;
	struct srp_fr_pool *fr_pool = NULL;
	const int m = 1 + dev->use_fast_reg;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	/* + 1 for SRP_LAST_WR_ID */
	recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
			       target->queue_size + 1, ch->comp_vector);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
			       m * target->queue_size, ch->comp_vector);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler    = srp_qp_event;
	init_attr->cap.max_send_wr  = m * target->queue_size;
	init_attr->cap.max_recv_wr  = target->queue_size + 1;
	init_attr->cap.max_recv_sge = 1;
	init_attr->cap.max_send_sge = 1;
	init_attr->sq_sig_type      = IB_SIGNAL_REQ_WR;
	init_attr->qp_type          = IB_QPT_RC;
	init_attr->send_cq          = send_cq;
	init_attr->recv_cq          = recv_cq;

	qp = ib_create_qp(dev->pd, init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, qp);
	if (ret)
		goto err_qp;

	if (dev->use_fast_reg && dev->has_fr) {
		fr_pool = srp_alloc_fr_pool(target);
		if (IS_ERR(fr_pool)) {
			ret = PTR_ERR(fr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
		ch->fr_pool = fr_pool;
	} else if (!dev->use_fast_reg && dev->has_fmr) {
		fmr_pool = srp_alloc_fmr_pool(target);
		if (IS_ERR(fmr_pool)) {
			ret = PTR_ERR(fmr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FMR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
		ch->fmr_pool = fmr_pool;
	}

	if (ch->qp)
		srp_destroy_qp(ch);
	if (ch->recv_cq)
		ib_destroy_cq(ch->recv_cq);
	if (ch->send_cq)
		ib_destroy_cq(ch->send_cq);

	ch->qp = qp;
	ch->recv_cq = recv_cq;
	ch->send_cq = send_cq;

	kfree(init_attr);
	return 0;

err_qp:
	ib_destroy_qp(qp);

err_send_cq:
	ib_destroy_cq(send_cq);

err_recv_cq:
	ib_destroy_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}
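
/*
 * Note (informational): srp_create_ch_ib() creates the new CQs, QP and
 * memory registration pool before destroying the old ones and only then
 * publishes them in *ch. This makes the function usable both for initial
 * channel setup and for reconnects, where ch->qp and friends may still
 * hold the previous generation of resources.
 */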

/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
			   struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	int i;

	if (!ch->target)
		return;

	if (ch->cm_id) {
		ib_destroy_cm_id(ch->cm_id);
		ch->cm_id = NULL;
	}

	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */
	if (!ch->qp)
		return;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
	} else {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
	}
	srp_destroy_qp(ch);
	ib_destroy_cq(ch->send_cq);
	ib_destroy_cq(ch->recv_cq);

	/*
	 * Avoid that the SCSI error handler tries to use this channel after
	 * it has been freed. The SCSI error handler can namely continue
	 * trying to perform recovery actions after scsi_remove_host()
	 * returned.
	 */
	ch->target = NULL;

	ch->qp = NULL;
	ch->send_cq = ch->recv_cq = NULL;

	if (ch->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->rx_ring[i]);
		kfree(ch->rx_ring);
		ch->rx_ring = NULL;
	}
	if (ch->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->tx_ring[i]);
		kfree(ch->tx_ring);
		ch->tx_ring = NULL;
	}
}

static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *ch_ptr)
{
	struct srp_rdma_ch *ch = ch_ptr;
	struct srp_target_port *target = ch->target;

	ch->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		ch->path = *pathrec;
	complete(&ch->done);
}

static int srp_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	ch->path.numb_path = 1;

	init_completion(&ch->done);

	ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
					       target->srp_host->srp_dev->dev,
					       target->srp_host->port,
					       &ch->path,
					       IB_SA_PATH_REC_SERVICE_ID |
					       IB_SA_PATH_REC_DGID	 |
					       IB_SA_PATH_REC_SGID	 |
					       IB_SA_PATH_REC_NUMB_PATH	 |
					       IB_SA_PATH_REC_PKEY,
					       SRP_PATH_REC_TIMEOUT_MS,
					       GFP_KERNEL,
					       srp_path_rec_completion,
					       ch, &ch->path_query);
	if (ch->path_query_id < 0)
		return ch->path_query_id;

	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		return ret;

	if (ch->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return ch->status;
}
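
/*
 * Informational: ib_sa_path_rec_get() is asynchronous; the result is
 * delivered to srp_path_rec_completion() above, which stores the path
 * record in ch->path and signals ch->done. Using
 * wait_for_completion_interruptible() lets a signal abort a login attempt
 * that is stuck waiting for the subnet administrator.
 */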

static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path	      = &ch->path;
	req->param.alternate_path     = NULL;
	req->param.service_id	      = target->service_id;
	req->param.qp_num	      = ch->qp->qp_num;
	req->param.qp_type	      = ch->qp->qp_type;
	req->param.private_data	      = &req->priv;
	req->param.private_data_len   = sizeof req->priv;
	req->param.flow_control	      = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn	     &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count		      = target->tl_retry_count;
	req->param.rnr_retry_count	      = 7;
	req->param.max_cm_retries	      = 15;

	req->priv.opcode	= SRP_LOGIN_REQ;
	req->priv.tag		= 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	req->priv.req_flags	= (multich ? SRP_MULTICHAN_MULTI :
				   SRP_MULTICHAN_SINGLE);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     (unsigned long long) be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(ch->cm_id, &req->param);

	kfree(req);

	return status;
}
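
/*
 * Illustration (informational) of the two 16-byte port identifier layouts
 * handled above:
 *
 *	default (SRP rev. 16a):  initiator = initiator_ext | port GUID
 *	                         target    = id_ext        | ioc_guid
 *	SRP_REV10_IB_IO_CLASS:   the two 8-byte halves of each identifier
 *	                         are swapped (GUID first).
 *
 * The Topspin/Cisco workaround then overwrites the initiator port ID with
 * 8 zero bytes followed by the local node GUID.
 */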

static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(srp_remove_wq, &target->remove_work);

	return changed;
}

static bool srp_change_conn_state(struct srp_target_port *target,
				  bool connected)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->connected != connected) {
		target->connected = connected;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	return changed;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	if (srp_change_conn_state(target, false)) {
		/* XXX should send SRP_I_LOGOUT request */

		for (i = 0; i < target->ch_count; i++) {
			ch = &target->ch[i];
			if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
				shost_printk(KERN_DEBUG, target->scsi_host,
					     PFX "Sending CM DREQ failed\n");
			}
		}
	}
}

static void srp_free_req_data(struct srp_target_port *target,
			      struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct srp_request *req;
	int i;

	if (!ch->target || !ch->req_ring)
		return;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		if (dev->use_fast_reg)
			kfree(req->fr_list);
		else
			kfree(req->fmr_list);
		kfree(req->map_page);
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}

	kfree(ch->req_ring);
	ch->req_ring = NULL;
}

static int srp_alloc_req_data(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *srp_dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	struct srp_request *req;
	void *mr_list;
	dma_addr_t dma_addr;
	int i, ret = -ENOMEM;

	ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
			       GFP_KERNEL);
	if (!ch->req_ring)
		goto out;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
				  GFP_KERNEL);
		if (!mr_list)
			goto out;
		if (srp_dev->use_fast_reg)
			req->fr_list = mr_list;
		else
			req->fmr_list = mr_list;
		req->map_page = kmalloc(srp_dev->max_pages_per_mr *
					sizeof(void *), GFP_KERNEL);
		if (!req->map_page)
			goto out;
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->indirect_desc)
			goto out;

		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr))
			goto out;

		req->indirect_dma_addr = dma_addr;
	}
	ret = 0;

out:
	return ret;
}

/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template and that did not exist
 * before invocation of this function will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	struct device_attribute **attr;

	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
		device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_rport_get(target->rport);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_stop_rport_timers(target->rport);
	srp_disconnect_target(target);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
	}
	cancel_work_sync(&target->tl_err_work);
	srp_rport_put(target->rport);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_req_data(target, ch);
	}
	kfree(target->ch);
	target->ch = NULL;

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_host_put(target->scsi_host);
}

static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_remove_target(target);
}

static void srp_rport_delete(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
}

static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	int ret;

	WARN_ON_ONCE(!multich && target->connected);

	ret = srp_lookup_path(ch);
	if (ret)
		return ret;

	while (1) {
		init_completion(&ch->done);
		ret = srp_send_req(ch, multich);
		if (ret)
			return ret;
		ret = wait_for_completion_interruptible(&ch->done);
		if (ret < 0)
			return ret;

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (ch->status) {
		case 0:
			srp_change_conn_state(target, true);
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(ch);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "giving up on stale connection\n");
			ch->status = -ECONNRESET;
			return ch->status;

		default:
			return ch->status;
		}
	}
}
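
/*
 * Informational: the loop above retries the CM login while the target
 * answers with SRP_PORT_REDIRECT (re-run the path record query) or
 * SRP_DLID_REDIRECT (the CM layer has already updated the path). Any
 * other status, including SRP_STALE_CONN, ends the connect attempt.
 */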

static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
{
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr = {
		.opcode		    = IB_WR_LOCAL_INV,
		.wr_id		    = LOCAL_INV_WR_ID_MASK,
		.next		    = NULL,
		.num_sge	    = 0,
		.send_flags	    = 0,
		.ex.invalidate_rkey = rkey,
	};

	return ib_post_send(ch->qp, &wr, &bad_wr);
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_rdma_ch *ch,
			   struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	int i, res;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (dev->use_fast_reg) {
		struct srp_fr_desc **pfr;

		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
			res = srp_inv_rkey(ch, (*pfr)->mr->rkey);
			if (res < 0) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "Queueing INV WR for rkey %#x failed (%d)\n",
					     (*pfr)->mr->rkey, res);
				queue_work(system_long_wq,
					   &target->tl_err_work);
			}
		}
		if (req->nmdesc)
			srp_fr_pool_put(ch->fr_pool, req->fr_list,
					req->nmdesc);
	} else {
		struct ib_pool_fmr **pfmr;

		for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
			ib_fmr_pool_unmap(*pfmr);
	}

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}

/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @ch: SRP RDMA channel.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
				       struct srp_request *req,
				       struct scsi_device *sdev,
				       struct scsi_cmnd *scmnd)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	if (req->scmnd &&
	    (!sdev || req->scmnd->device == sdev) &&
	    (!scmnd || req->scmnd == scmnd)) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	} else {
		scmnd = NULL;
	}
	spin_unlock_irqrestore(&ch->lock, flags);

	return scmnd;
}
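
/*
 * Usage sketch (informational): srp_claim_req() transfers ownership of
 * req->scmnd to the caller under ch->lock, e.g.:
 *
 *	scmnd = srp_claim_req(ch, req, sdev, NULL);
 *	if (scmnd) {
 *		... the caller must now complete or requeue scmnd ...
 *	}
 *
 * which is exactly how srp_finish_req() below uses it.
 */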

/**
 * srp_free_req() - Unmap data and add request to the free request list.
 * @ch: SRP RDMA channel.
 * @req: Request to be freed.
 * @scmnd: SCSI command associated with @req.
 * @req_lim_delta: Amount to be added to @target->req_lim.
 */
static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
			 struct scsi_cmnd *scmnd, s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(scmnd, ch, req);

	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_lim_delta;
	spin_unlock_irqrestore(&ch->lock, flags);
}

static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
			   struct scsi_device *sdev, int result)
{
	struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);

	if (scmnd) {
		srp_free_req(ch, req, scmnd, 0);
		scmnd->result = result;
		scmnd->scsi_done(scmnd);
	}
}

static void srp_terminate_io(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	struct Scsi_Host *shost = target->scsi_host;
	struct scsi_device *sdev;
	int i, j;

	/*
	 * Invoking srp_terminate_io() while srp_queuecommand() is running
	 * is not safe. Hence the warning statement below.
	 */
	shost_for_each_device(sdev, shost)
		WARN_ON_ONCE(sdev->request_queue->request_fn_active);

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];

		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL,
				       DID_TRANSPORT_FAILFAST << 16);
		}
	}
}

/*
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to realize that is not to call this function
 * directly but to call srp_reconnect_rport() instead since that last function
 * serializes calls of this function via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
 */
static int srp_rport_reconnect(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	int i, j, ret = 0;
	bool multich = false;

	srp_disconnect_target(target);

	if (target->state == SRP_TARGET_SCANNING)
		return -ENODEV;

	/*
	 * Now get a new local CM ID so that we avoid confusing the target in
	 * case things are really fouled up. Doing so also ensures that all CM
	 * callbacks will have finished before a new QP is allocated.
	 */
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		if (!ch->target)
			break;
		ret += srp_new_cm_id(ch);
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		if (!ch->target)
			break;
		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL, DID_RESET << 16);
		}
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		if (!ch->target)
			break;
		/*
		 * Whether or not creating a new CM ID succeeded, create a new
		 * QP. This guarantees that all completion callback function
		 * invocations have finished before request resetting starts.
		 */
		ret += srp_create_ch_ib(ch);

		INIT_LIST_HEAD(&ch->free_tx);
		for (j = 0; j < target->queue_size; ++j)
			list_add(&ch->tx_ring[j]->list, &ch->free_tx);
	}

	target->qp_in_error = false;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		if (ret || !ch->target) {
			if (i > 1)
				ret = 0;
			break;
		}
		ret = srp_connect_ch(ch, multich);
		multich = true;
	}

	if (ret == 0)
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "reconnect succeeded\n");

	return ret;
}
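
/*
 * Note (informational): the reconnect path above accumulates errors in
 * 'ret' across all channels instead of failing on the first error, and
 * clears the error when only higher-numbered channels failed, so a target
 * can come back with fewer RDMA channels than it had before the transport
 * failure.
 */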

static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
			 unsigned int dma_len, u32 rkey)
{
	struct srp_direct_buf *desc = state->desc;

	desc->va = cpu_to_be64(dma_addr);
	desc->key = cpu_to_be32(rkey);
	desc->len = cpu_to_be32(dma_len);

	state->total_len += dma_len;
	state->desc++;
	state->ndesc++;
}

static int srp_map_finish_fmr(struct srp_map_state *state,
			      struct srp_rdma_ch *ch)
{
	struct ib_pool_fmr *fmr;
	u64 io_addr = 0;

	fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
				   state->npages, io_addr);
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

	*state->next_fmr++ = fmr;
	state->nmdesc++;

	srp_map_desc(state, 0, state->dma_len, fmr->fmr->rkey);

	return 0;
}

Bart Van Assche5cfb1782014-05-20 15:08:34 +02001298static int srp_map_finish_fr(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001299 struct srp_rdma_ch *ch)
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001300{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001301 struct srp_target_port *target = ch->target;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001302 struct srp_device *dev = target->srp_host->srp_dev;
1303 struct ib_send_wr *bad_wr;
1304 struct ib_send_wr wr;
1305 struct srp_fr_desc *desc;
1306 u32 rkey;
1307
Bart Van Assche509c07b2014-10-30 14:48:30 +01001308 desc = srp_fr_pool_get(ch->fr_pool);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001309 if (!desc)
1310 return -ENOMEM;
1311
1312 rkey = ib_inc_rkey(desc->mr->rkey);
1313 ib_update_fast_reg_key(desc->mr, rkey);
1314
1315 memcpy(desc->frpl->page_list, state->pages,
1316 sizeof(state->pages[0]) * state->npages);
1317
1318 memset(&wr, 0, sizeof(wr));
1319 wr.opcode = IB_WR_FAST_REG_MR;
1320 wr.wr_id = FAST_REG_WR_ID_MASK;
1321 wr.wr.fast_reg.iova_start = state->base_dma_addr;
1322 wr.wr.fast_reg.page_list = desc->frpl;
1323 wr.wr.fast_reg.page_list_len = state->npages;
1324 wr.wr.fast_reg.page_shift = ilog2(dev->mr_page_size);
1325 wr.wr.fast_reg.length = state->dma_len;
1326 wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
1327 IB_ACCESS_REMOTE_READ |
1328 IB_ACCESS_REMOTE_WRITE);
1329 wr.wr.fast_reg.rkey = desc->mr->lkey;
1330
1331 *state->next_fr++ = desc;
1332 state->nmdesc++;
1333
1334 srp_map_desc(state, state->base_dma_addr, state->dma_len,
1335 desc->mr->rkey);
1336
Bart Van Assche509c07b2014-10-30 14:48:30 +01001337 return ib_post_send(ch->qp, &wr, &bad_wr);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001338}
1339
Bart Van Assche539dde62014-05-20 15:05:46 +02001340static int srp_finish_mapping(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001341 struct srp_rdma_ch *ch)
Bart Van Assche539dde62014-05-20 15:05:46 +02001342{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001343 struct srp_target_port *target = ch->target;
Bart Van Assche539dde62014-05-20 15:05:46 +02001344 int ret = 0;
1345
1346 if (state->npages == 0)
1347 return 0;
1348
Bart Van Asscheb1b88542014-05-20 15:06:41 +02001349 if (state->npages == 1 && !register_always)
Bart Van Assche52ede082014-05-20 15:07:45 +02001350 srp_map_desc(state, state->base_dma_addr, state->dma_len,
Bart Van Assche539dde62014-05-20 15:05:46 +02001351 target->rkey);
1352 else
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001353 ret = target->srp_host->srp_dev->use_fast_reg ?
Bart Van Assche509c07b2014-10-30 14:48:30 +01001354 srp_map_finish_fr(state, ch) :
1355 srp_map_finish_fmr(state, ch);
Bart Van Assche539dde62014-05-20 15:05:46 +02001356
1357 if (ret == 0) {
1358 state->npages = 0;
Bart Van Assche52ede082014-05-20 15:07:45 +02001359 state->dma_len = 0;
Bart Van Assche539dde62014-05-20 15:05:46 +02001360 }
1361
1362 return ret;
1363}
1364
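/*
 * Illustrative sketch of the policy srp_finish_mapping() applies: a run
 * that fits in a single MR page can be described with the global rkey
 * unless register_always forces registration; longer runs always go
 * through FR or FMR. Hypothetical helper, shown only to make the branch
 * above explicit.
 */
static inline bool srp_example_needs_registration(unsigned int npages)
{
	return npages > 1 || register_always;
}
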
static void srp_map_update_start(struct srp_map_state *state,
				 struct scatterlist *sg, int sg_index,
				 dma_addr_t dma_addr)
{
	state->unmapped_sg = sg;
	state->unmapped_index = sg_index;
	state->unmapped_addr = dma_addr;
}

static int srp_map_sg_entry(struct srp_map_state *state,
			    struct srp_rdma_ch *ch,
			    struct scatterlist *sg, int sg_index,
			    bool use_mr)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
	unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
	unsigned int len;
	int ret;

	if (!dma_len)
		return 0;

	if (!use_mr) {
		/*
		 * Once we're in direct map mode for a request, we don't
		 * go back to FMR or FR mode, so no need to update anything
		 * other than the descriptor.
		 */
		srp_map_desc(state, dma_addr, dma_len, target->rkey);
		return 0;
	}

	/*
	 * Since not all RDMA HW drivers support non-zero page offsets for
	 * FMR, if we start at an offset into a page, don't merge into the
	 * current FMR mapping. Finish it out, and use the kernel's MR for
	 * this sg entry.
	 */
	if ((!dev->use_fast_reg && dma_addr & ~dev->mr_page_mask) ||
	    dma_len > dev->mr_max_size) {
		ret = srp_finish_mapping(state, ch);
		if (ret)
			return ret;

		srp_map_desc(state, dma_addr, dma_len, target->rkey);
		srp_map_update_start(state, NULL, 0, 0);
		return 0;
	}

	/*
	 * If this is the first sg that will be mapped via FMR or via FR, save
	 * our position. We need to know the first unmapped entry, its index,
	 * and the first unmapped address within that entry to be able to
	 * restart mapping after an error.
	 */
	if (!state->unmapped_sg)
		srp_map_update_start(state, sg, sg_index, dma_addr);

	while (dma_len) {
		unsigned offset = dma_addr & ~dev->mr_page_mask;
		if (state->npages == dev->max_pages_per_mr || offset != 0) {
			ret = srp_finish_mapping(state, ch);
			if (ret)
				return ret;

			srp_map_update_start(state, sg, sg_index, dma_addr);
		}

		len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);

		if (!state->npages)
			state->base_dma_addr = dma_addr;
		state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
		state->dma_len += len;
		dma_addr += len;
		dma_len -= len;
	}

	/*
	 * If the last entry of the MR wasn't a full page, then we need to
	 * close it out and start a new one -- we can only merge at page
	 * boundaries.
	 */
	ret = 0;
	if (len != dev->mr_page_size) {
		ret = srp_finish_mapping(state, ch);
		if (!ret)
			srp_map_update_start(state, NULL, 0, 0);
	}
	return ret;
}

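/*
 * Illustrative sketch of the arithmetic in the while loop above. With a
 * 4 KB MR page size, a run starting at DMA address 0x12345100 with length
 * 0x2000 is split into 0xf00 bytes (up to the first page boundary), then
 * 0x1000 bytes, then a final 0x100 bytes, i.e. it touches three MR pages.
 * The helper below is hypothetical and only counts those pages.
 */
static inline unsigned int srp_example_count_mr_pages(struct srp_device *dev,
						      dma_addr_t dma_addr,
						      unsigned int dma_len)
{
	dma_addr_t first = dma_addr & dev->mr_page_mask;
	dma_addr_t last = (dma_addr + dma_len - 1) & dev->mr_page_mask;

	return ((last - first) >> ilog2(dev->mr_page_size)) + 1;
}
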
static int srp_map_sg(struct srp_map_state *state, struct srp_rdma_ch *ch,
		      struct srp_request *req, struct scatterlist *scat,
		      int count)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct scatterlist *sg;
	int i;
	bool use_mr;

	state->desc = req->indirect_desc;
	state->pages = req->map_page;
	if (dev->use_fast_reg) {
		state->next_fr = req->fr_list;
		use_mr = !!ch->fr_pool;
	} else {
		state->next_fmr = req->fmr_list;
		use_mr = !!ch->fmr_pool;
	}

	for_each_sg(scat, sg, count, i) {
		if (srp_map_sg_entry(state, ch, sg, i, use_mr)) {
			/*
			 * Memory registration failed, so backtrack to the
			 * first unmapped entry and continue on without using
			 * memory registration.
			 */
			dma_addr_t dma_addr;
			unsigned int dma_len;

backtrack:
			sg = state->unmapped_sg;
			i = state->unmapped_index;

			dma_addr = ib_sg_dma_address(ibdev, sg);
			dma_len = ib_sg_dma_len(ibdev, sg);
			dma_len -= (state->unmapped_addr - dma_addr);
			dma_addr = state->unmapped_addr;
			use_mr = false;
			srp_map_desc(state, dma_addr, dma_len, target->rkey);
		}
	}

	if (use_mr && srp_finish_mapping(state, ch))
		goto backtrack;

	req->nmdesc = state->nmdesc;

	return 0;
}

static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
			struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct scatterlist *scat;
	struct srp_cmd *cmd = req->cmd->buf;
	int len, nents, count;
	struct srp_device *dev;
	struct ib_device *ibdev;
	struct srp_map_state state;
	struct srp_indirect_buf *indirect_hdr;
	u32 table_len;
	u8 fmt;

	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
		return sizeof (struct srp_cmd);

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled data direction %d\n",
			     scmnd->sc_data_direction);
		return -EINVAL;
	}

	nents = scsi_sg_count(scmnd);
	scat = scsi_sglist(scmnd);

	dev = target->srp_host->srp_dev;
	ibdev = dev->dev;

	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
	if (unlikely(count == 0))
		return -EIO;

	fmt = SRP_DATA_DESC_DIRECT;
	len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);

	if (count == 1 && !register_always) {
		/*
		 * The midlayer only generated a single gather/scatter
		 * entry, or DMA mapping coalesced everything to a
		 * single entry. So a direct descriptor along with
		 * the DMA MR suffices.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
		buf->key = cpu_to_be32(target->rkey);
		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));

		req->nmdesc = 0;
		goto map_complete;
	}

	/*
	 * We have more than one scatter/gather entry, so build our indirect
	 * descriptor table, trying to merge as many entries as we can.
	 */
	indirect_hdr = (void *) cmd->add_data;

	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
				   target->indirect_size, DMA_TO_DEVICE);

	memset(&state, 0, sizeof(state));
	srp_map_sg(&state, ch, req, scat, count);

	/* We've mapped the request, now pull as much of the indirect
	 * descriptor table as we can into the command buffer. If this
	 * target is not using an external indirect table, we are
	 * guaranteed to fit into the command, as the SCSI layer won't
	 * give us more S/G entries than we allow.
	 */
	if (state.ndesc == 1) {
		/*
		 * Memory registration collapsed the sg-list into one entry,
		 * so use a direct descriptor.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		*buf = req->indirect_desc[0];
		goto map_complete;
	}

	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
		     !target->allow_ext_sg)) {
		shost_printk(KERN_ERR, target->scsi_host,
			     "Could not fit S/G list into SRP_CMD\n");
		return -EIO;
	}

	count = min(state.ndesc, target->cmd_sg_cnt);
	table_len = state.ndesc * sizeof (struct srp_direct_buf);

	fmt = SRP_DATA_DESC_INDIRECT;
	len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
	len += count * sizeof (struct srp_direct_buf);

	memcpy(indirect_hdr->desc_list, req->indirect_desc,
	       count * sizeof (struct srp_direct_buf));

	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
	indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
	indirect_hdr->len = cpu_to_be32(state.total_len);

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->data_out_desc_cnt = count;
	else
		cmd->data_in_desc_cnt = count;

	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
				      DMA_TO_DEVICE);

map_complete:
	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;
}

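/*
 * Illustrative arithmetic for the SRP_DATA_DESC_INDIRECT case above,
 * assuming the standard srp.h structure sizes (48-byte SRP_CMD header,
 * 20-byte indirect header, 16-byte direct descriptors): pulling twelve
 * descriptors into the command gives 48 + 20 + 12 * 16 = 260 bytes.
 * Hypothetical helper that repeats the computation of 'len'.
 */
static inline int srp_example_indirect_iu_len(int count)
{
	return sizeof(struct srp_cmd) + sizeof(struct srp_indirect_buf) +
		count * sizeof(struct srp_direct_buf);
}
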
/*
 * Return an IU and possibly a credit to the free pool
 */
static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
			  enum srp_iu_type iu_type)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	list_add(&iu->list, &ch->free_tx);
	if (iu_type != SRP_IU_RSP)
		++ch->req_lim;
	spin_unlock_irqrestore(&ch->lock, flags);
}

/*
 * Must be called with ch->lock held to protect req_lim and free_tx.
 * If an IU is not sent, it must be returned using srp_put_tx_iu().
 *
 * Note:
 * An upper limit for the number of allocated information units for each
 * request type is:
 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
 *   more than Scsi_Host.can_queue requests.
 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
 *   one unanswered SRP request to an initiator.
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
				      enum srp_iu_type iu_type)
{
	struct srp_target_port *target = ch->target;
	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
	struct srp_iu *iu;

	srp_send_completion(ch->send_cq, ch);

	if (list_empty(&ch->free_tx))
		return NULL;

	/* Initiator responses to target requests do not consume credits */
	if (iu_type != SRP_IU_RSP) {
		if (ch->req_lim <= rsv) {
			++target->zero_req_lim;
			return NULL;
		}

		--ch->req_lim;
	}

	iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
	list_del(&iu->list);
	return iu;
}

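/*
 * Illustrative model of the credit check in __srp_get_tx_iu(): responses
 * never consume a credit, task management functions may use up the last
 * SRP_TSK_MGMT_SQ_SIZE credits, and ordinary commands must leave that
 * reserve untouched. Hypothetical helper mirroring the logic above.
 */
static inline bool srp_example_may_send(s32 req_lim, enum srp_iu_type iu_type)
{
	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;

	return iu_type == SRP_IU_RSP || req_lim > rsv;
}
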
static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
{
	struct srp_target_port *target = ch->target;
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;

	list.addr = iu->dma;
	list.length = len;
	list.lkey = target->lkey;

	wr.next = NULL;
	wr.wr_id = (uintptr_t) iu;
	wr.sg_list = &list;
	wr.num_sge = 1;
	wr.opcode = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	return ib_post_send(ch->qp, &wr, &bad_wr);
}

static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
{
	struct srp_target_port *target = ch->target;
	struct ib_recv_wr wr, *bad_wr;
	struct ib_sge list;

	list.addr = iu->dma;
	list.length = iu->size;
	list.lkey = target->lkey;

	wr.next = NULL;
	wr.wr_id = (uintptr_t) iu;
	wr.sg_list = &list;
	wr.num_sge = 1;

	return ib_post_recv(ch->qp, &wr, &bad_wr);
}

static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
{
	struct srp_target_port *target = ch->target;
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		spin_lock_irqsave(&ch->lock, flags);
		ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
		spin_unlock_irqrestore(&ch->lock, flags);

		ch->tsk_mgmt_status = -1;
		if (be32_to_cpu(rsp->resp_data_len) >= 4)
			ch->tsk_mgmt_status = rsp->data[3];
		complete(&ch->tsk_mgmt_done);
	} else {
		scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
		if (scmnd) {
			req = (void *)scmnd->host_scribble;
			scmnd = srp_claim_req(ch, req, NULL, scmnd);
		}
		if (!scmnd) {
			shost_printk(KERN_ERR, target->scsi_host,
				     "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
				     rsp->tag, ch - target->ch, ch->qp->qp_num);

			spin_lock_irqsave(&ch->lock, flags);
			ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
			spin_unlock_irqrestore(&ch->lock, flags);

			return;
		}
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));

		srp_free_req(ch, req, scmnd,
			     be32_to_cpu(rsp->req_lim_delta));

		scmnd->host_scribble = NULL;
		scmnd->scsi_done(scmnd);
	}
}

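/*
 * Illustrative sketch of the residual handling above for the data-in
 * direction: the UNDER flags report how many requested bytes were not
 * transferred, while the OVER flags report an overflow, which the SCSI
 * core expects as a negative residual. Hypothetical helper.
 */
static inline int srp_example_data_in_resid(u8 flags, s32 res_cnt)
{
	if (flags & SRP_RSP_FLAG_DIUNDER)
		return res_cnt;
	if (flags & SRP_RSP_FLAG_DIOVER)
		return -res_cnt;
	return 0;
}
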
static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
			       void *rsp, int len)
{
	struct srp_target_port *target = ch->target;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	unsigned long flags;
	struct srp_iu *iu;
	int err;

	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_delta;
	iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
	spin_unlock_irqrestore(&ch->lock, flags);

	if (!iu) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "no IU available to send response\n");
		return 1;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
	memcpy(iu->buf, rsp, len);
	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);

	err = srp_post_send(ch, iu, len);
	if (err) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "unable to post response: %d\n", err);
		srp_put_tx_iu(ch, iu, SRP_IU_RSP);
	}

	return err;
}

static void srp_process_cred_req(struct srp_rdma_ch *ch,
				 struct srp_cred_req *req)
{
	struct srp_cred_rsp rsp = {
		.opcode = SRP_CRED_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
		shost_printk(KERN_ERR, ch->target->scsi_host, PFX
			     "problems processing SRP_CRED_REQ\n");
}

static void srp_process_aer_req(struct srp_rdma_ch *ch,
				struct srp_aer_req *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_aer_rsp rsp = {
		.opcode = SRP_AER_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	shost_printk(KERN_ERR, target->scsi_host, PFX
		     "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));

	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_AER_REQ\n");
}

static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc)
{
	struct srp_target_port *target = ch->target;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
	int res;
	u8 opcode;

	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
				   DMA_FROM_DEVICE);

	opcode = *(u8 *) iu->buf;

	if (0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "recv completion, opcode 0x%02x\n", opcode);
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
			       iu->buf, wc->byte_len, true);
	}

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(ch, iu->buf);
		break;

	case SRP_CRED_REQ:
		srp_process_cred_req(ch, iu->buf);
		break;

	case SRP_AER_REQ:
		srp_process_aer_req(ch, iu->buf);
		break;

	case SRP_T_LOGOUT:
		/* XXX Handle target logout */
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Got target logout request\n");
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
		break;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
				      DMA_FROM_DEVICE);

	res = srp_post_recv(ch, iu);
	if (res != 0)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Recv failed with error code %d\n", res);
}

/**
 * srp_tl_err_work() - handle a transport layer error
 * @work: Work structure embedded in an SRP target port.
 *
 * Note: This function may get invoked before the rport has been created,
 * hence the target->rport test.
 */
static void srp_tl_err_work(struct work_struct *work)
{
	struct srp_target_port *target;

	target = container_of(work, struct srp_target_port, tl_err_work);
	if (target->rport)
		srp_start_tl_fail_timers(target->rport);
}

static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
			      bool send_err, struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;

	if (wr_id == SRP_LAST_WR_ID) {
		complete(&ch->done);
		return;
	}

	if (target->connected && !target->qp_in_error) {
		if (wr_id & LOCAL_INV_WR_ID_MASK) {
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "LOCAL_INV failed with status %d\n",
				     wc_status);
		} else if (wr_id & FAST_REG_WR_ID_MASK) {
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "FAST_REG_MR failed status %d\n",
				     wc_status);
		} else {
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "failed %s status %d for iu %p\n",
				     send_err ? "send" : "receive",
				     wc_status, (void *)(uintptr_t)wr_id);
		}
		queue_work(system_long_wq, &target->tl_err_work);
	}
	target->qp_in_error = true;
}

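/*
 * Illustrative sketch of the wr_id classification used above: send and
 * receive work requests carry the srp_iu pointer in wr_id, while local
 * invalidate and fast registration work requests are tagged with
 * dedicated mask bits; the scheme works because srp_iu pointers are at
 * least word-aligned, leaving the low bits clear. Hypothetical helper.
 */
static inline const char *srp_example_wr_id_kind(u64 wr_id)
{
	if (wr_id & LOCAL_INV_WR_ID_MASK)
		return "LOCAL_INV";
	if (wr_id & FAST_REG_WR_ID_MASK)
		return "FAST_REG_MR";
	return "send/recv IU";
}
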
static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr)
{
	struct srp_rdma_ch *ch = ch_ptr;
	struct ib_wc wc;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (likely(wc.status == IB_WC_SUCCESS)) {
			srp_handle_recv(ch, &wc);
		} else {
			srp_handle_qp_err(wc.wr_id, wc.status, false, ch);
		}
	}
}

static void srp_send_completion(struct ib_cq *cq, void *ch_ptr)
{
	struct srp_rdma_ch *ch = ch_ptr;
	struct ib_wc wc;
	struct srp_iu *iu;

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (likely(wc.status == IB_WC_SUCCESS)) {
			iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
			list_add(&iu->list, &ch->free_tx);
		} else {
			srp_handle_qp_err(wc.wr_id, wc.status, true, ch);
		}
	}
}

static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(shost);
	struct srp_rport *rport = target->rport;
	struct srp_rdma_ch *ch;
	struct srp_request *req;
	struct srp_iu *iu;
	struct srp_cmd *cmd;
	struct ib_device *dev;
	unsigned long flags;
	u32 tag;
	u16 idx;
	int len, ret;
	const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;

	/*
	 * The SCSI EH thread is the only context from which srp_queuecommand()
	 * can get invoked for blocked devices (SDEV_BLOCK /
	 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
	 * locking the rport mutex if invoked from inside the SCSI EH.
	 */
	if (in_scsi_eh)
		mutex_lock(&rport->mutex);

	scmnd->result = srp_chkready(target->rport);
	if (unlikely(scmnd->result))
		goto err;

	WARN_ON_ONCE(scmnd->request->tag < 0);
	tag = blk_mq_unique_tag(scmnd->request);
	ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
	idx = blk_mq_unique_tag_to_tag(tag);
	WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
		  dev_name(&shost->shost_gendev), tag, idx,
		  target->req_ring_size);

	spin_lock_irqsave(&ch->lock, flags);
	iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
	spin_unlock_irqrestore(&ch->lock, flags);

	if (!iu)
		goto err;

	req = &ch->req_ring[idx];
	dev = target->srp_host->srp_dev->dev;
	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
				   DMA_TO_DEVICE);

	scmnd->host_scribble = (void *) req;

	cmd = iu->buf;
	memset(cmd, 0, sizeof *cmd);

	cmd->opcode = SRP_CMD;
	cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
	cmd->tag = tag;
	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);

	req->scmnd = scmnd;
	req->cmd = iu;

	len = srp_map_data(scmnd, ch, req);
	if (len < 0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Failed to map data (%d)\n", len);
		/*
		 * If we ran out of memory descriptors (-ENOMEM) because an
		 * application is queuing many requests with more than
		 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
		 * to reduce the queue depth temporarily.
		 */
		scmnd->result = len == -ENOMEM ?
			DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
		goto err_iu;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
				      DMA_TO_DEVICE);

	if (srp_post_send(ch, iu, len)) {
		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
		goto err_unmap;
	}

	ret = 0;

unlock_rport:
	if (in_scsi_eh)
		mutex_unlock(&rport->mutex);

	return ret;

err_unmap:
	srp_unmap_data(scmnd, ch, req);

err_iu:
	srp_put_tx_iu(ch, iu, SRP_IU_CMD);

	/*
	 * Make sure that the loops that iterate over the request ring never
	 * encounter a dangling SCSI command pointer.
	 */
	req->scmnd = NULL;

err:
	if (scmnd->result) {
		scmnd->scsi_done(scmnd);
		ret = 0;
	} else {
		ret = SCSI_MLQUEUE_HOST_BUSY;
	}

	goto unlock_rport;
}

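/*
 * Illustrative sketch of the tag decomposition used above: a blk-mq
 * unique tag packs the hardware queue number into the upper 16 bits and
 * the per-queue tag into the lower 16 bits, so one value selects both the
 * RDMA channel and the slot in its request ring. The real decoding is
 * done by blk_mq_unique_tag_to_hwq() and blk_mq_unique_tag_to_tag(); the
 * helper below is hypothetical.
 */
static inline void srp_example_decode_tag(u32 unique_tag, u16 *hwq, u16 *idx)
{
	*hwq = unique_tag >> 16;	/* index into target->ch[] */
	*idx = unique_tag & 0xffff;	/* index into ch->req_ring[] */
}
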
/*
 * Note: the resources allocated in this function are freed in
 * srp_free_ch_ib().
 */
static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int i;

	ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
			      GFP_KERNEL);
	if (!ch->rx_ring)
		goto err_no_ring;
	ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
			      GFP_KERNEL);
	if (!ch->tx_ring)
		goto err_no_ring;

	for (i = 0; i < target->queue_size; ++i) {
		ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
					      ch->max_ti_iu_len,
					      GFP_KERNEL, DMA_FROM_DEVICE);
		if (!ch->rx_ring[i])
			goto err;
	}

	for (i = 0; i < target->queue_size; ++i) {
		ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
					      target->max_iu_len,
					      GFP_KERNEL, DMA_TO_DEVICE);
		if (!ch->tx_ring[i])
			goto err;

		list_add(&ch->tx_ring[i]->list, &ch->free_tx);
	}

	return 0;

err:
	for (i = 0; i < target->queue_size; ++i) {
		srp_free_iu(target->srp_host, ch->rx_ring[i]);
		srp_free_iu(target->srp_host, ch->tx_ring[i]);
	}

err_no_ring:
	kfree(ch->tx_ring);
	ch->tx_ring = NULL;
	kfree(ch->rx_ring);
	ch->rx_ring = NULL;

	return -ENOMEM;
}

static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
{
	uint64_t T_tr_ns, max_compl_time_ms;
	uint32_t rq_tmo_jiffies;

	/*
	 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
	 * table 91), both the QP timeout and the retry count have to be set
	 * for RC QP's during the RTR to RTS transition.
	 */
	WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
		     (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));

	/*
	 * Set target->rq_tmo_jiffies to one second more than the largest time
	 * it can take before an error completion is generated. See also
	 * C9-140..142 in the IBTA spec for more information about how to
	 * convert the QP Local ACK Timeout value to nanoseconds.
	 */
	T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
	max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
	do_div(max_compl_time_ms, NSEC_PER_MSEC);
	rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);

	return rq_tmo_jiffies;
}

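/*
 * Worked example for srp_compute_rq_tmo(), assuming qp_attr->timeout = 14
 * and qp_attr->retry_cnt = 7: T_tr = 4096 ns * 2^14 ~= 67.1 ms, the
 * worst-case completion time is 7 * 4 * 67.1 ms ~= 1879 ms, and the
 * resulting timeout is msecs_to_jiffies(2879). Hypothetical helper that
 * repeats the arithmetic without the attribute mask check.
 */
static inline uint32_t srp_example_rq_tmo(u8 timeout, u8 retry_cnt)
{
	uint64_t t_tr_ns = 4096ULL << timeout;
	uint64_t max_compl_time_ms = retry_cnt * 4 * t_tr_ns;

	do_div(max_compl_time_ms, NSEC_PER_MSEC);
	return msecs_to_jiffies(max_compl_time_ms + 1000);
}
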
static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
			       struct srp_login_rsp *lrsp,
			       struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_qp_attr *qp_attr = NULL;
	int attr_mask = 0;
	int ret;
	int i;

	if (lrsp->opcode == SRP_LOGIN_RSP) {
		ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
		ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);

		/*
		 * Reserve credits for task management so we don't
		 * bounce requests back to the SCSI mid-layer.
		 */
		target->scsi_host->can_queue
			= min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
			      target->scsi_host->can_queue);
		target->scsi_host->cmd_per_lun
			= min_t(int, target->scsi_host->can_queue,
				target->scsi_host->cmd_per_lun);
	} else {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
		ret = -ECONNRESET;
		goto error;
	}

	if (!ch->rx_ring) {
		ret = srp_alloc_iu_bufs(ch);
		if (ret)
			goto error;
	}

	ret = -ENOMEM;
	qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
	if (!qp_attr)
		goto error;

	qp_attr->qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	if (ret)
		goto error_free;

	ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
	if (ret)
		goto error_free;

	for (i = 0; i < target->queue_size; i++) {
		struct srp_iu *iu = ch->rx_ring[i];

		ret = srp_post_recv(ch, iu);
		if (ret)
			goto error_free;
	}

	qp_attr->qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	if (ret)
		goto error_free;

	target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);

	ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
	if (ret)
		goto error_free;

	ret = ib_send_cm_rtu(cm_id, NULL, 0);

error_free:
	kfree(qp_attr);

error:
	ch->status = ret;
}

static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event,
			       struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct Scsi_Host *shost = target->scsi_host;
	struct ib_class_port_info *cpi;
	int opcode;

	switch (event->param.rej_rcvd.reason) {
	case IB_CM_REJ_PORT_CM_REDIRECT:
		cpi = event->param.rej_rcvd.ari;
		ch->path.dlid = cpi->redirect_lid;
		ch->path.pkey = cpi->redirect_pkey;
		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
		memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);

		ch->status = ch->path.dlid ?
			SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
		break;

	case IB_CM_REJ_PORT_REDIRECT:
		if (srp_target_is_topspin(target)) {
			/*
			 * Topspin/Cisco SRP gateways incorrectly send
			 * reject reason code 25 when they mean 24
			 * (port redirect).
			 */
			memcpy(ch->path.dgid.raw,
			       event->param.rej_rcvd.ari, 16);

			shost_printk(KERN_DEBUG, shost,
				     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
				     be64_to_cpu(ch->path.dgid.global.subnet_prefix),
				     be64_to_cpu(ch->path.dgid.global.interface_id));

			ch->status = SRP_PORT_REDIRECT;
		} else {
			shost_printk(KERN_WARNING, shost,
				     "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
			ch->status = -ECONNRESET;
		}
		break;

	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
		shost_printk(KERN_WARNING, shost,
			     "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
		ch->status = -ECONNRESET;
		break;

	case IB_CM_REJ_CONSUMER_DEFINED:
		opcode = *(u8 *) event->private_data;
		if (opcode == SRP_LOGIN_REJ) {
			struct srp_login_rej *rej = event->private_data;
			u32 reason = be32_to_cpu(rej->reason);

			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
				shost_printk(KERN_WARNING, shost,
					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
			else
				shost_printk(KERN_WARNING, shost, PFX
					     "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
					     target->sgid.raw,
					     target->orig_dgid.raw, reason);
		} else
			shost_printk(KERN_WARNING, shost,
				     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
				     " opcode 0x%02x\n", opcode);
		ch->status = -ECONNRESET;
		break;

	case IB_CM_REJ_STALE_CONN:
		shost_printk(KERN_WARNING, shost, "  REJ reason: stale connection\n");
		ch->status = SRP_STALE_CONN;
		break;

	default:
		shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
			     event->param.rej_rcvd.reason);
		ch->status = -ECONNRESET;
	}
}

static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct srp_rdma_ch *ch = cm_id->context;
	struct srp_target_port *target = ch->target;
	int comp = 0;

	switch (event->event) {
	case IB_CM_REQ_ERROR:
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Sending CM REQ failed\n");
		comp = 1;
		ch->status = -ECONNRESET;
		break;

	case IB_CM_REP_RECEIVED:
		comp = 1;
		srp_cm_rep_handler(cm_id, event->private_data, ch);
		break;

	case IB_CM_REJ_RECEIVED:
		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
		comp = 1;

		srp_cm_rej_handler(cm_id, event, ch);
		break;

	case IB_CM_DREQ_RECEIVED:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "DREQ received - connection closed\n");
		srp_change_conn_state(target, false);
		if (ib_send_cm_drep(cm_id, NULL, 0))
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "Sending CM DREP failed\n");
		queue_work(system_long_wq, &target->tl_err_work);
		break;

	case IB_CM_TIMEWAIT_EXIT:
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "connection closed\n");
		comp = 1;

		ch->status = 0;
		break;

	case IB_CM_MRA_RECEIVED:
	case IB_CM_DREQ_ERROR:
	case IB_CM_DREP_RECEIVED:
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	if (comp)
		complete(&ch->done);

	return 0;
}

/**
 * srp_change_queue_depth - set device queue depth
 * @sdev: scsi device struct
 * @qdepth: requested queue depth
 *
 * Returns queue depth.
 */
static int
srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	if (!sdev->tagged_supported)
		qdepth = 1;
	return scsi_change_queue_depth(sdev, qdepth);
}

static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag,
			     unsigned int lun, u8 func)
{
	struct srp_target_port *target = ch->target;
	struct srp_rport *rport = target->rport;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu;
	struct srp_tsk_mgmt *tsk_mgmt;

	if (!target->connected || target->qp_in_error)
		return -1;

	init_completion(&ch->tsk_mgmt_done);

	/*
	 * Lock the rport mutex to prevent srp_create_ch_ib() from being
	 * invoked while a task management function is being sent.
	 */
	mutex_lock(&rport->mutex);
	spin_lock_irq(&ch->lock);
	iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
	spin_unlock_irq(&ch->lock);

	if (!iu) {
		mutex_unlock(&rport->mutex);

		return -1;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
				   DMA_TO_DEVICE);
	tsk_mgmt = iu->buf;
	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);

	tsk_mgmt->opcode = SRP_TSK_MGMT;
	tsk_mgmt->lun = cpu_to_be64((u64) lun << 48);
	tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
	tsk_mgmt->tsk_mgmt_func = func;
	tsk_mgmt->task_tag = req_tag;

	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
				      DMA_TO_DEVICE);
	if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
		srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
		mutex_unlock(&rport->mutex);

		return -1;
	}
	mutex_unlock(&rport->mutex);

	if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
		return -1;

	return 0;
}

static int srp_abort(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
	u32 tag;
	u16 ch_idx;
	struct srp_rdma_ch *ch;
	int ret;

	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");

	if (!req)
		return SUCCESS;
	tag = blk_mq_unique_tag(scmnd->request);
	ch_idx = blk_mq_unique_tag_to_hwq(tag);
	if (WARN_ON_ONCE(ch_idx >= target->ch_count))
		return SUCCESS;
	ch = &target->ch[ch_idx];
	if (!srp_claim_req(ch, req, NULL, scmnd))
		return SUCCESS;
	shost_printk(KERN_ERR, target->scsi_host,
		     "Sending SRP abort for tag %#x\n", tag);
	if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
			      SRP_TSK_ABORT_TASK) == 0)
		ret = SUCCESS;
	else if (target->rport->state == SRP_RPORT_LOST)
		ret = FAST_IO_FAIL;
	else
		ret = FAILED;
	srp_free_req(ch, req, scmnd, 0);
	scmnd->result = DID_ABORT << 16;
	scmnd->scsi_done(scmnd);

	return ret;
}

static int srp_reset_device(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_rdma_ch *ch;
	int i, j;

	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");

	ch = &target->ch[0];
	if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
			      SRP_TSK_LUN_RESET))
		return FAILED;
	if (ch->tsk_mgmt_status)
		return FAILED;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		/*
		 * Use a separate loop variable for the request ring:
		 * reusing 'i' here would clobber the channel index and
		 * end the outer loop after the first channel.
		 */
		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
		}
	}

	return SUCCESS;
}

static int srp_reset_host(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);

	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");

	return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
}

static int srp_slave_configure(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct srp_target_port *target = host_to_target(shost);
	struct request_queue *q = sdev->request_queue;
	unsigned long timeout;

	if (sdev->type == TYPE_DISK) {
		timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
		blk_queue_rq_timeout(q, timeout);
	}

	return 0;
}

static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->id_ext));
}

static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->ioc_guid));
}

static ssize_t show_service_id(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->service_id));
}

static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
}

static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%pI6\n", target->sgid.raw);
}

static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));
	struct srp_rdma_ch *ch = &target->ch[0];

	return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
}

static ssize_t show_orig_dgid(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
}

static ssize_t show_req_lim(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));
	struct srp_rdma_ch *ch;
	int i, req_lim = INT_MAX;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		req_lim = min(req_lim, ch->req_lim);
	}
	return sprintf(buf, "%d\n", req_lim);
}

static ssize_t show_zero_req_lim(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->zero_req_lim);
}

static ssize_t show_local_ib_port(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->srp_host->port);
}

static ssize_t show_local_ib_device(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
}

static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->ch_count);
}

static ssize_t show_comp_vector(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->comp_vector);
}

static ssize_t show_tl_retry_count(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->tl_retry_count);
}

static ssize_t show_cmd_sg_entries(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%u\n", target->cmd_sg_cnt);
}

static ssize_t show_allow_ext_sg(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
}

static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);

static struct device_attribute *srp_host_attrs[] = {
	&dev_attr_id_ext,
	&dev_attr_ioc_guid,
	&dev_attr_service_id,
	&dev_attr_pkey,
	&dev_attr_sgid,
	&dev_attr_dgid,
	&dev_attr_orig_dgid,
	&dev_attr_req_lim,
	&dev_attr_zero_req_lim,
	&dev_attr_local_ib_port,
	&dev_attr_local_ib_device,
	&dev_attr_ch_count,
	&dev_attr_comp_vector,
	&dev_attr_tl_retry_count,
	&dev_attr_cmd_sg_entries,
	&dev_attr_allow_ext_sg,
	NULL
};

static struct scsi_host_template srp_template = {
	.module			 = THIS_MODULE,
	.name			 = "InfiniBand SRP initiator",
	.proc_name		 = DRV_NAME,
	.slave_configure	 = srp_slave_configure,
	.info			 = srp_target_info,
	.queuecommand		 = srp_queuecommand,
	.change_queue_depth	 = srp_change_queue_depth,
	.eh_abort_handler	 = srp_abort,
	.eh_device_reset_handler = srp_reset_device,
	.eh_host_reset_handler	 = srp_reset_host,
	.skip_settle_delay	 = true,
	.sg_tablesize		 = SRP_DEF_SG_TABLESIZE,
	.can_queue		 = SRP_DEFAULT_CMD_SQ_SIZE,
	.this_id		 = -1,
	.cmd_per_lun		 = SRP_DEFAULT_CMD_SQ_SIZE,
	.use_clustering		 = ENABLE_CLUSTERING,
	.shost_attrs		 = srp_host_attrs,
	.use_blk_tags		 = 1,
	.track_queue_depth	 = 1,
};

static int srp_sdev_count(struct Scsi_Host *host)
{
	struct scsi_device *sdev;
	int c = 0;

	shost_for_each_device(sdev, host)
		c++;

	return c;
}

static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
	struct srp_rport_identifiers ids;
	struct srp_rport *rport;

	target->state = SRP_TARGET_SCANNING;
	sprintf(target->target_name, "SRP.T10:%016llX",
		(unsigned long long) be64_to_cpu(target->id_ext));

	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
		return -ENODEV;

	memcpy(ids.port_id, &target->id_ext, 8);
	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
	ids.roles = SRP_RPORT_ROLE_TARGET;
	rport = srp_rport_add(target->scsi_host, &ids);
	if (IS_ERR(rport)) {
		scsi_remove_host(target->scsi_host);
		return PTR_ERR(rport);
	}

	rport->lld_data = target;
	target->rport = rport;

	spin_lock(&host->target_lock);
	list_add_tail(&target->list, &host->target_list);
	spin_unlock(&host->target_lock);

	scsi_scan_target(&target->scsi_host->shost_gendev,
			 0, target->scsi_id, SCAN_WILD_CARD, 0);

	if (!target->connected || target->qp_in_error) {
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "SCSI scan failed - removing SCSI host\n");
		srp_queue_remove_work(target);
		goto out;
	}

	pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n",
		 dev_name(&target->scsi_host->shost_gendev),
		 srp_sdev_count(target->scsi_host));

	spin_lock_irq(&target->lock);
	if (target->state == SRP_TARGET_SCANNING)
		target->state = SRP_TARGET_LIVE;
	spin_unlock_irq(&target->lock);

out:
	return 0;
}

static void srp_release_dev(struct device *dev)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);

	complete(&host->released);
}

static struct class srp_class = {
	.name        = "infiniband_srp",
	.dev_release = srp_release_dev
};

/**
 * srp_conn_unique() - check whether the connection to a target is unique
 * @host:   SRP host.
 * @target: SRP target port.
 */
static bool srp_conn_unique(struct srp_host *host,
			    struct srp_target_port *target)
{
	struct srp_target_port *t;
	bool ret = false;

	if (target->state == SRP_TARGET_REMOVED)
		goto out;

	ret = true;

	spin_lock(&host->target_lock);
	list_for_each_entry(t, &host->target_list, list) {
		if (t != target &&
		    target->id_ext == t->id_ext &&
		    target->ioc_guid == t->ioc_guid &&
		    target->initiator_ext == t->initiator_ext) {
			ret = false;
			break;
		}
	}
	spin_unlock(&host->target_lock);

out:
	return ret;
}

/*
 * Target ports are added by writing
 *
 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
 *     pkey=<P_Key>,service_id=<service ID>
 *
 * to the add_target sysfs attribute.
 */
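/*
 * A usage sketch for the comment above. All identifier values below are
 * hypothetical, and the "srp-mlx4_0-1" directory name follows the
 * "srp-<device>-<port>" pattern used by srp_add_port() further down:
 *
 *   echo "id_ext=200100e08b000000,ioc_guid=0002c90300000044,dgid=fe800000000000000002c90300000044,pkey=ffff,service_id=0002c90300000044" > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 */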
enum {
	SRP_OPT_ERR		= 0,
	SRP_OPT_ID_EXT		= 1 << 0,
	SRP_OPT_IOC_GUID	= 1 << 1,
	SRP_OPT_DGID		= 1 << 2,
	SRP_OPT_PKEY		= 1 << 3,
	SRP_OPT_SERVICE_ID	= 1 << 4,
	SRP_OPT_MAX_SECT	= 1 << 5,
	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
	SRP_OPT_IO_CLASS	= 1 << 7,
	SRP_OPT_INITIATOR_EXT	= 1 << 8,
	SRP_OPT_CMD_SG_ENTRIES	= 1 << 9,
	SRP_OPT_ALLOW_EXT_SG	= 1 << 10,
	SRP_OPT_SG_TABLESIZE	= 1 << 11,
	SRP_OPT_COMP_VECTOR	= 1 << 12,
	SRP_OPT_TL_RETRY_COUNT	= 1 << 13,
	SRP_OPT_QUEUE_SIZE	= 1 << 14,
	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
				   SRP_OPT_IOC_GUID	|
				   SRP_OPT_DGID		|
				   SRP_OPT_PKEY		|
				   SRP_OPT_SERVICE_ID),
};

static const match_table_t srp_opt_tokens = {
	{ SRP_OPT_ID_EXT,		"id_ext=%s"		},
	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s"		},
	{ SRP_OPT_DGID,			"dgid=%s"		},
	{ SRP_OPT_PKEY,			"pkey=%x"		},
	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
	{ SRP_OPT_MAX_SECT,		"max_sect=%d"		},
	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d"	},
	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u"	},
	{ SRP_OPT_ALLOW_EXT_SG,		"allow_ext_sg=%u"	},
	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u"	},
	{ SRP_OPT_COMP_VECTOR,		"comp_vector=%u"	},
	{ SRP_OPT_TL_RETRY_COUNT,	"tl_retry_count=%u"	},
	{ SRP_OPT_QUEUE_SIZE,		"queue_size=%d"		},
	{ SRP_OPT_ERR,			NULL			}
};

static int srp_parse_options(const char *buf, struct srp_target_port *target)
{
	char *options, *sep_opt;
	char *p;
	char dgid[3];
	substring_t args[MAX_OPT_ARGS];
	int opt_mask = 0;
	int token;
	int ret = -EINVAL;
	int i;

	options = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	sep_opt = options;
	while ((p = strsep(&sep_opt, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, srp_opt_tokens, args);
		opt_mask |= token;

		switch (token) {
		case SRP_OPT_ID_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_IOC_GUID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_DGID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(p) != 32) {
				pr_warn("bad dest GID parameter '%s'\n", p);
				kfree(p);
				goto out;
			}

			for (i = 0; i < 16; ++i) {
				strlcpy(dgid, p + i * 2, sizeof(dgid));
				if (sscanf(dgid, "%hhx",
					   &target->orig_dgid.raw[i]) < 1) {
					ret = -EINVAL;
					kfree(p);
					goto out;
				}
			}
			kfree(p);
			break;

		case SRP_OPT_PKEY:
			if (match_hex(args, &token)) {
				pr_warn("bad P_Key parameter '%s'\n", p);
				goto out;
			}
			target->pkey = cpu_to_be16(token);
			break;

		case SRP_OPT_SERVICE_ID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_MAX_SECT:
			if (match_int(args, &token)) {
				pr_warn("bad max sect parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->max_sectors = token;
			break;

		case SRP_OPT_QUEUE_SIZE:
			if (match_int(args, &token) || token < 1) {
				pr_warn("bad queue_size parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->can_queue = token;
			target->queue_size = token + SRP_RSP_SQ_SIZE +
					     SRP_TSK_MGMT_SQ_SIZE;
			if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
				target->scsi_host->cmd_per_lun = token;
			break;

		case SRP_OPT_MAX_CMD_PER_LUN:
			if (match_int(args, &token) || token < 1) {
				pr_warn("bad max cmd_per_lun parameter '%s'\n",
					p);
				goto out;
			}
			target->scsi_host->cmd_per_lun = token;
			break;

		case SRP_OPT_IO_CLASS:
			if (match_hex(args, &token)) {
				pr_warn("bad IO class parameter '%s'\n", p);
				goto out;
			}
			if (token != SRP_REV10_IB_IO_CLASS &&
			    token != SRP_REV16A_IB_IO_CLASS) {
				pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
					token, SRP_REV10_IB_IO_CLASS,
					SRP_REV16A_IB_IO_CLASS);
				goto out;
			}
			target->io_class = token;
			break;

		case SRP_OPT_INITIATOR_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_CMD_SG_ENTRIES:
			if (match_int(args, &token) || token < 1 || token > 255) {
				pr_warn("bad max cmd_sg_entries parameter '%s'\n",
					p);
				goto out;
			}
			target->cmd_sg_cnt = token;
			break;

		case SRP_OPT_ALLOW_EXT_SG:
			if (match_int(args, &token)) {
				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
				goto out;
			}
			target->allow_ext_sg = !!token;
			break;

		case SRP_OPT_SG_TABLESIZE:
			if (match_int(args, &token) || token < 1 ||
			    token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
				pr_warn("bad max sg_tablesize parameter '%s'\n",
					p);
				goto out;
			}
			target->sg_tablesize = token;
			break;

		case SRP_OPT_COMP_VECTOR:
			if (match_int(args, &token) || token < 0) {
				pr_warn("bad comp_vector parameter '%s'\n", p);
				goto out;
			}
			target->comp_vector = token;
			break;

		case SRP_OPT_TL_RETRY_COUNT:
			if (match_int(args, &token) || token < 2 || token > 7) {
				pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
					p);
				goto out;
			}
			target->tl_retry_count = token;
			break;

		default:
			pr_warn("unknown parameter or missing value '%s' in target creation request\n",
				p);
			goto out;
		}
	}

	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
		ret = 0;
	else
		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
			    !(srp_opt_tokens[i].token & opt_mask))
				pr_warn("target creation request is missing parameter '%s'\n",
					srp_opt_tokens[i].pattern);

	if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
	    && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
		pr_warn("cmd_per_lun = %d > queue_size = %d\n",
			target->scsi_host->cmd_per_lun,
			target->scsi_host->can_queue);

out:
	kfree(options);
	return ret;
}

static ssize_t srp_create_target(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);
	struct Scsi_Host *target_host;
	struct srp_target_port *target;
	struct srp_rdma_ch *ch;
	struct srp_device *srp_dev = host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	int ret, node_idx, node, cpu, i;
	bool multich = false;

	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
	if (!target_host)
		return -ENOMEM;

	target_host->transportt  = ib_srp_transport_template;
	target_host->max_channel = 0;
	target_host->max_id      = 1;
	target_host->max_lun     = SRP_MAX_LUN;
	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;

	target = host_to_target(target_host);

	target->io_class	= SRP_REV16A_IB_IO_CLASS;
	target->scsi_host	= target_host;
	target->srp_host	= host;
	target->lkey		= host->srp_dev->mr->lkey;
	target->rkey		= host->srp_dev->mr->rkey;
	target->cmd_sg_cnt	= cmd_sg_entries;
	target->sg_tablesize	= indirect_sg_entries ? : cmd_sg_entries;
	target->allow_ext_sg	= allow_ext_sg;
	target->tl_retry_count	= 7;
	target->queue_size	= SRP_DEFAULT_QUEUE_SIZE;

	/*
	 * Prevent the SCSI host from being removed by srp_remove_target()
	 * before this function returns.
	 */
	scsi_host_get(target->scsi_host);

	mutex_lock(&host->add_target_mutex);

	ret = srp_parse_options(buf, target);
	if (ret)
		goto out;

	ret = scsi_init_shared_tag_map(target_host, target_host->can_queue);
	if (ret)
		goto out;

	target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;

	if (!srp_conn_unique(target->srp_host, target)) {
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
			     be64_to_cpu(target->id_ext),
			     be64_to_cpu(target->ioc_guid),
			     be64_to_cpu(target->initiator_ext));
		ret = -EEXIST;
		goto out;
	}

	if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
	    target->cmd_sg_cnt < target->sg_tablesize) {
		pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
		target->sg_tablesize = target->cmd_sg_cnt;
	}

	target_host->sg_tablesize = target->sg_tablesize;
	target->indirect_size = target->sg_tablesize *
				sizeof (struct srp_direct_buf);
	target->max_iu_len = sizeof (struct srp_cmd) +
			     sizeof (struct srp_indirect_buf) +
			     target->cmd_sg_cnt * sizeof (struct srp_direct_buf);

	INIT_WORK(&target->tl_err_work, srp_tl_err_work);
	INIT_WORK(&target->remove_work, srp_remove_work);
	spin_lock_init(&target->lock);
	ret = ib_query_gid(ibdev, host->port, 0, &target->sgid);
	if (ret)
		goto out;

	ret = -ENOMEM;
	target->ch_count = max_t(unsigned, num_online_nodes(),
				 min(ch_count ? :
				     min(4 * num_online_nodes(),
					 ibdev->num_comp_vectors),
				     num_online_cpus()));
	target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
			     GFP_KERNEL);
	if (!target->ch)
		goto out;

	node_idx = 0;
	for_each_online_node(node) {
		const int ch_start = (node_idx * target->ch_count /
				      num_online_nodes());
		const int ch_end = ((node_idx + 1) * target->ch_count /
				    num_online_nodes());
		const int cv_start = (node_idx * ibdev->num_comp_vectors /
				      num_online_nodes() + target->comp_vector)
				     % ibdev->num_comp_vectors;
		const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
				    num_online_nodes() + target->comp_vector)
				   % ibdev->num_comp_vectors;
		int cpu_idx = 0;

		for_each_online_cpu(cpu) {
			if (cpu_to_node(cpu) != node)
				continue;
			if (ch_start + cpu_idx >= ch_end)
				continue;
			ch = &target->ch[ch_start + cpu_idx];
			ch->target = target;
			ch->comp_vector = cv_start == cv_end ? cv_start :
				cv_start + cpu_idx % (cv_end - cv_start);
			spin_lock_init(&ch->lock);
			INIT_LIST_HEAD(&ch->free_tx);
			ret = srp_new_cm_id(ch);
			if (ret)
				goto err_disconnect;

			ret = srp_create_ch_ib(ch);
			if (ret)
				goto err_disconnect;

			ret = srp_alloc_req_data(ch);
			if (ret)
				goto err_disconnect;

			ret = srp_connect_ch(ch, multich);
			if (ret) {
				shost_printk(KERN_ERR, target->scsi_host,
					     PFX "Connection %d/%d failed\n",
					     ch_start + cpu_idx,
					     target->ch_count);
				if (node_idx == 0 && cpu_idx == 0) {
					goto err_disconnect;
				} else {
					srp_free_ch_ib(target, ch);
					srp_free_req_data(target, ch);
					target->ch_count = ch - target->ch;
					break;
				}
			}

			multich = true;
			cpu_idx++;
		}
		node_idx++;
	}

	target->scsi_host->nr_hw_queues = target->ch_count;

	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

	if (target->state != SRP_TARGET_REMOVED) {
		shost_printk(KERN_DEBUG, target->scsi_host, PFX
			     "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
			     be64_to_cpu(target->id_ext),
			     be64_to_cpu(target->ioc_guid),
			     be16_to_cpu(target->pkey),
			     be64_to_cpu(target->service_id),
			     target->sgid.raw, target->orig_dgid.raw);
	}

	ret = count;

out:
	mutex_unlock(&host->add_target_mutex);

	scsi_host_put(target->scsi_host);

	return ret;

err_disconnect:
	srp_disconnect_target(target);

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
		srp_free_req_data(target, ch);
	}

	kfree(target->ch);
	goto out;
}

static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);

static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%s\n", host->srp_dev->dev->name);
}

static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%d\n", host->port);
}

static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);

static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	spin_lock_init(&host->target_lock);
	init_completion(&host->released);
	mutex_init(&host->add_target_mutex);
	host->srp_dev = device;
	host->port = port;

	host->dev.class = &srp_class;
	host->dev.parent = device->dev->dma_device;
	dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);

	if (device_register(&host->dev))
		goto free_host;
	if (device_create_file(&host->dev, &dev_attr_add_target))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_ibdev))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_port))
		goto err_class;

	return host;

err_class:
	device_unregister(&host->dev);

free_host:
	kfree(host);

	return NULL;
}

static void srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct ib_device_attr *dev_attr;
	struct srp_host *host;
	int mr_page_shift, s, e, p;
	u64 max_pages_per_mr;

	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
	if (!dev_attr)
		return;

	if (ib_query_device(device, dev_attr)) {
		pr_warn("Query device failed for %s\n", device->name);
		goto free_attr;
	}

	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
	if (!srp_dev)
		goto free_attr;

	srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
			    device->map_phys_fmr && device->unmap_fmr);
	srp_dev->has_fr = (dev_attr->device_cap_flags &
			   IB_DEVICE_MEM_MGT_EXTENSIONS);
	if (!srp_dev->has_fmr && !srp_dev->has_fr)
		dev_warn(&device->dev, "neither FMR nor FR is supported\n");

	srp_dev->use_fast_reg = (srp_dev->has_fr &&
				 (!srp_dev->has_fmr || prefer_fr));

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 4096 bytes. We're unlikely to build large sglists
	 * out of smaller entries.
	 */
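	/*
	 * Worked example with a hypothetical capability mask: for
	 * page_size_cap = 0x20f000 the lowest set bit is bit 12, so
	 * ffs() - 1 = 12 and mr_page_size becomes 1 << 12 = 4096 bytes,
	 * with mr_page_mask = ~0xfff covering exactly that page size.
	 */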
	mr_page_shift		= max(12, ffs(dev_attr->page_size_cap) - 1);
	srp_dev->mr_page_size	= 1 << mr_page_shift;
	srp_dev->mr_page_mask	= ~((u64) srp_dev->mr_page_size - 1);
	max_pages_per_mr	= dev_attr->max_mr_size;
	do_div(max_pages_per_mr, srp_dev->mr_page_size);
	srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
					  max_pages_per_mr);
	if (srp_dev->use_fast_reg) {
		srp_dev->max_pages_per_mr =
			min_t(u32, srp_dev->max_pages_per_mr,
			      dev_attr->max_fast_reg_page_list_len);
	}
	srp_dev->mr_max_size	= srp_dev->mr_page_size *
				  srp_dev->max_pages_per_mr;
	pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
		 device->name, mr_page_shift, dev_attr->max_mr_size,
		 dev_attr->max_fast_reg_page_list_len,
		 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);

	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd  = ib_alloc_pd(device);
	if (IS_ERR(srp_dev->pd))
		goto free_dev;

	srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
				    IB_ACCESS_LOCAL_WRITE |
				    IB_ACCESS_REMOTE_READ |
				    IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(srp_dev->mr))
		goto err_pd;

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);

	goto free_attr;

err_pd:
	ib_dealloc_pd(srp_dev->pd);

free_dev:
	kfree(srp_dev);

free_attr:
	kfree(dev_attr);
}

static void srp_remove_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	struct srp_target_port *target;

	srp_dev = ib_get_client_data(device, &srp_client);
	if (!srp_dev)
		return;

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		device_unregister(&host->dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Remove all target ports.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list)
			srp_queue_remove_work(target);
		spin_unlock(&host->target_lock);

		/*
		 * Wait for tl_err and target port removal tasks.
		 */
		flush_workqueue(system_long_wq);
		flush_workqueue(srp_remove_wq);

		kfree(host);
	}

	ib_dereg_mr(srp_dev->mr);
	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}

static struct srp_function_template ib_srp_transport_functions = {
	.has_rport_state	= true,
	.reset_timer_if_blocked	= true,
	.reconnect_delay	= &srp_reconnect_delay,
	.fast_io_fail_tmo	= &srp_fast_io_fail_tmo,
	.dev_loss_tmo		= &srp_dev_loss_tmo,
	.reconnect		= srp_rport_reconnect,
	.rport_delete		= srp_rport_delete,
	.terminate_rport_io	= srp_terminate_io,
};

static int __init srp_init_module(void)
{
	int ret;

	BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));

	if (srp_sg_tablesize) {
		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
		if (!cmd_sg_entries)
			cmd_sg_entries = srp_sg_tablesize;
	}

	if (!cmd_sg_entries)
		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;

	if (cmd_sg_entries > 255) {
		pr_warn("Clamping cmd_sg_entries to 255\n");
		cmd_sg_entries = 255;
	}

	if (!indirect_sg_entries)
		indirect_sg_entries = cmd_sg_entries;
	else if (indirect_sg_entries < cmd_sg_entries) {
		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
			cmd_sg_entries);
		indirect_sg_entries = cmd_sg_entries;
	}

	srp_remove_wq = create_workqueue("srp_remove");
	if (!srp_remove_wq) {
		ret = -ENOMEM;
		goto out;
	}

	ret = -ENOMEM;
	ib_srp_transport_template =
		srp_attach_transport(&ib_srp_transport_functions);
	if (!ib_srp_transport_template)
		goto destroy_wq;

	ret = class_register(&srp_class);
	if (ret) {
		pr_err("couldn't register class infiniband_srp\n");
		goto release_tr;
	}

	ib_sa_register_client(&srp_sa_client);

	ret = ib_register_client(&srp_client);
	if (ret) {
		pr_err("couldn't register IB client\n");
		goto unreg_sa;
	}

out:
	return ret;

unreg_sa:
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);

release_tr:
	srp_release_transport(ib_srp_transport_template);

destroy_wq:
	destroy_workqueue(srp_remove_wq);
	goto out;
}

static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);
	srp_release_transport(ib_srp_transport_template);
	destroy_workqueue(srp_remove_wq);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);