/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <rdma/ib_cache.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME        "ib_srp"
#define PFX             DRV_NAME ": "
#define DRV_VERSION     "1.0"
#define DRV_RELDATE     "July 1, 2013"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
                   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool prefer_fr;
static bool register_always;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
                 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
                 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
                 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
                 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(prefer_fr, bool, 0444);
MODULE_PARM_DESC(prefer_fr,
                 "Whether to use fast registration if both FMR and fast registration are supported");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
                 "Use memory registration even for contiguous memory regions");

static struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
                S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
                S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
                 "Number of seconds between the observation of a transport"
                 " layer error and failing all I/O. \"off\" means that this"
                 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
                S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
                 "Maximum number of seconds that the SRP transport should"
                 " insulate the SCSI host from transport layer errors. After"
                 " this time has been exceeded the SCSI host is removed."
                 " Should be between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
                 " if fast_io_fail_tmo has not been set. \"off\" means that"
                 " this functionality is disabled.");

static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
                 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");

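/*
 * Illustrative usage (hypothetical values, not part of the original
 * source): the parameters above are declared with mode 0444, so they can
 * only be set when the module is loaded, e.g.:
 *
 *   modprobe ib_srp cmd_sg_entries=16 ch_count=2
 *
 * Valid ranges are documented in the MODULE_PARM_DESC() strings above.
 */
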
static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr);
static void srp_send_completion(struct ib_cq *cq, void *ch_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
        .name   = "srp",
        .add    = srp_add_one,
        .remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
        int tmo = *(int *)kp->arg;

        if (tmo >= 0)
                return sprintf(buffer, "%d", tmo);
        else
                return sprintf(buffer, "off");
}

static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
        int tmo, res;

        if (strncmp(val, "off", 3) != 0) {
                res = kstrtoint(val, 0, &tmo);
                if (res)
                        goto out;
        } else {
                tmo = -1;
        }
        if (kp->arg == &srp_reconnect_delay)
                res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
                                    srp_dev_loss_tmo);
        else if (kp->arg == &srp_fast_io_fail_tmo)
                res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
        else
                res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
                                    tmo);
        if (res)
                goto out;
        *(int *)kp->arg = tmo;

out:
        return res;
}

static struct kernel_param_ops srp_tmo_ops = {
        .get = srp_tmo_get,
        .set = srp_tmo_set,
};

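/*
 * Illustrative usage (hypothetical values): the three timeout parameters
 * above are registered with S_IWUSR, so root can tune them at runtime
 * through sysfs, and srp_tmo_set() accepts either an integer or "off":
 *
 *   echo 20  > /sys/module/ib_srp/parameters/fast_io_fail_tmo
 *   echo off > /sys/module/ib_srp/parameters/dev_loss_tmo
 *
 * Each write is checked against the other two values via srp_tmo_valid()
 * before it takes effect.
 */
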
static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
        return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
        return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
        static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
        static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

        return topspin_workarounds &&
                (!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
                 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
                                   gfp_t gfp_mask,
                                   enum dma_data_direction direction)
{
        struct srp_iu *iu;

        iu = kmalloc(sizeof *iu, gfp_mask);
        if (!iu)
                goto out;

        iu->buf = kzalloc(size, gfp_mask);
        if (!iu->buf)
                goto out_free_iu;

        iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
                                    direction);
        if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
                goto out_free_buf;

        iu->size      = size;
        iu->direction = direction;

        return iu;

out_free_buf:
        kfree(iu->buf);
out_free_iu:
        kfree(iu);
out:
        return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
        if (!iu)
                return;

        ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
                            iu->direction);
        kfree(iu->buf);
        kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
        pr_debug("QP event %d\n", event->event);
}

static int srp_init_qp(struct srp_target_port *target,
                       struct ib_qp *qp)
{
        struct ib_qp_attr *attr;
        int ret;

        attr = kmalloc(sizeof *attr, GFP_KERNEL);
        if (!attr)
                return -ENOMEM;

        ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
                                  target->srp_host->port,
                                  be16_to_cpu(target->pkey),
                                  &attr->pkey_index);
        if (ret)
                goto out;

        attr->qp_state        = IB_QPS_INIT;
        attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
                                 IB_ACCESS_REMOTE_WRITE);
        attr->port_num        = target->srp_host->port;

        ret = ib_modify_qp(qp, attr,
                           IB_QP_STATE          |
                           IB_QP_PKEY_INDEX     |
                           IB_QP_ACCESS_FLAGS   |
                           IB_QP_PORT);

out:
        kfree(attr);
        return ret;
}

static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
        struct srp_target_port *target = ch->target;
        struct ib_cm_id *new_cm_id;

        new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
                                    srp_cm_handler, ch);
        if (IS_ERR(new_cm_id))
                return PTR_ERR(new_cm_id);

        if (ch->cm_id)
                ib_destroy_cm_id(ch->cm_id);
        ch->cm_id = new_cm_id;
        ch->path.sgid = target->sgid;
        ch->path.dgid = target->orig_dgid;
        ch->path.pkey = target->pkey;
        ch->path.service_id = target->service_id;

        return 0;
}

static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
{
        struct srp_device *dev = target->srp_host->srp_dev;
        struct ib_fmr_pool_param fmr_param;

        memset(&fmr_param, 0, sizeof(fmr_param));
        fmr_param.pool_size         = target->scsi_host->can_queue;
        fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
        fmr_param.cache             = 1;
        fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
        fmr_param.page_shift        = ilog2(dev->mr_page_size);
        fmr_param.access            = (IB_ACCESS_LOCAL_WRITE |
                                       IB_ACCESS_REMOTE_WRITE |
                                       IB_ACCESS_REMOTE_READ);

        return ib_create_fmr_pool(dev->pd, &fmr_param);
}

/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
        int i;
        struct srp_fr_desc *d;

        if (!pool)
                return;

        for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
                if (d->frpl)
                        ib_free_fast_reg_page_list(d->frpl);
                if (d->mr)
                        ib_dereg_mr(d->mr);
        }
        kfree(pool);
}

/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
                                              struct ib_pd *pd, int pool_size,
                                              int max_page_list_len)
{
        struct srp_fr_pool *pool;
        struct srp_fr_desc *d;
        struct ib_mr *mr;
        struct ib_fast_reg_page_list *frpl;
        int i, ret = -EINVAL;

        if (pool_size <= 0)
                goto err;
        ret = -ENOMEM;
        pool = kzalloc(sizeof(struct srp_fr_pool) +
                       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
        if (!pool)
                goto err;
        pool->size = pool_size;
        pool->max_page_list_len = max_page_list_len;
        spin_lock_init(&pool->lock);
        INIT_LIST_HEAD(&pool->free_list);

        for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
                mr = ib_alloc_fast_reg_mr(pd, max_page_list_len);
                if (IS_ERR(mr)) {
                        ret = PTR_ERR(mr);
                        goto destroy_pool;
                }
                d->mr = mr;
                frpl = ib_alloc_fast_reg_page_list(device, max_page_list_len);
                if (IS_ERR(frpl)) {
                        ret = PTR_ERR(frpl);
                        goto destroy_pool;
                }
                d->frpl = frpl;
                list_add_tail(&d->entry, &pool->free_list);
        }

out:
        return pool;

destroy_pool:
        srp_destroy_fr_pool(pool);

err:
        pool = ERR_PTR(ret);
        goto out;
}

/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
        struct srp_fr_desc *d = NULL;
        unsigned long flags;

        spin_lock_irqsave(&pool->lock, flags);
        if (!list_empty(&pool->free_list)) {
                d = list_first_entry(&pool->free_list, typeof(*d), entry);
                list_del(&d->entry);
        }
        spin_unlock_irqrestore(&pool->lock, flags);

        return d;
}

/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
                            int n)
{
        unsigned long flags;
        int i;

        spin_lock_irqsave(&pool->lock, flags);
        for (i = 0; i < n; i++)
                list_add(&desc[i]->entry, &pool->free_list);
        spin_unlock_irqrestore(&pool->lock, flags);
}

static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
        struct srp_device *dev = target->srp_host->srp_dev;

        return srp_create_fr_pool(dev->dev, dev->pd,
                                  target->scsi_host->can_queue,
                                  dev->max_pages_per_mr);
}

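/*
 * Sketch of the descriptor life cycle implemented by the pool above, as
 * used by srp_map_finish_fr() and srp_unmap_data() later in this file:
 *
 *   desc = srp_fr_pool_get(pool);      // take a descriptor off the free list
 *   ...post an IB_WR_FAST_REG_MR work request using desc->mr/desc->frpl...
 *   ...I/O completes...
 *   srp_inv_rkey(ch, desc->mr->rkey);  // queue invalidation of the rkey
 *   srp_fr_pool_put(pool, &desc, 1);   // only then return the descriptor
 *
 * Per the srp_fr_pool_put() kernel-doc, the invalidation must be queued
 * before the descriptor is put back.
 */
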
/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @ch: SRP RDMA channel.
 *
 * Change a queue pair into the error state and wait until all receive
 * completions have been processed before destroying it. This prevents
 * the receive completion handler from accessing the queue pair while it
 * is being destroyed.
 */
static void srp_destroy_qp(struct srp_rdma_ch *ch)
{
        static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
        static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID };
        struct ib_recv_wr *bad_wr;
        int ret;

        /* Destroying a QP and reusing ch->done is only safe if not connected */
        WARN_ON_ONCE(ch->connected);

        ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
        WARN_ONCE(ret, "ib_modify_qp() returned %d\n", ret);
        if (ret)
                goto out;

        init_completion(&ch->done);
        ret = ib_post_recv(ch->qp, &wr, &bad_wr);
        WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
        if (ret == 0)
                wait_for_completion(&ch->done);

out:
        ib_destroy_qp(ch->qp);
}
490
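/*
 * The drain pattern above relies on IB completion ordering: once the QP
 * is in the error state, the marker receive WR (SRP_LAST_WR_ID) is
 * flushed only after all previously posted receive WRs have completed.
 * The receive completion handler presumably recognizes SRP_LAST_WR_ID
 * and completes ch->done; that handler is outside this excerpt.
 */
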
static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
        struct srp_target_port *target = ch->target;
        struct srp_device *dev = target->srp_host->srp_dev;
        struct ib_qp_init_attr *init_attr;
        struct ib_cq *recv_cq, *send_cq;
        struct ib_qp *qp;
        struct ib_fmr_pool *fmr_pool = NULL;
        struct srp_fr_pool *fr_pool = NULL;
        const int m = 1 + dev->use_fast_reg;
        int ret;

        init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
        if (!init_attr)
                return -ENOMEM;

        /* + 1 for SRP_LAST_WR_ID */
        recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
                               target->queue_size + 1, ch->comp_vector);
        if (IS_ERR(recv_cq)) {
                ret = PTR_ERR(recv_cq);
                goto err;
        }

        send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
                               m * target->queue_size, ch->comp_vector);
        if (IS_ERR(send_cq)) {
                ret = PTR_ERR(send_cq);
                goto err_recv_cq;
        }

        ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);

        init_attr->event_handler    = srp_qp_event;
        init_attr->cap.max_send_wr  = m * target->queue_size;
        init_attr->cap.max_recv_wr  = target->queue_size + 1;
        init_attr->cap.max_recv_sge = 1;
        init_attr->cap.max_send_sge = 1;
        init_attr->sq_sig_type      = IB_SIGNAL_REQ_WR;
        init_attr->qp_type          = IB_QPT_RC;
        init_attr->send_cq          = send_cq;
        init_attr->recv_cq          = recv_cq;

        qp = ib_create_qp(dev->pd, init_attr);
        if (IS_ERR(qp)) {
                ret = PTR_ERR(qp);
                goto err_send_cq;
        }

        ret = srp_init_qp(target, qp);
        if (ret)
                goto err_qp;

        if (dev->use_fast_reg && dev->has_fr) {
                fr_pool = srp_alloc_fr_pool(target);
                if (IS_ERR(fr_pool)) {
                        ret = PTR_ERR(fr_pool);
                        shost_printk(KERN_WARNING, target->scsi_host, PFX
                                     "FR pool allocation failed (%d)\n", ret);
                        goto err_qp;
                }
                if (ch->fr_pool)
                        srp_destroy_fr_pool(ch->fr_pool);
                ch->fr_pool = fr_pool;
        } else if (!dev->use_fast_reg && dev->has_fmr) {
                fmr_pool = srp_alloc_fmr_pool(target);
                if (IS_ERR(fmr_pool)) {
                        ret = PTR_ERR(fmr_pool);
                        shost_printk(KERN_WARNING, target->scsi_host, PFX
                                     "FMR pool allocation failed (%d)\n", ret);
                        goto err_qp;
                }
                if (ch->fmr_pool)
                        ib_destroy_fmr_pool(ch->fmr_pool);
                ch->fmr_pool = fmr_pool;
        }

        if (ch->qp)
                srp_destroy_qp(ch);
        if (ch->recv_cq)
                ib_destroy_cq(ch->recv_cq);
        if (ch->send_cq)
                ib_destroy_cq(ch->send_cq);

        ch->qp = qp;
        ch->recv_cq = recv_cq;
        ch->send_cq = send_cq;

        kfree(init_attr);
        return 0;

err_qp:
        ib_destroy_qp(qp);

err_send_cq:
        ib_destroy_cq(send_cq);

err_recv_cq:
        ib_destroy_cq(recv_cq);

err:
        kfree(init_attr);
        return ret;
}

/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
                           struct srp_rdma_ch *ch)
{
        struct srp_device *dev = target->srp_host->srp_dev;
        int i;

        if (!ch->target)
                return;

        if (ch->cm_id) {
                ib_destroy_cm_id(ch->cm_id);
                ch->cm_id = NULL;
        }

        /* If srp_new_cm_id() succeeded but srp_create_ch_ib() did not, return. */
        if (!ch->qp)
                return;

        if (dev->use_fast_reg) {
                if (ch->fr_pool)
                        srp_destroy_fr_pool(ch->fr_pool);
        } else {
                if (ch->fmr_pool)
                        ib_destroy_fmr_pool(ch->fmr_pool);
        }
        srp_destroy_qp(ch);
        ib_destroy_cq(ch->send_cq);
        ib_destroy_cq(ch->recv_cq);

        /*
         * Prevent the SCSI error handler from using this channel after it
         * has been freed: the SCSI error handler may keep trying to perform
         * recovery actions after scsi_remove_host() has returned.
         */
        ch->target = NULL;

        ch->qp = NULL;
        ch->send_cq = ch->recv_cq = NULL;

        if (ch->rx_ring) {
                for (i = 0; i < target->queue_size; ++i)
                        srp_free_iu(target->srp_host, ch->rx_ring[i]);
                kfree(ch->rx_ring);
                ch->rx_ring = NULL;
        }
        if (ch->tx_ring) {
                for (i = 0; i < target->queue_size; ++i)
                        srp_free_iu(target->srp_host, ch->tx_ring[i]);
                kfree(ch->tx_ring);
                ch->tx_ring = NULL;
        }
}

static void srp_path_rec_completion(int status,
                                    struct ib_sa_path_rec *pathrec,
                                    void *ch_ptr)
{
        struct srp_rdma_ch *ch = ch_ptr;
        struct srp_target_port *target = ch->target;

        ch->status = status;
        if (status)
                shost_printk(KERN_ERR, target->scsi_host,
                             PFX "Got failed path rec status %d\n", status);
        else
                ch->path = *pathrec;
        complete(&ch->done);
}

static int srp_lookup_path(struct srp_rdma_ch *ch)
{
        struct srp_target_port *target = ch->target;
        int ret;

        ch->path.numb_path = 1;

        init_completion(&ch->done);

        ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
                                               target->srp_host->srp_dev->dev,
                                               target->srp_host->port,
                                               &ch->path,
                                               IB_SA_PATH_REC_SERVICE_ID |
                                               IB_SA_PATH_REC_DGID       |
                                               IB_SA_PATH_REC_SGID       |
                                               IB_SA_PATH_REC_NUMB_PATH  |
                                               IB_SA_PATH_REC_PKEY,
                                               SRP_PATH_REC_TIMEOUT_MS,
                                               GFP_KERNEL,
                                               srp_path_rec_completion,
                                               ch, &ch->path_query);
        if (ch->path_query_id < 0)
                return ch->path_query_id;

        ret = wait_for_completion_interruptible(&ch->done);
        if (ret < 0)
                return ret;

        if (ch->status < 0)
                shost_printk(KERN_WARNING, target->scsi_host,
                             PFX "Path record query failed\n");

        return ch->status;
}

static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
{
        struct srp_target_port *target = ch->target;
        struct {
                struct ib_cm_req_param param;
                struct srp_login_req   priv;
        } *req = NULL;
        int status;

        req = kzalloc(sizeof *req, GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        req->param.primary_path       = &ch->path;
        req->param.alternate_path     = NULL;
        req->param.service_id         = target->service_id;
        req->param.qp_num             = ch->qp->qp_num;
        req->param.qp_type            = ch->qp->qp_type;
        req->param.private_data       = &req->priv;
        req->param.private_data_len   = sizeof req->priv;
        req->param.flow_control       = 1;

        get_random_bytes(&req->param.starting_psn, 4);
        req->param.starting_psn      &= 0xffffff;

        /*
         * Pick some arbitrary defaults here; we could make these
         * module parameters if anyone cared about setting them.
         */
        req->param.responder_resources        = 4;
        req->param.remote_cm_response_timeout = 20;
        req->param.local_cm_response_timeout  = 20;
        req->param.retry_count                = target->tl_retry_count;
        req->param.rnr_retry_count            = 7;
        req->param.max_cm_retries             = 15;

        req->priv.opcode        = SRP_LOGIN_REQ;
        req->priv.tag           = 0;
        req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
        req->priv.req_buf_fmt   = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
                                              SRP_BUF_FORMAT_INDIRECT);
        req->priv.req_flags     = (multich ? SRP_MULTICHAN_MULTI :
                                   SRP_MULTICHAN_SINGLE);
        /*
         * In the published SRP specification (draft rev. 16a), the
         * port identifier format is 8 bytes of ID extension followed
         * by 8 bytes of GUID.  Older drafts put the two halves in the
         * opposite order, so that the GUID comes first.
         *
         * Targets conforming to these obsolete drafts can be
         * recognized by the I/O Class they report.
         */
        if (target->io_class == SRP_REV10_IB_IO_CLASS) {
                memcpy(req->priv.initiator_port_id,
                       &target->sgid.global.interface_id, 8);
                memcpy(req->priv.initiator_port_id + 8,
                       &target->initiator_ext, 8);
                memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
                memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
        } else {
                memcpy(req->priv.initiator_port_id,
                       &target->initiator_ext, 8);
                memcpy(req->priv.initiator_port_id + 8,
                       &target->sgid.global.interface_id, 8);
                memcpy(req->priv.target_port_id,     &target->id_ext, 8);
                memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
        }

        /*
         * Topspin/Cisco SRP targets will reject our login unless we
         * zero out the first 8 bytes of our initiator port ID and set
         * the second 8 bytes to the local node GUID.
         */
        if (srp_target_is_topspin(target)) {
                shost_printk(KERN_DEBUG, target->scsi_host,
                             PFX "Topspin/Cisco initiator port ID workaround "
                             "activated for target GUID %016llx\n",
                             be64_to_cpu(target->ioc_guid));
                memset(req->priv.initiator_port_id, 0, 8);
                memcpy(req->priv.initiator_port_id + 8,
                       &target->srp_host->srp_dev->dev->node_guid, 8);
        }

        status = ib_send_cm_req(ch->cm_id, &req->param);

        kfree(req);

        return status;
}

static bool srp_queue_remove_work(struct srp_target_port *target)
{
        bool changed = false;

        spin_lock_irq(&target->lock);
        if (target->state != SRP_TARGET_REMOVED) {
                target->state = SRP_TARGET_REMOVED;
                changed = true;
        }
        spin_unlock_irq(&target->lock);

        if (changed)
                queue_work(srp_remove_wq, &target->remove_work);

        return changed;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
        struct srp_rdma_ch *ch;
        int i;

        /* XXX should send SRP_I_LOGOUT request */

        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
                ch->connected = false;
                if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
                        shost_printk(KERN_DEBUG, target->scsi_host,
                                     PFX "Sending CM DREQ failed\n");
                }
        }
}

static void srp_free_req_data(struct srp_target_port *target,
                              struct srp_rdma_ch *ch)
{
        struct srp_device *dev = target->srp_host->srp_dev;
        struct ib_device *ibdev = dev->dev;
        struct srp_request *req;
        int i;

        if (!ch->target || !ch->req_ring)
                return;

        for (i = 0; i < target->req_ring_size; ++i) {
                req = &ch->req_ring[i];
                if (dev->use_fast_reg)
                        kfree(req->fr_list);
                else
                        kfree(req->fmr_list);
                kfree(req->map_page);
                if (req->indirect_dma_addr) {
                        ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
                                            target->indirect_size,
                                            DMA_TO_DEVICE);
                }
                kfree(req->indirect_desc);
        }

        kfree(ch->req_ring);
        ch->req_ring = NULL;
}

static int srp_alloc_req_data(struct srp_rdma_ch *ch)
{
        struct srp_target_port *target = ch->target;
        struct srp_device *srp_dev = target->srp_host->srp_dev;
        struct ib_device *ibdev = srp_dev->dev;
        struct srp_request *req;
        void *mr_list;
        dma_addr_t dma_addr;
        int i, ret = -ENOMEM;

        ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
                               GFP_KERNEL);
        if (!ch->req_ring)
                goto out;

        for (i = 0; i < target->req_ring_size; ++i) {
                req = &ch->req_ring[i];
                mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
                                  GFP_KERNEL);
                if (!mr_list)
                        goto out;
                if (srp_dev->use_fast_reg)
                        req->fr_list = mr_list;
                else
                        req->fmr_list = mr_list;
                req->map_page = kmalloc(srp_dev->max_pages_per_mr *
                                        sizeof(void *), GFP_KERNEL);
                if (!req->map_page)
                        goto out;
                req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
                if (!req->indirect_desc)
                        goto out;

                dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
                                             target->indirect_size,
                                             DMA_TO_DEVICE);
                if (ib_dma_mapping_error(ibdev, dma_addr))
                        goto out;

                req->indirect_dma_addr = dma_addr;
        }
        ret = 0;

out:
        return ret;
}

/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template and that did not exist
 * before invocation of this function will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
        struct device_attribute **attr;

        for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
                device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_target(struct srp_target_port *target)
{
        struct srp_rdma_ch *ch;
        int i;

        WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

        srp_del_scsi_host_attr(target->scsi_host);
        srp_rport_get(target->rport);
        srp_remove_host(target->scsi_host);
        scsi_remove_host(target->scsi_host);
        srp_stop_rport_timers(target->rport);
        srp_disconnect_target(target);
        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
                srp_free_ch_ib(target, ch);
        }
        cancel_work_sync(&target->tl_err_work);
        srp_rport_put(target->rport);
        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
                srp_free_req_data(target, ch);
        }
        kfree(target->ch);
        target->ch = NULL;

        spin_lock(&target->srp_host->target_lock);
        list_del(&target->list);
        spin_unlock(&target->srp_host->target_lock);

        scsi_host_put(target->scsi_host);
}

static void srp_remove_work(struct work_struct *work)
{
        struct srp_target_port *target =
                container_of(work, struct srp_target_port, remove_work);

        WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

        srp_remove_target(target);
}

static void srp_rport_delete(struct srp_rport *rport)
{
        struct srp_target_port *target = rport->lld_data;

        srp_queue_remove_work(target);
}

/**
 * srp_connected_ch() - number of connected channels
 * @target: SRP target port.
 */
static int srp_connected_ch(struct srp_target_port *target)
{
        int i, c = 0;

        for (i = 0; i < target->ch_count; i++)
                c += target->ch[i].connected;

        return c;
}

static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
{
        struct srp_target_port *target = ch->target;
        int ret;

        WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);

        ret = srp_lookup_path(ch);
        if (ret)
                return ret;

        while (1) {
                init_completion(&ch->done);
                ret = srp_send_req(ch, multich);
                if (ret)
                        return ret;
                ret = wait_for_completion_interruptible(&ch->done);
                if (ret < 0)
                        return ret;

                /*
                 * The CM event handling code will set status to
                 * SRP_PORT_REDIRECT if we get a port redirect REJ
                 * back, or SRP_DLID_REDIRECT if we get a lid/qp
                 * redirect REJ back.
                 */
                switch (ch->status) {
                case 0:
                        ch->connected = true;
                        return 0;

                case SRP_PORT_REDIRECT:
                        ret = srp_lookup_path(ch);
                        if (ret)
                                return ret;
                        break;

                case SRP_DLID_REDIRECT:
                        break;

                case SRP_STALE_CONN:
                        shost_printk(KERN_ERR, target->scsi_host, PFX
                                     "giving up on stale connection\n");
                        ch->status = -ECONNRESET;
                        return ch->status;

                default:
                        return ch->status;
                }
        }
}

static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
{
        struct ib_send_wr *bad_wr;
        struct ib_send_wr wr = {
                .opcode             = IB_WR_LOCAL_INV,
                .wr_id              = LOCAL_INV_WR_ID_MASK,
                .next               = NULL,
                .num_sge            = 0,
                .send_flags         = 0,
                .ex.invalidate_rkey = rkey,
        };

        return ib_post_send(ch->qp, &wr, &bad_wr);
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
                           struct srp_rdma_ch *ch,
                           struct srp_request *req)
{
        struct srp_target_port *target = ch->target;
        struct srp_device *dev = target->srp_host->srp_dev;
        struct ib_device *ibdev = dev->dev;
        int i, res;

        if (!scsi_sglist(scmnd) ||
            (scmnd->sc_data_direction != DMA_TO_DEVICE &&
             scmnd->sc_data_direction != DMA_FROM_DEVICE))
                return;

        if (dev->use_fast_reg) {
                struct srp_fr_desc **pfr;

                for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
                        res = srp_inv_rkey(ch, (*pfr)->mr->rkey);
                        if (res < 0) {
                                shost_printk(KERN_ERR, target->scsi_host, PFX
                                             "Queueing INV WR for rkey %#x failed (%d)\n",
                                             (*pfr)->mr->rkey, res);
                                queue_work(system_long_wq,
                                           &target->tl_err_work);
                        }
                }
                if (req->nmdesc)
                        srp_fr_pool_put(ch->fr_pool, req->fr_list,
                                        req->nmdesc);
        } else {
                struct ib_pool_fmr **pfmr;

                for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
                        ib_fmr_pool_unmap(*pfmr);
        }

        ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
                        scmnd->sc_data_direction);
}

/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @ch: SRP RDMA channel.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
                                       struct srp_request *req,
                                       struct scsi_device *sdev,
                                       struct scsi_cmnd *scmnd)
{
        unsigned long flags;

        spin_lock_irqsave(&ch->lock, flags);
        if (req->scmnd &&
            (!sdev || req->scmnd->device == sdev) &&
            (!scmnd || req->scmnd == scmnd)) {
                scmnd = req->scmnd;
                req->scmnd = NULL;
        } else {
                scmnd = NULL;
        }
        spin_unlock_irqrestore(&ch->lock, flags);

        return scmnd;
}

/**
 * srp_free_req() - Unmap data and add request to the free request list.
 * @ch: SRP RDMA channel.
 * @req: Request to be freed.
 * @scmnd: SCSI command associated with @req.
 * @req_lim_delta: Amount to be added to @ch->req_lim.
 */
static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
                         struct scsi_cmnd *scmnd, s32 req_lim_delta)
{
        unsigned long flags;

        srp_unmap_data(scmnd, ch, req);

        spin_lock_irqsave(&ch->lock, flags);
        ch->req_lim += req_lim_delta;
        spin_unlock_irqrestore(&ch->lock, flags);
}

static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
                           struct scsi_device *sdev, int result)
{
        struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);

        if (scmnd) {
                srp_free_req(ch, req, scmnd, 0);
                scmnd->result = result;
                scmnd->scsi_done(scmnd);
        }
}

static void srp_terminate_io(struct srp_rport *rport)
{
        struct srp_target_port *target = rport->lld_data;
        struct srp_rdma_ch *ch;
        struct Scsi_Host *shost = target->scsi_host;
        struct scsi_device *sdev;
        int i, j;

        /*
         * Invoking srp_terminate_io() while srp_queuecommand() is running
         * is not safe. Hence the warning statement below.
         */
        shost_for_each_device(sdev, shost)
                WARN_ON_ONCE(sdev->request_queue->request_fn_active);

        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];

                for (j = 0; j < target->req_ring_size; ++j) {
                        struct srp_request *req = &ch->req_ring[j];

                        srp_finish_req(ch, req, NULL,
                                       DID_TRANSPORT_FAILFAST << 16);
                }
        }
}

/*
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to realize that is not to call this function
 * directly but to call srp_reconnect_rport() instead since that last function
 * serializes calls of this function via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
 */
static int srp_rport_reconnect(struct srp_rport *rport)
{
        struct srp_target_port *target = rport->lld_data;
        struct srp_rdma_ch *ch;
        int i, j, ret = 0;
        bool multich = false;

        srp_disconnect_target(target);

        if (target->state == SRP_TARGET_SCANNING)
                return -ENODEV;

        /*
         * Now get a new local CM ID so that we avoid confusing the target in
         * case things are really fouled up. Doing so also ensures that all CM
         * callbacks will have finished before a new QP is allocated.
         */
        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
                if (!ch->target)
                        break;
                ret += srp_new_cm_id(ch);
        }
        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
                if (!ch->target)
                        break;
                for (j = 0; j < target->req_ring_size; ++j) {
                        struct srp_request *req = &ch->req_ring[j];

                        srp_finish_req(ch, req, NULL, DID_RESET << 16);
                }
        }
        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
                if (!ch->target)
                        break;
                /*
                 * Whether or not creating a new CM ID succeeded, create a new
                 * QP. This guarantees that all completion callback function
                 * invocations have finished before request resetting starts.
                 */
                ret += srp_create_ch_ib(ch);

                INIT_LIST_HEAD(&ch->free_tx);
                for (j = 0; j < target->queue_size; ++j)
                        list_add(&ch->tx_ring[j]->list, &ch->free_tx);
        }

        target->qp_in_error = false;

        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
                if (ret || !ch->target)
                        break;
                ret = srp_connect_ch(ch, multich);
                multich = true;
        }

        if (ret == 0)
                shost_printk(KERN_INFO, target->scsi_host,
                             PFX "reconnect succeeded\n");

        return ret;
}

David Dillow8f26c9f2011-01-14 19:45:50 -05001259static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
1260 unsigned int dma_len, u32 rkey)
Roland Dreierf5358a12006-06-17 20:37:29 -07001261{
David Dillow8f26c9f2011-01-14 19:45:50 -05001262 struct srp_direct_buf *desc = state->desc;
1263
1264 desc->va = cpu_to_be64(dma_addr);
1265 desc->key = cpu_to_be32(rkey);
1266 desc->len = cpu_to_be32(dma_len);
1267
1268 state->total_len += dma_len;
1269 state->desc++;
1270 state->ndesc++;
1271}
1272
1273static int srp_map_finish_fmr(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001274 struct srp_rdma_ch *ch)
David Dillow8f26c9f2011-01-14 19:45:50 -05001275{
David Dillow8f26c9f2011-01-14 19:45:50 -05001276 struct ib_pool_fmr *fmr;
Roland Dreierf5358a12006-06-17 20:37:29 -07001277 u64 io_addr = 0;
David Dillow8f26c9f2011-01-14 19:45:50 -05001278
Bart Van Assche509c07b2014-10-30 14:48:30 +01001279 fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
David Dillow8f26c9f2011-01-14 19:45:50 -05001280 state->npages, io_addr);
1281 if (IS_ERR(fmr))
1282 return PTR_ERR(fmr);
1283
1284 *state->next_fmr++ = fmr;
Bart Van Assche52ede082014-05-20 15:07:45 +02001285 state->nmdesc++;
David Dillow8f26c9f2011-01-14 19:45:50 -05001286
Bart Van Assche52ede082014-05-20 15:07:45 +02001287 srp_map_desc(state, 0, state->dma_len, fmr->fmr->rkey);
Bart Van Assche539dde62014-05-20 15:05:46 +02001288
David Dillow8f26c9f2011-01-14 19:45:50 -05001289 return 0;
1290}
1291
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001292static int srp_map_finish_fr(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001293 struct srp_rdma_ch *ch)
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001294{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001295 struct srp_target_port *target = ch->target;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001296 struct srp_device *dev = target->srp_host->srp_dev;
1297 struct ib_send_wr *bad_wr;
1298 struct ib_send_wr wr;
1299 struct srp_fr_desc *desc;
1300 u32 rkey;
1301
Bart Van Assche509c07b2014-10-30 14:48:30 +01001302 desc = srp_fr_pool_get(ch->fr_pool);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001303 if (!desc)
1304 return -ENOMEM;
1305
1306 rkey = ib_inc_rkey(desc->mr->rkey);
1307 ib_update_fast_reg_key(desc->mr, rkey);
1308
1309 memcpy(desc->frpl->page_list, state->pages,
1310 sizeof(state->pages[0]) * state->npages);
1311
1312 memset(&wr, 0, sizeof(wr));
1313 wr.opcode = IB_WR_FAST_REG_MR;
1314 wr.wr_id = FAST_REG_WR_ID_MASK;
1315 wr.wr.fast_reg.iova_start = state->base_dma_addr;
1316 wr.wr.fast_reg.page_list = desc->frpl;
1317 wr.wr.fast_reg.page_list_len = state->npages;
1318 wr.wr.fast_reg.page_shift = ilog2(dev->mr_page_size);
1319 wr.wr.fast_reg.length = state->dma_len;
1320 wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
1321 IB_ACCESS_REMOTE_READ |
1322 IB_ACCESS_REMOTE_WRITE);
1323 wr.wr.fast_reg.rkey = desc->mr->lkey;
1324
1325 *state->next_fr++ = desc;
1326 state->nmdesc++;
1327
1328 srp_map_desc(state, state->base_dma_addr, state->dma_len,
1329 desc->mr->rkey);
1330
Bart Van Assche509c07b2014-10-30 14:48:30 +01001331 return ib_post_send(ch->qp, &wr, &bad_wr);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001332}
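/*
 * Note (added for clarity): ib_inc_rkey() above changes only the low-order
 * key byte of the rkey, so every reuse of a fast registration descriptor
 * invalidates rkeys advertised by the previous mapping while the MR itself
 * is recycled.
 */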
1333
Bart Van Assche539dde62014-05-20 15:05:46 +02001334static int srp_finish_mapping(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001335 struct srp_rdma_ch *ch)
Bart Van Assche539dde62014-05-20 15:05:46 +02001336{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001337 struct srp_target_port *target = ch->target;
Bart Van Assche539dde62014-05-20 15:05:46 +02001338 int ret = 0;
1339
1340 if (state->npages == 0)
1341 return 0;
1342
Bart Van Asscheb1b88542014-05-20 15:06:41 +02001343 if (state->npages == 1 && !register_always)
Bart Van Assche52ede082014-05-20 15:07:45 +02001344 srp_map_desc(state, state->base_dma_addr, state->dma_len,
Bart Van Assche539dde62014-05-20 15:05:46 +02001345 target->rkey);
1346 else
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001347 ret = target->srp_host->srp_dev->use_fast_reg ?
Bart Van Assche509c07b2014-10-30 14:48:30 +01001348 srp_map_finish_fr(state, ch) :
1349 srp_map_finish_fmr(state, ch);
Bart Van Assche539dde62014-05-20 15:05:46 +02001350
1351 if (ret == 0) {
1352 state->npages = 0;
Bart Van Assche52ede082014-05-20 15:07:45 +02001353 state->dma_len = 0;
Bart Van Assche539dde62014-05-20 15:05:46 +02001354 }
1355
1356 return ret;
1357}
1358
David Dillow8f26c9f2011-01-14 19:45:50 -05001359static void srp_map_update_start(struct srp_map_state *state,
1360 struct scatterlist *sg, int sg_index,
1361 dma_addr_t dma_addr)
1362{
1363 state->unmapped_sg = sg;
1364 state->unmapped_index = sg_index;
1365 state->unmapped_addr = dma_addr;
1366}
1367
1368static int srp_map_sg_entry(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001369 struct srp_rdma_ch *ch,
David Dillow8f26c9f2011-01-14 19:45:50 -05001370 struct scatterlist *sg, int sg_index,
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001371 bool use_mr)
David Dillow8f26c9f2011-01-14 19:45:50 -05001372{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001373 struct srp_target_port *target = ch->target;
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01001374 struct srp_device *dev = target->srp_host->srp_dev;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001375 struct ib_device *ibdev = dev->dev;
David Dillow8f26c9f2011-01-14 19:45:50 -05001376 dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
1377 unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
1378 unsigned int len;
1379 int ret;
Roland Dreierf5358a12006-06-17 20:37:29 -07001380
David Dillow8f26c9f2011-01-14 19:45:50 -05001381 if (!dma_len)
1382 return 0;
Roland Dreierf5358a12006-06-17 20:37:29 -07001383
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001384 if (!use_mr) {
1385 /*
1386 * Once we're in direct map mode for a request, we don't
1387 * go back to FMR or FR mode, so no need to update anything
David Dillow8f26c9f2011-01-14 19:45:50 -05001388 * other than the descriptor.
1389 */
1390 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1391 return 0;
1392 }
Ishai Rabinovitz559ce8f2006-08-03 10:35:43 -07001393
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001394 /*
1395 * Since not all RDMA HW drivers support non-zero page offsets for
1396 * FMR, if we start at an offset into a page, don't merge into the
1397 * current FMR mapping. Finish it out, and use the kernel's MR for
1398 * this sg entry.
David Dillow8f26c9f2011-01-14 19:45:50 -05001399 */
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001400 if ((!dev->use_fast_reg && dma_addr & ~dev->mr_page_mask) ||
1401 dma_len > dev->mr_max_size) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001402 ret = srp_finish_mapping(state, ch);
David Dillow8f26c9f2011-01-14 19:45:50 -05001403 if (ret)
1404 return ret;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001405
David Dillow8f26c9f2011-01-14 19:45:50 -05001406 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1407 srp_map_update_start(state, NULL, 0, 0);
1408 return 0;
1409 }
1410
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001411 /*
1412 * If this is the first sg that will be mapped via FMR or via FR, save
1413 * our position. We need to know the first unmapped entry, its index,
1414 * and the first unmapped address within that entry to be able to
1415 * restart mapping after an error.
David Dillow8f26c9f2011-01-14 19:45:50 -05001416 */
1417 if (!state->unmapped_sg)
1418 srp_map_update_start(state, sg, sg_index, dma_addr);
1419
1420 while (dma_len) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001421 unsigned offset = dma_addr & ~dev->mr_page_mask;
1422 if (state->npages == dev->max_pages_per_mr || offset != 0) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001423 ret = srp_finish_mapping(state, ch);
David Dillow8f26c9f2011-01-14 19:45:50 -05001424 if (ret)
1425 return ret;
1426
1427 srp_map_update_start(state, sg, sg_index, dma_addr);
Roland Dreierf5358a12006-06-17 20:37:29 -07001428 }
1429
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001430 len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
David Dillow8f26c9f2011-01-14 19:45:50 -05001431
1432 if (!state->npages)
1433 state->base_dma_addr = dma_addr;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001434 state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
Bart Van Assche52ede082014-05-20 15:07:45 +02001435 state->dma_len += len;
David Dillow8f26c9f2011-01-14 19:45:50 -05001436 dma_addr += len;
1437 dma_len -= len;
Roland Dreierf5358a12006-06-17 20:37:29 -07001438 }
1439
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001440 /*
1441 * If the last entry of the MR wasn't a full page, then we need to
David Dillow8f26c9f2011-01-14 19:45:50 -05001442 * close it out and start a new one -- we can only merge at page
1443 * boundaries.
1444 */
Roland Dreierf5358a12006-06-17 20:37:29 -07001445 ret = 0;
Bart Van Assche52ede082014-05-20 15:07:45 +02001446 if (len != dev->mr_page_size) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001447 ret = srp_finish_mapping(state, ch);
David Dillow8f26c9f2011-01-14 19:45:50 -05001448 if (!ret)
1449 srp_map_update_start(state, NULL, 0, 0);
1450 }
Roland Dreierf5358a12006-06-17 20:37:29 -07001451 return ret;
1452}
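/*
 * Worked example for the loop above (illustrative; assumes a 4 KiB
 * mr_page_size, the fast registration path and an empty map state): an sg
 * entry with dma_addr 0x10200 and dma_len 12000 is consumed as
 *
 *	iteration 1: offset 0x200, len 3584, page 0x10000
 *	iteration 2: offset 0,     len 4096, page 0x11000
 *	iteration 3: offset 0,     len 4096, page 0x12000
 *	iteration 4: offset 0,     len  224, page 0x13000
 *
 * and since the final len != mr_page_size the mapping is closed out: four
 * pages, base_dma_addr 0x10200 and a single 12000-byte descriptor. The FMR
 * path never reaches this loop with offset != 0 because unaligned entries
 * were already emitted as direct descriptors above.
 */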
1453
Bart Van Assche509c07b2014-10-30 14:48:30 +01001454static int srp_map_sg(struct srp_map_state *state, struct srp_rdma_ch *ch,
1455 struct srp_request *req, struct scatterlist *scat,
1456 int count)
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001457{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001458 struct srp_target_port *target = ch->target;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001459 struct srp_device *dev = target->srp_host->srp_dev;
1460 struct ib_device *ibdev = dev->dev;
1461 struct scatterlist *sg;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001462 int i;
1463 bool use_mr;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001464
1465 state->desc = req->indirect_desc;
1466 state->pages = req->map_page;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001467 if (dev->use_fast_reg) {
1468 state->next_fr = req->fr_list;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001469 use_mr = !!ch->fr_pool;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001470 } else {
1471 state->next_fmr = req->fmr_list;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001472 use_mr = !!ch->fmr_pool;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001473 }
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001474
1475 for_each_sg(scat, sg, count, i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001476 if (srp_map_sg_entry(state, ch, sg, i, use_mr)) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001477 /*
1478 * Memory registration failed, so backtrack to the
1479 * first unmapped entry and continue on without using
1480 * memory registration.
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001481 */
1482 dma_addr_t dma_addr;
1483 unsigned int dma_len;
1484
1485backtrack:
1486 sg = state->unmapped_sg;
1487 i = state->unmapped_index;
1488
1489 dma_addr = ib_sg_dma_address(ibdev, sg);
1490 dma_len = ib_sg_dma_len(ibdev, sg);
1491 dma_len -= (state->unmapped_addr - dma_addr);
1492 dma_addr = state->unmapped_addr;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001493 use_mr = false;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001494 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1495 }
1496 }
1497
Bart Van Assche509c07b2014-10-30 14:48:30 +01001498 if (use_mr && srp_finish_mapping(state, ch))
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001499 goto backtrack;
1500
Bart Van Assche52ede082014-05-20 15:07:45 +02001501 req->nmdesc = state->nmdesc;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001502
1503 return 0;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001504}
1505
Bart Van Assche509c07b2014-10-30 14:48:30 +01001506static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
Roland Dreieraef9ec32005-11-02 14:07:13 -08001507 struct srp_request *req)
1508{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001509 struct srp_target_port *target = ch->target;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001510 struct scatterlist *scat;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001511 struct srp_cmd *cmd = req->cmd->buf;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001512 int len, nents, count;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001513 struct srp_device *dev;
1514 struct ib_device *ibdev;
David Dillow8f26c9f2011-01-14 19:45:50 -05001515 struct srp_map_state state;
1516 struct srp_indirect_buf *indirect_hdr;
David Dillow8f26c9f2011-01-14 19:45:50 -05001517 u32 table_len;
1518 u8 fmt;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001519
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001520 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001521 return sizeof (struct srp_cmd);
1522
1523 if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1524 scmnd->sc_data_direction != DMA_TO_DEVICE) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001525 shost_printk(KERN_WARNING, target->scsi_host,
1526 PFX "Unhandled data direction %d\n",
1527 scmnd->sc_data_direction);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001528 return -EINVAL;
1529 }
1530
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001531 nents = scsi_sg_count(scmnd);
1532 scat = scsi_sglist(scmnd);
Roland Dreiercf368712006-03-24 15:47:26 -08001533
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01001534 dev = target->srp_host->srp_dev;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001535 ibdev = dev->dev;
1536
1537 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
David Dillow8f26c9f2011-01-14 19:45:50 -05001538 if (unlikely(count == 0))
1539 return -EIO;
Roland Dreierf5358a12006-06-17 20:37:29 -07001540
1541 fmt = SRP_DATA_DESC_DIRECT;
1542 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
Roland Dreiercf368712006-03-24 15:47:26 -08001543
Bart Van Asscheb1b88542014-05-20 15:06:41 +02001544 if (count == 1 && !register_always) {
Roland Dreierf5358a12006-06-17 20:37:29 -07001545 /*
1546 * The midlayer only generated a single gather/scatter
1547 * entry, or DMA mapping coalesced everything to a
1548 * single entry. So a direct descriptor along with
1549 * the DMA MR suffices.
1550 */
Roland Dreieraef9ec32005-11-02 14:07:13 -08001551 struct srp_direct_buf *buf = (void *) cmd->add_data;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001552
Ralph Campbell85507bc2006-12-12 14:30:55 -08001553 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
David Dillow9af76272010-11-26 15:34:46 -05001554 buf->key = cpu_to_be32(target->rkey);
Ralph Campbell85507bc2006-12-12 14:30:55 -08001555 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
Roland Dreiercf368712006-03-24 15:47:26 -08001556
Bart Van Assche52ede082014-05-20 15:07:45 +02001557 req->nmdesc = 0;
David Dillow8f26c9f2011-01-14 19:45:50 -05001558 goto map_complete;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001559 }
1560
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001561 /*
1562 * We have more than one scatter/gather entry, so build our indirect
1563 * descriptor table, trying to merge as many entries as we can.
David Dillow8f26c9f2011-01-14 19:45:50 -05001564 */
1565 indirect_hdr = (void *) cmd->add_data;
1566
David Dillowc07d4242011-01-16 13:57:10 -05001567 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1568 target->indirect_size, DMA_TO_DEVICE);
1569
David Dillow8f26c9f2011-01-14 19:45:50 -05001570 memset(&state, 0, sizeof(state));
Bart Van Assche509c07b2014-10-30 14:48:30 +01001571 srp_map_sg(&state, ch, req, scat, count);
David Dillow8f26c9f2011-01-14 19:45:50 -05001572
David Dillowc07d4242011-01-16 13:57:10 -05001573 /* We've mapped the request, now pull as much of the indirect
1574 * descriptor table as we can into the command buffer. If this
1575 * target is not using an external indirect table, we are
1576 * guaranteed to fit into the command, as the SCSI layer won't
1577 * give us more S/G entries than we allow.
David Dillow8f26c9f2011-01-14 19:45:50 -05001578 */
David Dillow8f26c9f2011-01-14 19:45:50 -05001579 if (state.ndesc == 1) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001580 /*
1581 * Memory registration collapsed the sg-list into one entry,
David Dillow8f26c9f2011-01-14 19:45:50 -05001582 * so use a direct descriptor.
1583 */
1584 struct srp_direct_buf *buf = (void *) cmd->add_data;
1585
David Dillowc07d4242011-01-16 13:57:10 -05001586 *buf = req->indirect_desc[0];
David Dillow8f26c9f2011-01-14 19:45:50 -05001587 goto map_complete;
1588 }
1589
David Dillowc07d4242011-01-16 13:57:10 -05001590 if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1591 !target->allow_ext_sg)) {
1592 shost_printk(KERN_ERR, target->scsi_host,
1593 "Could not fit S/G list into SRP_CMD\n");
1594 return -EIO;
1595 }
1596
1597 count = min(state.ndesc, target->cmd_sg_cnt);
David Dillow8f26c9f2011-01-14 19:45:50 -05001598 table_len = state.ndesc * sizeof (struct srp_direct_buf);
1599
1600 fmt = SRP_DATA_DESC_INDIRECT;
1601 len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
David Dillowc07d4242011-01-16 13:57:10 -05001602 len += count * sizeof (struct srp_direct_buf);
David Dillow8f26c9f2011-01-14 19:45:50 -05001603
David Dillowc07d4242011-01-16 13:57:10 -05001604 memcpy(indirect_hdr->desc_list, req->indirect_desc,
1605 count * sizeof (struct srp_direct_buf));
David Dillow8f26c9f2011-01-14 19:45:50 -05001606
David Dillowc07d4242011-01-16 13:57:10 -05001607 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
David Dillow8f26c9f2011-01-14 19:45:50 -05001608 indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
1609 indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1610 indirect_hdr->len = cpu_to_be32(state.total_len);
1611
1612 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
David Dillowc07d4242011-01-16 13:57:10 -05001613 cmd->data_out_desc_cnt = count;
David Dillow8f26c9f2011-01-14 19:45:50 -05001614 else
David Dillowc07d4242011-01-16 13:57:10 -05001615 cmd->data_in_desc_cnt = count;
1616
1617 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1618 DMA_TO_DEVICE);
David Dillow8f26c9f2011-01-14 19:45:50 -05001619
1620map_complete:
Roland Dreieraef9ec32005-11-02 14:07:13 -08001621 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1622 cmd->buf_fmt = fmt << 4;
1623 else
1624 cmd->buf_fmt = fmt;
1625
Roland Dreieraef9ec32005-11-02 14:07:13 -08001626 return len;
1627}
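/*
 * Worked example for the indirect case above (illustrative, using the
 * nominal SRP wire sizes: 48-byte SRP_CMD, 20-byte indirect header,
 * 16 bytes per direct descriptor): if registration left state.ndesc = 5
 * and the target accepts cmd_sg_cnt >= 5, then count = 5,
 *
 *	table_len = 5 * 16 = 80 bytes (the full descriptor table)
 *	len = 48 + 20 + 5 * 16 = 148 bytes of IU payload
 *
 * and table_desc points the target at req->indirect_dma_addr in case it
 * prefers to fetch the descriptor table via RDMA.
 */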
1628
David Dillow05a1d752010-10-08 14:48:14 -04001629/*
Bart Van Assche76c75b22010-11-26 14:37:47 -05001630 * Return an IU and possibly a credit to the free pool
1631 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01001632static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
Bart Van Assche76c75b22010-11-26 14:37:47 -05001633 enum srp_iu_type iu_type)
1634{
1635 unsigned long flags;
1636
Bart Van Assche509c07b2014-10-30 14:48:30 +01001637 spin_lock_irqsave(&ch->lock, flags);
1638 list_add(&iu->list, &ch->free_tx);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001639 if (iu_type != SRP_IU_RSP)
Bart Van Assche509c07b2014-10-30 14:48:30 +01001640 ++ch->req_lim;
1641 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001642}
1643
1644/*
Bart Van Assche509c07b2014-10-30 14:48:30 +01001645 * Must be called with ch->lock held to protect req_lim and free_tx.
Bart Van Asschee9684672010-11-26 15:08:38 -05001646 * If IU is not sent, it must be returned using srp_put_tx_iu().
David Dillow05a1d752010-10-08 14:48:14 -04001647 *
1648 * Note:
1649 * An upper limit for the number of allocated information units for each
1650 * request type is:
1651 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1652 * more than Scsi_Host.can_queue requests.
1653 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1654 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1655 * one unanswered SRP request to an initiator.
1656 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01001657static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
David Dillow05a1d752010-10-08 14:48:14 -04001658 enum srp_iu_type iu_type)
1659{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001660 struct srp_target_port *target = ch->target;
David Dillow05a1d752010-10-08 14:48:14 -04001661 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1662 struct srp_iu *iu;
1663
Bart Van Assche509c07b2014-10-30 14:48:30 +01001664 srp_send_completion(ch->send_cq, ch);
David Dillow05a1d752010-10-08 14:48:14 -04001665
Bart Van Assche509c07b2014-10-30 14:48:30 +01001666 if (list_empty(&ch->free_tx))
David Dillow05a1d752010-10-08 14:48:14 -04001667 return NULL;
1668
1669 /* Initiator responses to target requests do not consume credits */
Bart Van Assche76c75b22010-11-26 14:37:47 -05001670 if (iu_type != SRP_IU_RSP) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001671 if (ch->req_lim <= rsv) {
Bart Van Assche76c75b22010-11-26 14:37:47 -05001672 ++target->zero_req_lim;
1673 return NULL;
1674 }
1675
Bart Van Assche509c07b2014-10-30 14:48:30 +01001676 --ch->req_lim;
David Dillow05a1d752010-10-08 14:48:14 -04001677 }
1678
Bart Van Assche509c07b2014-10-30 14:48:30 +01001679 iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001680 list_del(&iu->list);
David Dillow05a1d752010-10-08 14:48:14 -04001681 return iu;
1682}
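/*
 * Illustrative example of the credit reservation above: with
 * ch->req_lim == SRP_TSK_MGMT_SQ_SIZE, an SRP_IU_CMD allocation fails
 * (req_lim <= rsv) while an SRP_IU_TSK_MGMT allocation, which uses
 * rsv = 0, still succeeds. Aborts and resets therefore cannot be starved
 * of credits by regular SCSI commands.
 */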
1683
Bart Van Assche509c07b2014-10-30 14:48:30 +01001684static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
David Dillow05a1d752010-10-08 14:48:14 -04001685{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001686 struct srp_target_port *target = ch->target;
David Dillow05a1d752010-10-08 14:48:14 -04001687 struct ib_sge list;
1688 struct ib_send_wr wr, *bad_wr;
David Dillow05a1d752010-10-08 14:48:14 -04001689
1690 list.addr = iu->dma;
1691 list.length = len;
David Dillow9af76272010-11-26 15:34:46 -05001692 list.lkey = target->lkey;
David Dillow05a1d752010-10-08 14:48:14 -04001693
1694 wr.next = NULL;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001695 wr.wr_id = (uintptr_t) iu;
David Dillow05a1d752010-10-08 14:48:14 -04001696 wr.sg_list = &list;
1697 wr.num_sge = 1;
1698 wr.opcode = IB_WR_SEND;
1699 wr.send_flags = IB_SEND_SIGNALED;
1700
Bart Van Assche509c07b2014-10-30 14:48:30 +01001701 return ib_post_send(ch->qp, &wr, &bad_wr);
David Dillow05a1d752010-10-08 14:48:14 -04001702}
1703
Bart Van Assche509c07b2014-10-30 14:48:30 +01001704static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
Bart Van Asschec996bb42010-07-30 10:59:05 +00001705{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001706 struct srp_target_port *target = ch->target;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001707 struct ib_recv_wr wr, *bad_wr;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001708 struct ib_sge list;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001709
1710 list.addr = iu->dma;
1711 list.length = iu->size;
David Dillow9af76272010-11-26 15:34:46 -05001712 list.lkey = target->lkey;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001713
1714 wr.next = NULL;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001715 wr.wr_id = (uintptr_t) iu;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001716 wr.sg_list = &list;
1717 wr.num_sge = 1;
1718
Bart Van Assche509c07b2014-10-30 14:48:30 +01001719 return ib_post_recv(ch->qp, &wr, &bad_wr);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001720}
1721
Bart Van Assche509c07b2014-10-30 14:48:30 +01001722static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001723{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001724 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001725 struct srp_request *req;
1726 struct scsi_cmnd *scmnd;
1727 unsigned long flags;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001728
Roland Dreieraef9ec32005-11-02 14:07:13 -08001729 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001730 spin_lock_irqsave(&ch->lock, flags);
1731 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1732 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche94a91742010-11-26 14:50:09 -05001733
Bart Van Assche509c07b2014-10-30 14:48:30 +01001734 ch->tsk_mgmt_status = -1;
David Dillowf8b6e312010-11-26 13:02:21 -05001735 if (be32_to_cpu(rsp->resp_data_len) >= 4)
Bart Van Assche509c07b2014-10-30 14:48:30 +01001736 ch->tsk_mgmt_status = rsp->data[3];
1737 complete(&ch->tsk_mgmt_done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001738 } else {
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02001739 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1740 if (scmnd) {
1741 req = (void *)scmnd->host_scribble;
1742 scmnd = srp_claim_req(ch, req, NULL, scmnd);
1743 }
Bart Van Assche22032992012-08-14 13:18:53 +00001744 if (!scmnd) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001745 shost_printk(KERN_ERR, target->scsi_host,
Bart Van Assched92c0da2014-10-06 17:14:36 +02001746 "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1747 rsp->tag, ch - target->ch, ch->qp->qp_num);
Bart Van Assche22032992012-08-14 13:18:53 +00001748
Bart Van Assche509c07b2014-10-30 14:48:30 +01001749 spin_lock_irqsave(&ch->lock, flags);
1750 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1751 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche22032992012-08-14 13:18:53 +00001752
1753 return;
1754 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001755 scmnd->result = rsp->status;
1756
1757 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1758 memcpy(scmnd->sense_buffer, rsp->data +
1759 be32_to_cpu(rsp->resp_data_len),
1760 min_t(int, be32_to_cpu(rsp->sense_data_len),
1761 SCSI_SENSE_BUFFERSIZE));
1762 }
1763
Bart Van Asschee7145312014-07-09 15:57:51 +02001764 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001765 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
Bart Van Asschee7145312014-07-09 15:57:51 +02001766 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1767 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1768 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1769 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1770 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1771 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
Roland Dreieraef9ec32005-11-02 14:07:13 -08001772
Bart Van Assche509c07b2014-10-30 14:48:30 +01001773 srp_free_req(ch, req, scmnd,
Bart Van Assche22032992012-08-14 13:18:53 +00001774 be32_to_cpu(rsp->req_lim_delta));
1775
David Dillowf8b6e312010-11-26 13:02:21 -05001776 scmnd->host_scribble = NULL;
1777 scmnd->scsi_done(scmnd);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001778 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001779}
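/*
 * Illustrative example (values made up): a READ that requested 4096 bytes
 * but transferred only 512 arrives with SRP_RSP_FLAG_DIUNDER set and
 * data_in_res_cnt = 3584, so scsi_set_resid() reports a 3584-byte
 * underflow to the SCSI mid-layer; DIOVER, DOUNDER and DOOVER are handled
 * analogously with the sign and transfer direction flipped.
 */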
1780
Bart Van Assche509c07b2014-10-30 14:48:30 +01001781static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
David Dillowbb125882010-10-08 14:40:47 -04001782 void *rsp, int len)
1783{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001784 struct srp_target_port *target = ch->target;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001785 struct ib_device *dev = target->srp_host->srp_dev->dev;
David Dillowbb125882010-10-08 14:40:47 -04001786 unsigned long flags;
1787 struct srp_iu *iu;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001788 int err;
David Dillowbb125882010-10-08 14:40:47 -04001789
Bart Van Assche509c07b2014-10-30 14:48:30 +01001790 spin_lock_irqsave(&ch->lock, flags);
1791 ch->req_lim += req_delta;
1792 iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1793 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001794
David Dillowbb125882010-10-08 14:40:47 -04001795 if (!iu) {
1796 shost_printk(KERN_ERR, target->scsi_host, PFX
1797 "no IU available to send response\n");
Bart Van Assche76c75b22010-11-26 14:37:47 -05001798 return 1;
David Dillowbb125882010-10-08 14:40:47 -04001799 }
1800
1801 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1802 memcpy(iu->buf, rsp, len);
1803 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1804
Bart Van Assche509c07b2014-10-30 14:48:30 +01001805 err = srp_post_send(ch, iu, len);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001806 if (err) {
David Dillowbb125882010-10-08 14:40:47 -04001807 shost_printk(KERN_ERR, target->scsi_host, PFX
1808 "unable to post response: %d\n", err);
Bart Van Assche509c07b2014-10-30 14:48:30 +01001809 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001810 }
David Dillowbb125882010-10-08 14:40:47 -04001811
David Dillowbb125882010-10-08 14:40:47 -04001812 return err;
1813}
1814
Bart Van Assche509c07b2014-10-30 14:48:30 +01001815static void srp_process_cred_req(struct srp_rdma_ch *ch,
David Dillowbb125882010-10-08 14:40:47 -04001816 struct srp_cred_req *req)
1817{
1818 struct srp_cred_rsp rsp = {
1819 .opcode = SRP_CRED_RSP,
1820 .tag = req->tag,
1821 };
1822 s32 delta = be32_to_cpu(req->req_lim_delta);
1823
Bart Van Assche509c07b2014-10-30 14:48:30 +01001824 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1825 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
David Dillowbb125882010-10-08 14:40:47 -04001826 "problems processing SRP_CRED_REQ\n");
1827}
1828
Bart Van Assche509c07b2014-10-30 14:48:30 +01001829static void srp_process_aer_req(struct srp_rdma_ch *ch,
David Dillowbb125882010-10-08 14:40:47 -04001830 struct srp_aer_req *req)
1831{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001832 struct srp_target_port *target = ch->target;
David Dillowbb125882010-10-08 14:40:47 -04001833 struct srp_aer_rsp rsp = {
1834 .opcode = SRP_AER_RSP,
1835 .tag = req->tag,
1836 };
1837 s32 delta = be32_to_cpu(req->req_lim_delta);
1838
1839 shost_printk(KERN_ERR, target->scsi_host, PFX
1840 "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));
1841
Bart Van Assche509c07b2014-10-30 14:48:30 +01001842 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
David Dillowbb125882010-10-08 14:40:47 -04001843 shost_printk(KERN_ERR, target->scsi_host, PFX
1844 "problems processing SRP_AER_REQ\n");
1845}
1846
Bart Van Assche509c07b2014-10-30 14:48:30 +01001847static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001848{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001849 struct srp_target_port *target = ch->target;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001850 struct ib_device *dev = target->srp_host->srp_dev->dev;
Roland Dreier737b94e2011-05-23 11:30:04 -07001851 struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001852 int res;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001853 u8 opcode;
1854
Bart Van Assche509c07b2014-10-30 14:48:30 +01001855 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08001856 DMA_FROM_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001857
1858 opcode = *(u8 *) iu->buf;
1859
1860 if (0) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001861 shost_printk(KERN_ERR, target->scsi_host,
1862 PFX "recv completion, opcode 0x%02x\n", opcode);
Bart Van Assche7a700812010-07-29 15:56:37 +00001863 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
1864 iu->buf, wc->byte_len, true);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001865 }
1866
1867 switch (opcode) {
1868 case SRP_RSP:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001869 srp_process_rsp(ch, iu->buf);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001870 break;
1871
David Dillowbb125882010-10-08 14:40:47 -04001872 case SRP_CRED_REQ:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001873 srp_process_cred_req(ch, iu->buf);
David Dillowbb125882010-10-08 14:40:47 -04001874 break;
1875
1876 case SRP_AER_REQ:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001877 srp_process_aer_req(ch, iu->buf);
David Dillowbb125882010-10-08 14:40:47 -04001878 break;
1879
Roland Dreieraef9ec32005-11-02 14:07:13 -08001880 case SRP_T_LOGOUT:
1881 /* XXX Handle target logout */
David Dillow7aa54bd2008-01-07 18:23:41 -05001882 shost_printk(KERN_WARNING, target->scsi_host,
1883 PFX "Got target logout request\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001884 break;
1885
1886 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05001887 shost_printk(KERN_WARNING, target->scsi_host,
1888 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001889 break;
1890 }
1891
Bart Van Assche509c07b2014-10-30 14:48:30 +01001892 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08001893 DMA_FROM_DEVICE);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001894
Bart Van Assche509c07b2014-10-30 14:48:30 +01001895 res = srp_post_recv(ch, iu);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001896 if (res != 0)
1897 shost_printk(KERN_ERR, target->scsi_host,
1898 PFX "Recv failed with error code %d\n", res);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001899}
1900
Bart Van Asschec1120f82013-10-26 14:35:08 +02001901/**
1902 * srp_tl_err_work() - handle a transport layer error
Bart Van Asscheaf246632014-05-20 15:04:21 +02001903 * @work: Work structure embedded in an SRP target port.
Bart Van Asschec1120f82013-10-26 14:35:08 +02001904 *
1905 * Note: This function may get invoked before the rport has been created,
1906 * hence the target->rport test.
1907 */
1908static void srp_tl_err_work(struct work_struct *work)
1909{
1910 struct srp_target_port *target;
1911
1912 target = container_of(work, struct srp_target_port, tl_err_work);
1913 if (target->rport)
1914 srp_start_tl_fail_timers(target->rport);
1915}
1916
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001917static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
Bart Van Assche7dad6b22014-10-21 18:00:35 +02001918 bool send_err, struct srp_rdma_ch *ch)
Bart Van Assche948d1e82011-09-03 09:25:42 +02001919{
Bart Van Assche7dad6b22014-10-21 18:00:35 +02001920 struct srp_target_port *target = ch->target;
1921
1922 if (wr_id == SRP_LAST_WR_ID) {
1923 complete(&ch->done);
1924 return;
1925 }
1926
Bart Van Asschec014c8c2015-05-18 13:23:57 +02001927 if (ch->connected && !target->qp_in_error) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001928 if (wr_id & LOCAL_INV_WR_ID_MASK) {
1929 shost_printk(KERN_ERR, target->scsi_host, PFX
1930 "LOCAL_INV failed with status %d\n",
1931 wc_status);
1932 } else if (wr_id & FAST_REG_WR_ID_MASK) {
1933 shost_printk(KERN_ERR, target->scsi_host, PFX
1934 "FAST_REG_MR failed status %d\n",
1935 wc_status);
1936 } else {
1937 shost_printk(KERN_ERR, target->scsi_host,
1938 PFX "failed %s status %d for iu %p\n",
1939 send_err ? "send" : "receive",
1940 wc_status, (void *)(uintptr_t)wr_id);
1941 }
Bart Van Asschec1120f82013-10-26 14:35:08 +02001942 queue_work(system_long_wq, &target->tl_err_work);
Bart Van Assche4f0af692012-11-26 11:16:40 +01001943 }
Bart Van Assche948d1e82011-09-03 09:25:42 +02001944 target->qp_in_error = true;
1945}
1946
Bart Van Assche509c07b2014-10-30 14:48:30 +01001947static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001948{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001949 struct srp_rdma_ch *ch = ch_ptr;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001950 struct ib_wc wc;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001951
1952 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
1953 while (ib_poll_cq(cq, 1, &wc) > 0) {
Bart Van Assche948d1e82011-09-03 09:25:42 +02001954 if (likely(wc.status == IB_WC_SUCCESS)) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001955 srp_handle_recv(ch, &wc);
Bart Van Assche948d1e82011-09-03 09:25:42 +02001956 } else {
Bart Van Assche7dad6b22014-10-21 18:00:35 +02001957 srp_handle_qp_err(wc.wr_id, wc.status, false, ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001958 }
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001959 }
1960}
1961
Bart Van Assche509c07b2014-10-30 14:48:30 +01001962static void srp_send_completion(struct ib_cq *cq, void *ch_ptr)
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001963{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001964 struct srp_rdma_ch *ch = ch_ptr;
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001965 struct ib_wc wc;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001966 struct srp_iu *iu;
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001967
1968 while (ib_poll_cq(cq, 1, &wc) > 0) {
Bart Van Assche948d1e82011-09-03 09:25:42 +02001969 if (likely(wc.status == IB_WC_SUCCESS)) {
1970 iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001971 list_add(&iu->list, &ch->free_tx);
Bart Van Assche948d1e82011-09-03 09:25:42 +02001972 } else {
Bart Van Assche7dad6b22014-10-21 18:00:35 +02001973 srp_handle_qp_err(wc.wr_id, wc.status, true, ch);
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001974 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001975 }
1976}
1977
Bart Van Assche76c75b22010-11-26 14:37:47 -05001978static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001979{
Bart Van Assche76c75b22010-11-26 14:37:47 -05001980 struct srp_target_port *target = host_to_target(shost);
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001981 struct srp_rport *rport = target->rport;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001982 struct srp_rdma_ch *ch;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001983 struct srp_request *req;
1984 struct srp_iu *iu;
1985 struct srp_cmd *cmd;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001986 struct ib_device *dev;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001987 unsigned long flags;
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02001988 u32 tag;
1989 u16 idx;
Bart Van Assched1b42892014-05-20 15:07:20 +02001990 int len, ret;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001991 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
1992
1993 /*
1994 * The SCSI EH thread is the only context from which srp_queuecommand()
1995 * can get invoked for blocked devices (SDEV_BLOCK /
1996 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
1997 * locking the rport mutex if invoked from inside the SCSI EH.
1998 */
1999 if (in_scsi_eh)
2000 mutex_lock(&rport->mutex);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002001
Bart Van Assched1b42892014-05-20 15:07:20 +02002002 scmnd->result = srp_chkready(target->rport);
2003 if (unlikely(scmnd->result))
2004 goto err;
Bart Van Assche2ce19e72013-02-21 17:20:00 +00002005
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002006 WARN_ON_ONCE(scmnd->request->tag < 0);
2007 tag = blk_mq_unique_tag(scmnd->request);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002008 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002009 idx = blk_mq_unique_tag_to_tag(tag);
2010 WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
2011 dev_name(&shost->shost_gendev), tag, idx,
2012 target->req_ring_size);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002013
2014 spin_lock_irqsave(&ch->lock, flags);
2015 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002016 spin_unlock_irqrestore(&ch->lock, flags);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002017
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002018 if (!iu)
2019 goto err;
2020
2021 req = &ch->req_ring[idx];
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002022 dev = target->srp_host->srp_dev->dev;
David Dillow49248642011-01-14 18:23:24 -05002023 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08002024 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002025
David Dillowf8b6e312010-11-26 13:02:21 -05002026 scmnd->host_scribble = (void *) req;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002027
2028 cmd = iu->buf;
2029 memset(cmd, 0, sizeof *cmd);
2030
2031 cmd->opcode = SRP_CMD;
2032 cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002033 cmd->tag = tag;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002034 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2035
Roland Dreieraef9ec32005-11-02 14:07:13 -08002036 req->scmnd = scmnd;
2037 req->cmd = iu;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002038
Bart Van Assche509c07b2014-10-30 14:48:30 +01002039 len = srp_map_data(scmnd, ch, req);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002040 if (len < 0) {
David Dillow7aa54bd2008-01-07 18:23:41 -05002041 shost_printk(KERN_ERR, target->scsi_host,
Bart Van Assched1b42892014-05-20 15:07:20 +02002042 PFX "Failed to map data (%d)\n", len);
2043 /*
2044 * If we ran out of memory descriptors (-ENOMEM) because an
2045 * application is queuing many requests with more than
Bart Van Assche52ede082014-05-20 15:07:45 +02002046 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
Bart Van Assched1b42892014-05-20 15:07:20 +02002047 * to reduce queue depth temporarily.
2048 */
2049 scmnd->result = len == -ENOMEM ?
2050 DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
Bart Van Assche76c75b22010-11-26 14:37:47 -05002051 goto err_iu;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002052 }
2053
David Dillow49248642011-01-14 18:23:24 -05002054 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08002055 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002056
Bart Van Assche509c07b2014-10-30 14:48:30 +01002057 if (srp_post_send(ch, iu, len)) {
David Dillow7aa54bd2008-01-07 18:23:41 -05002058 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002059 goto err_unmap;
2060 }
2061
Bart Van Assched1b42892014-05-20 15:07:20 +02002062 ret = 0;
2063
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002064unlock_rport:
2065 if (in_scsi_eh)
2066 mutex_unlock(&rport->mutex);
2067
Bart Van Assched1b42892014-05-20 15:07:20 +02002068 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002069
2070err_unmap:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002071 srp_unmap_data(scmnd, ch, req);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002072
Bart Van Assche76c75b22010-11-26 14:37:47 -05002073err_iu:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002074 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
Bart Van Assche76c75b22010-11-26 14:37:47 -05002075
Bart Van Assche024ca902014-05-20 15:03:49 +02002076 /*
2077 * Prevent the loops that iterate over the request ring from
2078 * encountering a dangling SCSI command pointer.
2079 */
2080 req->scmnd = NULL;
2081
Bart Van Assched1b42892014-05-20 15:07:20 +02002082err:
2083 if (scmnd->result) {
2084 scmnd->scsi_done(scmnd);
2085 ret = 0;
2086 } else {
2087 ret = SCSI_MLQUEUE_HOST_BUSY;
2088 }
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002089
Bart Van Assched1b42892014-05-20 15:07:20 +02002090 goto unlock_rport;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002091}
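/*
 * Minimal sketch (not part of the upstream driver; the helper name is
 * hypothetical): how the blk-mq unique tag used in srp_queuecommand()
 * selects both the RDMA channel and the request slot. The unique tag
 * carries the hardware queue index in its upper 16 bits and the per-queue
 * tag in its lower 16 bits, e.g. 0x0002002a -> channel 2, slot 0x2a.
 */
static inline struct srp_request *srp_unique_tag_to_req(
		struct srp_target_port *target, u32 unique_tag)
{
	struct srp_rdma_ch *ch;

	/* Upper 16 bits: index into target->ch[]. */
	ch = &target->ch[blk_mq_unique_tag_to_hwq(unique_tag)];
	/* Lower 16 bits: index into the per-channel request ring. */
	return &ch->req_ring[blk_mq_unique_tag_to_tag(unique_tag)];
}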
2092
Bart Van Assche4d73f952013-10-26 14:40:37 +02002093/*
2094 * Note: the resources allocated in this function are freed in
Bart Van Assche509c07b2014-10-30 14:48:30 +01002095 * srp_free_ch_ib().
Bart Van Assche4d73f952013-10-26 14:40:37 +02002096 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01002097static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002098{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002099 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002100 int i;
2101
Bart Van Assche509c07b2014-10-30 14:48:30 +01002102 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2103 GFP_KERNEL);
2104 if (!ch->rx_ring)
Bart Van Assche4d73f952013-10-26 14:40:37 +02002105 goto err_no_ring;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002106 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2107 GFP_KERNEL);
2108 if (!ch->tx_ring)
Bart Van Assche4d73f952013-10-26 14:40:37 +02002109 goto err_no_ring;
2110
2111 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002112 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2113 ch->max_ti_iu_len,
2114 GFP_KERNEL, DMA_FROM_DEVICE);
2115 if (!ch->rx_ring[i])
Roland Dreieraef9ec32005-11-02 14:07:13 -08002116 goto err;
2117 }
2118
Bart Van Assche4d73f952013-10-26 14:40:37 +02002119 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002120 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2121 target->max_iu_len,
2122 GFP_KERNEL, DMA_TO_DEVICE);
2123 if (!ch->tx_ring[i])
Roland Dreieraef9ec32005-11-02 14:07:13 -08002124 goto err;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05002125
Bart Van Assche509c07b2014-10-30 14:48:30 +01002126 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002127 }
2128
2129 return 0;
2130
2131err:
Bart Van Assche4d73f952013-10-26 14:40:37 +02002132 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002133 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2134 srp_free_iu(target->srp_host, ch->tx_ring[i]);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002135 }
2136
Bart Van Assche4d73f952013-10-26 14:40:37 +02002137
2138err_no_ring:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002139 kfree(ch->tx_ring);
2140 ch->tx_ring = NULL;
2141 kfree(ch->rx_ring);
2142 ch->rx_ring = NULL;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002143
2144 return -ENOMEM;
2145}
2146
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002147static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2148{
2149 uint64_t T_tr_ns, max_compl_time_ms;
2150 uint32_t rq_tmo_jiffies;
2151
2152 /*
2153 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2154 * table 91), both the QP timeout and the retry count have to be set
2155 * for RC QP's during the RTR to RTS transition.
2156 */
2157 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2158 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2159
2160 /*
2161 * Set target->rq_tmo_jiffies to one second more than the largest time
2162 * it can take before an error completion is generated. See also
2163 * C9-140..142 in the IBTA spec for more information about how to
2164 * convert the QP Local ACK Timeout value to nanoseconds.
2165 */
2166 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2167 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2168 do_div(max_compl_time_ms, NSEC_PER_MSEC);
2169 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2170
2171 return rq_tmo_jiffies;
2172}
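/*
 * Worked example (illustrative values, not taken from a real HCA): with
 * qp_attr->timeout = 14 and qp_attr->retry_cnt = 7,
 *
 *	T_tr = 4096 * 2^14 ns = 67,108,864 ns (~67 ms)
 *	worst case = 7 * 4 * T_tr = 1,879,048,192 ns (~1879 ms)
 *
 * so srp_compute_rq_tmo() returns msecs_to_jiffies(1879 + 1000), i.e.
 * roughly 2.9 seconds.
 */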
2173
David Dillow961e0be2011-01-14 17:32:07 -05002174static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2175 struct srp_login_rsp *lrsp,
Bart Van Assche509c07b2014-10-30 14:48:30 +01002176 struct srp_rdma_ch *ch)
David Dillow961e0be2011-01-14 17:32:07 -05002177{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002178 struct srp_target_port *target = ch->target;
David Dillow961e0be2011-01-14 17:32:07 -05002179 struct ib_qp_attr *qp_attr = NULL;
2180 int attr_mask = 0;
2181 int ret;
2182 int i;
2183
2184 if (lrsp->opcode == SRP_LOGIN_RSP) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002185 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2186 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
David Dillow961e0be2011-01-14 17:32:07 -05002187
2188 /*
2189 * Reserve credits for task management so we don't
2190 * bounce requests back to the SCSI mid-layer.
2191 */
2192 target->scsi_host->can_queue
Bart Van Assche509c07b2014-10-30 14:48:30 +01002193 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
David Dillow961e0be2011-01-14 17:32:07 -05002194 target->scsi_host->can_queue);
Bart Van Assche4d73f952013-10-26 14:40:37 +02002195 target->scsi_host->cmd_per_lun
2196 = min_t(int, target->scsi_host->can_queue,
2197 target->scsi_host->cmd_per_lun);
David Dillow961e0be2011-01-14 17:32:07 -05002198 } else {
2199 shost_printk(KERN_WARNING, target->scsi_host,
2200 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2201 ret = -ECONNRESET;
2202 goto error;
2203 }
2204
Bart Van Assche509c07b2014-10-30 14:48:30 +01002205 if (!ch->rx_ring) {
2206 ret = srp_alloc_iu_bufs(ch);
David Dillow961e0be2011-01-14 17:32:07 -05002207 if (ret)
2208 goto error;
2209 }
2210
2211 ret = -ENOMEM;
2212 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
2213 if (!qp_attr)
2214 goto error;
2215
2216 qp_attr->qp_state = IB_QPS_RTR;
2217 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2218 if (ret)
2219 goto error_free;
2220
Bart Van Assche509c07b2014-10-30 14:48:30 +01002221 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
David Dillow961e0be2011-01-14 17:32:07 -05002222 if (ret)
2223 goto error_free;
2224
Bart Van Assche4d73f952013-10-26 14:40:37 +02002225 for (i = 0; i < target->queue_size; i++) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002226 struct srp_iu *iu = ch->rx_ring[i];
2227
2228 ret = srp_post_recv(ch, iu);
David Dillow961e0be2011-01-14 17:32:07 -05002229 if (ret)
2230 goto error_free;
2231 }
2232
2233 qp_attr->qp_state = IB_QPS_RTS;
2234 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2235 if (ret)
2236 goto error_free;
2237
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002238 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2239
Bart Van Assche509c07b2014-10-30 14:48:30 +01002240 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
David Dillow961e0be2011-01-14 17:32:07 -05002241 if (ret)
2242 goto error_free;
2243
2244 ret = ib_send_cm_rtu(cm_id, NULL, 0);
2245
2246error_free:
2247 kfree(qp_attr);
2248
2249error:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002250 ch->status = ret;
David Dillow961e0be2011-01-14 17:32:07 -05002251}
2252
Roland Dreieraef9ec32005-11-02 14:07:13 -08002253static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2254 struct ib_cm_event *event,
Bart Van Assche509c07b2014-10-30 14:48:30 +01002255 struct srp_rdma_ch *ch)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002256{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002257 struct srp_target_port *target = ch->target;
David Dillow7aa54bd2008-01-07 18:23:41 -05002258 struct Scsi_Host *shost = target->scsi_host;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002259 struct ib_class_port_info *cpi;
2260 int opcode;
2261
2262 switch (event->param.rej_rcvd.reason) {
2263 case IB_CM_REJ_PORT_CM_REDIRECT:
2264 cpi = event->param.rej_rcvd.ari;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002265 ch->path.dlid = cpi->redirect_lid;
2266 ch->path.pkey = cpi->redirect_pkey;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002267 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002268 memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002269
Bart Van Assche509c07b2014-10-30 14:48:30 +01002270 ch->status = ch->path.dlid ?
Roland Dreieraef9ec32005-11-02 14:07:13 -08002271 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2272 break;
2273
2274 case IB_CM_REJ_PORT_REDIRECT:
Roland Dreier5d7cbfd2007-08-03 10:45:18 -07002275 if (srp_target_is_topspin(target)) {
Roland Dreieraef9ec32005-11-02 14:07:13 -08002276 /*
2277 * Topspin/Cisco SRP gateways incorrectly send
2278 * reject reason code 25 when they mean 24
2279 * (port redirect).
2280 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01002281 memcpy(ch->path.dgid.raw,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002282 event->param.rej_rcvd.ari, 16);
2283
David Dillow7aa54bd2008-01-07 18:23:41 -05002284 shost_printk(KERN_DEBUG, shost,
2285 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
Bart Van Assche509c07b2014-10-30 14:48:30 +01002286 be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2287 be64_to_cpu(ch->path.dgid.global.interface_id));
Roland Dreieraef9ec32005-11-02 14:07:13 -08002288
Bart Van Assche509c07b2014-10-30 14:48:30 +01002289 ch->status = SRP_PORT_REDIRECT;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002290 } else {
David Dillow7aa54bd2008-01-07 18:23:41 -05002291 shost_printk(KERN_WARNING, shost,
2292 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002293 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002294 }
2295 break;
2296
2297 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
David Dillow7aa54bd2008-01-07 18:23:41 -05002298 shost_printk(KERN_WARNING, shost,
2299 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002300 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002301 break;
2302
2303 case IB_CM_REJ_CONSUMER_DEFINED:
2304 opcode = *(u8 *) event->private_data;
2305 if (opcode == SRP_LOGIN_REJ) {
2306 struct srp_login_rej *rej = event->private_data;
2307 u32 reason = be32_to_cpu(rej->reason);
2308
2309 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
David Dillow7aa54bd2008-01-07 18:23:41 -05002310 shost_printk(KERN_WARNING, shost,
2311 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002312 else
Bart Van Asschee7ffde02014-03-14 13:52:21 +01002313 shost_printk(KERN_WARNING, shost, PFX
2314 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
Bart Van Assche747fe002014-10-30 14:48:05 +01002315 target->sgid.raw,
2316 target->orig_dgid.raw, reason);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002317 } else
David Dillow7aa54bd2008-01-07 18:23:41 -05002318 shost_printk(KERN_WARNING, shost,
2319 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2320 " opcode 0x%02x\n", opcode);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002321 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002322 break;
2323
David Dillow9fe4bcf2008-01-08 17:08:52 -05002324 case IB_CM_REJ_STALE_CONN:
2325 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002326 ch->status = SRP_STALE_CONN;
David Dillow9fe4bcf2008-01-08 17:08:52 -05002327 break;
2328
Roland Dreieraef9ec32005-11-02 14:07:13 -08002329 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05002330 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2331 event->param.rej_rcvd.reason);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002332 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002333 }
2334}
2335
2336static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2337{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002338 struct srp_rdma_ch *ch = cm_id->context;
2339 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002340 int comp = 0;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002341
2342 switch (event->event) {
2343 case IB_CM_REQ_ERROR:
David Dillow7aa54bd2008-01-07 18:23:41 -05002344 shost_printk(KERN_DEBUG, target->scsi_host,
2345 PFX "Sending CM REQ failed\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002346 comp = 1;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002347 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002348 break;
2349
2350 case IB_CM_REP_RECEIVED:
2351 comp = 1;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002352 srp_cm_rep_handler(cm_id, event->private_data, ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002353 break;
2354
2355 case IB_CM_REJ_RECEIVED:
David Dillow7aa54bd2008-01-07 18:23:41 -05002356 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002357 comp = 1;
2358
Bart Van Assche509c07b2014-10-30 14:48:30 +01002359 srp_cm_rej_handler(cm_id, event, ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002360 break;
2361
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07002362 case IB_CM_DREQ_RECEIVED:
David Dillow7aa54bd2008-01-07 18:23:41 -05002363 shost_printk(KERN_WARNING, target->scsi_host,
2364 PFX "DREQ received - connection closed\n");
Bart Van Asschec014c8c2015-05-18 13:23:57 +02002365 ch->connected = false;
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07002366 if (ib_send_cm_drep(cm_id, NULL, 0))
David Dillow7aa54bd2008-01-07 18:23:41 -05002367 shost_printk(KERN_ERR, target->scsi_host,
2368 PFX "Sending CM DREP failed\n");
Bart Van Asschec1120f82013-10-26 14:35:08 +02002369 queue_work(system_long_wq, &target->tl_err_work);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002370 break;
2371
2372 case IB_CM_TIMEWAIT_EXIT:
David Dillow7aa54bd2008-01-07 18:23:41 -05002373 shost_printk(KERN_ERR, target->scsi_host,
2374 PFX "connection closed\n");
Bart Van Asscheac72d762014-03-14 13:53:40 +01002375 comp = 1;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002376
Bart Van Assche509c07b2014-10-30 14:48:30 +01002377 ch->status = 0;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002378 break;
2379
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07002380 case IB_CM_MRA_RECEIVED:
2381 case IB_CM_DREQ_ERROR:
2382 case IB_CM_DREP_RECEIVED:
2383 break;
2384
Roland Dreieraef9ec32005-11-02 14:07:13 -08002385 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05002386 shost_printk(KERN_WARNING, target->scsi_host,
2387 PFX "Unhandled CM event %d\n", event->event);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002388 break;
2389 }
2390
2391 if (comp)
Bart Van Assche509c07b2014-10-30 14:48:30 +01002392 complete(&ch->done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002393
Roland Dreieraef9ec32005-11-02 14:07:13 -08002394 return 0;
2395}
2396
Jack Wang71444b92013-11-07 11:37:37 +01002397/**
Jack Wang71444b92013-11-07 11:37:37 +01002398 * srp_change_queue_depth - set device queue depth
2399 * @sdev: scsi device struct
2400 * @qdepth: requested queue depth
Jack Wang71444b92013-11-07 11:37:37 +01002401 *
2402 * Returns queue depth.
2403 */
2404static int
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01002405srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
Jack Wang71444b92013-11-07 11:37:37 +01002406{
Christoph Hellwigc40ecc12014-11-13 14:25:11 +01002407 if (!sdev->tagged_supported)
Christoph Hellwig1e6f2412014-11-13 14:27:41 +01002408 qdepth = 1;
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01002409 return scsi_change_queue_depth(sdev, qdepth);
Jack Wang71444b92013-11-07 11:37:37 +01002410}
2411
static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag,
			     unsigned int lun, u8 func)
{
	struct srp_target_port *target = ch->target;
	struct srp_rport *rport = target->rport;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu;
	struct srp_tsk_mgmt *tsk_mgmt;

	if (!ch->connected || target->qp_in_error)
		return -1;

	init_completion(&ch->tsk_mgmt_done);

	/*
	 * Lock the rport mutex to prevent srp_create_ch_ib() from being
	 * invoked while a task management function is being sent.
	 */
	mutex_lock(&rport->mutex);
	spin_lock_irq(&ch->lock);
	iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
	spin_unlock_irq(&ch->lock);

	if (!iu) {
		mutex_unlock(&rport->mutex);

		return -1;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
				   DMA_TO_DEVICE);
	tsk_mgmt = iu->buf;
	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);

	tsk_mgmt->opcode	= SRP_TSK_MGMT;
	tsk_mgmt->lun		= cpu_to_be64((u64) lun << 48);
	tsk_mgmt->tag		= req_tag | SRP_TAG_TSK_MGMT;
	tsk_mgmt->tsk_mgmt_func = func;
	tsk_mgmt->task_tag	= req_tag;

	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
				      DMA_TO_DEVICE);
	if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
		srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
		mutex_unlock(&rport->mutex);

		return -1;
	}
	mutex_unlock(&rport->mutex);

	if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
		return -1;

	return 0;
}

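/*
 * The block layer packs the hardware-queue index into the upper 16 bits of
 * the value returned by blk_mq_unique_tag() and the per-queue tag into the
 * lower 16 bits. For example, unique tag 0x0002002a identifies tag 0x2a on
 * hardware queue (RDMA channel) 2, which is what the
 * blk_mq_unique_tag_to_hwq() call below recovers.
 */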
static int srp_abort(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
	u32 tag;
	u16 ch_idx;
	struct srp_rdma_ch *ch;
	int ret;

	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");

	if (!req)
		return SUCCESS;
	tag = blk_mq_unique_tag(scmnd->request);
	ch_idx = blk_mq_unique_tag_to_hwq(tag);
	if (WARN_ON_ONCE(ch_idx >= target->ch_count))
		return SUCCESS;
	ch = &target->ch[ch_idx];
	if (!srp_claim_req(ch, req, NULL, scmnd))
		return SUCCESS;
	shost_printk(KERN_ERR, target->scsi_host,
		     "Sending SRP abort for tag %#x\n", tag);
	if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
			      SRP_TSK_ABORT_TASK) == 0)
		ret = SUCCESS;
	else if (target->rport->state == SRP_RPORT_LOST)
		ret = FAST_IO_FAIL;
	else
		ret = FAILED;
	srp_free_req(ch, req, scmnd, 0);
	scmnd->result = DID_ABORT << 16;
	scmnd->scsi_done(scmnd);

	return ret;
}

static int srp_reset_device(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_rdma_ch *ch;
	int i, j;

	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");

	ch = &target->ch[0];
	if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
			      SRP_TSK_LUN_RESET))
		return FAILED;
	if (ch->tsk_mgmt_status)
		return FAILED;

	/*
	 * Use separate counters for the two loops: reusing 'i' for the inner
	 * loop would terminate the channel loop after the first channel.
	 */
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
		}
	}

	return SUCCESS;
}

static int srp_reset_host(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);

	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");

	return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
}

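/*
 * For disk devices, raise the block layer request timeout to at least 30
 * seconds, or to target->rq_tmo_jiffies if the target requires more.
 */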
static int srp_slave_configure(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct srp_target_port *target = host_to_target(shost);
	struct request_queue *q = sdev->request_queue;
	unsigned long timeout;

	if (sdev->type == TYPE_DISK) {
		timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
		blk_queue_rq_timeout(q, timeout);
	}

	return 0;
}

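/*
 * The read-only attributes below are exported for every SRP SCSI host; as
 * SCSI host attributes they are expected to appear under
 * /sys/class/scsi_host/host<n>/ (exact path depends on how the SCSI core
 * registers the host).
 */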
static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
}

static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
}

static ssize_t show_service_id(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
}

static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
}

static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%pI6\n", target->sgid.raw);
}

static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));
	struct srp_rdma_ch *ch = &target->ch[0];

	return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
}

static ssize_t show_orig_dgid(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
}

static ssize_t show_req_lim(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));
	struct srp_rdma_ch *ch;
	int i, req_lim = INT_MAX;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		req_lim = min(req_lim, ch->req_lim);
	}
	return sprintf(buf, "%d\n", req_lim);
}

static ssize_t show_zero_req_lim(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->zero_req_lim);
}

static ssize_t show_local_ib_port(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->srp_host->port);
}

static ssize_t show_local_ib_device(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
}

static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->ch_count);
}

static ssize_t show_comp_vector(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->comp_vector);
}

static ssize_t show_tl_retry_count(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->tl_retry_count);
}

static ssize_t show_cmd_sg_entries(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%u\n", target->cmd_sg_cnt);
}

static ssize_t show_allow_ext_sg(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
}

static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);

static struct device_attribute *srp_host_attrs[] = {
	&dev_attr_id_ext,
	&dev_attr_ioc_guid,
	&dev_attr_service_id,
	&dev_attr_pkey,
	&dev_attr_sgid,
	&dev_attr_dgid,
	&dev_attr_orig_dgid,
	&dev_attr_req_lim,
	&dev_attr_zero_req_lim,
	&dev_attr_local_ib_port,
	&dev_attr_local_ib_device,
	&dev_attr_ch_count,
	&dev_attr_comp_vector,
	&dev_attr_tl_retry_count,
	&dev_attr_cmd_sg_entries,
	&dev_attr_allow_ext_sg,
	NULL
};

static struct scsi_host_template srp_template = {
	.module				= THIS_MODULE,
	.name				= "InfiniBand SRP initiator",
	.proc_name			= DRV_NAME,
	.slave_configure		= srp_slave_configure,
	.info				= srp_target_info,
	.queuecommand			= srp_queuecommand,
	.change_queue_depth		= srp_change_queue_depth,
	.eh_abort_handler		= srp_abort,
	.eh_device_reset_handler	= srp_reset_device,
	.eh_host_reset_handler		= srp_reset_host,
	.skip_settle_delay		= true,
	.sg_tablesize			= SRP_DEF_SG_TABLESIZE,
	.can_queue			= SRP_DEFAULT_CMD_SQ_SIZE,
	.this_id			= -1,
	.cmd_per_lun			= SRP_DEFAULT_CMD_SQ_SIZE,
	.use_clustering			= ENABLE_CLUSTERING,
	.shost_attrs			= srp_host_attrs,
	.use_blk_tags			= 1,
	.track_queue_depth		= 1,
};

static int srp_sdev_count(struct Scsi_Host *host)
{
	struct scsi_device *sdev;
	int c = 0;

	shost_for_each_device(sdev, host)
		c++;

	return c;
}

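/*
 * Register the SCSI host and SRP rport, scan the target, and only then move
 * the state from SRP_TARGET_SCANNING to SRP_TARGET_LIVE. If a channel
 * dropped or the QP failed while scanning, the half-initialized host is
 * queued for removal instead.
 */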
static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
	struct srp_rport_identifiers ids;
	struct srp_rport *rport;

	target->state = SRP_TARGET_SCANNING;
	sprintf(target->target_name, "SRP.T10:%016llX",
		be64_to_cpu(target->id_ext));

	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
		return -ENODEV;

	memcpy(ids.port_id, &target->id_ext, 8);
	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
	ids.roles = SRP_RPORT_ROLE_TARGET;
	rport = srp_rport_add(target->scsi_host, &ids);
	if (IS_ERR(rport)) {
		scsi_remove_host(target->scsi_host);
		return PTR_ERR(rport);
	}

	rport->lld_data = target;
	target->rport = rport;

	spin_lock(&host->target_lock);
	list_add_tail(&target->list, &host->target_list);
	spin_unlock(&host->target_lock);

	scsi_scan_target(&target->scsi_host->shost_gendev,
			 0, target->scsi_id, SCAN_WILD_CARD, 0);

	if (srp_connected_ch(target) < target->ch_count ||
	    target->qp_in_error) {
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "SCSI scan failed - removing SCSI host\n");
		srp_queue_remove_work(target);
		goto out;
	}

	pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n",
		 dev_name(&target->scsi_host->shost_gendev),
		 srp_sdev_count(target->scsi_host));

	spin_lock_irq(&target->lock);
	if (target->state == SRP_TARGET_SCANNING)
		target->state = SRP_TARGET_LIVE;
	spin_unlock_irq(&target->lock);

out:
	return 0;
}

static void srp_release_dev(struct device *dev)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);

	complete(&host->released);
}

static struct class srp_class = {
	.name		= "infiniband_srp",
	.dev_release	= srp_release_dev
};

/**
 * srp_conn_unique() - check whether the connection to a target is unique
 * @host:   SRP host.
 * @target: SRP target port.
 */
static bool srp_conn_unique(struct srp_host *host,
			    struct srp_target_port *target)
{
	struct srp_target_port *t;
	bool ret = false;

	if (target->state == SRP_TARGET_REMOVED)
		goto out;

	ret = true;

	spin_lock(&host->target_lock);
	list_for_each_entry(t, &host->target_list, list) {
		if (t != target &&
		    target->id_ext == t->id_ext &&
		    target->ioc_guid == t->ioc_guid &&
		    target->initiator_ext == t->initiator_ext) {
			ret = false;
			break;
		}
	}
	spin_unlock(&host->target_lock);

out:
	return ret;
}

/*
 * Target ports are added by writing
 *
 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
 *     pkey=<P_Key>,service_id=<service ID>
 *
 * to the add_target sysfs attribute.
 */
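/*
 * For example (all identifier values below are made up, and "mlx4_0" stands
 * in for a real HCA name), the attribute lives on the class device created
 * by srp_add_port():
 *
 *   echo "id_ext=200100e08b0a1b2c,ioc_guid=200100e08b0a1b2c,\
 *   dgid=fe800000000000000002c90300a0b1c2,pkey=ffff,\
 *   service_id=200100e08b0a1b2c" \
 *       > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 */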
enum {
	SRP_OPT_ERR		= 0,
	SRP_OPT_ID_EXT		= 1 << 0,
	SRP_OPT_IOC_GUID	= 1 << 1,
	SRP_OPT_DGID		= 1 << 2,
	SRP_OPT_PKEY		= 1 << 3,
	SRP_OPT_SERVICE_ID	= 1 << 4,
	SRP_OPT_MAX_SECT	= 1 << 5,
	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
	SRP_OPT_IO_CLASS	= 1 << 7,
	SRP_OPT_INITIATOR_EXT	= 1 << 8,
	SRP_OPT_CMD_SG_ENTRIES	= 1 << 9,
	SRP_OPT_ALLOW_EXT_SG	= 1 << 10,
	SRP_OPT_SG_TABLESIZE	= 1 << 11,
	SRP_OPT_COMP_VECTOR	= 1 << 12,
	SRP_OPT_TL_RETRY_COUNT	= 1 << 13,
	SRP_OPT_QUEUE_SIZE	= 1 << 14,
	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
				   SRP_OPT_IOC_GUID	|
				   SRP_OPT_DGID		|
				   SRP_OPT_PKEY		|
				   SRP_OPT_SERVICE_ID),
};

static const match_table_t srp_opt_tokens = {
	{ SRP_OPT_ID_EXT,		"id_ext=%s"		},
	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s"		},
	{ SRP_OPT_DGID,			"dgid=%s"		},
	{ SRP_OPT_PKEY,			"pkey=%x"		},
	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
	{ SRP_OPT_MAX_SECT,		"max_sect=%d"		},
	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d"	},
	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u"	},
	{ SRP_OPT_ALLOW_EXT_SG,		"allow_ext_sg=%u"	},
	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u"	},
	{ SRP_OPT_COMP_VECTOR,		"comp_vector=%u"	},
	{ SRP_OPT_TL_RETRY_COUNT,	"tl_retry_count=%u"	},
	{ SRP_OPT_QUEUE_SIZE,		"queue_size=%d"		},
	{ SRP_OPT_ERR,			NULL			}
};

static int srp_parse_options(const char *buf, struct srp_target_port *target)
{
	char *options, *sep_opt;
	char *p;
	char dgid[3];
	substring_t args[MAX_OPT_ARGS];
	int opt_mask = 0;
	int token;
	int ret = -EINVAL;
	int i;

	options = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	sep_opt = options;
	while ((p = strsep(&sep_opt, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, srp_opt_tokens, args);
		opt_mask |= token;

		switch (token) {
		case SRP_OPT_ID_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_IOC_GUID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_DGID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(p) != 32) {
				pr_warn("bad dest GID parameter '%s'\n", p);
				kfree(p);
				goto out;
			}

			for (i = 0; i < 16; ++i) {
				strlcpy(dgid, p + i * 2, sizeof(dgid));
				if (sscanf(dgid, "%hhx",
					   &target->orig_dgid.raw[i]) < 1) {
					ret = -EINVAL;
					kfree(p);
					goto out;
				}
			}
			kfree(p);
			break;

		case SRP_OPT_PKEY:
			if (match_hex(args, &token)) {
				pr_warn("bad P_Key parameter '%s'\n", p);
				goto out;
			}
			target->pkey = cpu_to_be16(token);
			break;

		case SRP_OPT_SERVICE_ID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_MAX_SECT:
			if (match_int(args, &token)) {
				pr_warn("bad max sect parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->max_sectors = token;
			break;

		case SRP_OPT_QUEUE_SIZE:
			if (match_int(args, &token) || token < 1) {
				pr_warn("bad queue_size parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->can_queue = token;
			target->queue_size = token + SRP_RSP_SQ_SIZE +
					     SRP_TSK_MGMT_SQ_SIZE;
			if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
				target->scsi_host->cmd_per_lun = token;
			break;

		case SRP_OPT_MAX_CMD_PER_LUN:
			if (match_int(args, &token) || token < 1) {
				pr_warn("bad max cmd_per_lun parameter '%s'\n",
					p);
				goto out;
			}
			target->scsi_host->cmd_per_lun = token;
			break;

		case SRP_OPT_IO_CLASS:
			if (match_hex(args, &token)) {
				pr_warn("bad IO class parameter '%s'\n", p);
				goto out;
			}
			if (token != SRP_REV10_IB_IO_CLASS &&
			    token != SRP_REV16A_IB_IO_CLASS) {
				pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
					token, SRP_REV10_IB_IO_CLASS,
					SRP_REV16A_IB_IO_CLASS);
				goto out;
			}
			target->io_class = token;
			break;

		case SRP_OPT_INITIATOR_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_CMD_SG_ENTRIES:
			if (match_int(args, &token) || token < 1 || token > 255) {
				pr_warn("bad max cmd_sg_entries parameter '%s'\n",
					p);
				goto out;
			}
			target->cmd_sg_cnt = token;
			break;

		case SRP_OPT_ALLOW_EXT_SG:
			if (match_int(args, &token)) {
				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
				goto out;
			}
			target->allow_ext_sg = !!token;
			break;

		case SRP_OPT_SG_TABLESIZE:
			if (match_int(args, &token) || token < 1 ||
			    token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
				pr_warn("bad max sg_tablesize parameter '%s'\n",
					p);
				goto out;
			}
			target->sg_tablesize = token;
			break;

		case SRP_OPT_COMP_VECTOR:
			if (match_int(args, &token) || token < 0) {
				pr_warn("bad comp_vector parameter '%s'\n", p);
				goto out;
			}
			target->comp_vector = token;
			break;

		case SRP_OPT_TL_RETRY_COUNT:
			if (match_int(args, &token) || token < 2 || token > 7) {
				pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
					p);
				goto out;
			}
			target->tl_retry_count = token;
			break;

		default:
			pr_warn("unknown parameter or missing value '%s' in target creation request\n",
				p);
			goto out;
		}
	}

	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
		ret = 0;
	else
		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
			    !(srp_opt_tokens[i].token & opt_mask))
				pr_warn("target creation request is missing parameter '%s'\n",
					srp_opt_tokens[i].pattern);

	if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
	    && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
		pr_warn("cmd_per_lun = %d > queue_size = %d\n",
			target->scsi_host->cmd_per_lun,
			target->scsi_host->can_queue);

out:
	kfree(options);
	return ret;
}

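/*
 * Handler for the write-only add_target attribute defined below: parse and
 * validate the option string, allocate one RDMA channel per online CPU
 * (bounded by the number of HCA completion vectors), connect each channel,
 * and finally register and scan the SCSI host.
 */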
static ssize_t srp_create_target(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);
	struct Scsi_Host *target_host;
	struct srp_target_port *target;
	struct srp_rdma_ch *ch;
	struct srp_device *srp_dev = host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	int ret, node_idx, node, cpu, i;
	bool multich = false;

	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
	if (!target_host)
		return -ENOMEM;

	target_host->transportt  = ib_srp_transport_template;
	target_host->max_channel = 0;
	target_host->max_id      = 1;
	target_host->max_lun     = SRP_MAX_LUN;
	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;

	target = host_to_target(target_host);

	target->io_class	= SRP_REV16A_IB_IO_CLASS;
	target->scsi_host	= target_host;
	target->srp_host	= host;
	target->lkey		= host->srp_dev->mr->lkey;
	target->rkey		= host->srp_dev->mr->rkey;
	target->cmd_sg_cnt	= cmd_sg_entries;
	target->sg_tablesize	= indirect_sg_entries ? : cmd_sg_entries;
	target->allow_ext_sg	= allow_ext_sg;
	target->tl_retry_count	= 7;
	target->queue_size	= SRP_DEFAULT_QUEUE_SIZE;

	/*
	 * Prevent the SCSI host from being removed by srp_remove_target()
	 * before this function returns.
	 */
	scsi_host_get(target->scsi_host);

	mutex_lock(&host->add_target_mutex);

	ret = srp_parse_options(buf, target);
	if (ret)
		goto out;

	ret = scsi_init_shared_tag_map(target_host, target_host->can_queue);
	if (ret)
		goto out;

	target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;

	if (!srp_conn_unique(target->srp_host, target)) {
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
			     be64_to_cpu(target->id_ext),
			     be64_to_cpu(target->ioc_guid),
			     be64_to_cpu(target->initiator_ext));
		ret = -EEXIST;
		goto out;
	}

	if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
	    target->cmd_sg_cnt < target->sg_tablesize) {
		pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
		target->sg_tablesize = target->cmd_sg_cnt;
	}

	target_host->sg_tablesize = target->sg_tablesize;
	target->indirect_size = target->sg_tablesize *
				sizeof (struct srp_direct_buf);
	target->max_iu_len = sizeof (struct srp_cmd) +
			     sizeof (struct srp_indirect_buf) +
			     target->cmd_sg_cnt * sizeof (struct srp_direct_buf);

	INIT_WORK(&target->tl_err_work, srp_tl_err_work);
	INIT_WORK(&target->remove_work, srp_remove_work);
	spin_lock_init(&target->lock);
	ret = ib_query_gid(ibdev, host->port, 0, &target->sgid);
	if (ret)
		goto out;

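	/*
	 * Channel count: at least one channel per NUMA node and at most one
	 * per online CPU, further capped at four per node and at the number
	 * of HCA completion vectors (unless overridden by the ch_count
	 * module parameter). E.g. on a hypothetical box with 2 nodes,
	 * 16 CPUs and 8 completion vectors this yields
	 * max(2, min(min(8, 8), 16)) = 8 channels.
	 */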
	ret = -ENOMEM;
	target->ch_count = max_t(unsigned, num_online_nodes(),
				 min(ch_count ? :
				     min(4 * num_online_nodes(),
					 ibdev->num_comp_vectors),
				     num_online_cpus()));
	target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
			     GFP_KERNEL);
	if (!target->ch)
		goto out;

	node_idx = 0;
	for_each_online_node(node) {
		const int ch_start = (node_idx * target->ch_count /
				      num_online_nodes());
		const int ch_end = ((node_idx + 1) * target->ch_count /
				    num_online_nodes());
		const int cv_start = (node_idx * ibdev->num_comp_vectors /
				      num_online_nodes() + target->comp_vector)
				     % ibdev->num_comp_vectors;
		const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
				    num_online_nodes() + target->comp_vector)
				    % ibdev->num_comp_vectors;
		int cpu_idx = 0;

		for_each_online_cpu(cpu) {
			if (cpu_to_node(cpu) != node)
				continue;
			if (ch_start + cpu_idx >= ch_end)
				continue;
			ch = &target->ch[ch_start + cpu_idx];
			ch->target = target;
			ch->comp_vector = cv_start == cv_end ? cv_start :
				cv_start + cpu_idx % (cv_end - cv_start);
			spin_lock_init(&ch->lock);
			INIT_LIST_HEAD(&ch->free_tx);
			ret = srp_new_cm_id(ch);
			if (ret)
				goto err_disconnect;

			ret = srp_create_ch_ib(ch);
			if (ret)
				goto err_disconnect;

			ret = srp_alloc_req_data(ch);
			if (ret)
				goto err_disconnect;

			ret = srp_connect_ch(ch, multich);
			if (ret) {
				shost_printk(KERN_ERR, target->scsi_host,
					     PFX "Connection %d/%d failed\n",
					     ch_start + cpu_idx,
					     target->ch_count);
				if (node_idx == 0 && cpu_idx == 0) {
					goto err_disconnect;
				} else {
					srp_free_ch_ib(target, ch);
					srp_free_req_data(target, ch);
					target->ch_count = ch - target->ch;
					break;
				}
			}

			multich = true;
			cpu_idx++;
		}
		node_idx++;
	}

	target->scsi_host->nr_hw_queues = target->ch_count;

	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

	if (target->state != SRP_TARGET_REMOVED) {
		shost_printk(KERN_DEBUG, target->scsi_host, PFX
			     "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
			     be64_to_cpu(target->id_ext),
			     be64_to_cpu(target->ioc_guid),
			     be16_to_cpu(target->pkey),
			     be64_to_cpu(target->service_id),
			     target->sgid.raw, target->orig_dgid.raw);
	}

	ret = count;

out:
	mutex_unlock(&host->add_target_mutex);

	scsi_host_put(target->scsi_host);

	return ret;

err_disconnect:
	srp_disconnect_target(target);

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
		srp_free_req_data(target, ch);
	}

	kfree(target->ch);
	goto out;
}

static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);

static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%s\n", host->srp_dev->dev->name);
}

static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%d\n", host->port);
}

static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);

static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	spin_lock_init(&host->target_lock);
	init_completion(&host->released);
	mutex_init(&host->add_target_mutex);
	host->srp_dev = device;
	host->port = port;

	host->dev.class = &srp_class;
	host->dev.parent = device->dev->dma_device;
	dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);

	if (device_register(&host->dev))
		goto free_host;
	if (device_create_file(&host->dev, &dev_attr_add_target))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_ibdev))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_port))
		goto err_class;

	return host;

err_class:
	device_unregister(&host->dev);

free_host:
	kfree(host);

	return NULL;
}

static void srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct ib_device_attr *dev_attr;
	struct srp_host *host;
	int mr_page_shift, s, e, p;
	u64 max_pages_per_mr;

	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
	if (!dev_attr)
		return;

	if (ib_query_device(device, dev_attr)) {
		pr_warn("Query device failed for %s\n", device->name);
		goto free_attr;
	}

	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
	if (!srp_dev)
		goto free_attr;

	srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
			    device->map_phys_fmr && device->unmap_fmr);
	srp_dev->has_fr = (dev_attr->device_cap_flags &
			   IB_DEVICE_MEM_MGT_EXTENSIONS);
	if (!srp_dev->has_fmr && !srp_dev->has_fr)
		dev_warn(&device->dev, "neither FMR nor FR is supported\n");

	srp_dev->use_fast_reg = (srp_dev->has_fr &&
				 (!srp_dev->has_fmr || prefer_fr));

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 4096 bytes. We're unlikely to build large sglists
	 * out of smaller entries.
	 */
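	/*
	 * Worked example (assumed capability values): if page_size_cap is
	 * 0xfffff000 (every size from 4 KiB up), ffs() - 1 = 12, so
	 * mr_page_size = 4096; with max_mr_size = 2 GiB that allows
	 * 2^31 / 2^12 = 524288 pages per MR before the SRP_MAX_PAGES_PER_MR
	 * clamp below is applied.
	 */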
	mr_page_shift		= max(12, ffs(dev_attr->page_size_cap) - 1);
	srp_dev->mr_page_size	= 1 << mr_page_shift;
	srp_dev->mr_page_mask	= ~((u64) srp_dev->mr_page_size - 1);
	max_pages_per_mr	= dev_attr->max_mr_size;
	do_div(max_pages_per_mr, srp_dev->mr_page_size);
	srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
					  max_pages_per_mr);
	if (srp_dev->use_fast_reg) {
		srp_dev->max_pages_per_mr =
			min_t(u32, srp_dev->max_pages_per_mr,
			      dev_attr->max_fast_reg_page_list_len);
	}
	srp_dev->mr_max_size	= srp_dev->mr_page_size *
				  srp_dev->max_pages_per_mr;
	pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
		 device->name, mr_page_shift, dev_attr->max_mr_size,
		 dev_attr->max_fast_reg_page_list_len,
		 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);

	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd  = ib_alloc_pd(device);
	if (IS_ERR(srp_dev->pd))
		goto free_dev;

	srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
				    IB_ACCESS_LOCAL_WRITE |
				    IB_ACCESS_REMOTE_READ |
				    IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(srp_dev->mr))
		goto err_pd;

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);

	goto free_attr;

err_pd:
	ib_dealloc_pd(srp_dev->pd);

free_dev:
	kfree(srp_dev);

free_attr:
	kfree(dev_attr);
}

static void srp_remove_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	struct srp_target_port *target;

	srp_dev = ib_get_client_data(device, &srp_client);
	if (!srp_dev)
		return;

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		device_unregister(&host->dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Remove all target ports.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list)
			srp_queue_remove_work(target);
		spin_unlock(&host->target_lock);

		/*
		 * Wait for tl_err and target port removal tasks.
		 */
		flush_workqueue(system_long_wq);
		flush_workqueue(srp_remove_wq);

		kfree(host);
	}

	ib_dereg_mr(srp_dev->mr);
	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}

static struct srp_function_template ib_srp_transport_functions = {
	.has_rport_state	 = true,
	.reset_timer_if_blocked	 = true,
	.reconnect_delay	 = &srp_reconnect_delay,
	.fast_io_fail_tmo	 = &srp_fast_io_fail_tmo,
	.dev_loss_tmo		 = &srp_dev_loss_tmo,
	.reconnect		 = srp_rport_reconnect,
	.rport_delete		 = srp_rport_delete,
	.terminate_rport_io	 = srp_terminate_io,
};

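/*
 * Registration order matters here: the SRP transport template and the
 * device class must exist before ib_register_client() is called, because
 * srp_add_one() can run for already-present HCAs from within that call.
 * srp_cleanup_module() tears everything down in the reverse order.
 */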
static int __init srp_init_module(void)
{
	int ret;

	BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));

	if (srp_sg_tablesize) {
		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
		if (!cmd_sg_entries)
			cmd_sg_entries = srp_sg_tablesize;
	}

	if (!cmd_sg_entries)
		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;

	if (cmd_sg_entries > 255) {
		pr_warn("Clamping cmd_sg_entries to 255\n");
		cmd_sg_entries = 255;
	}

	if (!indirect_sg_entries)
		indirect_sg_entries = cmd_sg_entries;
	else if (indirect_sg_entries < cmd_sg_entries) {
		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
			cmd_sg_entries);
		indirect_sg_entries = cmd_sg_entries;
	}

	srp_remove_wq = create_workqueue("srp_remove");
	if (!srp_remove_wq) {
		ret = -ENOMEM;
		goto out;
	}

	ret = -ENOMEM;
	ib_srp_transport_template =
		srp_attach_transport(&ib_srp_transport_functions);
	if (!ib_srp_transport_template)
		goto destroy_wq;

	ret = class_register(&srp_class);
	if (ret) {
		pr_err("couldn't register class infiniband_srp\n");
		goto release_tr;
	}

	ib_sa_register_client(&srp_sa_client);

	ret = ib_register_client(&srp_client);
	if (ret) {
		pr_err("couldn't register IB client\n");
		goto unreg_sa;
	}

out:
	return ret;

unreg_sa:
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);

release_tr:
	srp_release_transport(ib_srp_transport_template);

destroy_wq:
	destroy_workqueue(srp_remove_wq);
	goto out;
}

static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);
	srp_release_transport(ib_srp_transport_template);
	destroy_workqueue(srp_remove_wq);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);