/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <rdma/ib_cache.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"2.0"
#define DRV_RELDATE	"July 26, 2015"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_INFO(release_date, DRV_RELDATE);

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool prefer_fr = true;
static bool register_always = true;
static bool never_register;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SG_MAX_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(prefer_fr, bool, 0444);
MODULE_PARM_DESC(prefer_fr,
"Whether to use fast registration if both FMR and fast registration are supported");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
		 "Use memory registration even for contiguous memory regions");

module_param(never_register, bool, 0444);
MODULE_PARM_DESC(never_register, "Never register memory");

static const struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");

static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");

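/*
 * Illustrative usage note (not part of the original file): all of the
 * module parameters above can be set at load time, for example
 *
 *   modprobe ib_srp cmd_sg_entries=16 ch_count=2
 *
 * The parameter values shown here are hypothetical.
 */
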
static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
		const char *opname);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

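/**
 * srp_tmo_get() - show an SRP timeout parameter in sysfs
 * @buffer: Where to write the parameter value.
 * @kp: Kernel parameter being read.
 *
 * A negative timeout is reported as "off".
 */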
static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sprintf(buffer, "%d", tmo);
	else
		return sprintf(buffer, "off");
}

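/**
 * srp_tmo_set() - update an SRP timeout parameter from sysfs
 * @val: New parameter value.
 * @kp: Kernel parameter being updated.
 *
 * The new value is only stored if the resulting combination of
 * reconnect_delay, fast_io_fail_tmo and dev_loss_tmo passes
 * srp_tmo_valid().
 */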
static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	res = srp_parse_tmo(&tmo, val);
	if (res)
		goto out;

	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}

static const struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

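/**
 * srp_alloc_iu() - allocate and DMA-map an information unit
 * @host: SRP host to allocate the information unit for.
 * @size: Size of the information unit buffer in bytes.
 * @gfp_mask: Memory allocation flags.
 * @direction: DMA data direction.
 *
 * Returns NULL if buffer allocation or DMA mapping fails.
 */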
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %s (%d)\n",
		 ib_event_msg(event->event), event->event);
}

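/**
 * srp_init_qp() - transition a queue pair to the INIT state
 * @target: SRP target port.
 * @qp: RDMA queue pair.
 *
 * Looks up the P_Key index for the target port and enables remote
 * read and write access on the queue pair.
 */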
static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

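/**
 * srp_new_cm_id() - allocate a new CM ID for an RDMA channel
 * @ch: SRP RDMA channel.
 *
 * Any previously allocated CM ID is destroyed and the channel path
 * information is reinitialized from the target port data.
 */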
static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, ch);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (ch->cm_id)
		ib_destroy_cm_id(ch->cm_id);
	ch->cm_id = new_cm_id;
	ch->path.sgid = target->sgid;
	ch->path.dgid = target->orig_dgid;
	ch->path.pkey = target->pkey;
	ch->path.service_id = target->service_id;

	return 0;
}

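/**
 * srp_alloc_fmr_pool() - allocate an FMR pool for a target port
 * @target: SRP target port.
 */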
static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_fmr_pool_param fmr_param;

	memset(&fmr_param, 0, sizeof(fmr_param));
	fmr_param.pool_size	    = target->mr_pool_size;
	fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
	fmr_param.cache		    = 1;
	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
	fmr_param.page_shift	    = ilog2(dev->mr_page_size);
	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_WRITE |
				       IB_ACCESS_REMOTE_READ);

	return ib_create_fmr_pool(dev->pd, &fmr_param);
}

/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
	int i;
	struct srp_fr_desc *d;

	if (!pool)
		return;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		if (d->mr)
			ib_dereg_mr(d->mr);
	}
	kfree(pool);
}

/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
					      struct ib_pd *pd, int pool_size,
					      int max_page_list_len)
{
	struct srp_fr_pool *pool;
	struct srp_fr_desc *d;
	struct ib_mr *mr;
	int i, ret = -EINVAL;

	if (pool_size <= 0)
		goto err;
	ret = -ENOMEM;
	pool = kzalloc(sizeof(struct srp_fr_pool) +
		       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
	if (!pool)
		goto err;
	pool->size = pool_size;
	pool->max_page_list_len = max_page_list_len;
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free_list);

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
				 max_page_list_len);
		if (IS_ERR(mr)) {
			ret = PTR_ERR(mr);
			goto destroy_pool;
		}
		d->mr = mr;
		list_add_tail(&d->entry, &pool->free_list);
	}

out:
	return pool;

destroy_pool:
	srp_destroy_fr_pool(pool);

err:
	pool = ERR_PTR(ret);
	goto out;
}

/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
	struct srp_fr_desc *d = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->free_list)) {
		d = list_first_entry(&pool->free_list, typeof(*d), entry);
		list_del(&d->entry);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return d;
}

/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
			    int n)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < n; i++)
		list_add(&desc[i]->entry, &pool->free_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;

	return srp_create_fr_pool(dev->dev, dev->pd, target->mr_pool_size,
				  dev->max_pages_per_mr);
}

/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @qp: RDMA queue pair.
 *
 * Drain the qp before destroying it so that the receive completion
 * handler cannot access the queue pair while it is being destroyed.
 */
static void srp_destroy_qp(struct ib_qp *qp)
{
	ib_drain_rq(qp);
	ib_destroy_qp(qp);
}

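/**
 * srp_create_ch_ib() - (re)allocate the IB resources of an RDMA channel
 * @ch: SRP RDMA channel.
 *
 * Allocates new completion queues, a new queue pair and, depending on
 * the memory registration mode, a new FR or FMR pool. Resources the
 * channel already owns are only freed after their replacements have
 * been allocated successfully.
 */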
static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	struct ib_fmr_pool *fmr_pool = NULL;
	struct srp_fr_pool *fr_pool = NULL;
	const int m = 1 + dev->use_fast_reg * target->mr_per_cmd * 2;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	/* queue_size + 1 for ib_drain_rq() */
	recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
				ch->comp_vector, IB_POLL_SOFTIRQ);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,
				ch->comp_vector, IB_POLL_DIRECT);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = m * target->queue_size;
	init_attr->cap.max_recv_wr     = target->queue_size + 1;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = 1;
	init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = send_cq;
	init_attr->recv_cq             = recv_cq;

	qp = ib_create_qp(dev->pd, init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, qp);
	if (ret)
		goto err_qp;

	if (dev->use_fast_reg) {
		fr_pool = srp_alloc_fr_pool(target);
		if (IS_ERR(fr_pool)) {
			ret = PTR_ERR(fr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	} else if (dev->use_fmr) {
		fmr_pool = srp_alloc_fmr_pool(target);
		if (IS_ERR(fmr_pool)) {
			ret = PTR_ERR(fmr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FMR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	}

	if (ch->qp)
		srp_destroy_qp(ch->qp);
	if (ch->recv_cq)
		ib_free_cq(ch->recv_cq);
	if (ch->send_cq)
		ib_free_cq(ch->send_cq);

	ch->qp = qp;
	ch->recv_cq = recv_cq;
	ch->send_cq = send_cq;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
		ch->fr_pool = fr_pool;
	} else if (dev->use_fmr) {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
		ch->fmr_pool = fmr_pool;
	}

	kfree(init_attr);
	return 0;

err_qp:
	srp_destroy_qp(qp);

err_send_cq:
	ib_free_cq(send_cq);

err_recv_cq:
	ib_free_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}

/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
			   struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	int i;

	if (!ch->target)
		return;

	if (ch->cm_id) {
		ib_destroy_cm_id(ch->cm_id);
		ch->cm_id = NULL;
	}

	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */
	if (!ch->qp)
		return;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
	} else if (dev->use_fmr) {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
	}

	srp_destroy_qp(ch->qp);
	ib_free_cq(ch->send_cq);
	ib_free_cq(ch->recv_cq);

	/*
	 * Avoid having the SCSI error handler use this channel after it
	 * has been freed: the SCSI error handler may keep trying to
	 * perform recovery actions after scsi_remove_host() has returned.
	 */
	ch->target = NULL;

	ch->qp = NULL;
	ch->send_cq = ch->recv_cq = NULL;

	if (ch->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->rx_ring[i]);
		kfree(ch->rx_ring);
		ch->rx_ring = NULL;
	}
	if (ch->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->tx_ring[i]);
		kfree(ch->tx_ring);
		ch->tx_ring = NULL;
	}
}

static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *ch_ptr)
{
	struct srp_rdma_ch *ch = ch_ptr;
	struct srp_target_port *target = ch->target;

	ch->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		ch->path = *pathrec;
	complete(&ch->done);
}

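/**
 * srp_lookup_path() - look up the IB path record for an RDMA channel
 * @ch: SRP RDMA channel.
 *
 * Issues an SA path record query and waits interruptibly for its
 * completion.
 */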
static int srp_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	ch->path.numb_path = 1;

	init_completion(&ch->done);

	ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
					       target->srp_host->srp_dev->dev,
					       target->srp_host->port,
					       &ch->path,
					       IB_SA_PATH_REC_SERVICE_ID |
					       IB_SA_PATH_REC_DGID	 |
					       IB_SA_PATH_REC_SGID	 |
					       IB_SA_PATH_REC_NUMB_PATH	 |
					       IB_SA_PATH_REC_PKEY,
					       SRP_PATH_REC_TIMEOUT_MS,
					       GFP_KERNEL,
					       srp_path_rec_completion,
					       ch, &ch->path_query);
	if (ch->path_query_id < 0)
		return ch->path_query_id;

	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		return ret;

	if (ch->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return ch->status;
}

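/**
 * srp_send_req() - build and send an SRP_LOGIN_REQ via the IB CM
 * @ch: SRP RDMA channel.
 * @multich: Whether to request a multichannel (SRP_MULTICHAN_MULTI) login.
 */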
static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path	      = &ch->path;
	req->param.alternate_path     = NULL;
	req->param.service_id	      = target->service_id;
	req->param.qp_num	      = ch->qp->qp_num;
	req->param.qp_type	      = ch->qp->qp_type;
	req->param.private_data	      = &req->priv;
	req->param.private_data_len   = sizeof req->priv;
	req->param.flow_control	      = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn      &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count                = target->tl_retry_count;
	req->param.rnr_retry_count	      = 7;
	req->param.max_cm_retries	      = 15;

	req->priv.opcode	= SRP_LOGIN_REQ;
	req->priv.tag		= 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	req->priv.req_flags	= (multich ? SRP_MULTICHAN_MULTI :
				   SRP_MULTICHAN_SINGLE);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(ch->cm_id, &req->param);

	kfree(req);

	return status;
}

static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(srp_remove_wq, &target->remove_work);

	return changed;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	/* XXX should send SRP_I_LOGOUT request */

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ch->connected = false;
		if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "Sending CM DREQ failed\n");
		}
	}
}

static void srp_free_req_data(struct srp_target_port *target,
			      struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct srp_request *req;
	int i;

	if (!ch->req_ring)
		return;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		if (dev->use_fast_reg) {
			kfree(req->fr_list);
		} else {
			kfree(req->fmr_list);
			kfree(req->map_page);
		}
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}

	kfree(ch->req_ring);
	ch->req_ring = NULL;
}

static int srp_alloc_req_data(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *srp_dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	struct srp_request *req;
	void *mr_list;
	dma_addr_t dma_addr;
	int i, ret = -ENOMEM;

	ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
			       GFP_KERNEL);
	if (!ch->req_ring)
		goto out;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		mr_list = kmalloc(target->mr_per_cmd * sizeof(void *),
				  GFP_KERNEL);
		if (!mr_list)
			goto out;
		if (srp_dev->use_fast_reg) {
			req->fr_list = mr_list;
		} else {
			req->fmr_list = mr_list;
			req->map_page = kmalloc(srp_dev->max_pages_per_mr *
						sizeof(void *), GFP_KERNEL);
			if (!req->map_page)
				goto out;
		}
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->indirect_desc)
			goto out;

		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr))
			goto out;

		req->indirect_dma_addr = dma_addr;
	}
	ret = 0;

out:
	return ret;
}

/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template and that did not exist
 * before invocation of this function will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	struct device_attribute **attr;

	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
		device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_rport_get(target->rport);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_stop_rport_timers(target->rport);
	srp_disconnect_target(target);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
	}
	cancel_work_sync(&target->tl_err_work);
	srp_rport_put(target->rport);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_req_data(target, ch);
	}
	kfree(target->ch);
	target->ch = NULL;

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_host_put(target->scsi_host);
}

static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_remove_target(target);
}

static void srp_rport_delete(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
}

/**
 * srp_connected_ch() - number of connected channels
 * @target: SRP target port.
 */
static int srp_connected_ch(struct srp_target_port *target)
{
	int i, c = 0;

	for (i = 0; i < target->ch_count; i++)
		c += target->ch[i].connected;

	return c;
}

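/**
 * srp_connect_ch() - establish a connection for an RDMA channel
 * @ch: SRP RDMA channel.
 * @multich: Whether this channel belongs to a multichannel login.
 *
 * Performs a path lookup and repeats the login request as long as the
 * target answers with a port or LID redirect.
 */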
static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	int ret;

	WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);

	ret = srp_lookup_path(ch);
	if (ret)
		goto out;

	while (1) {
		init_completion(&ch->done);
		ret = srp_send_req(ch, multich);
		if (ret)
			goto out;
		ret = wait_for_completion_interruptible(&ch->done);
		if (ret < 0)
			goto out;

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		ret = ch->status;
		switch (ret) {
		case 0:
			ch->connected = true;
			goto out;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(ch);
			if (ret)
				goto out;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "giving up on stale connection\n");
			ret = -ECONNRESET;
			goto out;

		default:
			goto out;
		}
	}

out:
	return ret <= 0 ? ret : -ENODEV;
}

static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc)
{
	srp_handle_qp_err(cq, wc, "INV RKEY");
}

static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
		u32 rkey)
{
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr = {
		.opcode		    = IB_WR_LOCAL_INV,
		.next		    = NULL,
		.num_sge	    = 0,
		.send_flags	    = 0,
		.ex.invalidate_rkey = rkey,
	};

	wr.wr_cqe = &req->reg_cqe;
	req->reg_cqe.done = srp_inv_rkey_err_done;
	return ib_post_send(ch->qp, &wr, &bad_wr);
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_rdma_ch *ch,
			   struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	int i, res;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (dev->use_fast_reg) {
		struct srp_fr_desc **pfr;

		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
			res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey);
			if (res < 0) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "Queueing INV WR for rkey %#x failed (%d)\n",
					     (*pfr)->mr->rkey, res);
				queue_work(system_long_wq,
					   &target->tl_err_work);
			}
		}
		if (req->nmdesc)
			srp_fr_pool_put(ch->fr_pool, req->fr_list,
					req->nmdesc);
	} else if (dev->use_fmr) {
		struct ib_pool_fmr **pfmr;

		for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
			ib_fmr_pool_unmap(*pfmr);
	}

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}

/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @ch: SRP RDMA channel.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
				       struct srp_request *req,
				       struct scsi_device *sdev,
				       struct scsi_cmnd *scmnd)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	if (req->scmnd &&
	    (!sdev || req->scmnd->device == sdev) &&
	    (!scmnd || req->scmnd == scmnd)) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	} else {
		scmnd = NULL;
	}
	spin_unlock_irqrestore(&ch->lock, flags);

	return scmnd;
}

/**
 * srp_free_req() - Unmap data and adjust ch->req_lim.
 * @ch: SRP RDMA channel.
 * @req: Request to be freed.
 * @scmnd: SCSI command associated with @req.
 * @req_lim_delta: Amount to be added to @target->req_lim.
 */
static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
			 struct scsi_cmnd *scmnd, s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(scmnd, ch, req);

	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_lim_delta;
	spin_unlock_irqrestore(&ch->lock, flags);
}

static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
			   struct scsi_device *sdev, int result)
{
	struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);

	if (scmnd) {
		srp_free_req(ch, req, scmnd, 0);
		scmnd->result = result;
		scmnd->scsi_done(scmnd);
	}
}

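/**
 * srp_terminate_io() - fail all outstanding requests of a target port
 * @rport: SRP transport port.
 *
 * Finishes every request on every channel with DID_TRANSPORT_FAILFAST.
 */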
Bart Van Asscheed9b2262013-10-26 14:34:27 +02001148static void srp_terminate_io(struct srp_rport *rport)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001149{
Bart Van Asscheed9b2262013-10-26 14:34:27 +02001150 struct srp_target_port *target = rport->lld_data;
Bart Van Assched92c0da2014-10-06 17:14:36 +02001151 struct srp_rdma_ch *ch;
Bart Van Asscheb3fe6282014-03-14 13:54:11 +01001152 struct Scsi_Host *shost = target->scsi_host;
1153 struct scsi_device *sdev;
Bart Van Assched92c0da2014-10-06 17:14:36 +02001154 int i, j;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001155
Bart Van Asscheb3fe6282014-03-14 13:54:11 +01001156 /*
1157 * Invoking srp_terminate_io() while srp_queuecommand() is running
1158 * is not safe. Hence the warning statement below.
1159 */
1160 shost_for_each_device(sdev, shost)
1161 WARN_ON_ONCE(sdev->request_queue->request_fn_active);
1162
Bart Van Assched92c0da2014-10-06 17:14:36 +02001163 for (i = 0; i < target->ch_count; i++) {
1164 ch = &target->ch[i];
Bart Van Assche509c07b2014-10-30 14:48:30 +01001165
Bart Van Assched92c0da2014-10-06 17:14:36 +02001166 for (j = 0; j < target->req_ring_size; ++j) {
1167 struct srp_request *req = &ch->req_ring[j];
1168
1169 srp_finish_req(ch, req, NULL,
1170 DID_TRANSPORT_FAILFAST << 16);
1171 }
Bart Van Asscheed9b2262013-10-26 14:34:27 +02001172 }
1173}
1174
1175/*
1176 * It is up to the caller to ensure that srp_rport_reconnect() calls are
1177 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
1178 * srp_reset_device() or srp_reset_host() calls will occur while this function
1179 * is in progress. One way to realize that is not to call this function
1180 * directly but to call srp_reconnect_rport() instead since that last function
1181 * serializes calls of this function via rport->mutex and also blocks
1182 * srp_queuecommand() calls before invoking this function.
1183 */
static int srp_rport_reconnect(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	int i, j, ret = 0;
	bool multich = false;

	srp_disconnect_target(target);

	if (target->state == SRP_TARGET_SCANNING)
		return -ENODEV;

	/*
	 * Now get a new local CM ID so that we avoid confusing the target in
	 * case things are really fouled up. Doing so also ensures that all CM
	 * callbacks will have finished before a new QP is allocated.
	 */
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ret += srp_new_cm_id(ch);
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL, DID_RESET << 16);
		}
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		/*
		 * Whether or not creating a new CM ID succeeded, create a new
		 * QP. This guarantees that all completion callback function
		 * invocations have finished before request resetting starts.
		 */
		ret += srp_create_ch_ib(ch);

		INIT_LIST_HEAD(&ch->free_tx);
		for (j = 0; j < target->queue_size; ++j)
			list_add(&ch->tx_ring[j]->list, &ch->free_tx);
	}

	target->qp_in_error = false;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		if (ret)
			break;
		ret = srp_connect_ch(ch, multich);
		multich = true;
	}

	if (ret == 0)
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "reconnect succeeded\n");

	return ret;
}

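/*
 * Rough sketch, for illustration only, of how srp_reconnect_rport() (in
 * drivers/scsi/scsi_transport_srp.c; see that file for the authoritative
 * version) serializes calls of the function above:
 *
 *	mutex_lock(&rport->mutex);
 *	scsi_target_block(...);               (blocks srp_queuecommand())
 *	res = rport->state != SRP_RPORT_LOST ?
 *		i->f->reconnect(rport) : -ENODEV; (-> srp_rport_reconnect())
 *	scsi_target_unblock(...);
 *	mutex_unlock(&rport->mutex);
 */
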
static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
			 unsigned int dma_len, u32 rkey)
{
	struct srp_direct_buf *desc = state->desc;

	WARN_ON_ONCE(!dma_len);

	desc->va = cpu_to_be64(dma_addr);
	desc->key = cpu_to_be32(rkey);
	desc->len = cpu_to_be32(dma_len);

	state->total_len += dma_len;
	state->desc++;
	state->ndesc++;
}

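/*
 * Example, with made-up values: mapping an 8 KiB chunk at bus address
 * 0xfffa0000 with rkey 0x1234 fills in the current descriptor as
 *
 *	desc->va  = cpu_to_be64(0xfffa0000);
 *	desc->key = cpu_to_be32(0x1234);
 *	desc->len = cpu_to_be32(0x2000);
 *
 * and advances state->desc so that the next chunk lands in the following
 * struct srp_direct_buf slot of the indirect descriptor table.
 */
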
static int srp_map_finish_fmr(struct srp_map_state *state,
			      struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_pd *pd = target->pd;
	struct ib_pool_fmr *fmr;
	u64 io_addr = 0;

	if (state->fmr.next >= state->fmr.end)
		return -ENOMEM;

	WARN_ON_ONCE(!dev->use_fmr);

	if (state->npages == 0)
		return 0;

	if (state->npages == 1 && (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
		srp_map_desc(state, state->base_dma_addr, state->dma_len,
			     pd->unsafe_global_rkey);
		goto reset_state;
	}

	fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
				   state->npages, io_addr);
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

	*state->fmr.next++ = fmr;
	state->nmdesc++;

	srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
		     state->dma_len, fmr->fmr->rkey);

reset_state:
	state->npages = 0;
	state->dma_len = 0;

	return 0;
}

static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
{
	srp_handle_qp_err(cq, wc, "FAST REG");
}

/*
 * Map up to sg_nents elements of state->sg where *sg_offset_p is the offset
 * at which to start in the first element. If sg_offset_p != NULL then
 * *sg_offset_p is updated to the offset in state->sg[retval] of the first
 * byte that has not yet been mapped.
 */
static int srp_map_finish_fr(struct srp_map_state *state,
			     struct srp_request *req,
			     struct srp_rdma_ch *ch, int sg_nents,
			     unsigned int *sg_offset_p)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_pd *pd = target->pd;
	struct ib_send_wr *bad_wr;
	struct ib_reg_wr wr;
	struct srp_fr_desc *desc;
	u32 rkey;
	int n, err;

	if (state->fr.next >= state->fr.end)
		return -ENOMEM;

	WARN_ON_ONCE(!dev->use_fast_reg);

	if (sg_nents == 1 && (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
		unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;

		srp_map_desc(state, sg_dma_address(state->sg) + sg_offset,
			     sg_dma_len(state->sg) - sg_offset,
			     pd->unsafe_global_rkey);
		if (sg_offset_p)
			*sg_offset_p = 0;
		return 1;
	}

	desc = srp_fr_pool_get(ch->fr_pool);
	if (!desc)
		return -ENOMEM;

	rkey = ib_inc_rkey(desc->mr->rkey);
	ib_update_fast_reg_key(desc->mr, rkey);

	n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, sg_offset_p,
			 dev->mr_page_size);
	if (unlikely(n < 0)) {
		srp_fr_pool_put(ch->fr_pool, &desc, 1);
		pr_debug("%s: ib_map_mr_sg(%d, %d) returned %d.\n",
			 dev_name(&req->scmnd->device->sdev_gendev), sg_nents,
			 sg_offset_p ? *sg_offset_p : -1, n);
		return n;
	}

	WARN_ON_ONCE(desc->mr->length == 0);

	req->reg_cqe.done = srp_reg_mr_err_done;

	wr.wr.next = NULL;
	wr.wr.opcode = IB_WR_REG_MR;
	wr.wr.wr_cqe = &req->reg_cqe;
	wr.wr.num_sge = 0;
	wr.wr.send_flags = 0;
	wr.mr = desc->mr;
	wr.key = desc->mr->rkey;
	wr.access = (IB_ACCESS_LOCAL_WRITE |
		     IB_ACCESS_REMOTE_READ |
		     IB_ACCESS_REMOTE_WRITE);

	*state->fr.next++ = desc;
	state->nmdesc++;

	srp_map_desc(state, desc->mr->iova,
		     desc->mr->length, desc->mr->rkey);

	err = ib_post_send(ch->qp, &wr.wr, &bad_wr);
	if (unlikely(err)) {
		WARN_ON_ONCE(err == -ENOMEM);
		return err;
	}

	return n;
}

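/*
 * Illustration of the contract above, with invented numbers: with
 * sg_nents == 3, if ib_map_mr_sg() manages to register elements 0 and 1
 * plus the first 512 bytes of element 2 before running out of MR pages,
 * it returns 2 and sets *sg_offset_p = 512. The caller (srp_map_sg_fr())
 * then advances state->sg by two entries and calls srp_map_finish_fr()
 * again, which resumes 512 bytes into the element that is now first.
 */
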
static int srp_map_sg_entry(struct srp_map_state *state,
			    struct srp_rdma_ch *ch,
			    struct scatterlist *sg)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
	unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
	unsigned int len = 0;
	int ret;

	WARN_ON_ONCE(!dma_len);

	while (dma_len) {
		unsigned offset = dma_addr & ~dev->mr_page_mask;

		if (state->npages == dev->max_pages_per_mr ||
		    (state->npages > 0 && offset != 0)) {
			ret = srp_map_finish_fmr(state, ch);
			if (ret)
				return ret;
		}

		len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);

		if (!state->npages)
			state->base_dma_addr = dma_addr;
		state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
		state->dma_len += len;
		dma_addr += len;
		dma_len -= len;
	}

	/*
	 * If the end of the MR is not on a page boundary then we need to
	 * close it out and start a new one -- we can only merge at page
	 * boundaries.
	 */
	ret = 0;
	if ((dma_addr & ~dev->mr_page_mask) != 0)
		ret = srp_map_finish_fmr(state, ch);
	return ret;
}

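/*
 * Worked example with invented numbers and a 4 KiB mr_page_size: an sg
 * element with dma_addr == 0x10100 and dma_len == 0x3000 is split into
 * chunks of 0xf00, 0x1000, 0x1000 and 0x100 bytes, recording the pages
 * 0x10000, 0x11000, 0x12000 and 0x13000. The element ends at 0x13100,
 * which is not page aligned, so the final test above closes the MR:
 * a later element could not legally be merged onto it.
 */
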
static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
			  struct srp_request *req, struct scatterlist *scat,
			  int count)
{
	struct scatterlist *sg;
	int i, ret;

	state->pages = req->map_page;
	state->fmr.next = req->fmr_list;
	state->fmr.end = req->fmr_list + ch->target->mr_per_cmd;

	for_each_sg(scat, sg, count, i) {
		ret = srp_map_sg_entry(state, ch, sg);
		if (ret)
			return ret;
	}

	ret = srp_map_finish_fmr(state, ch);
	if (ret)
		return ret;

	return 0;
}

static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
			 struct srp_request *req, struct scatterlist *scat,
			 int count)
{
	unsigned int sg_offset = 0;

	state->fr.next = req->fr_list;
	state->fr.end = req->fr_list + ch->target->mr_per_cmd;
	state->sg = scat;

	if (count == 0)
		return 0;

	while (count) {
		int i, n;

		n = srp_map_finish_fr(state, req, ch, count, &sg_offset);
		if (unlikely(n < 0))
			return n;

		count -= n;
		for (i = 0; i < n; i++)
			state->sg = sg_next(state->sg);
	}

	return 0;
}

static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
			  struct srp_request *req, struct scatterlist *scat,
			  int count)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct scatterlist *sg;
	int i;

	for_each_sg(scat, sg, count, i) {
		srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
			     ib_sg_dma_len(dev->dev, sg),
			     target->pd->unsafe_global_rkey);
	}

	return 0;
}

/*
 * Register the indirect data buffer descriptor with the HCA.
 *
 * Note: since the indirect data buffer descriptor has been allocated with
 * kmalloc() it is guaranteed that this buffer is a physically contiguous
 * memory buffer.
 */
static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
		       void **next_mr, void **end_mr, u32 idb_len,
		       __be32 *idb_rkey)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct srp_map_state state;
	struct srp_direct_buf idb_desc;
	u64 idb_pages[1];
	struct scatterlist idb_sg[1];
	int ret;

	memset(&state, 0, sizeof(state));
	memset(&idb_desc, 0, sizeof(idb_desc));
	state.gen.next = next_mr;
	state.gen.end = end_mr;
	state.desc = &idb_desc;
	state.base_dma_addr = req->indirect_dma_addr;
	state.dma_len = idb_len;

	if (dev->use_fast_reg) {
		state.sg = idb_sg;
		sg_init_one(idb_sg, req->indirect_desc, idb_len);
		idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		idb_sg->dma_length = idb_sg->length; /* hack^2 */
#endif
		ret = srp_map_finish_fr(&state, req, ch, 1, NULL);
		if (ret < 0)
			return ret;
		WARN_ON_ONCE(ret < 1);
	} else if (dev->use_fmr) {
		state.pages = idb_pages;
		state.pages[0] = (req->indirect_dma_addr &
				  dev->mr_page_mask);
		state.npages = 1;
		ret = srp_map_finish_fmr(&state, ch);
		if (ret < 0)
			return ret;
	} else {
		return -EINVAL;
	}

	*idb_rkey = idb_desc.key;

	return 0;
}

#if defined(DYNAMIC_DEBUG)
static void srp_check_mapping(struct srp_map_state *state,
			      struct srp_rdma_ch *ch, struct srp_request *req,
			      struct scatterlist *scat, int count)
{
	struct srp_device *dev = ch->target->srp_host->srp_dev;
	struct srp_fr_desc **pfr;
	u64 desc_len = 0, mr_len = 0;
	int i;

	for (i = 0; i < state->ndesc; i++)
		desc_len += be32_to_cpu(req->indirect_desc[i].len);
	if (dev->use_fast_reg)
		for (i = 0, pfr = req->fr_list; i < state->nmdesc; i++, pfr++)
			mr_len += (*pfr)->mr->length;
	else if (dev->use_fmr)
		for (i = 0; i < state->nmdesc; i++)
			mr_len += be32_to_cpu(req->indirect_desc[i].len);
	if (desc_len != scsi_bufflen(req->scmnd) ||
	    mr_len > scsi_bufflen(req->scmnd))
		pr_err("Inconsistent: scsi len %d <> desc len %lld <> mr len %lld; ndesc %d; nmdesc = %d\n",
		       scsi_bufflen(req->scmnd), desc_len, mr_len,
		       state->ndesc, state->nmdesc);
}
#endif

/**
 * srp_map_data() - map SCSI data buffer onto an SRP request
 * @scmnd: SCSI command to map
 * @ch: SRP RDMA channel
 * @req: SRP request
 *
 * Returns the length in bytes of the SRP_CMD IU or a negative value if
 * mapping failed.
 */
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
			struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct ib_pd *pd = target->pd;
	struct scatterlist *scat;
	struct srp_cmd *cmd = req->cmd->buf;
	int len, nents, count, ret;
	struct srp_device *dev;
	struct ib_device *ibdev;
	struct srp_map_state state;
	struct srp_indirect_buf *indirect_hdr;
	u32 idb_len, table_len;
	__be32 idb_rkey;
	u8 fmt;

	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
		return sizeof (struct srp_cmd);

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled data direction %d\n",
			     scmnd->sc_data_direction);
		return -EINVAL;
	}

	nents = scsi_sg_count(scmnd);
	scat = scsi_sglist(scmnd);

	dev = target->srp_host->srp_dev;
	ibdev = dev->dev;

	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
	if (unlikely(count == 0))
		return -EIO;

	fmt = SRP_DATA_DESC_DIRECT;
	len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);

	if (count == 1 && (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
		/*
		 * The midlayer only generated a single gather/scatter
		 * entry, or DMA mapping coalesced everything to a
		 * single entry. So a direct descriptor along with
		 * the DMA MR suffices.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
		buf->key = cpu_to_be32(pd->unsafe_global_rkey);
		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));

		req->nmdesc = 0;
		goto map_complete;
	}

	/*
	 * We have more than one scatter/gather entry, so build our indirect
	 * descriptor table, trying to merge as many entries as we can.
	 */
	indirect_hdr = (void *) cmd->add_data;

	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
				   target->indirect_size, DMA_TO_DEVICE);

	memset(&state, 0, sizeof(state));
	state.desc = req->indirect_desc;
	if (dev->use_fast_reg)
		ret = srp_map_sg_fr(&state, ch, req, scat, count);
	else if (dev->use_fmr)
		ret = srp_map_sg_fmr(&state, ch, req, scat, count);
	else
		ret = srp_map_sg_dma(&state, ch, req, scat, count);
	req->nmdesc = state.nmdesc;
	if (ret < 0)
		goto unmap;

#if defined(DYNAMIC_DEBUG)
	{
		DEFINE_DYNAMIC_DEBUG_METADATA(ddm,
			"Memory mapping consistency check");
		if (unlikely(ddm.flags & _DPRINTK_FLAGS_PRINT))
			srp_check_mapping(&state, ch, req, scat, count);
	}
#endif

	/* We've mapped the request, now pull as much of the indirect
	 * descriptor table as we can into the command buffer. If this
	 * target is not using an external indirect table, we are
	 * guaranteed to fit into the command, as the SCSI layer won't
	 * give us more S/G entries than we allow.
	 */
	if (state.ndesc == 1) {
		/*
		 * Memory registration collapsed the sg-list into one entry,
		 * so use a direct descriptor.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		*buf = req->indirect_desc[0];
		goto map_complete;
	}

	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
		     !target->allow_ext_sg)) {
		shost_printk(KERN_ERR, target->scsi_host,
			     "Could not fit S/G list into SRP_CMD\n");
		ret = -EIO;
		goto unmap;
	}

	count = min(state.ndesc, target->cmd_sg_cnt);
	table_len = state.ndesc * sizeof (struct srp_direct_buf);
	idb_len = sizeof(struct srp_indirect_buf) + table_len;

	fmt = SRP_DATA_DESC_INDIRECT;
	len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
	len += count * sizeof (struct srp_direct_buf);

	memcpy(indirect_hdr->desc_list, req->indirect_desc,
	       count * sizeof (struct srp_direct_buf));

	if (!(pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
		ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
				  idb_len, &idb_rkey);
		if (ret < 0)
			goto unmap;
		req->nmdesc++;
	} else {
		idb_rkey = cpu_to_be32(pd->unsafe_global_rkey);
	}

	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
	indirect_hdr->table_desc.key = idb_rkey;
	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
	indirect_hdr->len = cpu_to_be32(state.total_len);

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->data_out_desc_cnt = count;
	else
		cmd->data_in_desc_cnt = count;

	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
				      DMA_TO_DEVICE);

map_complete:
	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;

unmap:
	srp_unmap_data(scmnd, ch, req);
	if (ret == -ENOMEM && req->nmdesc >= target->mr_pool_size)
		ret = -E2BIG;
	return ret;
}

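/*
 * Worked example for the indirect-format length computation above, assuming
 * the usual srp.h layouts in which struct srp_cmd is 48 bytes, struct
 * srp_direct_buf is 16 bytes and struct srp_indirect_buf is 20 bytes: with
 * state.ndesc == 8 descriptors, cmd_sg_cnt == 3 and allow_ext_sg set,
 * count = 3, table_len = 8 * 16 = 128, idb_len = 20 + 128 = 148, and the
 * SRP_CMD IU length returned is len = 48 + 20 + 3 * 16 = 116 bytes; the
 * remaining five descriptors are fetched by the target via RDMA from the
 * registered indirect table.
 */
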
/*
 * Return an IU and possible credit to the free pool
 */
static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
			  enum srp_iu_type iu_type)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	list_add(&iu->list, &ch->free_tx);
	if (iu_type != SRP_IU_RSP)
		++ch->req_lim;
	spin_unlock_irqrestore(&ch->lock, flags);
}

/*
 * Must be called with ch->lock held to protect req_lim and free_tx.
 * If IU is not sent, it must be returned using srp_put_tx_iu().
 *
 * Note:
 * An upper limit for the number of allocated information units for each
 * request type is:
 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
 *   more than Scsi_Host.can_queue requests.
 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
 *   one unanswered SRP request to an initiator.
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
				      enum srp_iu_type iu_type)
{
	struct srp_target_port *target = ch->target;
	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
	struct srp_iu *iu;

	ib_process_cq_direct(ch->send_cq, -1);

	if (list_empty(&ch->free_tx))
		return NULL;

	/* Initiator responses to target requests do not consume credits */
	if (iu_type != SRP_IU_RSP) {
		if (ch->req_lim <= rsv) {
			++target->zero_req_lim;
			return NULL;
		}

		--ch->req_lim;
	}

	iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
	list_del(&iu->list);
	return iu;
}

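/*
 * Example of the reservation above, assuming SRP_TSK_MGMT_SQ_SIZE == 1:
 * with ch->req_lim == 1, a SRP_IU_CMD allocation sees rsv == 1, so
 * req_lim <= rsv holds, zero_req_lim is incremented and NULL is returned;
 * a SRP_IU_TSK_MGMT allocation sees rsv == 0 and may still consume the
 * last credit, which keeps task management possible when the send queue
 * is saturated with SCSI commands.
 */
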
static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
	struct srp_rdma_ch *ch = cq->cq_context;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		srp_handle_qp_err(cq, wc, "SEND");
		return;
	}

	list_add(&iu->list, &ch->free_tx);
}

static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
{
	struct srp_target_port *target = ch->target;
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;

	list.addr = iu->dma;
	list.length = len;
	list.lkey = target->lkey;

	iu->cqe.done = srp_send_done;

	wr.next = NULL;
	wr.wr_cqe = &iu->cqe;
	wr.sg_list = &list;
	wr.num_sge = 1;
	wr.opcode = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	return ib_post_send(ch->qp, &wr, &bad_wr);
}

static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
{
	struct srp_target_port *target = ch->target;
	struct ib_recv_wr wr, *bad_wr;
	struct ib_sge list;

	list.addr = iu->dma;
	list.length = iu->size;
	list.lkey = target->lkey;

	iu->cqe.done = srp_recv_done;

	wr.next = NULL;
	wr.wr_cqe = &iu->cqe;
	wr.sg_list = &list;
	wr.num_sge = 1;

	return ib_post_recv(ch->qp, &wr, &bad_wr);
}

static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
{
	struct srp_target_port *target = ch->target;
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		spin_lock_irqsave(&ch->lock, flags);
		ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
		spin_unlock_irqrestore(&ch->lock, flags);

		ch->tsk_mgmt_status = -1;
		if (be32_to_cpu(rsp->resp_data_len) >= 4)
			ch->tsk_mgmt_status = rsp->data[3];
		complete(&ch->tsk_mgmt_done);
	} else {
		scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
		if (scmnd) {
			req = (void *)scmnd->host_scribble;
			scmnd = srp_claim_req(ch, req, NULL, scmnd);
		}
		if (!scmnd) {
			shost_printk(KERN_ERR, target->scsi_host,
				     "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
				     rsp->tag, ch - target->ch, ch->qp->qp_num);

			spin_lock_irqsave(&ch->lock, flags);
			ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
			spin_unlock_irqrestore(&ch->lock, flags);

			return;
		}
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));

		srp_free_req(ch, req, scmnd,
			     be32_to_cpu(rsp->req_lim_delta));

		scmnd->host_scribble = NULL;
		scmnd->scsi_done(scmnd);
	}
}

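/*
 * Example of the residual handling above, with invented numbers: for a
 * 64 KiB READ of which the target returned only 60 KiB, a conforming
 * target sets SRP_RSP_FLAG_DIUNDER with data_in_res_cnt == 4096, and
 * scsi_set_resid() reports those 4 KiB back to the SCSI mid-layer.
 */
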
static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
			       void *rsp, int len)
{
	struct srp_target_port *target = ch->target;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	unsigned long flags;
	struct srp_iu *iu;
	int err;

	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_delta;
	iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
	spin_unlock_irqrestore(&ch->lock, flags);

	if (!iu) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "no IU available to send response\n");
		return 1;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
	memcpy(iu->buf, rsp, len);
	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);

	err = srp_post_send(ch, iu, len);
	if (err) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "unable to post response: %d\n", err);
		srp_put_tx_iu(ch, iu, SRP_IU_RSP);
	}

	return err;
}

static void srp_process_cred_req(struct srp_rdma_ch *ch,
				 struct srp_cred_req *req)
{
	struct srp_cred_rsp rsp = {
		.opcode = SRP_CRED_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
		shost_printk(KERN_ERR, ch->target->scsi_host, PFX
			     "problems processing SRP_CRED_REQ\n");
}

static void srp_process_aer_req(struct srp_rdma_ch *ch,
				struct srp_aer_req *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_aer_rsp rsp = {
		.opcode = SRP_AER_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	shost_printk(KERN_ERR, target->scsi_host, PFX
		     "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));

	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_AER_REQ\n");
}

static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
	struct srp_rdma_ch *ch = cq->cq_context;
	struct srp_target_port *target = ch->target;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	int res;
	u8 opcode;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		srp_handle_qp_err(cq, wc, "RECV");
		return;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
				   DMA_FROM_DEVICE);

	opcode = *(u8 *) iu->buf;

	if (0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "recv completion, opcode 0x%02x\n", opcode);
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
			       iu->buf, wc->byte_len, true);
	}

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(ch, iu->buf);
		break;

	case SRP_CRED_REQ:
		srp_process_cred_req(ch, iu->buf);
		break;

	case SRP_AER_REQ:
		srp_process_aer_req(ch, iu->buf);
		break;

	case SRP_T_LOGOUT:
		/* XXX Handle target logout */
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Got target logout request\n");
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
		break;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
				      DMA_FROM_DEVICE);

	res = srp_post_recv(ch, iu);
	if (res != 0)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Recv failed with error code %d\n", res);
}

/**
 * srp_tl_err_work() - handle a transport layer error
 * @work: Work structure embedded in an SRP target port.
 *
 * Note: This function may get invoked before the rport has been created,
 * hence the target->rport test.
 */
static void srp_tl_err_work(struct work_struct *work)
{
	struct srp_target_port *target;

	target = container_of(work, struct srp_target_port, tl_err_work);
	if (target->rport)
		srp_start_tl_fail_timers(target->rport);
}

static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
			      const char *opname)
{
	struct srp_rdma_ch *ch = cq->cq_context;
	struct srp_target_port *target = ch->target;

	if (ch->connected && !target->qp_in_error) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "failed %s status %s (%d) for CQE %p\n",
			     opname, ib_wc_status_msg(wc->status), wc->status,
			     wc->wr_cqe);
		queue_work(system_long_wq, &target->tl_err_work);
	}
	target->qp_in_error = true;
}

static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(shost);
	struct srp_rport *rport = target->rport;
	struct srp_rdma_ch *ch;
	struct srp_request *req;
	struct srp_iu *iu;
	struct srp_cmd *cmd;
	struct ib_device *dev;
	unsigned long flags;
	u32 tag;
	u16 idx;
	int len, ret;
	const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;

	/*
	 * The SCSI EH thread is the only context from which srp_queuecommand()
	 * can get invoked for blocked devices (SDEV_BLOCK /
	 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
	 * locking the rport mutex if invoked from inside the SCSI EH.
	 */
	if (in_scsi_eh)
		mutex_lock(&rport->mutex);

	scmnd->result = srp_chkready(target->rport);
	if (unlikely(scmnd->result))
		goto err;

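	/*
	 * The block layer's "unique tag" encodes the hardware queue index in
	 * the upper 16 bits and the per-queue tag in the lower 16 bits, so
	 * the two helpers below recover the RDMA channel and the request
	 * slot. Example (made-up value): tag == 0x0002000a selects
	 * ch = &target->ch[2] and req = &ch->req_ring[10].
	 */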
	WARN_ON_ONCE(scmnd->request->tag < 0);
	tag = blk_mq_unique_tag(scmnd->request);
	ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
	idx = blk_mq_unique_tag_to_tag(tag);
	WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
		  dev_name(&shost->shost_gendev), tag, idx,
		  target->req_ring_size);

	spin_lock_irqsave(&ch->lock, flags);
	iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
	spin_unlock_irqrestore(&ch->lock, flags);

	if (!iu)
		goto err;

	req = &ch->req_ring[idx];
	dev = target->srp_host->srp_dev->dev;
	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
				   DMA_TO_DEVICE);

	scmnd->host_scribble = (void *) req;

	cmd = iu->buf;
	memset(cmd, 0, sizeof *cmd);

	cmd->opcode = SRP_CMD;
	int_to_scsilun(scmnd->device->lun, &cmd->lun);
	cmd->tag = tag;
	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);

	req->scmnd = scmnd;
	req->cmd = iu;

	len = srp_map_data(scmnd, ch, req);
	if (len < 0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Failed to map data (%d)\n", len);
		/*
		 * If we ran out of memory descriptors (-ENOMEM) because an
		 * application is queuing many requests with more than
		 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
		 * to reduce queue depth temporarily.
		 */
		scmnd->result = len == -ENOMEM ?
			DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
		goto err_iu;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
				      DMA_TO_DEVICE);

	if (srp_post_send(ch, iu, len)) {
		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
		goto err_unmap;
	}

	ret = 0;

unlock_rport:
	if (in_scsi_eh)
		mutex_unlock(&rport->mutex);

	return ret;

err_unmap:
	srp_unmap_data(scmnd, ch, req);

err_iu:
	srp_put_tx_iu(ch, iu, SRP_IU_CMD);

	/*
	 * Ensure that the loops that iterate over the request ring never
	 * encounter a dangling SCSI command pointer.
	 */
	req->scmnd = NULL;

err:
	if (scmnd->result) {
		scmnd->scsi_done(scmnd);
		ret = 0;
	} else {
		ret = SCSI_MLQUEUE_HOST_BUSY;
	}

	goto unlock_rport;
}

/*
 * Note: the resources allocated in this function are freed in
 * srp_free_ch_ib().
 */
static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int i;

	ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
			      GFP_KERNEL);
	if (!ch->rx_ring)
		goto err_no_ring;
	ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
			      GFP_KERNEL);
	if (!ch->tx_ring)
		goto err_no_ring;

	for (i = 0; i < target->queue_size; ++i) {
		ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
					      ch->max_ti_iu_len,
					      GFP_KERNEL, DMA_FROM_DEVICE);
		if (!ch->rx_ring[i])
			goto err;
	}

	for (i = 0; i < target->queue_size; ++i) {
		ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
					      target->max_iu_len,
					      GFP_KERNEL, DMA_TO_DEVICE);
		if (!ch->tx_ring[i])
			goto err;

		list_add(&ch->tx_ring[i]->list, &ch->free_tx);
	}

	return 0;

err:
	for (i = 0; i < target->queue_size; ++i) {
		srp_free_iu(target->srp_host, ch->rx_ring[i]);
		srp_free_iu(target->srp_host, ch->tx_ring[i]);
	}

err_no_ring:
	kfree(ch->tx_ring);
	ch->tx_ring = NULL;
	kfree(ch->rx_ring);
	ch->rx_ring = NULL;

	return -ENOMEM;
}

static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
{
	uint64_t T_tr_ns, max_compl_time_ms;
	uint32_t rq_tmo_jiffies;

	/*
	 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
	 * table 91), both the QP timeout and the retry count have to be set
	 * for RC QP's during the RTR to RTS transition.
	 */
	WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
		     (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));

	/*
	 * Set target->rq_tmo_jiffies to one second more than the largest time
	 * it can take before an error completion is generated. See also
	 * C9-140..142 in the IBTA spec for more information about how to
	 * convert the QP Local ACK Timeout value to nanoseconds.
	 */
	T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
	max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
	do_div(max_compl_time_ms, NSEC_PER_MSEC);
	rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);

	return rq_tmo_jiffies;
}

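/*
 * Worked example: with qp_attr->timeout == 14 and qp_attr->retry_cnt == 7,
 * T_tr_ns = 4096 * 2^14 ns ~= 67.1 ms, the worst-case completion time is
 * 7 * 4 * 67.1 ms ~= 1879 ms, and the reconnect timeout becomes
 * msecs_to_jiffies(1879 + 1000) = msecs_to_jiffies(2879).
 */
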
static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
			       const struct srp_login_rsp *lrsp,
			       struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_qp_attr *qp_attr = NULL;
	int attr_mask = 0;
	int ret;
	int i;

	if (lrsp->opcode == SRP_LOGIN_RSP) {
		ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
		ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);

		/*
		 * Reserve credits for task management so we don't
		 * bounce requests back to the SCSI mid-layer.
		 */
		target->scsi_host->can_queue
			= min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
			      target->scsi_host->can_queue);
		target->scsi_host->cmd_per_lun
			= min_t(int, target->scsi_host->can_queue,
				target->scsi_host->cmd_per_lun);
	} else {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
		ret = -ECONNRESET;
		goto error;
	}

	if (!ch->rx_ring) {
		ret = srp_alloc_iu_bufs(ch);
		if (ret)
			goto error;
	}

	ret = -ENOMEM;
	qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
	if (!qp_attr)
		goto error;

	qp_attr->qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	if (ret)
		goto error_free;

	ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
	if (ret)
		goto error_free;

	for (i = 0; i < target->queue_size; i++) {
		struct srp_iu *iu = ch->rx_ring[i];

		ret = srp_post_recv(ch, iu);
		if (ret)
			goto error_free;
	}

	qp_attr->qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	if (ret)
		goto error_free;

	target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);

	ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
	if (ret)
		goto error_free;

	ret = ib_send_cm_rtu(cm_id, NULL, 0);

error_free:
	kfree(qp_attr);

error:
	ch->status = ret;
}

Roland Dreieraef9ec32005-11-02 14:07:13 -08002357static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2358 struct ib_cm_event *event,
Bart Van Assche509c07b2014-10-30 14:48:30 +01002359 struct srp_rdma_ch *ch)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002360{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002361 struct srp_target_port *target = ch->target;
David Dillow7aa54bd2008-01-07 18:23:41 -05002362 struct Scsi_Host *shost = target->scsi_host;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002363 struct ib_class_port_info *cpi;
2364 int opcode;
2365
2366 switch (event->param.rej_rcvd.reason) {
2367 case IB_CM_REJ_PORT_CM_REDIRECT:
2368 cpi = event->param.rej_rcvd.ari;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002369 ch->path.dlid = cpi->redirect_lid;
2370 ch->path.pkey = cpi->redirect_pkey;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002371 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002372 memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002373
Bart Van Assche509c07b2014-10-30 14:48:30 +01002374 ch->status = ch->path.dlid ?
Roland Dreieraef9ec32005-11-02 14:07:13 -08002375 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2376 break;
2377
2378 case IB_CM_REJ_PORT_REDIRECT:
Roland Dreier5d7cbfd2007-08-03 10:45:18 -07002379 if (srp_target_is_topspin(target)) {
Roland Dreieraef9ec32005-11-02 14:07:13 -08002380 /*
2381 * Topspin/Cisco SRP gateways incorrectly send
2382 * reject reason code 25 when they mean 24
2383 * (port redirect).
2384 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01002385 memcpy(ch->path.dgid.raw,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002386 event->param.rej_rcvd.ari, 16);
2387
David Dillow7aa54bd2008-01-07 18:23:41 -05002388 shost_printk(KERN_DEBUG, shost,
2389 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
Bart Van Assche509c07b2014-10-30 14:48:30 +01002390 be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2391 be64_to_cpu(ch->path.dgid.global.interface_id));
Roland Dreieraef9ec32005-11-02 14:07:13 -08002392
Bart Van Assche509c07b2014-10-30 14:48:30 +01002393 ch->status = SRP_PORT_REDIRECT;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002394 } else {
David Dillow7aa54bd2008-01-07 18:23:41 -05002395 shost_printk(KERN_WARNING, shost,
2396 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002397 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002398 }
2399 break;
2400
2401 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
David Dillow7aa54bd2008-01-07 18:23:41 -05002402 shost_printk(KERN_WARNING, shost,
2403 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002404 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002405 break;
2406
2407 case IB_CM_REJ_CONSUMER_DEFINED:
2408 opcode = *(u8 *) event->private_data;
2409 if (opcode == SRP_LOGIN_REJ) {
2410 struct srp_login_rej *rej = event->private_data;
2411 u32 reason = be32_to_cpu(rej->reason);
2412
2413 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
David Dillow7aa54bd2008-01-07 18:23:41 -05002414 shost_printk(KERN_WARNING, shost,
2415 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002416 else
Bart Van Asschee7ffde02014-03-14 13:52:21 +01002417 shost_printk(KERN_WARNING, shost, PFX
2418 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
Bart Van Assche747fe002014-10-30 14:48:05 +01002419 target->sgid.raw,
2420 target->orig_dgid.raw, reason);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002421 } else
David Dillow7aa54bd2008-01-07 18:23:41 -05002422 shost_printk(KERN_WARNING, shost,
2423 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2424 " opcode 0x%02x\n", opcode);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002425 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002426 break;
2427
David Dillow9fe4bcf2008-01-08 17:08:52 -05002428 case IB_CM_REJ_STALE_CONN:
2429 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002430 ch->status = SRP_STALE_CONN;
David Dillow9fe4bcf2008-01-08 17:08:52 -05002431 break;
2432
Roland Dreieraef9ec32005-11-02 14:07:13 -08002433 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05002434 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2435 event->param.rej_rcvd.reason);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002436 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002437 }
2438}
2439
2440static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2441{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002442 struct srp_rdma_ch *ch = cm_id->context;
2443 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002444 int comp = 0;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002445
2446 switch (event->event) {
2447 case IB_CM_REQ_ERROR:
David Dillow7aa54bd2008-01-07 18:23:41 -05002448 shost_printk(KERN_DEBUG, target->scsi_host,
2449 PFX "Sending CM REQ failed\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002450 comp = 1;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002451 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002452 break;
2453
2454 case IB_CM_REP_RECEIVED:
2455 comp = 1;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002456 srp_cm_rep_handler(cm_id, event->private_data, ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002457 break;
2458
2459 case IB_CM_REJ_RECEIVED:
David Dillow7aa54bd2008-01-07 18:23:41 -05002460 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002461 comp = 1;
2462
Bart Van Assche509c07b2014-10-30 14:48:30 +01002463 srp_cm_rej_handler(cm_id, event, ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002464 break;
2465
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07002466 case IB_CM_DREQ_RECEIVED:
David Dillow7aa54bd2008-01-07 18:23:41 -05002467 shost_printk(KERN_WARNING, target->scsi_host,
2468 PFX "DREQ received - connection closed\n");
Bart Van Asschec014c8c2015-05-18 13:23:57 +02002469 ch->connected = false;
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07002470 if (ib_send_cm_drep(cm_id, NULL, 0))
David Dillow7aa54bd2008-01-07 18:23:41 -05002471 shost_printk(KERN_ERR, target->scsi_host,
2472 PFX "Sending CM DREP failed\n");
Bart Van Asschec1120f82013-10-26 14:35:08 +02002473 queue_work(system_long_wq, &target->tl_err_work);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002474 break;
2475
2476 case IB_CM_TIMEWAIT_EXIT:
David Dillow7aa54bd2008-01-07 18:23:41 -05002477 shost_printk(KERN_ERR, target->scsi_host,
2478 PFX "connection closed\n");
Bart Van Asscheac72d762014-03-14 13:53:40 +01002479 comp = 1;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002480
Bart Van Assche509c07b2014-10-30 14:48:30 +01002481 ch->status = 0;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002482 break;
2483
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07002484 case IB_CM_MRA_RECEIVED:
2485 case IB_CM_DREQ_ERROR:
2486 case IB_CM_DREP_RECEIVED:
2487 break;
2488
Roland Dreieraef9ec32005-11-02 14:07:13 -08002489 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05002490 shost_printk(KERN_WARNING, target->scsi_host,
2491 PFX "Unhandled CM event %d\n", event->event);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002492 break;
2493 }
2494
2495 if (comp)
Bart Van Assche509c07b2014-10-30 14:48:30 +01002496 complete(&ch->done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002497
Roland Dreieraef9ec32005-11-02 14:07:13 -08002498 return 0;
2499}
2500
Jack Wang71444b92013-11-07 11:37:37 +01002501/**
Jack Wang71444b92013-11-07 11:37:37 +01002502 * srp_change_queue_depth - set the queue depth of a SCSI device
2503 * @sdev: SCSI device to reconfigure.
2504 * @qdepth: requested queue depth.
Jack Wang71444b92013-11-07 11:37:37 +01002505 *
2506 * Returns the new queue depth.
2507 */
2508static int
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01002509srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
Jack Wang71444b92013-11-07 11:37:37 +01002510{
Christoph Hellwigc40ecc12014-11-13 14:25:11 +01002511 if (!sdev->tagged_supported)
Christoph Hellwig1e6f2412014-11-13 14:27:41 +01002512 qdepth = 1;
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01002513 return scsi_change_queue_depth(sdev, qdepth);
Jack Wang71444b92013-11-07 11:37:37 +01002514}
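/*
 * srp_change_queue_depth() is invoked by the SCSI mid-layer, for example
 * when a new depth is written to the sysfs queue_depth attribute of a
 * hypothetical SRP-backed disk sdc:
 *
 *   echo 16 > /sys/block/sdc/device/queue_depth
 */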
2515
Bart Van Assche985aa492015-05-18 13:27:14 +02002516static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
2517 u8 func)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002518{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002519 struct srp_target_port *target = ch->target;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002520 struct srp_rport *rport = target->rport;
David Dillow19081f32010-10-18 08:54:49 -04002521 struct ib_device *dev = target->srp_host->srp_dev->dev;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002522 struct srp_iu *iu;
2523 struct srp_tsk_mgmt *tsk_mgmt;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002524
Bart Van Asschec014c8c2015-05-18 13:23:57 +02002525 if (!ch->connected || target->qp_in_error)
Bart Van Assche3780d1f2013-02-21 17:18:00 +00002526 return -1;
2527
Bart Van Assche509c07b2014-10-30 14:48:30 +01002528 init_completion(&ch->tsk_mgmt_done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002529
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002530 /*
Bart Van Assche509c07b2014-10-30 14:48:30 +01002531	 * Lock the rport mutex to prevent srp_create_ch_ib() from being
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002532 * invoked while a task management function is being sent.
2533 */
2534 mutex_lock(&rport->mutex);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002535 spin_lock_irq(&ch->lock);
2536 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2537 spin_unlock_irq(&ch->lock);
Bart Van Assche76c75b22010-11-26 14:37:47 -05002538
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002539 if (!iu) {
2540 mutex_unlock(&rport->mutex);
2541
Bart Van Assche76c75b22010-11-26 14:37:47 -05002542 return -1;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002543 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002544
David Dillow19081f32010-10-18 08:54:49 -04002545 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2546 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002547 tsk_mgmt = iu->buf;
2548 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2549
2550 tsk_mgmt->opcode = SRP_TSK_MGMT;
Bart Van Assche985aa492015-05-18 13:27:14 +02002551 int_to_scsilun(lun, &tsk_mgmt->lun);
David Dillowf8b6e312010-11-26 13:02:21 -05002552 tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002553 tsk_mgmt->tsk_mgmt_func = func;
David Dillowf8b6e312010-11-26 13:02:21 -05002554 tsk_mgmt->task_tag = req_tag;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002555
David Dillow19081f32010-10-18 08:54:49 -04002556 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2557 DMA_TO_DEVICE);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002558 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2559 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002560 mutex_unlock(&rport->mutex);
2561
Bart Van Assche76c75b22010-11-26 14:37:47 -05002562 return -1;
2563 }
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002564 mutex_unlock(&rport->mutex);
Roland Dreierd945e1d2006-05-09 10:50:28 -07002565
Bart Van Assche509c07b2014-10-30 14:48:30 +01002566 if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002567 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
Roland Dreierd945e1d2006-05-09 10:50:28 -07002568 return -1;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002569
Roland Dreierd945e1d2006-05-09 10:50:28 -07002570 return 0;
Roland Dreierd945e1d2006-05-09 10:50:28 -07002571}
2572
Roland Dreieraef9ec32005-11-02 14:07:13 -08002573static int srp_abort(struct scsi_cmnd *scmnd)
2574{
Roland Dreierd945e1d2006-05-09 10:50:28 -07002575 struct srp_target_port *target = host_to_target(scmnd->device->host);
David Dillowf8b6e312010-11-26 13:02:21 -05002576 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002577 u32 tag;
Bart Van Assched92c0da2014-10-06 17:14:36 +02002578 u16 ch_idx;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002579 struct srp_rdma_ch *ch;
Bart Van Assche086f44f2013-06-12 15:23:04 +02002580 int ret;
Roland Dreierd945e1d2006-05-09 10:50:28 -07002581
David Dillow7aa54bd2008-01-07 18:23:41 -05002582 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002583
Bart Van Assched92c0da2014-10-06 17:14:36 +02002584 if (!req)
Bart Van Assche99b66972013-10-10 13:52:33 +02002585 return SUCCESS;
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002586 tag = blk_mq_unique_tag(scmnd->request);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002587 ch_idx = blk_mq_unique_tag_to_hwq(tag);
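	/*
	 * blk_mq_unique_tag() encodes the hardware queue index in the upper
	 * 16 bits and the per-queue tag in the lower 16 bits; e.g. unique
	 * tag 0x0002002a refers to request 0x2a on channel 2.
	 */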
2588 if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2589 return SUCCESS;
2590 ch = &target->ch[ch_idx];
2591 if (!srp_claim_req(ch, req, NULL, scmnd))
2592 return SUCCESS;
2593 shost_printk(KERN_ERR, target->scsi_host,
2594 "Sending SRP abort for tag %#x\n", tag);
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002595 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
Bart Van Assche80d5e8a2013-07-10 17:36:35 +02002596 SRP_TSK_ABORT_TASK) == 0)
Bart Van Assche086f44f2013-06-12 15:23:04 +02002597 ret = SUCCESS;
Bart Van Asscheed9b2262013-10-26 14:34:27 +02002598 else if (target->rport->state == SRP_RPORT_LOST)
Bart Van Assche99e1c132013-06-28 14:49:58 +02002599 ret = FAST_IO_FAIL;
Bart Van Assche086f44f2013-06-12 15:23:04 +02002600 else
2601 ret = FAILED;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002602 srp_free_req(ch, req, scmnd, 0);
Bart Van Assche22032992012-08-14 13:18:53 +00002603 scmnd->result = DID_ABORT << 16;
Bart Van Assched8536672012-08-24 10:29:11 +00002604 scmnd->scsi_done(scmnd);
Roland Dreierd945e1d2006-05-09 10:50:28 -07002605
Bart Van Assche086f44f2013-06-12 15:23:04 +02002606 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002607}
2608
2609static int srp_reset_device(struct scsi_cmnd *scmnd)
2610{
Roland Dreierd945e1d2006-05-09 10:50:28 -07002611 struct srp_target_port *target = host_to_target(scmnd->device->host);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002612 struct srp_rdma_ch *ch;
Bart Van Assche536ae142010-11-26 13:58:27 -05002613	int i, j;
Roland Dreierd945e1d2006-05-09 10:50:28 -07002614
David Dillow7aa54bd2008-01-07 18:23:41 -05002615 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002616
Bart Van Assched92c0da2014-10-06 17:14:36 +02002617 ch = &target->ch[0];
Bart Van Assche509c07b2014-10-30 14:48:30 +01002618 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
David Dillowf8b6e312010-11-26 13:02:21 -05002619 SRP_TSK_LUN_RESET))
Roland Dreierd945e1d2006-05-09 10:50:28 -07002620 return FAILED;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002621 if (ch->tsk_mgmt_status)
Roland Dreierd945e1d2006-05-09 10:50:28 -07002622 return FAILED;
2623
Bart Van Assched92c0da2014-10-06 17:14:36 +02002624 for (i = 0; i < target->ch_count; i++) {
2625 ch = &target->ch[i];
2626		for (j = 0; j < target->req_ring_size; ++j) {
2627			struct srp_request *req = &ch->req_ring[j];
Bart Van Assche509c07b2014-10-30 14:48:30 +01002628
Bart Van Assched92c0da2014-10-06 17:14:36 +02002629 srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2630 }
Bart Van Assche536ae142010-11-26 13:58:27 -05002631 }
Roland Dreierd945e1d2006-05-09 10:50:28 -07002632
Roland Dreierd945e1d2006-05-09 10:50:28 -07002633 return SUCCESS;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002634}
2635
2636static int srp_reset_host(struct scsi_cmnd *scmnd)
2637{
2638 struct srp_target_port *target = host_to_target(scmnd->device->host);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002639
David Dillow7aa54bd2008-01-07 18:23:41 -05002640 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002641
Bart Van Asscheed9b2262013-10-26 14:34:27 +02002642 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002643}
2644
Bart Van Assche509c5f32016-05-12 10:50:35 -07002645static int srp_slave_alloc(struct scsi_device *sdev)
2646{
2647 struct Scsi_Host *shost = sdev->host;
2648 struct srp_target_port *target = host_to_target(shost);
2649 struct srp_device *srp_dev = target->srp_host->srp_dev;
2650 struct ib_device *ibdev = srp_dev->dev;
2651
2652 if (!(ibdev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
2653 blk_queue_virt_boundary(sdev->request_queue,
2654 ~srp_dev->mr_page_mask);
2655
2656 return 0;
2657}
2658
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002659static int srp_slave_configure(struct scsi_device *sdev)
2660{
2661 struct Scsi_Host *shost = sdev->host;
2662 struct srp_target_port *target = host_to_target(shost);
2663 struct request_queue *q = sdev->request_queue;
2664 unsigned long timeout;
2665
2666 if (sdev->type == TYPE_DISK) {
2667 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2668 blk_queue_rq_timeout(q, timeout);
2669 }
2670
2671 return 0;
2672}
2673
Tony Jonesee959b02008-02-22 00:13:36 +01002674static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2675 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002676{
Tony Jonesee959b02008-02-22 00:13:36 +01002677 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002678
Bart Van Assche45c37ca2015-05-18 13:25:10 +02002679 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002680}
2681
Tony Jonesee959b02008-02-22 00:13:36 +01002682static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2683 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002684{
Tony Jonesee959b02008-02-22 00:13:36 +01002685 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002686
Bart Van Assche45c37ca2015-05-18 13:25:10 +02002687 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002688}
2689
Tony Jonesee959b02008-02-22 00:13:36 +01002690static ssize_t show_service_id(struct device *dev,
2691 struct device_attribute *attr, char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002692{
Tony Jonesee959b02008-02-22 00:13:36 +01002693 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002694
Bart Van Assche45c37ca2015-05-18 13:25:10 +02002695 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002696}
2697
Tony Jonesee959b02008-02-22 00:13:36 +01002698static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2699 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002700{
Tony Jonesee959b02008-02-22 00:13:36 +01002701 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002702
Bart Van Assche747fe002014-10-30 14:48:05 +01002703 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002704}
2705
Bart Van Assche848b3082013-10-26 14:38:12 +02002706static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2707 char *buf)
2708{
2709 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2710
Bart Van Assche747fe002014-10-30 14:48:05 +01002711 return sprintf(buf, "%pI6\n", target->sgid.raw);
Bart Van Assche848b3082013-10-26 14:38:12 +02002712}
2713
Tony Jonesee959b02008-02-22 00:13:36 +01002714static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2715 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002716{
Tony Jonesee959b02008-02-22 00:13:36 +01002717 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Bart Van Assched92c0da2014-10-06 17:14:36 +02002718 struct srp_rdma_ch *ch = &target->ch[0];
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002719
Bart Van Assche509c07b2014-10-30 14:48:30 +01002720 return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002721}
2722
Tony Jonesee959b02008-02-22 00:13:36 +01002723static ssize_t show_orig_dgid(struct device *dev,
2724 struct device_attribute *attr, char *buf)
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002725{
Tony Jonesee959b02008-02-22 00:13:36 +01002726 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002727
Bart Van Assche747fe002014-10-30 14:48:05 +01002728 return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002729}
2730
Bart Van Assche89de7482010-08-03 14:08:45 +00002731static ssize_t show_req_lim(struct device *dev,
2732 struct device_attribute *attr, char *buf)
2733{
2734 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Bart Van Assched92c0da2014-10-06 17:14:36 +02002735 struct srp_rdma_ch *ch;
2736 int i, req_lim = INT_MAX;
Bart Van Assche89de7482010-08-03 14:08:45 +00002737
Bart Van Assched92c0da2014-10-06 17:14:36 +02002738 for (i = 0; i < target->ch_count; i++) {
2739 ch = &target->ch[i];
2740 req_lim = min(req_lim, ch->req_lim);
2741 }
2742 return sprintf(buf, "%d\n", req_lim);
Bart Van Assche89de7482010-08-03 14:08:45 +00002743}
2744
Tony Jonesee959b02008-02-22 00:13:36 +01002745static ssize_t show_zero_req_lim(struct device *dev,
2746 struct device_attribute *attr, char *buf)
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002747{
Tony Jonesee959b02008-02-22 00:13:36 +01002748 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002749
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002750 return sprintf(buf, "%d\n", target->zero_req_lim);
2751}
2752
Tony Jonesee959b02008-02-22 00:13:36 +01002753static ssize_t show_local_ib_port(struct device *dev,
2754 struct device_attribute *attr, char *buf)
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002755{
Tony Jonesee959b02008-02-22 00:13:36 +01002756 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002757
2758 return sprintf(buf, "%d\n", target->srp_host->port);
2759}
2760
Tony Jonesee959b02008-02-22 00:13:36 +01002761static ssize_t show_local_ib_device(struct device *dev,
2762 struct device_attribute *attr, char *buf)
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002763{
Tony Jonesee959b02008-02-22 00:13:36 +01002764 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002765
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002766 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002767}
2768
Bart Van Assched92c0da2014-10-06 17:14:36 +02002769static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2770 char *buf)
2771{
2772 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2773
2774 return sprintf(buf, "%d\n", target->ch_count);
2775}
2776
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002777static ssize_t show_comp_vector(struct device *dev,
2778 struct device_attribute *attr, char *buf)
2779{
2780 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2781
2782 return sprintf(buf, "%d\n", target->comp_vector);
2783}
2784
Vu Pham7bb312e2013-10-26 14:31:27 +02002785static ssize_t show_tl_retry_count(struct device *dev,
2786 struct device_attribute *attr, char *buf)
2787{
2788 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2789
2790 return sprintf(buf, "%d\n", target->tl_retry_count);
2791}
2792
David Dillow49248642011-01-14 18:23:24 -05002793static ssize_t show_cmd_sg_entries(struct device *dev,
2794 struct device_attribute *attr, char *buf)
2795{
2796 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2797
2798 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2799}
2800
David Dillowc07d4242011-01-16 13:57:10 -05002801static ssize_t show_allow_ext_sg(struct device *dev,
2802 struct device_attribute *attr, char *buf)
2803{
2804 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2805
2806 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2807}
2808
Tony Jonesee959b02008-02-22 00:13:36 +01002809static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
2810static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
2811static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
2812static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
Bart Van Assche848b3082013-10-26 14:38:12 +02002813static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
Tony Jonesee959b02008-02-22 00:13:36 +01002814static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
2815static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
Bart Van Assche89de7482010-08-03 14:08:45 +00002816static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
Tony Jonesee959b02008-02-22 00:13:36 +01002817static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
2818static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
2819static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002820static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002821static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
Vu Pham7bb312e2013-10-26 14:31:27 +02002822static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
David Dillow49248642011-01-14 18:23:24 -05002823static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
David Dillowc07d4242011-01-16 13:57:10 -05002824static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002825
Tony Jonesee959b02008-02-22 00:13:36 +01002826static struct device_attribute *srp_host_attrs[] = {
2827 &dev_attr_id_ext,
2828 &dev_attr_ioc_guid,
2829 &dev_attr_service_id,
2830 &dev_attr_pkey,
Bart Van Assche848b3082013-10-26 14:38:12 +02002831 &dev_attr_sgid,
Tony Jonesee959b02008-02-22 00:13:36 +01002832 &dev_attr_dgid,
2833 &dev_attr_orig_dgid,
Bart Van Assche89de7482010-08-03 14:08:45 +00002834 &dev_attr_req_lim,
Tony Jonesee959b02008-02-22 00:13:36 +01002835 &dev_attr_zero_req_lim,
2836 &dev_attr_local_ib_port,
2837 &dev_attr_local_ib_device,
Bart Van Assched92c0da2014-10-06 17:14:36 +02002838 &dev_attr_ch_count,
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002839 &dev_attr_comp_vector,
Vu Pham7bb312e2013-10-26 14:31:27 +02002840 &dev_attr_tl_retry_count,
David Dillow49248642011-01-14 18:23:24 -05002841 &dev_attr_cmd_sg_entries,
David Dillowc07d4242011-01-16 13:57:10 -05002842 &dev_attr_allow_ext_sg,
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002843 NULL
2844};
2845
Roland Dreieraef9ec32005-11-02 14:07:13 -08002846static struct scsi_host_template srp_template = {
2847 .module = THIS_MODULE,
Roland Dreierb7f008f2007-05-06 21:18:11 -07002848 .name = "InfiniBand SRP initiator",
2849 .proc_name = DRV_NAME,
Bart Van Assche509c5f32016-05-12 10:50:35 -07002850 .slave_alloc = srp_slave_alloc,
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002851 .slave_configure = srp_slave_configure,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002852 .info = srp_target_info,
2853 .queuecommand = srp_queuecommand,
Jack Wang71444b92013-11-07 11:37:37 +01002854 .change_queue_depth = srp_change_queue_depth,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002855 .eh_abort_handler = srp_abort,
2856 .eh_device_reset_handler = srp_reset_device,
2857 .eh_host_reset_handler = srp_reset_host,
Bart Van Assche2742c1d2013-06-12 15:24:25 +02002858 .skip_settle_delay = true,
David Dillow49248642011-01-14 18:23:24 -05002859 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
Bart Van Assche4d73f952013-10-26 14:40:37 +02002860 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002861 .this_id = -1,
Bart Van Assche4d73f952013-10-26 14:40:37 +02002862 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002863 .use_clustering = ENABLE_CLUSTERING,
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002864 .shost_attrs = srp_host_attrs,
Christoph Hellwigc40ecc12014-11-13 14:25:11 +01002865 .track_queue_depth = 1,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002866};
2867
Bart Van Assche34aa6542014-10-30 14:47:22 +01002868static int srp_sdev_count(struct Scsi_Host *host)
2869{
2870 struct scsi_device *sdev;
2871 int c = 0;
2872
2873 shost_for_each_device(sdev, host)
2874 c++;
2875
2876 return c;
2877}
2878
Bart Van Asschebc44bd12015-08-14 11:01:09 -07002879/*
2880 * Return values:
2881 * < 0 upon failure. Caller is responsible for SRP target port cleanup.
2882 * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
2883 * removal has been scheduled.
2884 * 0 and target->state != SRP_TARGET_REMOVED upon success.
2885 */
Roland Dreieraef9ec32005-11-02 14:07:13 -08002886static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2887{
FUJITA Tomonori32368222007-06-27 16:33:12 +09002888 struct srp_rport_identifiers ids;
2889 struct srp_rport *rport;
2890
Bart Van Assche34aa6542014-10-30 14:47:22 +01002891 target->state = SRP_TARGET_SCANNING;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002892 sprintf(target->target_name, "SRP.T10:%016llX",
Bart Van Assche45c37ca2015-05-18 13:25:10 +02002893 be64_to_cpu(target->id_ext));
Roland Dreieraef9ec32005-11-02 14:07:13 -08002894
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002895 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
Roland Dreieraef9ec32005-11-02 14:07:13 -08002896 return -ENODEV;
2897
FUJITA Tomonori32368222007-06-27 16:33:12 +09002898 memcpy(ids.port_id, &target->id_ext, 8);
2899 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
FUJITA Tomonoriaebd5e42007-07-11 15:08:15 +09002900 ids.roles = SRP_RPORT_ROLE_TARGET;
FUJITA Tomonori32368222007-06-27 16:33:12 +09002901 rport = srp_rport_add(target->scsi_host, &ids);
2902 if (IS_ERR(rport)) {
2903 scsi_remove_host(target->scsi_host);
2904 return PTR_ERR(rport);
2905 }
2906
Bart Van Asschedc1bdbd2011-09-16 20:41:13 +02002907 rport->lld_data = target;
Bart Van Assche9dd69a62013-10-26 14:32:30 +02002908 target->rport = rport;
Bart Van Asschedc1bdbd2011-09-16 20:41:13 +02002909
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07002910 spin_lock(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002911 list_add_tail(&target->list, &host->target_list);
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07002912 spin_unlock(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002913
Roland Dreieraef9ec32005-11-02 14:07:13 -08002914 scsi_scan_target(&target->scsi_host->shost_gendev,
Hannes Reinecke1d645082016-03-17 08:39:45 +01002915 0, target->scsi_id, SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002916
Bart Van Asschec014c8c2015-05-18 13:23:57 +02002917 if (srp_connected_ch(target) < target->ch_count ||
2918 target->qp_in_error) {
Bart Van Assche34aa6542014-10-30 14:47:22 +01002919 shost_printk(KERN_INFO, target->scsi_host,
2920 PFX "SCSI scan failed - removing SCSI host\n");
2921 srp_queue_remove_work(target);
2922 goto out;
2923 }
2924
Bart Van Asschecf1acab2016-05-12 10:47:38 -07002925 pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n",
Bart Van Assche34aa6542014-10-30 14:47:22 +01002926 dev_name(&target->scsi_host->shost_gendev),
2927 srp_sdev_count(target->scsi_host));
2928
2929 spin_lock_irq(&target->lock);
2930 if (target->state == SRP_TARGET_SCANNING)
2931 target->state = SRP_TARGET_LIVE;
2932 spin_unlock_irq(&target->lock);
2933
2934out:
Roland Dreieraef9ec32005-11-02 14:07:13 -08002935 return 0;
2936}
2937
Tony Jonesee959b02008-02-22 00:13:36 +01002938static void srp_release_dev(struct device *dev)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002939{
2940 struct srp_host *host =
Tony Jonesee959b02008-02-22 00:13:36 +01002941 container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002942
2943 complete(&host->released);
2944}
2945
2946static struct class srp_class = {
2947 .name = "infiniband_srp",
Tony Jonesee959b02008-02-22 00:13:36 +01002948 .dev_release = srp_release_dev
Roland Dreieraef9ec32005-11-02 14:07:13 -08002949};
2950
Bart Van Assche96fc2482013-06-28 14:51:26 +02002951/**
2952 * srp_conn_unique() - check whether the connection to a target is unique
Bart Van Asscheaf246632014-05-20 15:04:21 +02002953 * @host: SRP host.
2954 * @target: SRP target port.
Bart Van Assche96fc2482013-06-28 14:51:26 +02002955 */
2956static bool srp_conn_unique(struct srp_host *host,
2957 struct srp_target_port *target)
2958{
2959 struct srp_target_port *t;
2960 bool ret = false;
2961
2962 if (target->state == SRP_TARGET_REMOVED)
2963 goto out;
2964
2965 ret = true;
2966
2967 spin_lock(&host->target_lock);
2968 list_for_each_entry(t, &host->target_list, list) {
2969 if (t != target &&
2970 target->id_ext == t->id_ext &&
2971 target->ioc_guid == t->ioc_guid &&
2972 target->initiator_ext == t->initiator_ext) {
2973 ret = false;
2974 break;
2975 }
2976 }
2977 spin_unlock(&host->target_lock);
2978
2979out:
2980 return ret;
2981}
2982
Roland Dreieraef9ec32005-11-02 14:07:13 -08002983/*
2984 * Target ports are added by writing
2985 *
2986 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2987 * pkey=<P_Key>,service_id=<service ID>
2988 *
2989 * to the add_target sysfs attribute.
2990 */
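/*
 * Example (all identifiers below are hypothetical):
 *
 *   echo id_ext=200100e08b8c2f9a,ioc_guid=0002c90300a0b1c2,\
 *        dgid=fe800000000000000002c90300a0b1c3,pkey=ffff,\
 *        service_id=200100e08b8c2f9a \
 *        > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 */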
2991enum {
2992 SRP_OPT_ERR = 0,
2993 SRP_OPT_ID_EXT = 1 << 0,
2994 SRP_OPT_IOC_GUID = 1 << 1,
2995 SRP_OPT_DGID = 1 << 2,
2996 SRP_OPT_PKEY = 1 << 3,
2997 SRP_OPT_SERVICE_ID = 1 << 4,
2998 SRP_OPT_MAX_SECT = 1 << 5,
Vu Pham52fb2b502006-06-17 20:37:31 -07002999 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
Ramachandra K0c0450db2006-06-17 20:37:38 -07003000 SRP_OPT_IO_CLASS = 1 << 7,
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02003001 SRP_OPT_INITIATOR_EXT = 1 << 8,
David Dillow49248642011-01-14 18:23:24 -05003002 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
David Dillowc07d4242011-01-16 13:57:10 -05003003 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
3004 SRP_OPT_SG_TABLESIZE = 1 << 11,
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02003005 SRP_OPT_COMP_VECTOR = 1 << 12,
Vu Pham7bb312e2013-10-26 14:31:27 +02003006 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
Bart Van Assche4d73f952013-10-26 14:40:37 +02003007 SRP_OPT_QUEUE_SIZE = 1 << 14,
Roland Dreieraef9ec32005-11-02 14:07:13 -08003008 SRP_OPT_ALL = (SRP_OPT_ID_EXT |
3009 SRP_OPT_IOC_GUID |
3010 SRP_OPT_DGID |
3011 SRP_OPT_PKEY |
3012 SRP_OPT_SERVICE_ID),
3013};
3014
Steven Whitehousea447c092008-10-13 10:46:57 +01003015static const match_table_t srp_opt_tokens = {
Vu Pham52fb2b502006-06-17 20:37:31 -07003016 { SRP_OPT_ID_EXT, "id_ext=%s" },
3017 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
3018 { SRP_OPT_DGID, "dgid=%s" },
3019 { SRP_OPT_PKEY, "pkey=%x" },
3020 { SRP_OPT_SERVICE_ID, "service_id=%s" },
3021 { SRP_OPT_MAX_SECT, "max_sect=%d" },
3022 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
Ramachandra K0c0450db2006-06-17 20:37:38 -07003023 { SRP_OPT_IO_CLASS, "io_class=%x" },
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02003024 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
David Dillow49248642011-01-14 18:23:24 -05003025 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
David Dillowc07d4242011-01-16 13:57:10 -05003026 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
3027 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02003028 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
Vu Pham7bb312e2013-10-26 14:31:27 +02003029 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
Bart Van Assche4d73f952013-10-26 14:40:37 +02003030 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
Vu Pham52fb2b502006-06-17 20:37:31 -07003031 { SRP_OPT_ERR, NULL }
Roland Dreieraef9ec32005-11-02 14:07:13 -08003032};
3033
3034static int srp_parse_options(const char *buf, struct srp_target_port *target)
3035{
3036 char *options, *sep_opt;
3037 char *p;
3038 char dgid[3];
3039 substring_t args[MAX_OPT_ARGS];
3040 int opt_mask = 0;
3041 int token;
3042 int ret = -EINVAL;
3043 int i;
3044
3045 options = kstrdup(buf, GFP_KERNEL);
3046 if (!options)
3047 return -ENOMEM;
3048
3049 sep_opt = options;
Sagi Grimberg7dcf9c12014-10-19 18:19:02 +03003050 while ((p = strsep(&sep_opt, ",\n")) != NULL) {
Roland Dreieraef9ec32005-11-02 14:07:13 -08003051 if (!*p)
3052 continue;
3053
3054 token = match_token(p, srp_opt_tokens, args);
3055 opt_mask |= token;
3056
3057 switch (token) {
3058 case SRP_OPT_ID_EXT:
3059 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02003060 if (!p) {
3061 ret = -ENOMEM;
3062 goto out;
3063 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08003064 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3065 kfree(p);
3066 break;
3067
3068 case SRP_OPT_IOC_GUID:
3069 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02003070 if (!p) {
3071 ret = -ENOMEM;
3072 goto out;
3073 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08003074 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
3075 kfree(p);
3076 break;
3077
3078 case SRP_OPT_DGID:
3079 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02003080 if (!p) {
3081 ret = -ENOMEM;
3082 goto out;
3083 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08003084 if (strlen(p) != 32) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003085 pr_warn("bad dest GID parameter '%s'\n", p);
Roland Dreierce1823f2006-04-03 09:31:04 -07003086 kfree(p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003087 goto out;
3088 }
3089
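			/*
			 * Parse the 32-digit hex GID two characters at a
			 * time; e.g. a string starting with "fe80" yields
			 * raw[0] = 0xfe and raw[1] = 0x80.
			 */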
3090 for (i = 0; i < 16; ++i) {
Bart Van Assche747fe002014-10-30 14:48:05 +01003091 strlcpy(dgid, p + i * 2, sizeof(dgid));
3092 if (sscanf(dgid, "%hhx",
3093 &target->orig_dgid.raw[i]) < 1) {
3094 ret = -EINVAL;
3095 kfree(p);
3096 goto out;
3097 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08003098 }
Roland Dreierbf17c1c2006-03-20 10:08:25 -08003099 kfree(p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003100 break;
3101
3102 case SRP_OPT_PKEY:
3103 if (match_hex(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003104 pr_warn("bad P_Key parameter '%s'\n", p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003105 goto out;
3106 }
Bart Van Assche747fe002014-10-30 14:48:05 +01003107 target->pkey = cpu_to_be16(token);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003108 break;
3109
3110 case SRP_OPT_SERVICE_ID:
3111 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02003112 if (!p) {
3113 ret = -ENOMEM;
3114 goto out;
3115 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08003116 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
3117 kfree(p);
3118 break;
3119
3120 case SRP_OPT_MAX_SECT:
3121 if (match_int(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003122 pr_warn("bad max sect parameter '%s'\n", p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003123 goto out;
3124 }
3125 target->scsi_host->max_sectors = token;
3126 break;
3127
Bart Van Assche4d73f952013-10-26 14:40:37 +02003128 case SRP_OPT_QUEUE_SIZE:
3129 if (match_int(args, &token) || token < 1) {
3130 pr_warn("bad queue_size parameter '%s'\n", p);
3131 goto out;
3132 }
3133 target->scsi_host->can_queue = token;
3134 target->queue_size = token + SRP_RSP_SQ_SIZE +
3135 SRP_TSK_MGMT_SQ_SIZE;
3136 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3137 target->scsi_host->cmd_per_lun = token;
3138 break;
3139
Vu Pham52fb2b502006-06-17 20:37:31 -07003140 case SRP_OPT_MAX_CMD_PER_LUN:
Bart Van Assche4d73f952013-10-26 14:40:37 +02003141 if (match_int(args, &token) || token < 1) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003142 pr_warn("bad max cmd_per_lun parameter '%s'\n",
3143 p);
Vu Pham52fb2b502006-06-17 20:37:31 -07003144 goto out;
3145 }
Bart Van Assche4d73f952013-10-26 14:40:37 +02003146 target->scsi_host->cmd_per_lun = token;
Vu Pham52fb2b502006-06-17 20:37:31 -07003147 break;
3148
Ramachandra K0c0450db2006-06-17 20:37:38 -07003149 case SRP_OPT_IO_CLASS:
3150 if (match_hex(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003151 pr_warn("bad IO class parameter '%s'\n", p);
Ramachandra K0c0450db2006-06-17 20:37:38 -07003152 goto out;
3153 }
3154 if (token != SRP_REV10_IB_IO_CLASS &&
3155 token != SRP_REV16A_IB_IO_CLASS) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003156 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3157 token, SRP_REV10_IB_IO_CLASS,
3158 SRP_REV16A_IB_IO_CLASS);
Ramachandra K0c0450db2006-06-17 20:37:38 -07003159 goto out;
3160 }
3161 target->io_class = token;
3162 break;
3163
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02003164 case SRP_OPT_INITIATOR_EXT:
3165 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02003166 if (!p) {
3167 ret = -ENOMEM;
3168 goto out;
3169 }
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02003170 target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3171 kfree(p);
3172 break;
3173
David Dillow49248642011-01-14 18:23:24 -05003174 case SRP_OPT_CMD_SG_ENTRIES:
3175 if (match_int(args, &token) || token < 1 || token > 255) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003176 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3177 p);
David Dillow49248642011-01-14 18:23:24 -05003178 goto out;
3179 }
3180 target->cmd_sg_cnt = token;
3181 break;
3182
David Dillowc07d4242011-01-16 13:57:10 -05003183 case SRP_OPT_ALLOW_EXT_SG:
3184 if (match_int(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003185 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
David Dillowc07d4242011-01-16 13:57:10 -05003186 goto out;
3187 }
3188 target->allow_ext_sg = !!token;
3189 break;
3190
3191 case SRP_OPT_SG_TABLESIZE:
3192 if (match_int(args, &token) || token < 1 ||
Ming Lin65e86172016-04-04 14:48:10 -07003193 token > SG_MAX_SEGMENTS) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003194 pr_warn("bad max sg_tablesize parameter '%s'\n",
3195 p);
David Dillowc07d4242011-01-16 13:57:10 -05003196 goto out;
3197 }
3198 target->sg_tablesize = token;
3199 break;
3200
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02003201 case SRP_OPT_COMP_VECTOR:
3202 if (match_int(args, &token) || token < 0) {
3203 pr_warn("bad comp_vector parameter '%s'\n", p);
3204 goto out;
3205 }
3206 target->comp_vector = token;
3207 break;
3208
Vu Pham7bb312e2013-10-26 14:31:27 +02003209 case SRP_OPT_TL_RETRY_COUNT:
3210 if (match_int(args, &token) || token < 2 || token > 7) {
3211 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3212 p);
3213 goto out;
3214 }
3215 target->tl_retry_count = token;
3216 break;
3217
Roland Dreieraef9ec32005-11-02 14:07:13 -08003218 default:
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003219 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3220 p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003221 goto out;
3222 }
3223 }
3224
3225 if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
3226 ret = 0;
3227 else
3228 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
3229 if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
3230 !(srp_opt_tokens[i].token & opt_mask))
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003231 pr_warn("target creation request is missing parameter '%s'\n",
3232 srp_opt_tokens[i].pattern);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003233
Bart Van Assche4d73f952013-10-26 14:40:37 +02003234 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3235 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3236 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3237 target->scsi_host->cmd_per_lun,
3238 target->scsi_host->can_queue);
3239
Roland Dreieraef9ec32005-11-02 14:07:13 -08003240out:
3241 kfree(options);
3242 return ret;
3243}
3244
Tony Jonesee959b02008-02-22 00:13:36 +01003245static ssize_t srp_create_target(struct device *dev,
3246 struct device_attribute *attr,
Roland Dreieraef9ec32005-11-02 14:07:13 -08003247 const char *buf, size_t count)
3248{
3249 struct srp_host *host =
Tony Jonesee959b02008-02-22 00:13:36 +01003250 container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003251 struct Scsi_Host *target_host;
3252 struct srp_target_port *target;
Bart Van Assche509c07b2014-10-30 14:48:30 +01003253 struct srp_rdma_ch *ch;
Bart Van Assched1b42892014-05-20 15:07:20 +02003254 struct srp_device *srp_dev = host->srp_dev;
3255 struct ib_device *ibdev = srp_dev->dev;
Bart Van Assched92c0da2014-10-06 17:14:36 +02003256 int ret, node_idx, node, cpu, i;
Bart Van Assche509c5f32016-05-12 10:50:35 -07003257 unsigned int max_sectors_per_mr, mr_per_cmd = 0;
Bart Van Assched92c0da2014-10-06 17:14:36 +02003258 bool multich = false;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003259
3260 target_host = scsi_host_alloc(&srp_template,
3261 sizeof (struct srp_target_port));
3262 if (!target_host)
3263 return -ENOMEM;
3264
David Dillow49248642011-01-14 18:23:24 -05003265 target_host->transportt = ib_srp_transport_template;
Bart Van Asschefd1b6c42011-07-13 09:19:16 -07003266 target_host->max_channel = 0;
3267 target_host->max_id = 1;
Bart Van Assche985aa492015-05-18 13:27:14 +02003268 target_host->max_lun = -1LL;
Arne Redlich3c8edf02006-11-15 12:43:00 +01003269 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
Roland Dreier5f068992005-11-11 14:06:01 -08003270
Roland Dreieraef9ec32005-11-02 14:07:13 -08003271 target = host_to_target(target_host);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003272
David Dillow49248642011-01-14 18:23:24 -05003273 target->io_class = SRP_REV16A_IB_IO_CLASS;
3274 target->scsi_host = target_host;
3275 target->srp_host = host;
Christoph Hellwig5f071772016-09-05 12:56:19 +02003276 target->pd = host->srp_dev->pd;
Jason Gunthorpee6bf5f482015-07-30 17:22:22 -06003277 target->lkey = host->srp_dev->pd->local_dma_lkey;
David Dillow49248642011-01-14 18:23:24 -05003278 target->cmd_sg_cnt = cmd_sg_entries;
David Dillowc07d4242011-01-16 13:57:10 -05003279 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
3280 target->allow_ext_sg = allow_ext_sg;
Vu Pham7bb312e2013-10-26 14:31:27 +02003281 target->tl_retry_count = 7;
Bart Van Assche4d73f952013-10-26 14:40:37 +02003282 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003283
Bart Van Assche34aa6542014-10-30 14:47:22 +01003284 /*
3285	 * Prevent the SCSI host from being removed by srp_remove_target()
3286 * before this function returns.
3287 */
3288 scsi_host_get(target->scsi_host);
3289
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003290 mutex_lock(&host->add_target_mutex);
3291
Roland Dreieraef9ec32005-11-02 14:07:13 -08003292 ret = srp_parse_options(buf, target);
3293 if (ret)
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003294 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003295
Bart Van Assche4d73f952013-10-26 14:40:37 +02003296 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3297
Bart Van Assche96fc2482013-06-28 14:51:26 +02003298 if (!srp_conn_unique(target->srp_host, target)) {
3299 shost_printk(KERN_INFO, target->scsi_host,
3300 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3301 be64_to_cpu(target->id_ext),
3302 be64_to_cpu(target->ioc_guid),
3303 be64_to_cpu(target->initiator_ext));
3304 ret = -EEXIST;
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003305 goto out;
Bart Van Assche96fc2482013-06-28 14:51:26 +02003306 }
3307
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003308 if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
Bart Van Assched1b42892014-05-20 15:07:20 +02003309 target->cmd_sg_cnt < target->sg_tablesize) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003310 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
David Dillowc07d4242011-01-16 13:57:10 -05003311 target->sg_tablesize = target->cmd_sg_cnt;
3312 }
3313
Bart Van Assche509c5f32016-05-12 10:50:35 -07003314 if (srp_dev->use_fast_reg || srp_dev->use_fmr) {
3315 /*
3316 * FR and FMR can only map one HCA page per entry. If the
3317 * start address is not aligned on a HCA page boundary two
3318 * entries will be used for the head and the tail although
3319 * these two entries combined contain at most one HCA page of
3320 * data. Hence the "+ 1" in the calculation below.
3321 *
3322 * The indirect data buffer descriptor is contiguous so the
3323 * memory for that buffer will only be registered if
3324 * register_always is true. Hence add one to mr_per_cmd if
3325 * register_always has been set.
3326 */
3327 max_sectors_per_mr = srp_dev->max_pages_per_mr <<
3328 (ilog2(srp_dev->mr_page_size) - 9);
3329 mr_per_cmd = register_always +
3330 (target->scsi_host->max_sectors + 1 +
3331 max_sectors_per_mr - 1) / max_sectors_per_mr;
3332 pr_debug("max_sectors = %u; max_pages_per_mr = %u; mr_page_size = %u; max_sectors_per_mr = %u; mr_per_cmd = %u\n",
3333 target->scsi_host->max_sectors,
3334 srp_dev->max_pages_per_mr, srp_dev->mr_page_size,
3335 max_sectors_per_mr, mr_per_cmd);
3336 }
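	/*
	 * Worked example for the computation above (hypothetical HCA):
	 * with mr_page_size = 4096 and max_pages_per_mr = 256, one MR maps
	 * max_sectors_per_mr = 256 << (12 - 9) = 2048 sectors (1 MiB), so
	 * max_sectors = 1024 yields mr_per_cmd = register_always +
	 * (1024 + 1 + 2047) / 2048 = register_always + 1.
	 */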
3337
David Dillowc07d4242011-01-16 13:57:10 -05003338 target_host->sg_tablesize = target->sg_tablesize;
Bart Van Assche509c5f32016-05-12 10:50:35 -07003339 target->mr_pool_size = target->scsi_host->can_queue * mr_per_cmd;
3340 target->mr_per_cmd = mr_per_cmd;
David Dillowc07d4242011-01-16 13:57:10 -05003341 target->indirect_size = target->sg_tablesize *
3342 sizeof (struct srp_direct_buf);
David Dillow49248642011-01-14 18:23:24 -05003343 target->max_iu_len = sizeof (struct srp_cmd) +
3344 sizeof (struct srp_indirect_buf) +
3345 target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
3346
Bart Van Asschec1120f82013-10-26 14:35:08 +02003347 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
Bart Van Asscheef6c49d2011-12-26 16:49:18 +00003348 INIT_WORK(&target->remove_work, srp_remove_work);
David Dillow8f26c9f2011-01-14 19:45:50 -05003349 spin_lock_init(&target->lock);
Matan Barak55ee3ab2015-10-15 18:38:45 +03003350 ret = ib_query_gid(ibdev, host->port, 0, &target->sgid, NULL);
Sagi Grimberg2088ca62014-03-14 13:51:58 +01003351 if (ret)
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003352 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003353
Bart Van Assched92c0da2014-10-06 17:14:36 +02003354 ret = -ENOMEM;
3355 target->ch_count = max_t(unsigned, num_online_nodes(),
3356 min(ch_count ? :
3357 min(4 * num_online_nodes(),
3358 ibdev->num_comp_vectors),
3359 num_online_cpus()));
3360 target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3361 GFP_KERNEL);
3362 if (!target->ch)
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003363 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003364
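	/*
	 * Spread the channels and their completion vectors evenly over the
	 * online NUMA nodes. Worked example (hypothetical system): with 2
	 * online nodes, 8 online CPUs and 4 completion vectors, ch_count
	 * above is 4; node 0 gets channels 0-1 on vectors 0-1 and node 1
	 * gets channels 2-3 on vectors 2-3 (before the user-supplied
	 * comp_vector offset is applied).
	 */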
Bart Van Assched92c0da2014-10-06 17:14:36 +02003365 node_idx = 0;
3366 for_each_online_node(node) {
3367 const int ch_start = (node_idx * target->ch_count /
3368 num_online_nodes());
3369 const int ch_end = ((node_idx + 1) * target->ch_count /
3370 num_online_nodes());
3371 const int cv_start = (node_idx * ibdev->num_comp_vectors /
3372 num_online_nodes() + target->comp_vector)
3373 % ibdev->num_comp_vectors;
3374 const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
3375 num_online_nodes() + target->comp_vector)
3376 % ibdev->num_comp_vectors;
3377 int cpu_idx = 0;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003378
Bart Van Assched92c0da2014-10-06 17:14:36 +02003379 for_each_online_cpu(cpu) {
3380 if (cpu_to_node(cpu) != node)
3381 continue;
3382 if (ch_start + cpu_idx >= ch_end)
3383 continue;
3384 ch = &target->ch[ch_start + cpu_idx];
3385 ch->target = target;
3386 ch->comp_vector = cv_start == cv_end ? cv_start :
3387 cv_start + cpu_idx % (cv_end - cv_start);
3388 spin_lock_init(&ch->lock);
3389 INIT_LIST_HEAD(&ch->free_tx);
3390 ret = srp_new_cm_id(ch);
3391 if (ret)
3392 goto err_disconnect;
3393
3394 ret = srp_create_ch_ib(ch);
3395 if (ret)
3396 goto err_disconnect;
3397
3398 ret = srp_alloc_req_data(ch);
3399 if (ret)
3400 goto err_disconnect;
3401
3402 ret = srp_connect_ch(ch, multich);
3403 if (ret) {
3404 shost_printk(KERN_ERR, target->scsi_host,
3405 PFX "Connection %d/%d failed\n",
3406 ch_start + cpu_idx,
3407 target->ch_count);
3408 if (node_idx == 0 && cpu_idx == 0) {
3409 goto err_disconnect;
3410 } else {
3411 srp_free_ch_ib(target, ch);
3412 srp_free_req_data(target, ch);
3413 target->ch_count = ch - target->ch;
Bart Van Asschec257ea62015-07-31 14:13:22 -07003414 goto connected;
Bart Van Assched92c0da2014-10-06 17:14:36 +02003415 }
3416 }
3417
3418 multich = true;
3419 cpu_idx++;
3420 }
3421 node_idx++;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003422 }
3423
Bart Van Asschec257ea62015-07-31 14:13:22 -07003424connected:
Bart Van Assched92c0da2014-10-06 17:14:36 +02003425 target->scsi_host->nr_hw_queues = target->ch_count;
3426
Roland Dreieraef9ec32005-11-02 14:07:13 -08003427 ret = srp_add_target(host, target);
3428 if (ret)
3429 goto err_disconnect;
3430
Bart Van Assche34aa6542014-10-30 14:47:22 +01003431 if (target->state != SRP_TARGET_REMOVED) {
3432 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3433 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3434 be64_to_cpu(target->id_ext),
3435 be64_to_cpu(target->ioc_guid),
Bart Van Assche747fe002014-10-30 14:48:05 +01003436 be16_to_cpu(target->pkey),
Bart Van Assche34aa6542014-10-30 14:47:22 +01003437 be64_to_cpu(target->service_id),
Bart Van Assche747fe002014-10-30 14:48:05 +01003438 target->sgid.raw, target->orig_dgid.raw);
Bart Van Assche34aa6542014-10-30 14:47:22 +01003439 }
Bart Van Asschee7ffde02014-03-14 13:52:21 +01003440
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003441 ret = count;
3442
3443out:
3444 mutex_unlock(&host->add_target_mutex);
Bart Van Assche34aa6542014-10-30 14:47:22 +01003445
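	/*
	 * Drop the reference taken by scsi_host_get() near the start of this
	 * function; upon failure, also drop the initial reference obtained
	 * via scsi_host_alloc() so that the SCSI host gets freed.
	 */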
3446 scsi_host_put(target->scsi_host);
Bart Van Asschebc44bd12015-08-14 11:01:09 -07003447 if (ret < 0)
3448 scsi_host_put(target->scsi_host);
Bart Van Assche34aa6542014-10-30 14:47:22 +01003449
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003450 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003451
3452err_disconnect:
3453 srp_disconnect_target(target);
3454
Bart Van Assched92c0da2014-10-06 17:14:36 +02003455 for (i = 0; i < target->ch_count; i++) {
3456 ch = &target->ch[i];
3457 srp_free_ch_ib(target, ch);
3458 srp_free_req_data(target, ch);
3459 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08003460
Bart Van Assched92c0da2014-10-06 17:14:36 +02003461 kfree(target->ch);
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003462 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003463}
3464
Tony Jonesee959b02008-02-22 00:13:36 +01003465static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003466
Tony Jonesee959b02008-02-22 00:13:36 +01003467static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3468 char *buf)
Roland Dreieraef9ec32005-11-02 14:07:13 -08003469{
Tony Jonesee959b02008-02-22 00:13:36 +01003470 struct srp_host *host = container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003471
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01003472 return sprintf(buf, "%s\n", host->srp_dev->dev->name);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003473}
3474
Tony Jonesee959b02008-02-22 00:13:36 +01003475static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003476
Tony Jonesee959b02008-02-22 00:13:36 +01003477static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3478 char *buf)
Roland Dreieraef9ec32005-11-02 14:07:13 -08003479{
Tony Jonesee959b02008-02-22 00:13:36 +01003480 struct srp_host *host = container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003481
3482 return sprintf(buf, "%d\n", host->port);
3483}
3484
Tony Jonesee959b02008-02-22 00:13:36 +01003485static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);

static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof(*host), GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	spin_lock_init(&host->target_lock);
	init_completion(&host->released);
	mutex_init(&host->add_target_mutex);
	host->srp_dev = device;
	host->port = port;

	host->dev.class = &srp_class;
	host->dev.parent = device->dev->dma_device;
	dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);

	if (device_register(&host->dev))
		goto free_host;
	if (device_create_file(&host->dev, &dev_attr_add_target))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_ibdev))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_port))
		goto err_class;

	return host;

err_class:
	device_unregister(&host->dev);

free_host:
	kfree(host);

	return NULL;
}

static void srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct srp_host *host;
	int mr_page_shift, p;
	u64 max_pages_per_mr;
	unsigned int flags = 0;

	srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
	if (!srp_dev)
		return;

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 4096 bytes. We're unlikely to build large sglists
	 * out of smaller entries.
	 */
	mr_page_shift		= max(12, ffs(device->attrs.page_size_cap) - 1);
	srp_dev->mr_page_size	= 1 << mr_page_shift;
	srp_dev->mr_page_mask	= ~((u64) srp_dev->mr_page_size - 1);
	max_pages_per_mr	= device->attrs.max_mr_size;
	do_div(max_pages_per_mr, srp_dev->mr_page_size);
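	/*
	 * Worked example (hypothetical HCA limits): page_size_cap =
	 * 0xfffff000 gives mr_page_shift = 12 and mr_page_size = 4096;
	 * with max_mr_size = 0x80000000 (2 GiB), max_pages_per_mr is
	 * 524288 before being clamped to SRP_MAX_PAGES_PER_MR below.
	 */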
	pr_debug("%s: %llu / %u = %llu <> %u\n", __func__,
		 device->attrs.max_mr_size, srp_dev->mr_page_size,
		 max_pages_per_mr, SRP_MAX_PAGES_PER_MR);
	srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
					  max_pages_per_mr);

	srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
			    device->map_phys_fmr && device->unmap_fmr);
	srp_dev->has_fr = (device->attrs.device_cap_flags &
			   IB_DEVICE_MEM_MGT_EXTENSIONS);
	if (!never_register && !srp_dev->has_fmr && !srp_dev->has_fr) {
		dev_warn(&device->dev, "neither FMR nor FR is supported\n");
	} else if (!never_register &&
		   device->attrs.max_mr_size >= 2 * srp_dev->mr_page_size) {
		srp_dev->use_fast_reg = (srp_dev->has_fr &&
					 (!srp_dev->has_fmr || prefer_fr));
		srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
	}

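	/*
	 * Fall back to a PD-wide unsafe global rkey when per-command memory
	 * registration is disabled, not required for every request, or not
	 * supported by the HCA at all.
	 */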
	if (never_register || !register_always ||
	    (!srp_dev->has_fmr && !srp_dev->has_fr))
		flags |= IB_PD_UNSAFE_GLOBAL_RKEY;

	if (srp_dev->use_fast_reg) {
		srp_dev->max_pages_per_mr =
			min_t(u32, srp_dev->max_pages_per_mr,
			      device->attrs.max_fast_reg_page_list_len);
	}
	srp_dev->mr_max_size	= srp_dev->mr_page_size *
				  srp_dev->max_pages_per_mr;
	pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
		 device->name, mr_page_shift, device->attrs.max_mr_size,
		 device->attrs.max_fast_reg_page_list_len,
		 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);

	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd  = ib_alloc_pd(device, flags);
	if (IS_ERR(srp_dev->pd))
		goto free_dev;

	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);
	return;

free_dev:
	kfree(srp_dev);
}

static void srp_remove_one(struct ib_device *device, void *client_data)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	struct srp_target_port *target;

	srp_dev = client_data;
	if (!srp_dev)
		return;

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		device_unregister(&host->dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Remove all target ports.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list)
			srp_queue_remove_work(target);
		spin_unlock(&host->target_lock);

		/*
		 * Wait for tl_err and target port removal tasks.
		 */
		flush_workqueue(system_long_wq);
		flush_workqueue(srp_remove_wq);

		kfree(host);
	}

	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}

static struct srp_function_template ib_srp_transport_functions = {
	.has_rport_state	 = true,
	.reset_timer_if_blocked	 = true,
	.reconnect_delay	 = &srp_reconnect_delay,
	.fast_io_fail_tmo	 = &srp_fast_io_fail_tmo,
	.dev_loss_tmo		 = &srp_dev_loss_tmo,
	.reconnect		 = srp_rport_reconnect,
	.rport_delete		 = srp_rport_delete,
	.terminate_rport_io	 = srp_terminate_io,
};

static int __init srp_init_module(void)
{
	int ret;

	if (srp_sg_tablesize) {
		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
		if (!cmd_sg_entries)
			cmd_sg_entries = srp_sg_tablesize;
	}

	if (!cmd_sg_entries)
		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;

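	/*
	 * The data buffer descriptor count fields in an SRP_CMD information
	 * unit are 8 bits wide, so a single request can carry at most 255
	 * descriptors.
	 */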
	if (cmd_sg_entries > 255) {
		pr_warn("Clamping cmd_sg_entries to 255\n");
		cmd_sg_entries = 255;
	}

	if (!indirect_sg_entries)
		indirect_sg_entries = cmd_sg_entries;
	else if (indirect_sg_entries < cmd_sg_entries) {
		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
			cmd_sg_entries);
		indirect_sg_entries = cmd_sg_entries;
	}

	srp_remove_wq = create_workqueue("srp_remove");
	if (!srp_remove_wq) {
		ret = -ENOMEM;
		goto out;
	}

	ret = -ENOMEM;
	ib_srp_transport_template =
		srp_attach_transport(&ib_srp_transport_functions);
	if (!ib_srp_transport_template)
		goto destroy_wq;

	ret = class_register(&srp_class);
	if (ret) {
		pr_err("couldn't register class infiniband_srp\n");
		goto release_tr;
	}

	ib_sa_register_client(&srp_sa_client);

	ret = ib_register_client(&srp_client);
	if (ret) {
		pr_err("couldn't register IB client\n");
		goto unreg_sa;
	}

out:
	return ret;

unreg_sa:
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);

release_tr:
	srp_release_transport(ib_srp_transport_template);

destroy_wq:
	destroy_workqueue(srp_remove_wq);
	goto out;
}

static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);
	srp_release_transport(ib_srp_transport_template);
	destroy_workqueue(srp_remove_wq);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);
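
/*
 * Example (illustrative, assuming the module_param declarations earlier in
 * this file): the scatter/gather limits can be tuned at load time, e.g.
 * "modprobe ib_srp cmd_sg_entries=64 indirect_sg_entries=512".
 */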