/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <rdma/ib_cache.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"1.0"
#define DRV_RELDATE	"July 1, 2013"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_INFO(release_date, DRV_RELDATE);

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool prefer_fr;
static bool register_always;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(prefer_fr, bool, 0444);
MODULE_PARM_DESC(prefer_fr,
"Whether to use fast registration if both FMR and fast registration are supported");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
		 "Use memory registration even for contiguous memory regions");

static struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");

static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");

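/*
 * All of the above parameters can be given at load time, e.g.
 * "modprobe ib_srp cmd_sg_entries=32 ch_count=2". The three timeout
 * parameters registered with module_param_cb() are additionally writable at
 * runtime through /sys/module/ib_srp/parameters/.
 */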
static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr);
static void srp_send_completion(struct ib_cq *cq, void *ch_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

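/*
 * srp_tmo_get() and srp_tmo_set() implement the kernel_param_ops callbacks
 * for the reconnect_delay, fast_io_fail_tmo and dev_loss_tmo parameters.
 * "off" is stored internally as -1 and srp_tmo_valid() rejects combinations
 * of the three timeouts that the SRP transport layer cannot honor.
 */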
static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sprintf(buffer, "%d", tmo);
	else
		return sprintf(buffer, "off");
}

static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	if (strncmp(val, "off", 3) != 0) {
		res = kstrtoint(val, 0, &tmo);
		if (res)
			goto out;
	} else {
		tmo = -1;
	}
	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}

static struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

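/*
 * An SRP information unit (IU) is a message buffer posted on a send or
 * receive queue. srp_alloc_iu() allocates the buffer and DMA-maps it in the
 * requested direction; srp_free_iu() reverses both steps.
 */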
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

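/*
 * srp_init_qp() moves a freshly created queue pair to the INIT state and
 * programs its P_Key index, port number and remote access flags; the
 * connection manager later transitions the QP to RTR/RTS while the
 * connection is being established.
 */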
static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %s (%d)\n",
		 ib_event_msg(event->event), event->event);
}

static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, ch);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (ch->cm_id)
		ib_destroy_cm_id(ch->cm_id);
	ch->cm_id = new_cm_id;
	ch->path.sgid = target->sgid;
	ch->path.dgid = target->orig_dgid;
	ch->path.pkey = target->pkey;
	ch->path.service_id = target->service_id;

	return 0;
}

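/*
 * srp_alloc_fmr_pool() sizes the FMR pool from the SCSI host can_queue
 * value so that every outstanding command can own one mapping. The
 * srp_*_fr_pool() functions below provide the equivalent pool of fast
 * registration (FR) descriptors for devices used in fast registration mode.
 */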
static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_fmr_pool_param fmr_param;

	memset(&fmr_param, 0, sizeof(fmr_param));
	fmr_param.pool_size	    = target->scsi_host->can_queue;
	fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
	fmr_param.cache		    = 1;
	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
	fmr_param.page_shift	    = ilog2(dev->mr_page_size);
	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_WRITE |
				       IB_ACCESS_REMOTE_READ);

	return ib_create_fmr_pool(dev->pd, &fmr_param);
}

/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
	int i;
	struct srp_fr_desc *d;

	if (!pool)
		return;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		if (d->frpl)
			ib_free_fast_reg_page_list(d->frpl);
		if (d->mr)
			ib_dereg_mr(d->mr);
	}
	kfree(pool);
}

/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
					      struct ib_pd *pd, int pool_size,
					      int max_page_list_len)
{
	struct srp_fr_pool *pool;
	struct srp_fr_desc *d;
	struct ib_mr *mr;
	struct ib_fast_reg_page_list *frpl;
	int i, ret = -EINVAL;

	if (pool_size <= 0)
		goto err;
	ret = -ENOMEM;
	pool = kzalloc(sizeof(struct srp_fr_pool) +
		       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
	if (!pool)
		goto err;
	pool->size = pool_size;
	pool->max_page_list_len = max_page_list_len;
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free_list);

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		mr = ib_alloc_fast_reg_mr(pd, max_page_list_len);
		if (IS_ERR(mr)) {
			ret = PTR_ERR(mr);
			goto destroy_pool;
		}
		d->mr = mr;
		frpl = ib_alloc_fast_reg_page_list(device, max_page_list_len);
		if (IS_ERR(frpl)) {
			ret = PTR_ERR(frpl);
			goto destroy_pool;
		}
		d->frpl = frpl;
		list_add_tail(&d->entry, &pool->free_list);
	}

out:
	return pool;

destroy_pool:
	srp_destroy_fr_pool(pool);

err:
	pool = ERR_PTR(ret);
	goto out;
}

/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
	struct srp_fr_desc *d = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->free_list)) {
		d = list_first_entry(&pool->free_list, typeof(*d), entry);
		list_del(&d->entry);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return d;
}

/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
			    int n)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < n; i++)
		list_add(&desc[i]->entry, &pool->free_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;

	return srp_create_fr_pool(dev->dev, dev->pd,
				  target->scsi_host->can_queue,
				  dev->max_pages_per_mr);
}

/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @ch: SRP RDMA channel.
 *
 * Change a queue pair into the error state and wait until all receive
 * completions have been processed before destroying it. This avoids that
 * the receive completion handler can access the queue pair while it is
 * being destroyed.
 */
static void srp_destroy_qp(struct srp_rdma_ch *ch)
{
	static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID };
	struct ib_recv_wr *bad_wr;
	int ret;

	/* Destroying a QP and reusing ch->done is only safe if not connected */
	WARN_ON_ONCE(ch->connected);

	ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
	WARN_ONCE(ret, "ib_modify_qp() returned %d\n", ret);
	if (ret)
		goto out;

	init_completion(&ch->done);
	ret = ib_post_recv(ch->qp, &wr, &bad_wr);
	WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
	if (ret == 0)
		wait_for_completion(&ch->done);

out:
	ib_destroy_qp(ch->qp);
}

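/*
 * srp_create_ch_ib() (re)creates the IB resources of one RDMA channel: a
 * receive CQ, a send CQ, the RC queue pair and, depending on the device
 * capabilities, either an FR descriptor pool or an FMR pool. Resources that
 * already existed for the channel are only destroyed after the new ones
 * have been allocated successfully.
 */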
static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	struct ib_fmr_pool *fmr_pool = NULL;
	struct srp_fr_pool *fr_pool = NULL;
	const int m = 1 + dev->use_fast_reg;
	struct ib_cq_init_attr cq_attr = {};
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	/* + 1 for SRP_LAST_WR_ID */
	cq_attr.cqe = target->queue_size + 1;
	cq_attr.comp_vector = ch->comp_vector;
	recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
			       &cq_attr);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	cq_attr.cqe = m * target->queue_size;
	cq_attr.comp_vector = ch->comp_vector;
	send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
			       &cq_attr);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = m * target->queue_size;
	init_attr->cap.max_recv_wr     = target->queue_size + 1;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = 1;
	init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = send_cq;
	init_attr->recv_cq             = recv_cq;

	qp = ib_create_qp(dev->pd, init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, qp);
	if (ret)
		goto err_qp;

	if (dev->use_fast_reg && dev->has_fr) {
		fr_pool = srp_alloc_fr_pool(target);
		if (IS_ERR(fr_pool)) {
			ret = PTR_ERR(fr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
		ch->fr_pool = fr_pool;
	} else if (!dev->use_fast_reg && dev->has_fmr) {
		fmr_pool = srp_alloc_fmr_pool(target);
		if (IS_ERR(fmr_pool)) {
			ret = PTR_ERR(fmr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FMR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
		ch->fmr_pool = fmr_pool;
	}

	if (ch->qp)
		srp_destroy_qp(ch);
	if (ch->recv_cq)
		ib_destroy_cq(ch->recv_cq);
	if (ch->send_cq)
		ib_destroy_cq(ch->send_cq);

	ch->qp = qp;
	ch->recv_cq = recv_cq;
	ch->send_cq = send_cq;

	kfree(init_attr);
	return 0;

err_qp:
	ib_destroy_qp(qp);

err_send_cq:
	ib_destroy_cq(send_cq);

err_recv_cq:
	ib_destroy_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}

/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
			   struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	int i;

	if (!ch->target)
		return;

	if (ch->cm_id) {
		ib_destroy_cm_id(ch->cm_id);
		ch->cm_id = NULL;
	}

	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */
	if (!ch->qp)
		return;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
	} else {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
	}
	srp_destroy_qp(ch);
	ib_destroy_cq(ch->send_cq);
	ib_destroy_cq(ch->recv_cq);

	/*
	 * Avoid that the SCSI error handler tries to use this channel after
	 * it has been freed. The SCSI error handler can namely continue
	 * trying to perform recovery actions after scsi_remove_host()
	 * returned.
	 */
	ch->target = NULL;

	ch->qp = NULL;
	ch->send_cq = ch->recv_cq = NULL;

	if (ch->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->rx_ring[i]);
		kfree(ch->rx_ring);
		ch->rx_ring = NULL;
	}
	if (ch->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->tx_ring[i]);
		kfree(ch->tx_ring);
		ch->tx_ring = NULL;
	}
}

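/*
 * Path record lookup: srp_lookup_path() issues an asynchronous SA path
 * record query and waits for srp_path_rec_completion(), which stores either
 * the resolved path or the failure status in the channel before completing
 * ch->done.
 */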
static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *ch_ptr)
{
	struct srp_rdma_ch *ch = ch_ptr;
	struct srp_target_port *target = ch->target;

	ch->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		ch->path = *pathrec;
	complete(&ch->done);
}

static int srp_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	ch->path.numb_path = 1;

	init_completion(&ch->done);

	ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
					       target->srp_host->srp_dev->dev,
					       target->srp_host->port,
					       &ch->path,
					       IB_SA_PATH_REC_SERVICE_ID |
					       IB_SA_PATH_REC_DGID	 |
					       IB_SA_PATH_REC_SGID	 |
					       IB_SA_PATH_REC_NUMB_PATH	 |
					       IB_SA_PATH_REC_PKEY,
					       SRP_PATH_REC_TIMEOUT_MS,
					       GFP_KERNEL,
					       srp_path_rec_completion,
					       ch, &ch->path_query);
	if (ch->path_query_id < 0)
		return ch->path_query_id;

	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		return ret;

	if (ch->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return ch->status;
}

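/*
 * srp_send_req() builds the SRP_LOGIN_REQ message and sends it as the
 * private data of an IB CM REQ on the channel's CM ID. The multich argument
 * selects SRP_MULTICHAN_MULTI for every channel after the first one, which
 * asks the target to keep the channels that have already been established.
 */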
static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path	      = &ch->path;
	req->param.alternate_path     = NULL;
	req->param.service_id	      = target->service_id;
	req->param.qp_num	      = ch->qp->qp_num;
	req->param.qp_type	      = ch->qp->qp_type;
	req->param.private_data	      = &req->priv;
	req->param.private_data_len   = sizeof req->priv;
	req->param.flow_control	      = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn      &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count                = target->tl_retry_count;
	req->param.rnr_retry_count	      = 7;
	req->param.max_cm_retries	      = 15;

	req->priv.opcode	= SRP_LOGIN_REQ;
	req->priv.tag		= 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	req->priv.req_flags	= (multich ? SRP_MULTICHAN_MULTI :
				   SRP_MULTICHAN_SINGLE);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(ch->cm_id, &req->param);

	kfree(req);

	return status;
}

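/*
 * srp_queue_remove_work() atomically moves the target to the
 * SRP_TARGET_REMOVED state and, only on the first such transition, queues
 * srp_remove_work() on srp_remove_wq so that the target is torn down
 * exactly once.
 */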
static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(srp_remove_wq, &target->remove_work);

	return changed;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	/* XXX should send SRP_I_LOGOUT request */

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ch->connected = false;
		if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "Sending CM DREQ failed\n");
		}
	}
}

static void srp_free_req_data(struct srp_target_port *target,
			      struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct srp_request *req;
	int i;

	if (!ch->req_ring)
		return;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		if (dev->use_fast_reg)
			kfree(req->fr_list);
		else
			kfree(req->fmr_list);
		kfree(req->map_page);
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}

	kfree(ch->req_ring);
	ch->req_ring = NULL;
}

static int srp_alloc_req_data(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *srp_dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	struct srp_request *req;
	void *mr_list;
	dma_addr_t dma_addr;
	int i, ret = -ENOMEM;

	ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
			       GFP_KERNEL);
	if (!ch->req_ring)
		goto out;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
				  GFP_KERNEL);
		if (!mr_list)
			goto out;
		if (srp_dev->use_fast_reg)
			req->fr_list = mr_list;
		else
			req->fmr_list = mr_list;
		req->map_page = kmalloc(srp_dev->max_pages_per_mr *
					sizeof(void *), GFP_KERNEL);
		if (!req->map_page)
			goto out;
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->indirect_desc)
			goto out;

		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr))
			goto out;

		req->indirect_dma_addr = dma_addr;
	}
	ret = 0;

out:
	return ret;
}

/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template and that did not exist
 * before invocation of this function will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	struct device_attribute **attr;

	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
		device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_rport_get(target->rport);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_stop_rport_timers(target->rport);
	srp_disconnect_target(target);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
	}
	cancel_work_sync(&target->tl_err_work);
	srp_rport_put(target->rport);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_req_data(target, ch);
	}
	kfree(target->ch);
	target->ch = NULL;

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_host_put(target->scsi_host);
}

static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_remove_target(target);
}

static void srp_rport_delete(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
}

/**
 * srp_connected_ch() - number of connected channels
 * @target: SRP target port.
 */
static int srp_connected_ch(struct srp_target_port *target)
{
	int i, c = 0;

	for (i = 0; i < target->ch_count; i++)
		c += target->ch[i].connected;

	return c;
}

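/*
 * srp_connect_ch() performs the SRP login for one channel: it resolves the
 * path, sends the CM REQ and then acts on the status stored in ch->status
 * by the CM event handler, retrying the path lookup on a port redirect,
 * continuing on a DLID redirect and giving up on a stale connection.
 */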
static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	int ret;

	WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);

	ret = srp_lookup_path(ch);
	if (ret)
		return ret;

	while (1) {
		init_completion(&ch->done);
		ret = srp_send_req(ch, multich);
		if (ret)
			return ret;
		ret = wait_for_completion_interruptible(&ch->done);
		if (ret < 0)
			return ret;

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (ch->status) {
		case 0:
			ch->connected = true;
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(ch);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "giving up on stale connection\n");
			ch->status = -ECONNRESET;
			return ch->status;

		default:
			return ch->status;
		}
	}
}

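/*
 * srp_inv_rkey() posts an IB_WR_LOCAL_INV work request that invalidates the
 * rkey of a fast registration descriptor. It is the counterpart of the
 * IB_WR_FAST_REG_MR work requests posted by srp_map_finish_fr() and is
 * issued from srp_unmap_data() before descriptors are returned to the pool.
 */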
static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
{
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr = {
		.opcode		    = IB_WR_LOCAL_INV,
		.wr_id		    = LOCAL_INV_WR_ID_MASK,
		.next		    = NULL,
		.num_sge	    = 0,
		.send_flags	    = 0,
		.ex.invalidate_rkey = rkey,
	};

	return ib_post_send(ch->qp, &wr, &bad_wr);
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_rdma_ch *ch,
			   struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	int i, res;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (dev->use_fast_reg) {
		struct srp_fr_desc **pfr;

		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
			res = srp_inv_rkey(ch, (*pfr)->mr->rkey);
			if (res < 0) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "Queueing INV WR for rkey %#x failed (%d)\n",
					     (*pfr)->mr->rkey, res);
				queue_work(system_long_wq,
					   &target->tl_err_work);
			}
		}
		if (req->nmdesc)
			srp_fr_pool_put(ch->fr_pool, req->fr_list,
					req->nmdesc);
	} else {
		struct ib_pool_fmr **pfmr;

		for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
			ib_fmr_pool_unmap(*pfmr);
	}

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}

/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @ch: SRP RDMA channel.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
				       struct srp_request *req,
				       struct scsi_device *sdev,
				       struct scsi_cmnd *scmnd)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	if (req->scmnd &&
	    (!sdev || req->scmnd->device == sdev) &&
	    (!scmnd || req->scmnd == scmnd)) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	} else {
		scmnd = NULL;
	}
	spin_unlock_irqrestore(&ch->lock, flags);

	return scmnd;
}

/**
 * srp_free_req() - Unmap data and add request to the free request list.
 * @ch: SRP RDMA channel.
 * @req: Request to be freed.
 * @scmnd: SCSI command associated with @req.
 * @req_lim_delta: Amount to be added to @target->req_lim.
 */
static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
			 struct scsi_cmnd *scmnd, s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(scmnd, ch, req);

	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_lim_delta;
	spin_unlock_irqrestore(&ch->lock, flags);
}

static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
			   struct scsi_device *sdev, int result)
{
	struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);

	if (scmnd) {
		srp_free_req(ch, req, scmnd, 0);
		scmnd->result = result;
		scmnd->scsi_done(scmnd);
	}
}

static void srp_terminate_io(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	struct Scsi_Host *shost = target->scsi_host;
	struct scsi_device *sdev;
	int i, j;

	/*
	 * Invoking srp_terminate_io() while srp_queuecommand() is running
	 * is not safe. Hence the warning statement below.
	 */
	shost_for_each_device(sdev, shost)
		WARN_ON_ONCE(sdev->request_queue->request_fn_active);

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];

		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL,
				       DID_TRANSPORT_FAILFAST << 16);
		}
	}
}

/*
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to realize that is not to call this function
 * directly but to call srp_reconnect_rport() instead since that last function
 * serializes calls of this function via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
 */
static int srp_rport_reconnect(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	int i, j, ret = 0;
	bool multich = false;

	srp_disconnect_target(target);

	if (target->state == SRP_TARGET_SCANNING)
		return -ENODEV;

	/*
	 * Now get a new local CM ID so that we avoid confusing the target in
	 * case things are really fouled up. Doing so also ensures that all CM
	 * callbacks will have finished before a new QP is allocated.
	 */
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ret += srp_new_cm_id(ch);
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL, DID_RESET << 16);
		}
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		/*
		 * Whether or not creating a new CM ID succeeded, create a new
		 * QP. This guarantees that all completion callback function
		 * invocations have finished before request resetting starts.
		 */
		ret += srp_create_ch_ib(ch);

		INIT_LIST_HEAD(&ch->free_tx);
		for (j = 0; j < target->queue_size; ++j)
			list_add(&ch->tx_ring[j]->list, &ch->free_tx);
	}

	target->qp_in_error = false;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		if (ret)
			break;
		ret = srp_connect_ch(ch, multich);
		multich = true;
	}

	if (ret == 0)
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "reconnect succeeded\n");

	return ret;
}

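/*
 * Memory descriptor mapping: srp_map_desc() appends one SRP direct buffer
 * descriptor to the mapping state, while srp_map_finish_fmr() and
 * srp_map_finish_fr() convert the pages accumulated in the state into a
 * single rkey, either by mapping them through the FMR pool or by posting a
 * fast registration work request.
 */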
static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
			 unsigned int dma_len, u32 rkey)
{
	struct srp_direct_buf *desc = state->desc;

	desc->va = cpu_to_be64(dma_addr);
	desc->key = cpu_to_be32(rkey);
	desc->len = cpu_to_be32(dma_len);

	state->total_len += dma_len;
	state->desc++;
	state->ndesc++;
}

static int srp_map_finish_fmr(struct srp_map_state *state,
			      struct srp_rdma_ch *ch)
{
	struct ib_pool_fmr *fmr;
	u64 io_addr = 0;

	fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
				   state->npages, io_addr);
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

	*state->next_fmr++ = fmr;
	state->nmdesc++;

	srp_map_desc(state, 0, state->dma_len, fmr->fmr->rkey);

	return 0;
}

static int srp_map_finish_fr(struct srp_map_state *state,
			     struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr;
	struct srp_fr_desc *desc;
	u32 rkey;

	desc = srp_fr_pool_get(ch->fr_pool);
	if (!desc)
		return -ENOMEM;

	rkey = ib_inc_rkey(desc->mr->rkey);
	ib_update_fast_reg_key(desc->mr, rkey);

	memcpy(desc->frpl->page_list, state->pages,
	       sizeof(state->pages[0]) * state->npages);

	memset(&wr, 0, sizeof(wr));
	wr.opcode = IB_WR_FAST_REG_MR;
	wr.wr_id = FAST_REG_WR_ID_MASK;
	wr.wr.fast_reg.iova_start = state->base_dma_addr;
	wr.wr.fast_reg.page_list = desc->frpl;
	wr.wr.fast_reg.page_list_len = state->npages;
	wr.wr.fast_reg.page_shift = ilog2(dev->mr_page_size);
	wr.wr.fast_reg.length = state->dma_len;
	wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_READ |
				       IB_ACCESS_REMOTE_WRITE);
	wr.wr.fast_reg.rkey = desc->mr->lkey;

	*state->next_fr++ = desc;
	state->nmdesc++;

	srp_map_desc(state, state->base_dma_addr, state->dma_len,
		     desc->mr->rkey);

	return ib_post_send(ch->qp, &wr, &bad_wr);
}

static int srp_finish_mapping(struct srp_map_state *state,
			      struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret = 0;

	if (state->npages == 0)
		return 0;

	if (state->npages == 1 && !register_always)
		srp_map_desc(state, state->base_dma_addr, state->dma_len,
			     target->rkey);
	else
		ret = target->srp_host->srp_dev->use_fast_reg ?
Bart Van Assche509c07b2014-10-30 14:48:30 +01001349 srp_map_finish_fr(state, ch) :
1350 srp_map_finish_fmr(state, ch);
Bart Van Assche539dde62014-05-20 15:05:46 +02001351
1352 if (ret == 0) {
1353 state->npages = 0;
Bart Van Assche52ede082014-05-20 15:07:45 +02001354 state->dma_len = 0;
Bart Van Assche539dde62014-05-20 15:05:46 +02001355 }
1356
1357 return ret;
1358}
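/*
 * Illustrative note (not part of the driver logic above, and assuming the
 * register_always module parameter is left at 0): srp_finish_mapping()
 * resolves to one of three outcomes. No accumulated pages means nothing to
 * do; a single page with register_always off is described with a plain
 * direct descriptor that uses the global rkey and needs no registration;
 * anything else is registered through srp_map_finish_fr() or
 * srp_map_finish_fmr(), depending on whether the device uses fast
 * registration.
 */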
1359
David Dillow8f26c9f2011-01-14 19:45:50 -05001360static void srp_map_update_start(struct srp_map_state *state,
1361 struct scatterlist *sg, int sg_index,
1362 dma_addr_t dma_addr)
1363{
1364 state->unmapped_sg = sg;
1365 state->unmapped_index = sg_index;
1366 state->unmapped_addr = dma_addr;
1367}
1368
1369static int srp_map_sg_entry(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001370 struct srp_rdma_ch *ch,
David Dillow8f26c9f2011-01-14 19:45:50 -05001371 struct scatterlist *sg, int sg_index,
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001372 bool use_mr)
David Dillow8f26c9f2011-01-14 19:45:50 -05001373{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001374 struct srp_target_port *target = ch->target;
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01001375 struct srp_device *dev = target->srp_host->srp_dev;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001376 struct ib_device *ibdev = dev->dev;
David Dillow8f26c9f2011-01-14 19:45:50 -05001377 dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
1378 unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
1379 unsigned int len;
1380 int ret;
Roland Dreierf5358a12006-06-17 20:37:29 -07001381
David Dillow8f26c9f2011-01-14 19:45:50 -05001382 if (!dma_len)
1383 return 0;
Roland Dreierf5358a12006-06-17 20:37:29 -07001384
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001385 if (!use_mr) {
1386 /*
1387 * Once we're in direct map mode for a request, we don't
1388 * go back to FMR or FR mode, so no need to update anything
David Dillow8f26c9f2011-01-14 19:45:50 -05001389 * other than the descriptor.
1390 */
1391 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1392 return 0;
1393 }
Ishai Rabinovitz559ce8f2006-08-03 10:35:43 -07001394
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001395 /*
1396 * Since not all RDMA HW drivers support non-zero page offsets for
1397 * FMR, if we start at an offset into a page, don't merge into the
1398 * current FMR mapping. Finish it out, and use the kernel's MR for
1399 * this sg entry.
David Dillow8f26c9f2011-01-14 19:45:50 -05001400 */
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001401 if ((!dev->use_fast_reg && dma_addr & ~dev->mr_page_mask) ||
1402 dma_len > dev->mr_max_size) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001403 ret = srp_finish_mapping(state, ch);
David Dillow8f26c9f2011-01-14 19:45:50 -05001404 if (ret)
1405 return ret;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001406
David Dillow8f26c9f2011-01-14 19:45:50 -05001407 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1408 srp_map_update_start(state, NULL, 0, 0);
1409 return 0;
1410 }
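/*
 * Worked example for the check above (values assumed purely for
 * illustration, not taken from any particular HCA): with
 * mr_page_size = 4096, dev->mr_page_mask is ~0xfff. An FMR-mode sg entry
 * starting at dma_addr = 0x12345a00 gives
 * (dma_addr & ~dev->mr_page_mask) = 0xa00 != 0, so any mapping built so
 * far is finished out and this entry falls back to a direct descriptor
 * using target->rkey instead of being merged.
 */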
1411
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001412 /*
1413 * If this is the first sg that will be mapped via FMR or via FR, save
1414 * our position. We need to know the first unmapped entry, its index,
1415 * and the first unmapped address within that entry to be able to
1416 * restart mapping after an error.
David Dillow8f26c9f2011-01-14 19:45:50 -05001417 */
1418 if (!state->unmapped_sg)
1419 srp_map_update_start(state, sg, sg_index, dma_addr);
1420
1421 while (dma_len) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001422 unsigned offset = dma_addr & ~dev->mr_page_mask;
1423 if (state->npages == dev->max_pages_per_mr || offset != 0) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001424 ret = srp_finish_mapping(state, ch);
David Dillow8f26c9f2011-01-14 19:45:50 -05001425 if (ret)
1426 return ret;
1427
1428 srp_map_update_start(state, sg, sg_index, dma_addr);
Roland Dreierf5358a12006-06-17 20:37:29 -07001429 }
1430
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001431 len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
David Dillow8f26c9f2011-01-14 19:45:50 -05001432
1433 if (!state->npages)
1434 state->base_dma_addr = dma_addr;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001435 state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
Bart Van Assche52ede082014-05-20 15:07:45 +02001436 state->dma_len += len;
David Dillow8f26c9f2011-01-14 19:45:50 -05001437 dma_addr += len;
1438 dma_len -= len;
Roland Dreierf5358a12006-06-17 20:37:29 -07001439 }
1440
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001441 /*
1442 * If the last entry of the MR wasn't a full page, then we need to
David Dillow8f26c9f2011-01-14 19:45:50 -05001443 * close it out and start a new one -- we can only merge at page
1444	 * boundaries.
1445 */
Roland Dreierf5358a12006-06-17 20:37:29 -07001446 ret = 0;
Bart Van Assche52ede082014-05-20 15:07:45 +02001447 if (len != dev->mr_page_size) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001448 ret = srp_finish_mapping(state, ch);
David Dillow8f26c9f2011-01-14 19:45:50 -05001449 if (!ret)
1450 srp_map_update_start(state, NULL, 0, 0);
1451 }
Roland Dreierf5358a12006-06-17 20:37:29 -07001452 return ret;
1453}
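/*
 * Worked example for the paging loop above (page size assumed for
 * illustration): with mr_page_size = 4096, a page-aligned sg entry of
 * dma_len = 10240 bytes is consumed in chunks of 4096, 4096 and 2048
 * bytes. The first two chunks fill whole pages; because the final chunk
 * is shorter than mr_page_size, the mapping is closed via
 * srp_finish_mapping(), since merging with a following entry is only
 * possible at page boundaries.
 */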
1454
Bart Van Assche509c07b2014-10-30 14:48:30 +01001455static int srp_map_sg(struct srp_map_state *state, struct srp_rdma_ch *ch,
1456 struct srp_request *req, struct scatterlist *scat,
1457 int count)
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001458{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001459 struct srp_target_port *target = ch->target;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001460 struct srp_device *dev = target->srp_host->srp_dev;
1461 struct ib_device *ibdev = dev->dev;
1462 struct scatterlist *sg;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001463 int i;
1464 bool use_mr;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001465
1466 state->desc = req->indirect_desc;
1467 state->pages = req->map_page;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001468 if (dev->use_fast_reg) {
1469 state->next_fr = req->fr_list;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001470 use_mr = !!ch->fr_pool;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001471 } else {
1472 state->next_fmr = req->fmr_list;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001473 use_mr = !!ch->fmr_pool;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001474 }
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001475
1476 for_each_sg(scat, sg, count, i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001477 if (srp_map_sg_entry(state, ch, sg, i, use_mr)) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001478 /*
1479 * Memory registration failed, so backtrack to the
1480 * first unmapped entry and continue on without using
1481 * memory registration.
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001482 */
1483 dma_addr_t dma_addr;
1484 unsigned int dma_len;
1485
1486backtrack:
1487 sg = state->unmapped_sg;
1488 i = state->unmapped_index;
1489
1490 dma_addr = ib_sg_dma_address(ibdev, sg);
1491 dma_len = ib_sg_dma_len(ibdev, sg);
1492 dma_len -= (state->unmapped_addr - dma_addr);
1493 dma_addr = state->unmapped_addr;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001494 use_mr = false;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001495 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1496 }
1497 }
1498
Bart Van Assche509c07b2014-10-30 14:48:30 +01001499 if (use_mr && srp_finish_mapping(state, ch))
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001500 goto backtrack;
1501
Bart Van Assche52ede082014-05-20 15:07:45 +02001502 req->nmdesc = state->nmdesc;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001503
1504 return 0;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001505}
1506
Bart Van Assche509c07b2014-10-30 14:48:30 +01001507static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
Roland Dreieraef9ec32005-11-02 14:07:13 -08001508 struct srp_request *req)
1509{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001510 struct srp_target_port *target = ch->target;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001511 struct scatterlist *scat;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001512 struct srp_cmd *cmd = req->cmd->buf;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001513 int len, nents, count;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001514 struct srp_device *dev;
1515 struct ib_device *ibdev;
David Dillow8f26c9f2011-01-14 19:45:50 -05001516 struct srp_map_state state;
1517 struct srp_indirect_buf *indirect_hdr;
David Dillow8f26c9f2011-01-14 19:45:50 -05001518 u32 table_len;
1519 u8 fmt;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001520
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001521 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001522 return sizeof (struct srp_cmd);
1523
1524 if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1525 scmnd->sc_data_direction != DMA_TO_DEVICE) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001526 shost_printk(KERN_WARNING, target->scsi_host,
1527 PFX "Unhandled data direction %d\n",
1528 scmnd->sc_data_direction);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001529 return -EINVAL;
1530 }
1531
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001532 nents = scsi_sg_count(scmnd);
1533 scat = scsi_sglist(scmnd);
Roland Dreiercf368712006-03-24 15:47:26 -08001534
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01001535 dev = target->srp_host->srp_dev;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001536 ibdev = dev->dev;
1537
1538 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
David Dillow8f26c9f2011-01-14 19:45:50 -05001539 if (unlikely(count == 0))
1540 return -EIO;
Roland Dreierf5358a12006-06-17 20:37:29 -07001541
1542 fmt = SRP_DATA_DESC_DIRECT;
1543 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
Roland Dreiercf368712006-03-24 15:47:26 -08001544
Bart Van Asscheb1b88542014-05-20 15:06:41 +02001545 if (count == 1 && !register_always) {
Roland Dreierf5358a12006-06-17 20:37:29 -07001546 /*
1547 * The midlayer only generated a single gather/scatter
1548 * entry, or DMA mapping coalesced everything to a
1549 * single entry. So a direct descriptor along with
1550 * the DMA MR suffices.
1551 */
Roland Dreieraef9ec32005-11-02 14:07:13 -08001552 struct srp_direct_buf *buf = (void *) cmd->add_data;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001553
Ralph Campbell85507bc2006-12-12 14:30:55 -08001554 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
David Dillow9af76272010-11-26 15:34:46 -05001555 buf->key = cpu_to_be32(target->rkey);
Ralph Campbell85507bc2006-12-12 14:30:55 -08001556 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
Roland Dreiercf368712006-03-24 15:47:26 -08001557
Bart Van Assche52ede082014-05-20 15:07:45 +02001558 req->nmdesc = 0;
David Dillow8f26c9f2011-01-14 19:45:50 -05001559 goto map_complete;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001560 }
1561
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001562 /*
1563 * We have more than one scatter/gather entry, so build our indirect
1564 * descriptor table, trying to merge as many entries as we can.
David Dillow8f26c9f2011-01-14 19:45:50 -05001565 */
1566 indirect_hdr = (void *) cmd->add_data;
1567
David Dillowc07d4242011-01-16 13:57:10 -05001568 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1569 target->indirect_size, DMA_TO_DEVICE);
1570
David Dillow8f26c9f2011-01-14 19:45:50 -05001571 memset(&state, 0, sizeof(state));
Bart Van Assche509c07b2014-10-30 14:48:30 +01001572 srp_map_sg(&state, ch, req, scat, count);
David Dillow8f26c9f2011-01-14 19:45:50 -05001573
David Dillowc07d4242011-01-16 13:57:10 -05001574 /* We've mapped the request, now pull as much of the indirect
1575 * descriptor table as we can into the command buffer. If this
1576 * target is not using an external indirect table, we are
1577 * guaranteed to fit into the command, as the SCSI layer won't
1578 * give us more S/G entries than we allow.
David Dillow8f26c9f2011-01-14 19:45:50 -05001579 */
David Dillow8f26c9f2011-01-14 19:45:50 -05001580 if (state.ndesc == 1) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001581 /*
1582 * Memory registration collapsed the sg-list into one entry,
David Dillow8f26c9f2011-01-14 19:45:50 -05001583 * so use a direct descriptor.
1584 */
1585 struct srp_direct_buf *buf = (void *) cmd->add_data;
1586
David Dillowc07d4242011-01-16 13:57:10 -05001587 *buf = req->indirect_desc[0];
David Dillow8f26c9f2011-01-14 19:45:50 -05001588 goto map_complete;
1589 }
1590
David Dillowc07d4242011-01-16 13:57:10 -05001591 if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1592 !target->allow_ext_sg)) {
1593 shost_printk(KERN_ERR, target->scsi_host,
1594 "Could not fit S/G list into SRP_CMD\n");
1595 return -EIO;
1596 }
1597
1598 count = min(state.ndesc, target->cmd_sg_cnt);
David Dillow8f26c9f2011-01-14 19:45:50 -05001599 table_len = state.ndesc * sizeof (struct srp_direct_buf);
1600
1601 fmt = SRP_DATA_DESC_INDIRECT;
1602 len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
David Dillowc07d4242011-01-16 13:57:10 -05001603 len += count * sizeof (struct srp_direct_buf);
David Dillow8f26c9f2011-01-14 19:45:50 -05001604
David Dillowc07d4242011-01-16 13:57:10 -05001605 memcpy(indirect_hdr->desc_list, req->indirect_desc,
1606 count * sizeof (struct srp_direct_buf));
David Dillow8f26c9f2011-01-14 19:45:50 -05001607
David Dillowc07d4242011-01-16 13:57:10 -05001608 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
David Dillow8f26c9f2011-01-14 19:45:50 -05001609 indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
1610 indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1611 indirect_hdr->len = cpu_to_be32(state.total_len);
1612
1613 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
David Dillowc07d4242011-01-16 13:57:10 -05001614 cmd->data_out_desc_cnt = count;
David Dillow8f26c9f2011-01-14 19:45:50 -05001615 else
David Dillowc07d4242011-01-16 13:57:10 -05001616 cmd->data_in_desc_cnt = count;
1617
1618 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1619 DMA_TO_DEVICE);
David Dillow8f26c9f2011-01-14 19:45:50 -05001620
1621map_complete:
Roland Dreieraef9ec32005-11-02 14:07:13 -08001622 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1623 cmd->buf_fmt = fmt << 4;
1624 else
1625 cmd->buf_fmt = fmt;
1626
Roland Dreieraef9ec32005-11-02 14:07:13 -08001627 return len;
1628}
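/*
 * Rough sketch of the length srp_map_data() returns (struct sizes are
 * indicative only and depend on the SRP headers in this tree): a single
 * coalesced sg entry yields sizeof(struct srp_cmd) plus one
 * struct srp_direct_buf, whereas an indirect table with, say, three
 * descriptors that all fit in the command yields
 * sizeof(struct srp_cmd) + sizeof(struct srp_indirect_buf) +
 * 3 * sizeof(struct srp_direct_buf).
 */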
1629
David Dillow05a1d752010-10-08 14:48:14 -04001630/*
Bart Van Assche76c75b22010-11-26 14:37:47 -05001631 * Return an IU and possibly a credit to the free pool
1632 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01001633static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
Bart Van Assche76c75b22010-11-26 14:37:47 -05001634 enum srp_iu_type iu_type)
1635{
1636 unsigned long flags;
1637
Bart Van Assche509c07b2014-10-30 14:48:30 +01001638 spin_lock_irqsave(&ch->lock, flags);
1639 list_add(&iu->list, &ch->free_tx);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001640 if (iu_type != SRP_IU_RSP)
Bart Van Assche509c07b2014-10-30 14:48:30 +01001641 ++ch->req_lim;
1642 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001643}
1644
1645/*
Bart Van Assche509c07b2014-10-30 14:48:30 +01001646 * Must be called with ch->lock held to protect req_lim and free_tx.
Bart Van Asschee9684672010-11-26 15:08:38 -05001647 * If IU is not sent, it must be returned using srp_put_tx_iu().
David Dillow05a1d752010-10-08 14:48:14 -04001648 *
1649 * Note:
1650 * An upper limit for the number of allocated information units for each
1651 * request type is:
1652 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1653 * more than Scsi_Host.can_queue requests.
1654 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1655 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1656 * one unanswered SRP request to an initiator.
1657 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01001658static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
David Dillow05a1d752010-10-08 14:48:14 -04001659 enum srp_iu_type iu_type)
1660{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001661 struct srp_target_port *target = ch->target;
David Dillow05a1d752010-10-08 14:48:14 -04001662 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1663 struct srp_iu *iu;
1664
Bart Van Assche509c07b2014-10-30 14:48:30 +01001665 srp_send_completion(ch->send_cq, ch);
David Dillow05a1d752010-10-08 14:48:14 -04001666
Bart Van Assche509c07b2014-10-30 14:48:30 +01001667 if (list_empty(&ch->free_tx))
David Dillow05a1d752010-10-08 14:48:14 -04001668 return NULL;
1669
1670 /* Initiator responses to target requests do not consume credits */
Bart Van Assche76c75b22010-11-26 14:37:47 -05001671 if (iu_type != SRP_IU_RSP) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001672 if (ch->req_lim <= rsv) {
Bart Van Assche76c75b22010-11-26 14:37:47 -05001673 ++target->zero_req_lim;
1674 return NULL;
1675 }
1676
Bart Van Assche509c07b2014-10-30 14:48:30 +01001677 --ch->req_lim;
David Dillow05a1d752010-10-08 14:48:14 -04001678 }
1679
Bart Van Assche509c07b2014-10-30 14:48:30 +01001680 iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001681 list_del(&iu->list);
David Dillow05a1d752010-10-08 14:48:14 -04001682 return iu;
1683}
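/*
 * Credit accounting example for __srp_get_tx_iu() (the SRP_TSK_MGMT_SQ_SIZE
 * value is assumed here purely for illustration): with ch->req_lim == 1 and
 * SRP_TSK_MGMT_SQ_SIZE == 1, a SRP_IU_CMD allocation is refused
 * (req_lim <= rsv) and target->zero_req_lim is incremented, while a
 * SRP_IU_TSK_MGMT allocation uses rsv == 0 and still succeeds; the last
 * credit therefore stays reserved for task management requests.
 */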
1684
Bart Van Assche509c07b2014-10-30 14:48:30 +01001685static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
David Dillow05a1d752010-10-08 14:48:14 -04001686{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001687 struct srp_target_port *target = ch->target;
David Dillow05a1d752010-10-08 14:48:14 -04001688 struct ib_sge list;
1689 struct ib_send_wr wr, *bad_wr;
David Dillow05a1d752010-10-08 14:48:14 -04001690
1691 list.addr = iu->dma;
1692 list.length = len;
David Dillow9af76272010-11-26 15:34:46 -05001693 list.lkey = target->lkey;
David Dillow05a1d752010-10-08 14:48:14 -04001694
1695 wr.next = NULL;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001696 wr.wr_id = (uintptr_t) iu;
David Dillow05a1d752010-10-08 14:48:14 -04001697 wr.sg_list = &list;
1698 wr.num_sge = 1;
1699 wr.opcode = IB_WR_SEND;
1700 wr.send_flags = IB_SEND_SIGNALED;
1701
Bart Van Assche509c07b2014-10-30 14:48:30 +01001702 return ib_post_send(ch->qp, &wr, &bad_wr);
David Dillow05a1d752010-10-08 14:48:14 -04001703}
1704
Bart Van Assche509c07b2014-10-30 14:48:30 +01001705static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
Bart Van Asschec996bb42010-07-30 10:59:05 +00001706{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001707 struct srp_target_port *target = ch->target;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001708 struct ib_recv_wr wr, *bad_wr;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001709 struct ib_sge list;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001710
1711 list.addr = iu->dma;
1712 list.length = iu->size;
David Dillow9af76272010-11-26 15:34:46 -05001713 list.lkey = target->lkey;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001714
1715 wr.next = NULL;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001716 wr.wr_id = (uintptr_t) iu;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001717 wr.sg_list = &list;
1718 wr.num_sge = 1;
1719
Bart Van Assche509c07b2014-10-30 14:48:30 +01001720 return ib_post_recv(ch->qp, &wr, &bad_wr);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001721}
1722
Bart Van Assche509c07b2014-10-30 14:48:30 +01001723static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001724{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001725 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001726 struct srp_request *req;
1727 struct scsi_cmnd *scmnd;
1728 unsigned long flags;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001729
Roland Dreieraef9ec32005-11-02 14:07:13 -08001730 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001731 spin_lock_irqsave(&ch->lock, flags);
1732 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1733 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche94a91742010-11-26 14:50:09 -05001734
Bart Van Assche509c07b2014-10-30 14:48:30 +01001735 ch->tsk_mgmt_status = -1;
David Dillowf8b6e312010-11-26 13:02:21 -05001736 if (be32_to_cpu(rsp->resp_data_len) >= 4)
Bart Van Assche509c07b2014-10-30 14:48:30 +01001737 ch->tsk_mgmt_status = rsp->data[3];
1738 complete(&ch->tsk_mgmt_done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001739 } else {
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02001740 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1741 if (scmnd) {
1742 req = (void *)scmnd->host_scribble;
1743 scmnd = srp_claim_req(ch, req, NULL, scmnd);
1744 }
Bart Van Assche22032992012-08-14 13:18:53 +00001745 if (!scmnd) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001746 shost_printk(KERN_ERR, target->scsi_host,
Bart Van Assched92c0da2014-10-06 17:14:36 +02001747 "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1748 rsp->tag, ch - target->ch, ch->qp->qp_num);
Bart Van Assche22032992012-08-14 13:18:53 +00001749
Bart Van Assche509c07b2014-10-30 14:48:30 +01001750 spin_lock_irqsave(&ch->lock, flags);
1751 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1752 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche22032992012-08-14 13:18:53 +00001753
1754 return;
1755 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001756 scmnd->result = rsp->status;
1757
1758 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1759 memcpy(scmnd->sense_buffer, rsp->data +
1760 be32_to_cpu(rsp->resp_data_len),
1761 min_t(int, be32_to_cpu(rsp->sense_data_len),
1762 SCSI_SENSE_BUFFERSIZE));
1763 }
1764
Bart Van Asschee7145312014-07-09 15:57:51 +02001765 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001766 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
Bart Van Asschee7145312014-07-09 15:57:51 +02001767 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1768 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1769 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1770 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1771 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1772 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
Roland Dreieraef9ec32005-11-02 14:07:13 -08001773
Bart Van Assche509c07b2014-10-30 14:48:30 +01001774 srp_free_req(ch, req, scmnd,
Bart Van Assche22032992012-08-14 13:18:53 +00001775 be32_to_cpu(rsp->req_lim_delta));
1776
David Dillowf8b6e312010-11-26 13:02:21 -05001777 scmnd->host_scribble = NULL;
1778 scmnd->scsi_done(scmnd);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001779 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001780}
1781
Bart Van Assche509c07b2014-10-30 14:48:30 +01001782static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
David Dillowbb125882010-10-08 14:40:47 -04001783 void *rsp, int len)
1784{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001785 struct srp_target_port *target = ch->target;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001786 struct ib_device *dev = target->srp_host->srp_dev->dev;
David Dillowbb125882010-10-08 14:40:47 -04001787 unsigned long flags;
1788 struct srp_iu *iu;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001789 int err;
David Dillowbb125882010-10-08 14:40:47 -04001790
Bart Van Assche509c07b2014-10-30 14:48:30 +01001791 spin_lock_irqsave(&ch->lock, flags);
1792 ch->req_lim += req_delta;
1793 iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1794 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001795
David Dillowbb125882010-10-08 14:40:47 -04001796 if (!iu) {
1797 shost_printk(KERN_ERR, target->scsi_host, PFX
1798 "no IU available to send response\n");
Bart Van Assche76c75b22010-11-26 14:37:47 -05001799 return 1;
David Dillowbb125882010-10-08 14:40:47 -04001800 }
1801
1802 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1803 memcpy(iu->buf, rsp, len);
1804 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1805
Bart Van Assche509c07b2014-10-30 14:48:30 +01001806 err = srp_post_send(ch, iu, len);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001807 if (err) {
David Dillowbb125882010-10-08 14:40:47 -04001808 shost_printk(KERN_ERR, target->scsi_host, PFX
1809 "unable to post response: %d\n", err);
Bart Van Assche509c07b2014-10-30 14:48:30 +01001810 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001811 }
David Dillowbb125882010-10-08 14:40:47 -04001812
David Dillowbb125882010-10-08 14:40:47 -04001813 return err;
1814}
1815
Bart Van Assche509c07b2014-10-30 14:48:30 +01001816static void srp_process_cred_req(struct srp_rdma_ch *ch,
David Dillowbb125882010-10-08 14:40:47 -04001817 struct srp_cred_req *req)
1818{
1819 struct srp_cred_rsp rsp = {
1820 .opcode = SRP_CRED_RSP,
1821 .tag = req->tag,
1822 };
1823 s32 delta = be32_to_cpu(req->req_lim_delta);
1824
Bart Van Assche509c07b2014-10-30 14:48:30 +01001825 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1826 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
David Dillowbb125882010-10-08 14:40:47 -04001827 "problems processing SRP_CRED_REQ\n");
1828}
1829
Bart Van Assche509c07b2014-10-30 14:48:30 +01001830static void srp_process_aer_req(struct srp_rdma_ch *ch,
David Dillowbb125882010-10-08 14:40:47 -04001831 struct srp_aer_req *req)
1832{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001833 struct srp_target_port *target = ch->target;
David Dillowbb125882010-10-08 14:40:47 -04001834 struct srp_aer_rsp rsp = {
1835 .opcode = SRP_AER_RSP,
1836 .tag = req->tag,
1837 };
1838 s32 delta = be32_to_cpu(req->req_lim_delta);
1839
1840 shost_printk(KERN_ERR, target->scsi_host, PFX
Bart Van Assche985aa492015-05-18 13:27:14 +02001841 "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
David Dillowbb125882010-10-08 14:40:47 -04001842
Bart Van Assche509c07b2014-10-30 14:48:30 +01001843 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
David Dillowbb125882010-10-08 14:40:47 -04001844 shost_printk(KERN_ERR, target->scsi_host, PFX
1845 "problems processing SRP_AER_REQ\n");
1846}
1847
Bart Van Assche509c07b2014-10-30 14:48:30 +01001848static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001849{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001850 struct srp_target_port *target = ch->target;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001851 struct ib_device *dev = target->srp_host->srp_dev->dev;
Roland Dreier737b94e2011-05-23 11:30:04 -07001852 struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001853 int res;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001854 u8 opcode;
1855
Bart Van Assche509c07b2014-10-30 14:48:30 +01001856 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08001857 DMA_FROM_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001858
1859 opcode = *(u8 *) iu->buf;
1860
1861 if (0) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001862 shost_printk(KERN_ERR, target->scsi_host,
1863 PFX "recv completion, opcode 0x%02x\n", opcode);
Bart Van Assche7a700812010-07-29 15:56:37 +00001864 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
1865 iu->buf, wc->byte_len, true);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001866 }
1867
1868 switch (opcode) {
1869 case SRP_RSP:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001870 srp_process_rsp(ch, iu->buf);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001871 break;
1872
David Dillowbb125882010-10-08 14:40:47 -04001873 case SRP_CRED_REQ:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001874 srp_process_cred_req(ch, iu->buf);
David Dillowbb125882010-10-08 14:40:47 -04001875 break;
1876
1877 case SRP_AER_REQ:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001878 srp_process_aer_req(ch, iu->buf);
David Dillowbb125882010-10-08 14:40:47 -04001879 break;
1880
Roland Dreieraef9ec32005-11-02 14:07:13 -08001881 case SRP_T_LOGOUT:
1882 /* XXX Handle target logout */
David Dillow7aa54bd2008-01-07 18:23:41 -05001883 shost_printk(KERN_WARNING, target->scsi_host,
1884 PFX "Got target logout request\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001885 break;
1886
1887 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05001888 shost_printk(KERN_WARNING, target->scsi_host,
1889 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001890 break;
1891 }
1892
Bart Van Assche509c07b2014-10-30 14:48:30 +01001893 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08001894 DMA_FROM_DEVICE);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001895
Bart Van Assche509c07b2014-10-30 14:48:30 +01001896 res = srp_post_recv(ch, iu);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001897 if (res != 0)
1898 shost_printk(KERN_ERR, target->scsi_host,
1899 PFX "Recv failed with error code %d\n", res);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001900}
1901
Bart Van Asschec1120f82013-10-26 14:35:08 +02001902/**
1903 * srp_tl_err_work() - handle a transport layer error
Bart Van Asscheaf246632014-05-20 15:04:21 +02001904 * @work: Work structure embedded in an SRP target port.
Bart Van Asschec1120f82013-10-26 14:35:08 +02001905 *
1906 * Note: This function may get invoked before the rport has been created,
1907 * hence the target->rport test.
1908 */
1909static void srp_tl_err_work(struct work_struct *work)
1910{
1911 struct srp_target_port *target;
1912
1913 target = container_of(work, struct srp_target_port, tl_err_work);
1914 if (target->rport)
1915 srp_start_tl_fail_timers(target->rport);
1916}
1917
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001918static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
Bart Van Assche7dad6b22014-10-21 18:00:35 +02001919 bool send_err, struct srp_rdma_ch *ch)
Bart Van Assche948d1e82011-09-03 09:25:42 +02001920{
Bart Van Assche7dad6b22014-10-21 18:00:35 +02001921 struct srp_target_port *target = ch->target;
1922
1923 if (wr_id == SRP_LAST_WR_ID) {
1924 complete(&ch->done);
1925 return;
1926 }
1927
Bart Van Asschec014c8c2015-05-18 13:23:57 +02001928 if (ch->connected && !target->qp_in_error) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001929 if (wr_id & LOCAL_INV_WR_ID_MASK) {
1930 shost_printk(KERN_ERR, target->scsi_host, PFX
Sagi Grimberg57363d92015-05-18 13:40:29 +03001931 "LOCAL_INV failed with status %s (%d)\n",
1932 ib_wc_status_msg(wc_status), wc_status);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001933 } else if (wr_id & FAST_REG_WR_ID_MASK) {
1934 shost_printk(KERN_ERR, target->scsi_host, PFX
Sagi Grimberg57363d92015-05-18 13:40:29 +03001935 "FAST_REG_MR failed status %s (%d)\n",
1936 ib_wc_status_msg(wc_status), wc_status);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001937 } else {
1938 shost_printk(KERN_ERR, target->scsi_host,
Sagi Grimberg57363d92015-05-18 13:40:29 +03001939 PFX "failed %s status %s (%d) for iu %p\n",
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001940 send_err ? "send" : "receive",
Sagi Grimberg57363d92015-05-18 13:40:29 +03001941 ib_wc_status_msg(wc_status), wc_status,
1942 (void *)(uintptr_t)wr_id);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001943 }
Bart Van Asschec1120f82013-10-26 14:35:08 +02001944 queue_work(system_long_wq, &target->tl_err_work);
Bart Van Assche4f0af692012-11-26 11:16:40 +01001945 }
Bart Van Assche948d1e82011-09-03 09:25:42 +02001946 target->qp_in_error = true;
1947}
1948
Bart Van Assche509c07b2014-10-30 14:48:30 +01001949static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001950{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001951 struct srp_rdma_ch *ch = ch_ptr;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001952 struct ib_wc wc;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001953
1954 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
1955 while (ib_poll_cq(cq, 1, &wc) > 0) {
Bart Van Assche948d1e82011-09-03 09:25:42 +02001956 if (likely(wc.status == IB_WC_SUCCESS)) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001957 srp_handle_recv(ch, &wc);
Bart Van Assche948d1e82011-09-03 09:25:42 +02001958 } else {
Bart Van Assche7dad6b22014-10-21 18:00:35 +02001959 srp_handle_qp_err(wc.wr_id, wc.status, false, ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001960 }
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001961 }
1962}
1963
Bart Van Assche509c07b2014-10-30 14:48:30 +01001964static void srp_send_completion(struct ib_cq *cq, void *ch_ptr)
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001965{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001966 struct srp_rdma_ch *ch = ch_ptr;
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001967 struct ib_wc wc;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001968 struct srp_iu *iu;
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001969
1970 while (ib_poll_cq(cq, 1, &wc) > 0) {
Bart Van Assche948d1e82011-09-03 09:25:42 +02001971 if (likely(wc.status == IB_WC_SUCCESS)) {
1972 iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001973 list_add(&iu->list, &ch->free_tx);
Bart Van Assche948d1e82011-09-03 09:25:42 +02001974 } else {
Bart Van Assche7dad6b22014-10-21 18:00:35 +02001975 srp_handle_qp_err(wc.wr_id, wc.status, true, ch);
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001976 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001977 }
1978}
1979
Bart Van Assche76c75b22010-11-26 14:37:47 -05001980static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001981{
Bart Van Assche76c75b22010-11-26 14:37:47 -05001982 struct srp_target_port *target = host_to_target(shost);
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001983 struct srp_rport *rport = target->rport;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001984 struct srp_rdma_ch *ch;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001985 struct srp_request *req;
1986 struct srp_iu *iu;
1987 struct srp_cmd *cmd;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001988 struct ib_device *dev;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001989 unsigned long flags;
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02001990 u32 tag;
1991 u16 idx;
Bart Van Assched1b42892014-05-20 15:07:20 +02001992 int len, ret;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001993 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
1994
1995 /*
1996 * The SCSI EH thread is the only context from which srp_queuecommand()
1997 * can get invoked for blocked devices (SDEV_BLOCK /
1998 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
1999 * locking the rport mutex if invoked from inside the SCSI EH.
2000 */
2001 if (in_scsi_eh)
2002 mutex_lock(&rport->mutex);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002003
Bart Van Assched1b42892014-05-20 15:07:20 +02002004 scmnd->result = srp_chkready(target->rport);
2005 if (unlikely(scmnd->result))
2006 goto err;
Bart Van Assche2ce19e72013-02-21 17:20:00 +00002007
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002008 WARN_ON_ONCE(scmnd->request->tag < 0);
2009 tag = blk_mq_unique_tag(scmnd->request);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002010 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002011 idx = blk_mq_unique_tag_to_tag(tag);
2012 WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
2013 dev_name(&shost->shost_gendev), tag, idx,
2014 target->req_ring_size);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002015
2016 spin_lock_irqsave(&ch->lock, flags);
2017 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002018 spin_unlock_irqrestore(&ch->lock, flags);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002019
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002020 if (!iu)
2021 goto err;
2022
2023 req = &ch->req_ring[idx];
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002024 dev = target->srp_host->srp_dev->dev;
David Dillow49248642011-01-14 18:23:24 -05002025 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08002026 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002027
David Dillowf8b6e312010-11-26 13:02:21 -05002028 scmnd->host_scribble = (void *) req;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002029
2030 cmd = iu->buf;
2031 memset(cmd, 0, sizeof *cmd);
2032
2033 cmd->opcode = SRP_CMD;
Bart Van Assche985aa492015-05-18 13:27:14 +02002034 int_to_scsilun(scmnd->device->lun, &cmd->lun);
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002035 cmd->tag = tag;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002036 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2037
Roland Dreieraef9ec32005-11-02 14:07:13 -08002038 req->scmnd = scmnd;
2039 req->cmd = iu;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002040
Bart Van Assche509c07b2014-10-30 14:48:30 +01002041 len = srp_map_data(scmnd, ch, req);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002042 if (len < 0) {
David Dillow7aa54bd2008-01-07 18:23:41 -05002043 shost_printk(KERN_ERR, target->scsi_host,
Bart Van Assched1b42892014-05-20 15:07:20 +02002044 PFX "Failed to map data (%d)\n", len);
2045 /*
2046 * If we ran out of memory descriptors (-ENOMEM) because an
2047 * application is queuing many requests with more than
Bart Van Assche52ede082014-05-20 15:07:45 +02002048 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
Bart Van Assched1b42892014-05-20 15:07:20 +02002049 * to reduce queue depth temporarily.
2050 */
2051 scmnd->result = len == -ENOMEM ?
2052 DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
Bart Van Assche76c75b22010-11-26 14:37:47 -05002053 goto err_iu;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002054 }
2055
David Dillow49248642011-01-14 18:23:24 -05002056 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08002057 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002058
Bart Van Assche509c07b2014-10-30 14:48:30 +01002059 if (srp_post_send(ch, iu, len)) {
David Dillow7aa54bd2008-01-07 18:23:41 -05002060 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002061 goto err_unmap;
2062 }
2063
Bart Van Assched1b42892014-05-20 15:07:20 +02002064 ret = 0;
2065
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002066unlock_rport:
2067 if (in_scsi_eh)
2068 mutex_unlock(&rport->mutex);
2069
Bart Van Assched1b42892014-05-20 15:07:20 +02002070 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002071
2072err_unmap:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002073 srp_unmap_data(scmnd, ch, req);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002074
Bart Van Assche76c75b22010-11-26 14:37:47 -05002075err_iu:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002076 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
Bart Van Assche76c75b22010-11-26 14:37:47 -05002077
Bart Van Assche024ca902014-05-20 15:03:49 +02002078 /*
2079	 * Ensure that none of the loops that iterate over the request ring
2080	 * can encounter a dangling SCSI command pointer.
2081 */
2082 req->scmnd = NULL;
2083
Bart Van Assched1b42892014-05-20 15:07:20 +02002084err:
2085 if (scmnd->result) {
2086 scmnd->scsi_done(scmnd);
2087 ret = 0;
2088 } else {
2089 ret = SCSI_MLQUEUE_HOST_BUSY;
2090 }
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002091
Bart Van Assched1b42892014-05-20 15:07:20 +02002092 goto unlock_rport;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002093}
2094
Bart Van Assche4d73f952013-10-26 14:40:37 +02002095/*
2096 * Note: the resources allocated in this function are freed in
Bart Van Assche509c07b2014-10-30 14:48:30 +01002097 * srp_free_ch_ib().
Bart Van Assche4d73f952013-10-26 14:40:37 +02002098 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01002099static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002100{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002101 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002102 int i;
2103
Bart Van Assche509c07b2014-10-30 14:48:30 +01002104 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2105 GFP_KERNEL);
2106 if (!ch->rx_ring)
Bart Van Assche4d73f952013-10-26 14:40:37 +02002107 goto err_no_ring;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002108 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2109 GFP_KERNEL);
2110 if (!ch->tx_ring)
Bart Van Assche4d73f952013-10-26 14:40:37 +02002111 goto err_no_ring;
2112
2113 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002114 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2115 ch->max_ti_iu_len,
2116 GFP_KERNEL, DMA_FROM_DEVICE);
2117 if (!ch->rx_ring[i])
Roland Dreieraef9ec32005-11-02 14:07:13 -08002118 goto err;
2119 }
2120
Bart Van Assche4d73f952013-10-26 14:40:37 +02002121 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002122 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2123 target->max_iu_len,
2124 GFP_KERNEL, DMA_TO_DEVICE);
2125 if (!ch->tx_ring[i])
Roland Dreieraef9ec32005-11-02 14:07:13 -08002126 goto err;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05002127
Bart Van Assche509c07b2014-10-30 14:48:30 +01002128 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002129 }
2130
2131 return 0;
2132
2133err:
Bart Van Assche4d73f952013-10-26 14:40:37 +02002134 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002135 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2136 srp_free_iu(target->srp_host, ch->tx_ring[i]);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002137 }
2138
Bart Van Assche4d73f952013-10-26 14:40:37 +02002139
2140err_no_ring:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002141 kfree(ch->tx_ring);
2142 ch->tx_ring = NULL;
2143 kfree(ch->rx_ring);
2144 ch->rx_ring = NULL;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002145
2146 return -ENOMEM;
2147}
2148
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002149static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2150{
2151 uint64_t T_tr_ns, max_compl_time_ms;
2152 uint32_t rq_tmo_jiffies;
2153
2154 /*
2155 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2156 * table 91), both the QP timeout and the retry count have to be set
2157	 * for RC QPs during the RTR to RTS transition.
2158 */
2159 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2160 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2161
2162 /*
2163 * Set target->rq_tmo_jiffies to one second more than the largest time
2164 * it can take before an error completion is generated. See also
2165 * C9-140..142 in the IBTA spec for more information about how to
2166 * convert the QP Local ACK Timeout value to nanoseconds.
2167 */
2168 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2169 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2170 do_div(max_compl_time_ms, NSEC_PER_MSEC);
2171 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2172
2173 return rq_tmo_jiffies;
2174}
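/*
 * Numerical example for srp_compute_rq_tmo() (QP attribute values picked
 * purely for illustration): with qp_attr->timeout == 19 and
 * qp_attr->retry_cnt == 7, T_tr_ns = 4096 * 2^19 ns, i.e. roughly 2.15
 * seconds per transport retry. The worst-case completion time is then
 * 7 * 4 * 2.15 s, about 60 seconds, and rq_tmo_jiffies ends up at the
 * jiffies equivalent of approximately 61 seconds once the extra second of
 * slack is added.
 */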
2175
David Dillow961e0be2011-01-14 17:32:07 -05002176static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2177 struct srp_login_rsp *lrsp,
Bart Van Assche509c07b2014-10-30 14:48:30 +01002178 struct srp_rdma_ch *ch)
David Dillow961e0be2011-01-14 17:32:07 -05002179{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002180 struct srp_target_port *target = ch->target;
David Dillow961e0be2011-01-14 17:32:07 -05002181 struct ib_qp_attr *qp_attr = NULL;
2182 int attr_mask = 0;
2183 int ret;
2184 int i;
2185
2186 if (lrsp->opcode == SRP_LOGIN_RSP) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002187 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2188 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
David Dillow961e0be2011-01-14 17:32:07 -05002189
2190 /*
2191 * Reserve credits for task management so we don't
2192 * bounce requests back to the SCSI mid-layer.
2193 */
2194 target->scsi_host->can_queue
Bart Van Assche509c07b2014-10-30 14:48:30 +01002195 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
David Dillow961e0be2011-01-14 17:32:07 -05002196 target->scsi_host->can_queue);
Bart Van Assche4d73f952013-10-26 14:40:37 +02002197 target->scsi_host->cmd_per_lun
2198 = min_t(int, target->scsi_host->can_queue,
2199 target->scsi_host->cmd_per_lun);
David Dillow961e0be2011-01-14 17:32:07 -05002200 } else {
2201 shost_printk(KERN_WARNING, target->scsi_host,
2202 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2203 ret = -ECONNRESET;
2204 goto error;
2205 }
2206
Bart Van Assche509c07b2014-10-30 14:48:30 +01002207 if (!ch->rx_ring) {
2208 ret = srp_alloc_iu_bufs(ch);
David Dillow961e0be2011-01-14 17:32:07 -05002209 if (ret)
2210 goto error;
2211 }
2212
2213 ret = -ENOMEM;
2214 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
2215 if (!qp_attr)
2216 goto error;
2217
2218 qp_attr->qp_state = IB_QPS_RTR;
2219 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2220 if (ret)
2221 goto error_free;
2222
Bart Van Assche509c07b2014-10-30 14:48:30 +01002223 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
David Dillow961e0be2011-01-14 17:32:07 -05002224 if (ret)
2225 goto error_free;
2226
Bart Van Assche4d73f952013-10-26 14:40:37 +02002227 for (i = 0; i < target->queue_size; i++) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002228 struct srp_iu *iu = ch->rx_ring[i];
2229
2230 ret = srp_post_recv(ch, iu);
David Dillow961e0be2011-01-14 17:32:07 -05002231 if (ret)
2232 goto error_free;
2233 }
2234
2235 qp_attr->qp_state = IB_QPS_RTS;
2236 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2237 if (ret)
2238 goto error_free;
2239
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002240 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2241
Bart Van Assche509c07b2014-10-30 14:48:30 +01002242 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
David Dillow961e0be2011-01-14 17:32:07 -05002243 if (ret)
2244 goto error_free;
2245
2246 ret = ib_send_cm_rtu(cm_id, NULL, 0);
2247
2248error_free:
2249 kfree(qp_attr);
2250
2251error:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002252 ch->status = ret;
David Dillow961e0be2011-01-14 17:32:07 -05002253}
2254
Roland Dreieraef9ec32005-11-02 14:07:13 -08002255static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2256 struct ib_cm_event *event,
Bart Van Assche509c07b2014-10-30 14:48:30 +01002257 struct srp_rdma_ch *ch)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002258{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002259 struct srp_target_port *target = ch->target;
David Dillow7aa54bd2008-01-07 18:23:41 -05002260 struct Scsi_Host *shost = target->scsi_host;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002261 struct ib_class_port_info *cpi;
2262 int opcode;
2263
2264 switch (event->param.rej_rcvd.reason) {
2265 case IB_CM_REJ_PORT_CM_REDIRECT:
2266 cpi = event->param.rej_rcvd.ari;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002267 ch->path.dlid = cpi->redirect_lid;
2268 ch->path.pkey = cpi->redirect_pkey;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002269 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002270 memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002271
Bart Van Assche509c07b2014-10-30 14:48:30 +01002272 ch->status = ch->path.dlid ?
Roland Dreieraef9ec32005-11-02 14:07:13 -08002273 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2274 break;
2275
2276 case IB_CM_REJ_PORT_REDIRECT:
Roland Dreier5d7cbfd2007-08-03 10:45:18 -07002277 if (srp_target_is_topspin(target)) {
Roland Dreieraef9ec32005-11-02 14:07:13 -08002278 /*
2279 * Topspin/Cisco SRP gateways incorrectly send
2280 * reject reason code 25 when they mean 24
2281 * (port redirect).
2282 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01002283 memcpy(ch->path.dgid.raw,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002284 event->param.rej_rcvd.ari, 16);
2285
David Dillow7aa54bd2008-01-07 18:23:41 -05002286 shost_printk(KERN_DEBUG, shost,
2287 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
Bart Van Assche509c07b2014-10-30 14:48:30 +01002288 be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2289 be64_to_cpu(ch->path.dgid.global.interface_id));
Roland Dreieraef9ec32005-11-02 14:07:13 -08002290
Bart Van Assche509c07b2014-10-30 14:48:30 +01002291 ch->status = SRP_PORT_REDIRECT;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002292 } else {
David Dillow7aa54bd2008-01-07 18:23:41 -05002293 shost_printk(KERN_WARNING, shost,
2294 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002295 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002296 }
2297 break;
2298
2299 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
David Dillow7aa54bd2008-01-07 18:23:41 -05002300 shost_printk(KERN_WARNING, shost,
2301 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002302 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002303 break;
2304
2305 case IB_CM_REJ_CONSUMER_DEFINED:
2306 opcode = *(u8 *) event->private_data;
2307 if (opcode == SRP_LOGIN_REJ) {
2308 struct srp_login_rej *rej = event->private_data;
2309 u32 reason = be32_to_cpu(rej->reason);
2310
2311 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
David Dillow7aa54bd2008-01-07 18:23:41 -05002312 shost_printk(KERN_WARNING, shost,
2313 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002314 else
Bart Van Asschee7ffde02014-03-14 13:52:21 +01002315 shost_printk(KERN_WARNING, shost, PFX
2316 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
Bart Van Assche747fe002014-10-30 14:48:05 +01002317 target->sgid.raw,
2318 target->orig_dgid.raw, reason);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002319 } else
David Dillow7aa54bd2008-01-07 18:23:41 -05002320 shost_printk(KERN_WARNING, shost,
2321 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2322 " opcode 0x%02x\n", opcode);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002323 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002324 break;
2325
David Dillow9fe4bcf2008-01-08 17:08:52 -05002326 case IB_CM_REJ_STALE_CONN:
2327 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002328 ch->status = SRP_STALE_CONN;
David Dillow9fe4bcf2008-01-08 17:08:52 -05002329 break;
2330
Roland Dreieraef9ec32005-11-02 14:07:13 -08002331 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05002332 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2333 event->param.rej_rcvd.reason);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002334 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002335 }
2336}
2337
2338static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2339{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002340 struct srp_rdma_ch *ch = cm_id->context;
2341 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002342 int comp = 0;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002343
2344 switch (event->event) {
2345 case IB_CM_REQ_ERROR:
David Dillow7aa54bd2008-01-07 18:23:41 -05002346 shost_printk(KERN_DEBUG, target->scsi_host,
2347 PFX "Sending CM REQ failed\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002348 comp = 1;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002349 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002350 break;
2351
2352 case IB_CM_REP_RECEIVED:
2353 comp = 1;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002354 srp_cm_rep_handler(cm_id, event->private_data, ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002355 break;
2356
2357 case IB_CM_REJ_RECEIVED:
David Dillow7aa54bd2008-01-07 18:23:41 -05002358 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002359 comp = 1;
2360
Bart Van Assche509c07b2014-10-30 14:48:30 +01002361 srp_cm_rej_handler(cm_id, event, ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002362 break;
2363
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07002364 case IB_CM_DREQ_RECEIVED:
David Dillow7aa54bd2008-01-07 18:23:41 -05002365 shost_printk(KERN_WARNING, target->scsi_host,
2366 PFX "DREQ received - connection closed\n");
Bart Van Asschec014c8c2015-05-18 13:23:57 +02002367 ch->connected = false;
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07002368 if (ib_send_cm_drep(cm_id, NULL, 0))
David Dillow7aa54bd2008-01-07 18:23:41 -05002369 shost_printk(KERN_ERR, target->scsi_host,
2370 PFX "Sending CM DREP failed\n");
Bart Van Asschec1120f82013-10-26 14:35:08 +02002371 queue_work(system_long_wq, &target->tl_err_work);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002372 break;
2373
2374 case IB_CM_TIMEWAIT_EXIT:
David Dillow7aa54bd2008-01-07 18:23:41 -05002375 shost_printk(KERN_ERR, target->scsi_host,
2376 PFX "connection closed\n");
Bart Van Asscheac72d762014-03-14 13:53:40 +01002377 comp = 1;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002378
Bart Van Assche509c07b2014-10-30 14:48:30 +01002379 ch->status = 0;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002380 break;
2381
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07002382 case IB_CM_MRA_RECEIVED:
2383 case IB_CM_DREQ_ERROR:
2384 case IB_CM_DREP_RECEIVED:
2385 break;
2386
Roland Dreieraef9ec32005-11-02 14:07:13 -08002387 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05002388 shost_printk(KERN_WARNING, target->scsi_host,
2389 PFX "Unhandled CM event %d\n", event->event);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002390 break;
2391 }
2392
2393 if (comp)
Bart Van Assche509c07b2014-10-30 14:48:30 +01002394 complete(&ch->done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002395
Roland Dreieraef9ec32005-11-02 14:07:13 -08002396 return 0;
2397}
2398
Jack Wang71444b92013-11-07 11:37:37 +01002399/**
Jack Wang71444b92013-11-07 11:37:37 +01002400 * srp_change_queue_depth - set the device queue depth
2401 * @sdev: scsi device struct
2402 * @qdepth: requested queue depth
Jack Wang71444b92013-11-07 11:37:37 +01002403 *
2404 * Returns queue depth.
2405 */
2406static int
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01002407srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
Jack Wang71444b92013-11-07 11:37:37 +01002408{
Christoph Hellwigc40ecc12014-11-13 14:25:11 +01002409 if (!sdev->tagged_supported)
Christoph Hellwig1e6f2412014-11-13 14:27:41 +01002410 qdepth = 1;
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01002411 return scsi_change_queue_depth(sdev, qdepth);
Jack Wang71444b92013-11-07 11:37:37 +01002412}
2413
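/*
 * srp_send_tsk_mgmt() builds an SRP_TSK_MGMT information unit for task
 * management function @func against @lun, posts it on channel @ch while
 * holding the rport mutex, and waits up to SRP_ABORT_TIMEOUT_MS for the
 * response. It returns 0 on success and -1 on any failure; callers such
 * as srp_abort() and srp_reset_device() decide how to escalate.
 */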
Bart Van Assche985aa492015-05-18 13:27:14 +02002414static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
2415 u8 func)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002416{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002417 struct srp_target_port *target = ch->target;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002418 struct srp_rport *rport = target->rport;
David Dillow19081f32010-10-18 08:54:49 -04002419 struct ib_device *dev = target->srp_host->srp_dev->dev;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002420 struct srp_iu *iu;
2421 struct srp_tsk_mgmt *tsk_mgmt;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002422
Bart Van Asschec014c8c2015-05-18 13:23:57 +02002423 if (!ch->connected || target->qp_in_error)
Bart Van Assche3780d1f2013-02-21 17:18:00 +00002424 return -1;
2425
Bart Van Assche509c07b2014-10-30 14:48:30 +01002426 init_completion(&ch->tsk_mgmt_done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002427
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002428 /*
Bart Van Assche509c07b2014-10-30 14:48:30 +01002429 * Lock the rport mutex to prevent srp_create_ch_ib() from being
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002430 * invoked while a task management function is being sent.
2431 */
2432 mutex_lock(&rport->mutex);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002433 spin_lock_irq(&ch->lock);
2434 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2435 spin_unlock_irq(&ch->lock);
Bart Van Assche76c75b22010-11-26 14:37:47 -05002436
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002437 if (!iu) {
2438 mutex_unlock(&rport->mutex);
2439
Bart Van Assche76c75b22010-11-26 14:37:47 -05002440 return -1;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002441 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002442
David Dillow19081f32010-10-18 08:54:49 -04002443 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2444 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002445 tsk_mgmt = iu->buf;
2446 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2447
2448 tsk_mgmt->opcode = SRP_TSK_MGMT;
Bart Van Assche985aa492015-05-18 13:27:14 +02002449 int_to_scsilun(lun, &tsk_mgmt->lun);
David Dillowf8b6e312010-11-26 13:02:21 -05002450 tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002451 tsk_mgmt->tsk_mgmt_func = func;
David Dillowf8b6e312010-11-26 13:02:21 -05002452 tsk_mgmt->task_tag = req_tag;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002453
David Dillow19081f32010-10-18 08:54:49 -04002454 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2455 DMA_TO_DEVICE);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002456 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2457 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002458 mutex_unlock(&rport->mutex);
2459
Bart Van Assche76c75b22010-11-26 14:37:47 -05002460 return -1;
2461 }
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002462 mutex_unlock(&rport->mutex);
Roland Dreierd945e1d2006-05-09 10:50:28 -07002463
Bart Van Assche509c07b2014-10-30 14:48:30 +01002464 if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002465 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
Roland Dreierd945e1d2006-05-09 10:50:28 -07002466 return -1;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002467
Roland Dreierd945e1d2006-05-09 10:50:28 -07002468 return 0;
Roland Dreierd945e1d2006-05-09 10:50:28 -07002469}
2470
Roland Dreieraef9ec32005-11-02 14:07:13 -08002471static int srp_abort(struct scsi_cmnd *scmnd)
2472{
Roland Dreierd945e1d2006-05-09 10:50:28 -07002473 struct srp_target_port *target = host_to_target(scmnd->device->host);
David Dillowf8b6e312010-11-26 13:02:21 -05002474 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002475 u32 tag;
Bart Van Assched92c0da2014-10-06 17:14:36 +02002476 u16 ch_idx;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002477 struct srp_rdma_ch *ch;
Bart Van Assche086f44f2013-06-12 15:23:04 +02002478 int ret;
Roland Dreierd945e1d2006-05-09 10:50:28 -07002479
David Dillow7aa54bd2008-01-07 18:23:41 -05002480 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002481
Bart Van Assched92c0da2014-10-06 17:14:36 +02002482 if (!req)
Bart Van Assche99b66972013-10-10 13:52:33 +02002483 return SUCCESS;
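	/*
	 * The block layer tag encodes the hardware queue index, and since
	 * scsi_host->nr_hw_queues is set to target->ch_count, that index
	 * identifies the RDMA channel on which the command was issued; the
	 * ABORT TASK request is sent back on the same channel.
	 */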
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002484 tag = blk_mq_unique_tag(scmnd->request);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002485 ch_idx = blk_mq_unique_tag_to_hwq(tag);
2486 if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2487 return SUCCESS;
2488 ch = &target->ch[ch_idx];
2489 if (!srp_claim_req(ch, req, NULL, scmnd))
2490 return SUCCESS;
2491 shost_printk(KERN_ERR, target->scsi_host,
2492 "Sending SRP abort for tag %#x\n", tag);
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002493 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
Bart Van Assche80d5e8a2013-07-10 17:36:35 +02002494 SRP_TSK_ABORT_TASK) == 0)
Bart Van Assche086f44f2013-06-12 15:23:04 +02002495 ret = SUCCESS;
Bart Van Asscheed9b2262013-10-26 14:34:27 +02002496 else if (target->rport->state == SRP_RPORT_LOST)
Bart Van Assche99e1c132013-06-28 14:49:58 +02002497 ret = FAST_IO_FAIL;
Bart Van Assche086f44f2013-06-12 15:23:04 +02002498 else
2499 ret = FAILED;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002500 srp_free_req(ch, req, scmnd, 0);
Bart Van Assche22032992012-08-14 13:18:53 +00002501 scmnd->result = DID_ABORT << 16;
Bart Van Assched8536672012-08-24 10:29:11 +00002502 scmnd->scsi_done(scmnd);
Roland Dreierd945e1d2006-05-09 10:50:28 -07002503
Bart Van Assche086f44f2013-06-12 15:23:04 +02002504 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002505}
2506
2507static int srp_reset_device(struct scsi_cmnd *scmnd)
2508{
Roland Dreierd945e1d2006-05-09 10:50:28 -07002509 struct srp_target_port *target = host_to_target(scmnd->device->host);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002510 struct srp_rdma_ch *ch;
Bart Van Assche536ae142010-11-26 13:58:27 -05002511	int i, j;
Roland Dreierd945e1d2006-05-09 10:50:28 -07002512
David Dillow7aa54bd2008-01-07 18:23:41 -05002513 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002514
Bart Van Assched92c0da2014-10-06 17:14:36 +02002515 ch = &target->ch[0];
Bart Van Assche509c07b2014-10-30 14:48:30 +01002516 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
David Dillowf8b6e312010-11-26 13:02:21 -05002517 SRP_TSK_LUN_RESET))
Roland Dreierd945e1d2006-05-09 10:50:28 -07002518 return FAILED;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002519 if (ch->tsk_mgmt_status)
Roland Dreierd945e1d2006-05-09 10:50:28 -07002520 return FAILED;
2521
Bart Van Assched92c0da2014-10-06 17:14:36 +02002522 for (i = 0; i < target->ch_count; i++) {
2523 ch = &target->ch[i];
 2524 for (j = 0; j < target->req_ring_size; ++j) {
 2525 struct srp_request *req = &ch->req_ring[j];
Bart Van Assche509c07b2014-10-30 14:48:30 +01002526
Bart Van Assched92c0da2014-10-06 17:14:36 +02002527 srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2528 }
Bart Van Assche536ae142010-11-26 13:58:27 -05002529 }
Roland Dreierd945e1d2006-05-09 10:50:28 -07002530
Roland Dreierd945e1d2006-05-09 10:50:28 -07002531 return SUCCESS;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002532}
2533
2534static int srp_reset_host(struct scsi_cmnd *scmnd)
2535{
2536 struct srp_target_port *target = host_to_target(scmnd->device->host);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002537
David Dillow7aa54bd2008-01-07 18:23:41 -05002538 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002539
Bart Van Asscheed9b2262013-10-26 14:34:27 +02002540 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002541}
2542
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002543static int srp_slave_configure(struct scsi_device *sdev)
2544{
2545 struct Scsi_Host *shost = sdev->host;
2546 struct srp_target_port *target = host_to_target(shost);
2547 struct request_queue *q = sdev->request_queue;
2548 unsigned long timeout;
2549
2550 if (sdev->type == TYPE_DISK) {
2551 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2552 blk_queue_rq_timeout(q, timeout);
2553 }
2554
2555 return 0;
2556}
2557
Tony Jonesee959b02008-02-22 00:13:36 +01002558static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2559 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002560{
Tony Jonesee959b02008-02-22 00:13:36 +01002561 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002562
Bart Van Assche45c37ca2015-05-18 13:25:10 +02002563 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002564}
2565
Tony Jonesee959b02008-02-22 00:13:36 +01002566static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2567 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002568{
Tony Jonesee959b02008-02-22 00:13:36 +01002569 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002570
Bart Van Assche45c37ca2015-05-18 13:25:10 +02002571 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002572}
2573
Tony Jonesee959b02008-02-22 00:13:36 +01002574static ssize_t show_service_id(struct device *dev,
2575 struct device_attribute *attr, char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002576{
Tony Jonesee959b02008-02-22 00:13:36 +01002577 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002578
Bart Van Assche45c37ca2015-05-18 13:25:10 +02002579 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002580}
2581
Tony Jonesee959b02008-02-22 00:13:36 +01002582static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2583 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002584{
Tony Jonesee959b02008-02-22 00:13:36 +01002585 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002586
Bart Van Assche747fe002014-10-30 14:48:05 +01002587 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002588}
2589
Bart Van Assche848b3082013-10-26 14:38:12 +02002590static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2591 char *buf)
2592{
2593 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2594
Bart Van Assche747fe002014-10-30 14:48:05 +01002595 return sprintf(buf, "%pI6\n", target->sgid.raw);
Bart Van Assche848b3082013-10-26 14:38:12 +02002596}
2597
Tony Jonesee959b02008-02-22 00:13:36 +01002598static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2599 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002600{
Tony Jonesee959b02008-02-22 00:13:36 +01002601 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Bart Van Assched92c0da2014-10-06 17:14:36 +02002602 struct srp_rdma_ch *ch = &target->ch[0];
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002603
Bart Van Assche509c07b2014-10-30 14:48:30 +01002604 return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002605}
2606
Tony Jonesee959b02008-02-22 00:13:36 +01002607static ssize_t show_orig_dgid(struct device *dev,
2608 struct device_attribute *attr, char *buf)
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002609{
Tony Jonesee959b02008-02-22 00:13:36 +01002610 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002611
Bart Van Assche747fe002014-10-30 14:48:05 +01002612 return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002613}
2614
Bart Van Assche89de7482010-08-03 14:08:45 +00002615static ssize_t show_req_lim(struct device *dev,
2616 struct device_attribute *attr, char *buf)
2617{
2618 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Bart Van Assched92c0da2014-10-06 17:14:36 +02002619 struct srp_rdma_ch *ch;
2620 int i, req_lim = INT_MAX;
Bart Van Assche89de7482010-08-03 14:08:45 +00002621
Bart Van Assched92c0da2014-10-06 17:14:36 +02002622 for (i = 0; i < target->ch_count; i++) {
2623 ch = &target->ch[i];
2624 req_lim = min(req_lim, ch->req_lim);
2625 }
2626 return sprintf(buf, "%d\n", req_lim);
Bart Van Assche89de7482010-08-03 14:08:45 +00002627}
2628
Tony Jonesee959b02008-02-22 00:13:36 +01002629static ssize_t show_zero_req_lim(struct device *dev,
2630 struct device_attribute *attr, char *buf)
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002631{
Tony Jonesee959b02008-02-22 00:13:36 +01002632 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002633
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002634 return sprintf(buf, "%d\n", target->zero_req_lim);
2635}
2636
Tony Jonesee959b02008-02-22 00:13:36 +01002637static ssize_t show_local_ib_port(struct device *dev,
2638 struct device_attribute *attr, char *buf)
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002639{
Tony Jonesee959b02008-02-22 00:13:36 +01002640 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002641
2642 return sprintf(buf, "%d\n", target->srp_host->port);
2643}
2644
Tony Jonesee959b02008-02-22 00:13:36 +01002645static ssize_t show_local_ib_device(struct device *dev,
2646 struct device_attribute *attr, char *buf)
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002647{
Tony Jonesee959b02008-02-22 00:13:36 +01002648 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002649
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002650 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002651}
2652
Bart Van Assched92c0da2014-10-06 17:14:36 +02002653static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2654 char *buf)
2655{
2656 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2657
2658 return sprintf(buf, "%d\n", target->ch_count);
2659}
2660
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002661static ssize_t show_comp_vector(struct device *dev,
2662 struct device_attribute *attr, char *buf)
2663{
2664 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2665
2666 return sprintf(buf, "%d\n", target->comp_vector);
2667}
2668
Vu Pham7bb312e2013-10-26 14:31:27 +02002669static ssize_t show_tl_retry_count(struct device *dev,
2670 struct device_attribute *attr, char *buf)
2671{
2672 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2673
2674 return sprintf(buf, "%d\n", target->tl_retry_count);
2675}
2676
David Dillow49248642011-01-14 18:23:24 -05002677static ssize_t show_cmd_sg_entries(struct device *dev,
2678 struct device_attribute *attr, char *buf)
2679{
2680 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2681
2682 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2683}
2684
David Dillowc07d4242011-01-16 13:57:10 -05002685static ssize_t show_allow_ext_sg(struct device *dev,
2686 struct device_attribute *attr, char *buf)
2687{
2688 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2689
2690 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2691}
2692
Tony Jonesee959b02008-02-22 00:13:36 +01002693static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
2694static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
2695static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
2696static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
Bart Van Assche848b3082013-10-26 14:38:12 +02002697static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
Tony Jonesee959b02008-02-22 00:13:36 +01002698static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
2699static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
Bart Van Assche89de7482010-08-03 14:08:45 +00002700static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
Tony Jonesee959b02008-02-22 00:13:36 +01002701static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
2702static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
2703static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002704static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002705static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
Vu Pham7bb312e2013-10-26 14:31:27 +02002706static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
David Dillow49248642011-01-14 18:23:24 -05002707static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
David Dillowc07d4242011-01-16 13:57:10 -05002708static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002709
Tony Jonesee959b02008-02-22 00:13:36 +01002710static struct device_attribute *srp_host_attrs[] = {
2711 &dev_attr_id_ext,
2712 &dev_attr_ioc_guid,
2713 &dev_attr_service_id,
2714 &dev_attr_pkey,
Bart Van Assche848b3082013-10-26 14:38:12 +02002715 &dev_attr_sgid,
Tony Jonesee959b02008-02-22 00:13:36 +01002716 &dev_attr_dgid,
2717 &dev_attr_orig_dgid,
Bart Van Assche89de7482010-08-03 14:08:45 +00002718 &dev_attr_req_lim,
Tony Jonesee959b02008-02-22 00:13:36 +01002719 &dev_attr_zero_req_lim,
2720 &dev_attr_local_ib_port,
2721 &dev_attr_local_ib_device,
Bart Van Assched92c0da2014-10-06 17:14:36 +02002722 &dev_attr_ch_count,
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002723 &dev_attr_comp_vector,
Vu Pham7bb312e2013-10-26 14:31:27 +02002724 &dev_attr_tl_retry_count,
David Dillow49248642011-01-14 18:23:24 -05002725 &dev_attr_cmd_sg_entries,
David Dillowc07d4242011-01-16 13:57:10 -05002726 &dev_attr_allow_ext_sg,
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002727 NULL
2728};
2729
Roland Dreieraef9ec32005-11-02 14:07:13 -08002730static struct scsi_host_template srp_template = {
2731 .module = THIS_MODULE,
Roland Dreierb7f008f2007-05-06 21:18:11 -07002732 .name = "InfiniBand SRP initiator",
2733 .proc_name = DRV_NAME,
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002734 .slave_configure = srp_slave_configure,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002735 .info = srp_target_info,
2736 .queuecommand = srp_queuecommand,
Jack Wang71444b92013-11-07 11:37:37 +01002737 .change_queue_depth = srp_change_queue_depth,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002738 .eh_abort_handler = srp_abort,
2739 .eh_device_reset_handler = srp_reset_device,
2740 .eh_host_reset_handler = srp_reset_host,
Bart Van Assche2742c1d2013-06-12 15:24:25 +02002741 .skip_settle_delay = true,
David Dillow49248642011-01-14 18:23:24 -05002742 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
Bart Van Assche4d73f952013-10-26 14:40:37 +02002743 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002744 .this_id = -1,
Bart Van Assche4d73f952013-10-26 14:40:37 +02002745 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002746 .use_clustering = ENABLE_CLUSTERING,
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002747 .shost_attrs = srp_host_attrs,
2748 .use_blk_tags = 1,
Christoph Hellwigc40ecc12014-11-13 14:25:11 +01002749 .track_queue_depth = 1,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002750};
2751
Bart Van Assche34aa6542014-10-30 14:47:22 +01002752static int srp_sdev_count(struct Scsi_Host *host)
2753{
2754 struct scsi_device *sdev;
2755 int c = 0;
2756
2757 shost_for_each_device(sdev, host)
2758 c++;
2759
2760 return c;
2761}
2762
Roland Dreieraef9ec32005-11-02 14:07:13 -08002763static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2764{
FUJITA Tomonori32368222007-06-27 16:33:12 +09002765 struct srp_rport_identifiers ids;
2766 struct srp_rport *rport;
2767
Bart Van Assche34aa6542014-10-30 14:47:22 +01002768 target->state = SRP_TARGET_SCANNING;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002769 sprintf(target->target_name, "SRP.T10:%016llX",
Bart Van Assche45c37ca2015-05-18 13:25:10 +02002770 be64_to_cpu(target->id_ext));
Roland Dreieraef9ec32005-11-02 14:07:13 -08002771
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002772 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
Roland Dreieraef9ec32005-11-02 14:07:13 -08002773 return -ENODEV;
2774
FUJITA Tomonori32368222007-06-27 16:33:12 +09002775 memcpy(ids.port_id, &target->id_ext, 8);
2776 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
FUJITA Tomonoriaebd5e42007-07-11 15:08:15 +09002777 ids.roles = SRP_RPORT_ROLE_TARGET;
FUJITA Tomonori32368222007-06-27 16:33:12 +09002778 rport = srp_rport_add(target->scsi_host, &ids);
2779 if (IS_ERR(rport)) {
2780 scsi_remove_host(target->scsi_host);
2781 return PTR_ERR(rport);
2782 }
2783
Bart Van Asschedc1bdbd2011-09-16 20:41:13 +02002784 rport->lld_data = target;
Bart Van Assche9dd69a62013-10-26 14:32:30 +02002785 target->rport = rport;
Bart Van Asschedc1bdbd2011-09-16 20:41:13 +02002786
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07002787 spin_lock(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002788 list_add_tail(&target->list, &host->target_list);
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07002789 spin_unlock(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002790
Roland Dreieraef9ec32005-11-02 14:07:13 -08002791 scsi_scan_target(&target->scsi_host->shost_gendev,
Matthew Wilcox1962a4a2006-06-17 20:37:30 -07002792 0, target->scsi_id, SCAN_WILD_CARD, 0);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002793
Bart Van Asschec014c8c2015-05-18 13:23:57 +02002794 if (srp_connected_ch(target) < target->ch_count ||
2795 target->qp_in_error) {
Bart Van Assche34aa6542014-10-30 14:47:22 +01002796 shost_printk(KERN_INFO, target->scsi_host,
2797 PFX "SCSI scan failed - removing SCSI host\n");
2798 srp_queue_remove_work(target);
2799 goto out;
2800 }
2801
2802 pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n",
2803 dev_name(&target->scsi_host->shost_gendev),
2804 srp_sdev_count(target->scsi_host));
2805
2806 spin_lock_irq(&target->lock);
2807 if (target->state == SRP_TARGET_SCANNING)
2808 target->state = SRP_TARGET_LIVE;
2809 spin_unlock_irq(&target->lock);
2810
2811out:
Roland Dreieraef9ec32005-11-02 14:07:13 -08002812 return 0;
2813}
2814
Tony Jonesee959b02008-02-22 00:13:36 +01002815static void srp_release_dev(struct device *dev)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002816{
2817 struct srp_host *host =
Tony Jonesee959b02008-02-22 00:13:36 +01002818 container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002819
2820 complete(&host->released);
2821}
2822
2823static struct class srp_class = {
2824 .name = "infiniband_srp",
Tony Jonesee959b02008-02-22 00:13:36 +01002825 .dev_release = srp_release_dev
Roland Dreieraef9ec32005-11-02 14:07:13 -08002826};
2827
Bart Van Assche96fc2482013-06-28 14:51:26 +02002828/**
2829 * srp_conn_unique() - check whether the connection to a target is unique
Bart Van Asscheaf246632014-05-20 15:04:21 +02002830 * @host: SRP host.
2831 * @target: SRP target port.
Bart Van Assche96fc2482013-06-28 14:51:26 +02002832 */
2833static bool srp_conn_unique(struct srp_host *host,
2834 struct srp_target_port *target)
2835{
2836 struct srp_target_port *t;
2837 bool ret = false;
2838
2839 if (target->state == SRP_TARGET_REMOVED)
2840 goto out;
2841
2842 ret = true;
2843
2844 spin_lock(&host->target_lock);
2845 list_for_each_entry(t, &host->target_list, list) {
2846 if (t != target &&
2847 target->id_ext == t->id_ext &&
2848 target->ioc_guid == t->ioc_guid &&
2849 target->initiator_ext == t->initiator_ext) {
2850 ret = false;
2851 break;
2852 }
2853 }
2854 spin_unlock(&host->target_lock);
2855
2856out:
2857 return ret;
2858}
2859
Roland Dreieraef9ec32005-11-02 14:07:13 -08002860/*
2861 * Target ports are added by writing
2862 *
2863 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2864 * pkey=<P_Key>,service_id=<service ID>
2865 *
2866 * to the add_target sysfs attribute.
2867 */
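/*
 * For example (all identifier values below are purely illustrative), a
 * target port could be created from user space with a write such as
 *
 *   echo id_ext=0002c90300a0b0c0,ioc_guid=0002c90300a0b0c0,\
 *   dgid=fe800000000000000002c90300a0b0c1,pkey=ffff,\
 *   service_id=0002c90300a0b0c0 \
 *       > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 *
 * where "srp-mlx4_0-1" stands for whatever "srp-%s-%d" name srp_add_port()
 * registered for the local HCA and port.
 */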
2868enum {
2869 SRP_OPT_ERR = 0,
2870 SRP_OPT_ID_EXT = 1 << 0,
2871 SRP_OPT_IOC_GUID = 1 << 1,
2872 SRP_OPT_DGID = 1 << 2,
2873 SRP_OPT_PKEY = 1 << 3,
2874 SRP_OPT_SERVICE_ID = 1 << 4,
2875 SRP_OPT_MAX_SECT = 1 << 5,
Vu Pham52fb2b502006-06-17 20:37:31 -07002876 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
Ramachandra K0c0450db2006-06-17 20:37:38 -07002877 SRP_OPT_IO_CLASS = 1 << 7,
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02002878 SRP_OPT_INITIATOR_EXT = 1 << 8,
David Dillow49248642011-01-14 18:23:24 -05002879 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
David Dillowc07d4242011-01-16 13:57:10 -05002880 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
2881 SRP_OPT_SG_TABLESIZE = 1 << 11,
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002882 SRP_OPT_COMP_VECTOR = 1 << 12,
Vu Pham7bb312e2013-10-26 14:31:27 +02002883 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
Bart Van Assche4d73f952013-10-26 14:40:37 +02002884 SRP_OPT_QUEUE_SIZE = 1 << 14,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002885 SRP_OPT_ALL = (SRP_OPT_ID_EXT |
2886 SRP_OPT_IOC_GUID |
2887 SRP_OPT_DGID |
2888 SRP_OPT_PKEY |
2889 SRP_OPT_SERVICE_ID),
2890};
2891
Steven Whitehousea447c092008-10-13 10:46:57 +01002892static const match_table_t srp_opt_tokens = {
Vu Pham52fb2b502006-06-17 20:37:31 -07002893 { SRP_OPT_ID_EXT, "id_ext=%s" },
2894 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
2895 { SRP_OPT_DGID, "dgid=%s" },
2896 { SRP_OPT_PKEY, "pkey=%x" },
2897 { SRP_OPT_SERVICE_ID, "service_id=%s" },
2898 { SRP_OPT_MAX_SECT, "max_sect=%d" },
2899 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
Ramachandra K0c0450db2006-06-17 20:37:38 -07002900 { SRP_OPT_IO_CLASS, "io_class=%x" },
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02002901 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
David Dillow49248642011-01-14 18:23:24 -05002902 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
David Dillowc07d4242011-01-16 13:57:10 -05002903 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
2904 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002905 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
Vu Pham7bb312e2013-10-26 14:31:27 +02002906 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
Bart Van Assche4d73f952013-10-26 14:40:37 +02002907 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
Vu Pham52fb2b502006-06-17 20:37:31 -07002908 { SRP_OPT_ERR, NULL }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002909};
2910
2911static int srp_parse_options(const char *buf, struct srp_target_port *target)
2912{
2913 char *options, *sep_opt;
2914 char *p;
2915 char dgid[3];
2916 substring_t args[MAX_OPT_ARGS];
2917 int opt_mask = 0;
2918 int token;
2919 int ret = -EINVAL;
2920 int i;
2921
2922 options = kstrdup(buf, GFP_KERNEL);
2923 if (!options)
2924 return -ENOMEM;
2925
2926 sep_opt = options;
Sagi Grimberg7dcf9c12014-10-19 18:19:02 +03002927 while ((p = strsep(&sep_opt, ",\n")) != NULL) {
Roland Dreieraef9ec32005-11-02 14:07:13 -08002928 if (!*p)
2929 continue;
2930
2931 token = match_token(p, srp_opt_tokens, args);
2932 opt_mask |= token;
2933
2934 switch (token) {
2935 case SRP_OPT_ID_EXT:
2936 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02002937 if (!p) {
2938 ret = -ENOMEM;
2939 goto out;
2940 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002941 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2942 kfree(p);
2943 break;
2944
2945 case SRP_OPT_IOC_GUID:
2946 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02002947 if (!p) {
2948 ret = -ENOMEM;
2949 goto out;
2950 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002951 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
2952 kfree(p);
2953 break;
2954
2955 case SRP_OPT_DGID:
2956 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02002957 if (!p) {
2958 ret = -ENOMEM;
2959 goto out;
2960 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002961 if (strlen(p) != 32) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002962 pr_warn("bad dest GID parameter '%s'\n", p);
Roland Dreierce1823f2006-04-03 09:31:04 -07002963 kfree(p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002964 goto out;
2965 }
2966
2967 for (i = 0; i < 16; ++i) {
Bart Van Assche747fe002014-10-30 14:48:05 +01002968 strlcpy(dgid, p + i * 2, sizeof(dgid));
2969 if (sscanf(dgid, "%hhx",
2970 &target->orig_dgid.raw[i]) < 1) {
2971 ret = -EINVAL;
2972 kfree(p);
2973 goto out;
2974 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002975 }
Roland Dreierbf17c1c2006-03-20 10:08:25 -08002976 kfree(p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002977 break;
2978
2979 case SRP_OPT_PKEY:
2980 if (match_hex(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002981 pr_warn("bad P_Key parameter '%s'\n", p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002982 goto out;
2983 }
Bart Van Assche747fe002014-10-30 14:48:05 +01002984 target->pkey = cpu_to_be16(token);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002985 break;
2986
2987 case SRP_OPT_SERVICE_ID:
2988 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02002989 if (!p) {
2990 ret = -ENOMEM;
2991 goto out;
2992 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002993 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
2994 kfree(p);
2995 break;
2996
2997 case SRP_OPT_MAX_SECT:
2998 if (match_int(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002999 pr_warn("bad max sect parameter '%s'\n", p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003000 goto out;
3001 }
3002 target->scsi_host->max_sectors = token;
3003 break;
3004
Bart Van Assche4d73f952013-10-26 14:40:37 +02003005 case SRP_OPT_QUEUE_SIZE:
3006 if (match_int(args, &token) || token < 1) {
3007 pr_warn("bad queue_size parameter '%s'\n", p);
3008 goto out;
3009 }
3010 target->scsi_host->can_queue = token;
3011 target->queue_size = token + SRP_RSP_SQ_SIZE +
3012 SRP_TSK_MGMT_SQ_SIZE;
3013 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3014 target->scsi_host->cmd_per_lun = token;
3015 break;
3016
Vu Pham52fb2b502006-06-17 20:37:31 -07003017 case SRP_OPT_MAX_CMD_PER_LUN:
Bart Van Assche4d73f952013-10-26 14:40:37 +02003018 if (match_int(args, &token) || token < 1) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003019 pr_warn("bad max cmd_per_lun parameter '%s'\n",
3020 p);
Vu Pham52fb2b502006-06-17 20:37:31 -07003021 goto out;
3022 }
Bart Van Assche4d73f952013-10-26 14:40:37 +02003023 target->scsi_host->cmd_per_lun = token;
Vu Pham52fb2b502006-06-17 20:37:31 -07003024 break;
3025
Ramachandra K0c0450db2006-06-17 20:37:38 -07003026 case SRP_OPT_IO_CLASS:
3027 if (match_hex(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003028 pr_warn("bad IO class parameter '%s'\n", p);
Ramachandra K0c0450db2006-06-17 20:37:38 -07003029 goto out;
3030 }
3031 if (token != SRP_REV10_IB_IO_CLASS &&
3032 token != SRP_REV16A_IB_IO_CLASS) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003033 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3034 token, SRP_REV10_IB_IO_CLASS,
3035 SRP_REV16A_IB_IO_CLASS);
Ramachandra K0c0450db2006-06-17 20:37:38 -07003036 goto out;
3037 }
3038 target->io_class = token;
3039 break;
3040
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02003041 case SRP_OPT_INITIATOR_EXT:
3042 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02003043 if (!p) {
3044 ret = -ENOMEM;
3045 goto out;
3046 }
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02003047 target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3048 kfree(p);
3049 break;
3050
David Dillow49248642011-01-14 18:23:24 -05003051 case SRP_OPT_CMD_SG_ENTRIES:
3052 if (match_int(args, &token) || token < 1 || token > 255) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003053 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3054 p);
David Dillow49248642011-01-14 18:23:24 -05003055 goto out;
3056 }
3057 target->cmd_sg_cnt = token;
3058 break;
3059
David Dillowc07d4242011-01-16 13:57:10 -05003060 case SRP_OPT_ALLOW_EXT_SG:
3061 if (match_int(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003062 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
David Dillowc07d4242011-01-16 13:57:10 -05003063 goto out;
3064 }
3065 target->allow_ext_sg = !!token;
3066 break;
3067
3068 case SRP_OPT_SG_TABLESIZE:
3069 if (match_int(args, &token) || token < 1 ||
3070 token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003071 pr_warn("bad max sg_tablesize parameter '%s'\n",
3072 p);
David Dillowc07d4242011-01-16 13:57:10 -05003073 goto out;
3074 }
3075 target->sg_tablesize = token;
3076 break;
3077
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02003078 case SRP_OPT_COMP_VECTOR:
3079 if (match_int(args, &token) || token < 0) {
3080 pr_warn("bad comp_vector parameter '%s'\n", p);
3081 goto out;
3082 }
3083 target->comp_vector = token;
3084 break;
3085
Vu Pham7bb312e2013-10-26 14:31:27 +02003086 case SRP_OPT_TL_RETRY_COUNT:
3087 if (match_int(args, &token) || token < 2 || token > 7) {
3088 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3089 p);
3090 goto out;
3091 }
3092 target->tl_retry_count = token;
3093 break;
3094
Roland Dreieraef9ec32005-11-02 14:07:13 -08003095 default:
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003096 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3097 p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003098 goto out;
3099 }
3100 }
3101
3102 if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
3103 ret = 0;
3104 else
3105 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
3106 if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
3107 !(srp_opt_tokens[i].token & opt_mask))
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003108 pr_warn("target creation request is missing parameter '%s'\n",
3109 srp_opt_tokens[i].pattern);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003110
Bart Van Assche4d73f952013-10-26 14:40:37 +02003111 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3112 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3113 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3114 target->scsi_host->cmd_per_lun,
3115 target->scsi_host->can_queue);
3116
Roland Dreieraef9ec32005-11-02 14:07:13 -08003117out:
3118 kfree(options);
3119 return ret;
3120}
3121
Tony Jonesee959b02008-02-22 00:13:36 +01003122static ssize_t srp_create_target(struct device *dev,
3123 struct device_attribute *attr,
Roland Dreieraef9ec32005-11-02 14:07:13 -08003124 const char *buf, size_t count)
3125{
3126 struct srp_host *host =
Tony Jonesee959b02008-02-22 00:13:36 +01003127 container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003128 struct Scsi_Host *target_host;
3129 struct srp_target_port *target;
Bart Van Assche509c07b2014-10-30 14:48:30 +01003130 struct srp_rdma_ch *ch;
Bart Van Assched1b42892014-05-20 15:07:20 +02003131 struct srp_device *srp_dev = host->srp_dev;
3132 struct ib_device *ibdev = srp_dev->dev;
Bart Van Assched92c0da2014-10-06 17:14:36 +02003133 int ret, node_idx, node, cpu, i;
3134 bool multich = false;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003135
3136 target_host = scsi_host_alloc(&srp_template,
3137 sizeof (struct srp_target_port));
3138 if (!target_host)
3139 return -ENOMEM;
3140
David Dillow49248642011-01-14 18:23:24 -05003141 target_host->transportt = ib_srp_transport_template;
Bart Van Asschefd1b6c42011-07-13 09:19:16 -07003142 target_host->max_channel = 0;
3143 target_host->max_id = 1;
Bart Van Assche985aa492015-05-18 13:27:14 +02003144 target_host->max_lun = -1LL;
Arne Redlich3c8edf02006-11-15 12:43:00 +01003145 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
Roland Dreier5f068992005-11-11 14:06:01 -08003146
Roland Dreieraef9ec32005-11-02 14:07:13 -08003147 target = host_to_target(target_host);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003148
David Dillow49248642011-01-14 18:23:24 -05003149 target->io_class = SRP_REV16A_IB_IO_CLASS;
3150 target->scsi_host = target_host;
3151 target->srp_host = host;
3152 target->lkey = host->srp_dev->mr->lkey;
3153 target->rkey = host->srp_dev->mr->rkey;
3154 target->cmd_sg_cnt = cmd_sg_entries;
David Dillowc07d4242011-01-16 13:57:10 -05003155 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
3156 target->allow_ext_sg = allow_ext_sg;
Vu Pham7bb312e2013-10-26 14:31:27 +02003157 target->tl_retry_count = 7;
Bart Van Assche4d73f952013-10-26 14:40:37 +02003158 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003159
Bart Van Assche34aa6542014-10-30 14:47:22 +01003160 /*
 3161 * Prevent the SCSI host from being removed by srp_remove_target()
3162 * before this function returns.
3163 */
3164 scsi_host_get(target->scsi_host);
3165
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003166 mutex_lock(&host->add_target_mutex);
3167
Roland Dreieraef9ec32005-11-02 14:07:13 -08003168 ret = srp_parse_options(buf, target);
3169 if (ret)
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003170 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003171
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02003172 ret = scsi_init_shared_tag_map(target_host, target_host->can_queue);
3173 if (ret)
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003174 goto out;
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02003175
Bart Van Assche4d73f952013-10-26 14:40:37 +02003176 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3177
Bart Van Assche96fc2482013-06-28 14:51:26 +02003178 if (!srp_conn_unique(target->srp_host, target)) {
3179 shost_printk(KERN_INFO, target->scsi_host,
3180 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3181 be64_to_cpu(target->id_ext),
3182 be64_to_cpu(target->ioc_guid),
3183 be64_to_cpu(target->initiator_ext));
3184 ret = -EEXIST;
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003185 goto out;
Bart Van Assche96fc2482013-06-28 14:51:26 +02003186 }
3187
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003188 if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
Bart Van Assched1b42892014-05-20 15:07:20 +02003189 target->cmd_sg_cnt < target->sg_tablesize) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003190 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
David Dillowc07d4242011-01-16 13:57:10 -05003191 target->sg_tablesize = target->cmd_sg_cnt;
3192 }
3193
3194 target_host->sg_tablesize = target->sg_tablesize;
3195 target->indirect_size = target->sg_tablesize *
3196 sizeof (struct srp_direct_buf);
David Dillow49248642011-01-14 18:23:24 -05003197 target->max_iu_len = sizeof (struct srp_cmd) +
3198 sizeof (struct srp_indirect_buf) +
3199 target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
3200
Bart Van Asschec1120f82013-10-26 14:35:08 +02003201 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
Bart Van Asscheef6c49d2011-12-26 16:49:18 +00003202 INIT_WORK(&target->remove_work, srp_remove_work);
David Dillow8f26c9f2011-01-14 19:45:50 -05003203 spin_lock_init(&target->lock);
Bart Van Assche747fe002014-10-30 14:48:05 +01003204 ret = ib_query_gid(ibdev, host->port, 0, &target->sgid);
Sagi Grimberg2088ca62014-03-14 13:51:58 +01003205 if (ret)
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003206 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003207
Bart Van Assched92c0da2014-10-06 17:14:36 +02003208 ret = -ENOMEM;
3209 target->ch_count = max_t(unsigned, num_online_nodes(),
3210 min(ch_count ? :
3211 min(4 * num_online_nodes(),
3212 ibdev->num_comp_vectors),
3213 num_online_cpus()));
3214 target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3215 GFP_KERNEL);
3216 if (!target->ch)
Bart Van Asschefb49c8b2015-05-18 13:23:14 +02003217 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003218
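	/*
	 * Spread the channels evenly over the online NUMA nodes and, within
	 * each node, over the HCA completion vectors, binding at most one
	 * channel per online CPU. With the default computed above, a host
	 * with for instance two NUMA nodes, sixteen online CPUs and eight
	 * completion vectors ends up with eight channels (illustrative
	 * numbers only).
	 */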
Bart Van Assched92c0da2014-10-06 17:14:36 +02003219 node_idx = 0;
3220 for_each_online_node(node) {
3221 const int ch_start = (node_idx * target->ch_count /
3222 num_online_nodes());
3223 const int ch_end = ((node_idx + 1) * target->ch_count /
3224 num_online_nodes());
3225 const int cv_start = (node_idx * ibdev->num_comp_vectors /
3226 num_online_nodes() + target->comp_vector)
3227 % ibdev->num_comp_vectors;
3228 const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
3229 num_online_nodes() + target->comp_vector)
3230 % ibdev->num_comp_vectors;
3231 int cpu_idx = 0;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003232
Bart Van Assched92c0da2014-10-06 17:14:36 +02003233 for_each_online_cpu(cpu) {
3234 if (cpu_to_node(cpu) != node)
3235 continue;
3236 if (ch_start + cpu_idx >= ch_end)
3237 continue;
3238 ch = &target->ch[ch_start + cpu_idx];
3239 ch->target = target;
3240 ch->comp_vector = cv_start == cv_end ? cv_start :
3241 cv_start + cpu_idx % (cv_end - cv_start);
3242 spin_lock_init(&ch->lock);
3243 INIT_LIST_HEAD(&ch->free_tx);
3244 ret = srp_new_cm_id(ch);
3245 if (ret)
3246 goto err_disconnect;
3247
3248 ret = srp_create_ch_ib(ch);
3249 if (ret)
3250 goto err_disconnect;
3251
3252 ret = srp_alloc_req_data(ch);
3253 if (ret)
3254 goto err_disconnect;
3255
3256 ret = srp_connect_ch(ch, multich);
3257 if (ret) {
3258 shost_printk(KERN_ERR, target->scsi_host,
3259 PFX "Connection %d/%d failed\n",
3260 ch_start + cpu_idx,
3261 target->ch_count);
3262 if (node_idx == 0 && cpu_idx == 0) {
3263 goto err_disconnect;
3264 } else {
3265 srp_free_ch_ib(target, ch);
3266 srp_free_req_data(target, ch);
3267 target->ch_count = ch - target->ch;
3268 break;
3269 }
3270 }
3271
3272 multich = true;
3273 cpu_idx++;
3274 }
3275 node_idx++;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003276 }
3277
Bart Van Assched92c0da2014-10-06 17:14:36 +02003278 target->scsi_host->nr_hw_queues = target->ch_count;
3279
Roland Dreieraef9ec32005-11-02 14:07:13 -08003280 ret = srp_add_target(host, target);
3281 if (ret)
3282 goto err_disconnect;
3283
Bart Van Assche34aa6542014-10-30 14:47:22 +01003284 if (target->state != SRP_TARGET_REMOVED) {
3285 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3286 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3287 be64_to_cpu(target->id_ext),
3288 be64_to_cpu(target->ioc_guid),
Bart Van Assche747fe002014-10-30 14:48:05 +01003289 be16_to_cpu(target->pkey),
Bart Van Assche34aa6542014-10-30 14:47:22 +01003290 be64_to_cpu(target->service_id),
Bart Van Assche747fe002014-10-30 14:48:05 +01003291 target->sgid.raw, target->orig_dgid.raw);
Bart Van Assche34aa6542014-10-30 14:47:22 +01003292 }
Bart Van Asschee7ffde02014-03-14 13:52:21 +01003293
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003294 ret = count;
3295
3296out:
3297 mutex_unlock(&host->add_target_mutex);
Bart Van Assche34aa6542014-10-30 14:47:22 +01003298
3299 scsi_host_put(target->scsi_host);
3300
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003301 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003302
3303err_disconnect:
3304 srp_disconnect_target(target);
3305
Bart Van Assched92c0da2014-10-06 17:14:36 +02003306 for (i = 0; i < target->ch_count; i++) {
3307 ch = &target->ch[i];
3308 srp_free_ch_ib(target, ch);
3309 srp_free_req_data(target, ch);
3310 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08003311
Bart Van Assched92c0da2014-10-06 17:14:36 +02003312 kfree(target->ch);
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003313 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003314}
3315
Tony Jonesee959b02008-02-22 00:13:36 +01003316static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003317
Tony Jonesee959b02008-02-22 00:13:36 +01003318static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3319 char *buf)
Roland Dreieraef9ec32005-11-02 14:07:13 -08003320{
Tony Jonesee959b02008-02-22 00:13:36 +01003321 struct srp_host *host = container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003322
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01003323 return sprintf(buf, "%s\n", host->srp_dev->dev->name);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003324}
3325
Tony Jonesee959b02008-02-22 00:13:36 +01003326static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003327
Tony Jonesee959b02008-02-22 00:13:36 +01003328static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3329 char *buf)
Roland Dreieraef9ec32005-11-02 14:07:13 -08003330{
Tony Jonesee959b02008-02-22 00:13:36 +01003331 struct srp_host *host = container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003332
3333 return sprintf(buf, "%d\n", host->port);
3334}
3335
Tony Jonesee959b02008-02-22 00:13:36 +01003336static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003337
Roland Dreierf5358a12006-06-17 20:37:29 -07003338static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
Roland Dreieraef9ec32005-11-02 14:07:13 -08003339{
3340 struct srp_host *host;
3341
3342 host = kzalloc(sizeof *host, GFP_KERNEL);
3343 if (!host)
3344 return NULL;
3345
3346 INIT_LIST_HEAD(&host->target_list);
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07003347 spin_lock_init(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003348 init_completion(&host->released);
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003349 mutex_init(&host->add_target_mutex);
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01003350 host->srp_dev = device;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003351 host->port = port;
3352
Tony Jonesee959b02008-02-22 00:13:36 +01003353 host->dev.class = &srp_class;
3354 host->dev.parent = device->dev->dma_device;
Kay Sieversd927e382009-01-06 10:44:39 -08003355 dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003356
Tony Jonesee959b02008-02-22 00:13:36 +01003357 if (device_register(&host->dev))
Roland Dreierf5358a12006-06-17 20:37:29 -07003358 goto free_host;
Tony Jonesee959b02008-02-22 00:13:36 +01003359 if (device_create_file(&host->dev, &dev_attr_add_target))
Roland Dreieraef9ec32005-11-02 14:07:13 -08003360 goto err_class;
Tony Jonesee959b02008-02-22 00:13:36 +01003361 if (device_create_file(&host->dev, &dev_attr_ibdev))
Roland Dreieraef9ec32005-11-02 14:07:13 -08003362 goto err_class;
Tony Jonesee959b02008-02-22 00:13:36 +01003363 if (device_create_file(&host->dev, &dev_attr_port))
Roland Dreieraef9ec32005-11-02 14:07:13 -08003364 goto err_class;
3365
3366 return host;
3367
3368err_class:
Tony Jonesee959b02008-02-22 00:13:36 +01003369 device_unregister(&host->dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003370
Roland Dreierf5358a12006-06-17 20:37:29 -07003371free_host:
Roland Dreieraef9ec32005-11-02 14:07:13 -08003372 kfree(host);
3373
3374 return NULL;
3375}
3376
3377static void srp_add_one(struct ib_device *device)
3378{
Roland Dreierf5358a12006-06-17 20:37:29 -07003379 struct srp_device *srp_dev;
3380 struct ib_device_attr *dev_attr;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003381 struct srp_host *host;
Bart Van Assche52ede082014-05-20 15:07:45 +02003382 int mr_page_shift, s, e, p;
3383 u64 max_pages_per_mr;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003384
Roland Dreierf5358a12006-06-17 20:37:29 -07003385 dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
3386 if (!dev_attr)
Sean Heftycf311cd2006-01-10 07:39:34 -08003387 return;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003388
Roland Dreierf5358a12006-06-17 20:37:29 -07003389 if (ib_query_device(device, dev_attr)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003390 pr_warn("Query device failed for %s\n", device->name);
Roland Dreierf5358a12006-06-17 20:37:29 -07003391 goto free_attr;
3392 }
3393
3394 srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
3395 if (!srp_dev)
3396 goto free_attr;
3397
Bart Van Assched1b42892014-05-20 15:07:20 +02003398 srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
3399 device->map_phys_fmr && device->unmap_fmr);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003400 srp_dev->has_fr = (dev_attr->device_cap_flags &
3401 IB_DEVICE_MEM_MGT_EXTENSIONS);
3402 if (!srp_dev->has_fmr && !srp_dev->has_fr)
3403 dev_warn(&device->dev, "neither FMR nor FR is supported\n");
3404
3405 srp_dev->use_fast_reg = (srp_dev->has_fr &&
3406 (!srp_dev->has_fmr || prefer_fr));
Bart Van Assched1b42892014-05-20 15:07:20 +02003407
Roland Dreierf5358a12006-06-17 20:37:29 -07003408 /*
3409 * Use the smallest page size supported by the HCA, down to a
David Dillow8f26c9f2011-01-14 19:45:50 -05003410 * minimum of 4096 bytes. We're unlikely to build large sglists
3411 * out of smaller entries.
Roland Dreierf5358a12006-06-17 20:37:29 -07003412 */
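	/*
	 * For example, if the smallest MR page size the HCA supports is
	 * 4 KiB, ffs(page_size_cap) returns 13, so mr_page_shift becomes
	 * max(12, 12) = 12 and mr_page_size = 4096 (illustrative only).
	 */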
Bart Van Assche52ede082014-05-20 15:07:45 +02003413 mr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1);
3414 srp_dev->mr_page_size = 1 << mr_page_shift;
3415 srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
3416 max_pages_per_mr = dev_attr->max_mr_size;
3417 do_div(max_pages_per_mr, srp_dev->mr_page_size);
3418 srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
3419 max_pages_per_mr);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003420 if (srp_dev->use_fast_reg) {
3421 srp_dev->max_pages_per_mr =
3422 min_t(u32, srp_dev->max_pages_per_mr,
3423 dev_attr->max_fast_reg_page_list_len);
3424 }
Bart Van Assche52ede082014-05-20 15:07:45 +02003425 srp_dev->mr_max_size = srp_dev->mr_page_size *
3426 srp_dev->max_pages_per_mr;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003427 pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
Bart Van Assche52ede082014-05-20 15:07:45 +02003428 device->name, mr_page_shift, dev_attr->max_mr_size,
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003429 dev_attr->max_fast_reg_page_list_len,
Bart Van Assche52ede082014-05-20 15:07:45 +02003430 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
Roland Dreierf5358a12006-06-17 20:37:29 -07003431
3432 INIT_LIST_HEAD(&srp_dev->dev_list);
3433
3434 srp_dev->dev = device;
3435 srp_dev->pd = ib_alloc_pd(device);
3436 if (IS_ERR(srp_dev->pd))
3437 goto free_dev;
3438
3439 srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
3440 IB_ACCESS_LOCAL_WRITE |
3441 IB_ACCESS_REMOTE_READ |
3442 IB_ACCESS_REMOTE_WRITE);
3443 if (IS_ERR(srp_dev->mr))
3444 goto err_pd;
3445
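	/*
	 * An IB switch exposes a single management port numbered 0, whereas
	 * HCAs number their physical ports starting at 1, hence the choice
	 * of the [s, e] port range below.
	 */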
Tom Tucker07ebafb2006-08-03 16:02:42 -05003446 if (device->node_type == RDMA_NODE_IB_SWITCH) {
Roland Dreieraef9ec32005-11-02 14:07:13 -08003447 s = 0;
3448 e = 0;
3449 } else {
3450 s = 1;
3451 e = device->phys_port_cnt;
3452 }
3453
3454 for (p = s; p <= e; ++p) {
Roland Dreierf5358a12006-06-17 20:37:29 -07003455 host = srp_add_port(srp_dev, p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003456 if (host)
Roland Dreierf5358a12006-06-17 20:37:29 -07003457 list_add_tail(&host->list, &srp_dev->dev_list);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003458 }
3459
Roland Dreierf5358a12006-06-17 20:37:29 -07003460 ib_set_client_data(device, &srp_client, srp_dev);
3461
3462 goto free_attr;
3463
3464err_pd:
3465 ib_dealloc_pd(srp_dev->pd);
3466
3467free_dev:
3468 kfree(srp_dev);
3469
3470free_attr:
3471 kfree(dev_attr);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003472}
3473
3474static void srp_remove_one(struct ib_device *device)
3475{
Roland Dreierf5358a12006-06-17 20:37:29 -07003476 struct srp_device *srp_dev;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003477 struct srp_host *host, *tmp_host;
Bart Van Asscheef6c49d2011-12-26 16:49:18 +00003478 struct srp_target_port *target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003479
Roland Dreierf5358a12006-06-17 20:37:29 -07003480 srp_dev = ib_get_client_data(device, &srp_client);
Dotan Barak1fe0cb82013-06-12 15:20:36 +02003481 if (!srp_dev)
3482 return;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003483
Roland Dreierf5358a12006-06-17 20:37:29 -07003484 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
Tony Jonesee959b02008-02-22 00:13:36 +01003485 device_unregister(&host->dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003486 /*
3487 * Wait for the sysfs entry to go away, so that no new
3488 * target ports can be created.
3489 */
3490 wait_for_completion(&host->released);
3491
3492 /*
Bart Van Asscheef6c49d2011-12-26 16:49:18 +00003493 * Remove all target ports.
Roland Dreieraef9ec32005-11-02 14:07:13 -08003494 */
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07003495 spin_lock(&host->target_lock);
Bart Van Asscheef6c49d2011-12-26 16:49:18 +00003496 list_for_each_entry(target, &host->target_list, list)
3497 srp_queue_remove_work(target);
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07003498 spin_unlock(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003499
3500 /*
Bart Van Asschebcc05912014-07-09 15:57:26 +02003501 * Wait for tl_err and target port removal tasks.
Roland Dreieraef9ec32005-11-02 14:07:13 -08003502 */
Bart Van Asscheef6c49d2011-12-26 16:49:18 +00003503 flush_workqueue(system_long_wq);
Bart Van Asschebcc05912014-07-09 15:57:26 +02003504 flush_workqueue(srp_remove_wq);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003505
Roland Dreieraef9ec32005-11-02 14:07:13 -08003506 kfree(host);
3507 }
3508
Roland Dreierf5358a12006-06-17 20:37:29 -07003509 ib_dereg_mr(srp_dev->mr);
3510 ib_dealloc_pd(srp_dev->pd);
3511
3512 kfree(srp_dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003513}
3514
FUJITA Tomonori32368222007-06-27 16:33:12 +09003515static struct srp_function_template ib_srp_transport_functions = {
Bart Van Asscheed9b2262013-10-26 14:34:27 +02003516 .has_rport_state = true,
3517 .reset_timer_if_blocked = true,
Bart Van Asschea95cadb2013-10-26 14:37:17 +02003518 .reconnect_delay = &srp_reconnect_delay,
Bart Van Asscheed9b2262013-10-26 14:34:27 +02003519 .fast_io_fail_tmo = &srp_fast_io_fail_tmo,
3520 .dev_loss_tmo = &srp_dev_loss_tmo,
3521 .reconnect = srp_rport_reconnect,
Bart Van Asschedc1bdbd2011-09-16 20:41:13 +02003522 .rport_delete = srp_rport_delete,
Bart Van Asscheed9b2262013-10-26 14:34:27 +02003523 .terminate_rport_io = srp_terminate_io,
FUJITA Tomonori32368222007-06-27 16:33:12 +09003524};
3525
Roland Dreieraef9ec32005-11-02 14:07:13 -08003526static int __init srp_init_module(void)
3527{
3528 int ret;
3529
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05003530 BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
Bart Van Asschedd5e6e32010-08-30 19:27:20 +00003531
David Dillow49248642011-01-14 18:23:24 -05003532 if (srp_sg_tablesize) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003533 pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
David Dillow49248642011-01-14 18:23:24 -05003534 if (!cmd_sg_entries)
3535 cmd_sg_entries = srp_sg_tablesize;
3536 }
3537
3538 if (!cmd_sg_entries)
3539 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
3540
3541 if (cmd_sg_entries > 255) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003542 pr_warn("Clamping cmd_sg_entries to 255\n");
David Dillow49248642011-01-14 18:23:24 -05003543 cmd_sg_entries = 255;
David Dillow1e89a192008-04-16 21:01:12 -07003544 }
3545
David Dillowc07d4242011-01-16 13:57:10 -05003546 if (!indirect_sg_entries)
3547 indirect_sg_entries = cmd_sg_entries;
3548 else if (indirect_sg_entries < cmd_sg_entries) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003549 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
3550 cmd_sg_entries);
David Dillowc07d4242011-01-16 13:57:10 -05003551 indirect_sg_entries = cmd_sg_entries;
3552 }
3553
Bart Van Asschebcc05912014-07-09 15:57:26 +02003554 srp_remove_wq = create_workqueue("srp_remove");
Wei Yongjunda05be22014-08-14 08:56:22 +08003555 if (!srp_remove_wq) {
3556 ret = -ENOMEM;
Bart Van Asschebcc05912014-07-09 15:57:26 +02003557 goto out;
3558 }
3559
3560 ret = -ENOMEM;
FUJITA Tomonori32368222007-06-27 16:33:12 +09003561 ib_srp_transport_template =
3562 srp_attach_transport(&ib_srp_transport_functions);
3563 if (!ib_srp_transport_template)
Bart Van Asschebcc05912014-07-09 15:57:26 +02003564 goto destroy_wq;
FUJITA Tomonori32368222007-06-27 16:33:12 +09003565
Roland Dreieraef9ec32005-11-02 14:07:13 -08003566 ret = class_register(&srp_class);
3567 if (ret) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003568 pr_err("couldn't register class infiniband_srp\n");
Bart Van Asschebcc05912014-07-09 15:57:26 +02003569 goto release_tr;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003570 }
3571
Michael S. Tsirkinc1a0b232006-08-21 16:40:12 -07003572 ib_sa_register_client(&srp_sa_client);
3573
Roland Dreieraef9ec32005-11-02 14:07:13 -08003574 ret = ib_register_client(&srp_client);
3575 if (ret) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003576 pr_err("couldn't register IB client\n");
Bart Van Asschebcc05912014-07-09 15:57:26 +02003577 goto unreg_sa;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003578 }
3579
Bart Van Asschebcc05912014-07-09 15:57:26 +02003580out:
3581 return ret;
3582
3583unreg_sa:
3584 ib_sa_unregister_client(&srp_sa_client);
3585 class_unregister(&srp_class);
3586
3587release_tr:
3588 srp_release_transport(ib_srp_transport_template);
3589
3590destroy_wq:
3591 destroy_workqueue(srp_remove_wq);
3592 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003593}
3594
3595static void __exit srp_cleanup_module(void)
3596{
3597 ib_unregister_client(&srp_client);
Michael S. Tsirkinc1a0b232006-08-21 16:40:12 -07003598 ib_sa_unregister_client(&srp_sa_client);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003599 class_unregister(&srp_class);
FUJITA Tomonori32368222007-06-27 16:33:12 +09003600 srp_release_transport(ib_srp_transport_template);
Bart Van Asschebcc05912014-07-09 15:57:26 +02003601 destroy_workqueue(srp_remove_wq);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003602}
3603
3604module_init(srp_init_module);
3605module_exit(srp_cleanup_module);