blob: 06d91e7e4163ce2e0a86424a12b66ceb549f28ef [file] [log] [blame]
Roland Dreieraef9ec32005-11-02 14:07:13 -08001/*
2 * Copyright (c) 2005 Cisco Systems. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
Roland Dreieraef9ec32005-11-02 14:07:13 -080031 */
32
Joe Perchesd236cd02013-02-01 14:33:58 -080033#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
Bart Van Asschee0bda7d2012-01-14 12:39:44 +000034
Roland Dreieraef9ec32005-11-02 14:07:13 -080035#include <linux/module.h>
36#include <linux/init.h>
37#include <linux/slab.h>
38#include <linux/err.h>
39#include <linux/string.h>
40#include <linux/parser.h>
41#include <linux/random.h>
Tim Schmielaude259682006-01-08 01:02:05 -080042#include <linux/jiffies.h>
Bart Van Assche56b53902014-07-09 15:58:22 +020043#include <rdma/ib_cache.h>
Roland Dreieraef9ec32005-11-02 14:07:13 -080044
Arun Sharma600634972011-07-26 16:09:06 -070045#include <linux/atomic.h>
Roland Dreieraef9ec32005-11-02 14:07:13 -080046
47#include <scsi/scsi.h>
48#include <scsi/scsi_device.h>
49#include <scsi/scsi_dbg.h>
Jack Wang71444b92013-11-07 11:37:37 +010050#include <scsi/scsi_tcq.h>
Roland Dreieraef9ec32005-11-02 14:07:13 -080051#include <scsi/srp.h>
FUJITA Tomonori32368222007-06-27 16:33:12 +090052#include <scsi/scsi_transport_srp.h>
Roland Dreieraef9ec32005-11-02 14:07:13 -080053
Roland Dreieraef9ec32005-11-02 14:07:13 -080054#include "ib_srp.h"
55
56#define DRV_NAME "ib_srp"
57#define PFX DRV_NAME ": "
Vu Phame8ca4132013-06-28 14:59:08 +020058#define DRV_VERSION "1.0"
59#define DRV_RELDATE "July 1, 2013"
Roland Dreieraef9ec32005-11-02 14:07:13 -080060
61MODULE_AUTHOR("Roland Dreier");
Bart Van Assche33ab3e52015-05-18 13:25:27 +020062MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
Roland Dreieraef9ec32005-11-02 14:07:13 -080063MODULE_LICENSE("Dual BSD/GPL");
Bart Van Assche33ab3e52015-05-18 13:25:27 +020064MODULE_VERSION(DRV_VERSION);
65MODULE_INFO(release_date, DRV_RELDATE);
Roland Dreieraef9ec32005-11-02 14:07:13 -080066
/* Module parameters. All are read-only (0444) while the module is loaded,
 * except the three timeout parameters below which are writable (S_IWUSR)
 * and validated jointly through srp_tmo_ops.
 */
static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool prefer_fr;
static bool register_always;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(prefer_fr, bool, 0444);
MODULE_PARM_DESC(prefer_fr,
"Whether to use fast registration if both FMR and fast registration are supported");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
		 "Use memory registration even for contiguous memory regions");

/* Forward declaration; the get/set callbacks are defined further below. */
static struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");

static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
/* Forward declarations for the IB client callbacks and the CM handler. */
static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr);
static void srp_send_completion(struct ib_cq *cq, void *ch_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
/* Workqueue on which asynchronous target removal work is queued. */
static struct workqueue_struct *srp_remove_wq;

/* Registration with the IB core: srp_add_one()/srp_remove_one() run once
 * for each RDMA device that appears or disappears.
 */
static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;
Bart Van Asscheed9b2262013-10-26 14:34:27 +0200150static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
151{
152 int tmo = *(int *)kp->arg;
153
154 if (tmo >= 0)
155 return sprintf(buffer, "%d", tmo);
156 else
157 return sprintf(buffer, "off");
158}
159
/*
 * Parse and validate a timeout module parameter. The string "off" maps to
 * -1, which disables the corresponding timeout. Note: strncmp() makes this
 * a prefix match, so trailing characters (e.g. a newline from sysfs writes)
 * are tolerated after "off" — presumably intentional; TODO confirm.
 * The new value is stored only if srp_tmo_valid() accepts the resulting
 * (reconnect_delay, fast_io_fail_tmo, dev_loss_tmo) combination.
 */
static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	if (strncmp(val, "off", 3) != 0) {
		res = kstrtoint(val, 0, &tmo);
		if (res)
			goto out;
	} else {
		tmo = -1;
	}
	/* Identify which of the three parameters is being set via kp->arg. */
	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}
186
/* get/set callbacks shared by the three runtime-writable timeout params. */
static struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};
191
/* The SRP target port state lives in the SCSI host's hostdata area. */
static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}
196
/* scsi_host_template .info callback: return the target description string. */
static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}
201
Roland Dreier5d7cbfd2007-08-03 10:45:18 -0700202static int srp_target_is_topspin(struct srp_target_port *target)
203{
204 static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
Raghava Kondapalli3d1ff482007-08-03 10:45:18 -0700205 static const u8 cisco_oui[3] = { 0x00, 0x1b, 0x0d };
Roland Dreier5d7cbfd2007-08-03 10:45:18 -0700206
207 return topspin_workarounds &&
Raghava Kondapalli3d1ff482007-08-03 10:45:18 -0700208 (!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
209 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
Roland Dreier5d7cbfd2007-08-03 10:45:18 -0700210}
211
/*
 * Allocate an information unit (IU): a zeroed buffer of @size bytes plus
 * its descriptor, DMA-mapped in @direction on the host's device.
 * Returns NULL on any allocation or mapping failure.
 */
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	/* Remember size/direction so srp_free_iu() can undo the mapping. */
	iu->size = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}
243
/* Unmap and free an IU allocated by srp_alloc_iu(). @iu may be NULL. */
static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}
254
/* Asynchronous QP event handler; events are only logged, not acted upon. */
static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %d\n", event->event);
}
259
/*
 * Transition a freshly created QP to the INIT state, configuring its P_Key
 * index (looked up from the target's P_Key), remote access flags and port.
 * Returns 0 on success or a negative errno.
 */
static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				    IB_ACCESS_REMOTE_WRITE);
	attr->port_num = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE |
			   IB_QP_PKEY_INDEX |
			   IB_QP_ACCESS_FLAGS |
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}
292
/*
 * Create a new IB CM identifier for @ch, destroying any previous one, and
 * (re)initialize the channel's path record from the target port parameters.
 * Returns 0 on success or a negative errno.
 */
static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, ch);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	/* Only replace the old CM ID once the new one has been created. */
	if (ch->cm_id)
		ib_destroy_cm_id(ch->cm_id);
	ch->cm_id = new_cm_id;
	ch->path.sgid = target->sgid;
	ch->path.dgid = target->orig_dgid;
	ch->path.pkey = target->pkey;
	ch->path.service_id = target->service_id;

	return 0;
}
313
Bart Van Assched1b42892014-05-20 15:07:20 +0200314static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
315{
316 struct srp_device *dev = target->srp_host->srp_dev;
317 struct ib_fmr_pool_param fmr_param;
318
319 memset(&fmr_param, 0, sizeof(fmr_param));
320 fmr_param.pool_size = target->scsi_host->can_queue;
321 fmr_param.dirty_watermark = fmr_param.pool_size / 4;
322 fmr_param.cache = 1;
Bart Van Assche52ede082014-05-20 15:07:45 +0200323 fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
324 fmr_param.page_shift = ilog2(dev->mr_page_size);
Bart Van Assched1b42892014-05-20 15:07:20 +0200325 fmr_param.access = (IB_ACCESS_LOCAL_WRITE |
326 IB_ACCESS_REMOTE_WRITE |
327 IB_ACCESS_REMOTE_READ);
328
329 return ib_create_fmr_pool(dev->pd, &fmr_param);
330}
331
Bart Van Assche5cfb1782014-05-20 15:08:34 +0200332/**
333 * srp_destroy_fr_pool() - free the resources owned by a pool
334 * @pool: Fast registration pool to be destroyed.
335 */
336static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
337{
338 int i;
339 struct srp_fr_desc *d;
340
341 if (!pool)
342 return;
343
344 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
345 if (d->frpl)
346 ib_free_fast_reg_page_list(d->frpl);
347 if (d->mr)
348 ib_dereg_mr(d->mr);
349 }
350 kfree(pool);
351}
352
353/**
354 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
355 * @device: IB device to allocate fast registration descriptors for.
356 * @pd: Protection domain associated with the FR descriptors.
357 * @pool_size: Number of descriptors to allocate.
358 * @max_page_list_len: Maximum fast registration work request page list length.
359 */
360static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
361 struct ib_pd *pd, int pool_size,
362 int max_page_list_len)
363{
364 struct srp_fr_pool *pool;
365 struct srp_fr_desc *d;
366 struct ib_mr *mr;
367 struct ib_fast_reg_page_list *frpl;
368 int i, ret = -EINVAL;
369
370 if (pool_size <= 0)
371 goto err;
372 ret = -ENOMEM;
373 pool = kzalloc(sizeof(struct srp_fr_pool) +
374 pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
375 if (!pool)
376 goto err;
377 pool->size = pool_size;
378 pool->max_page_list_len = max_page_list_len;
379 spin_lock_init(&pool->lock);
380 INIT_LIST_HEAD(&pool->free_list);
381
382 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
383 mr = ib_alloc_fast_reg_mr(pd, max_page_list_len);
384 if (IS_ERR(mr)) {
385 ret = PTR_ERR(mr);
386 goto destroy_pool;
387 }
388 d->mr = mr;
389 frpl = ib_alloc_fast_reg_page_list(device, max_page_list_len);
390 if (IS_ERR(frpl)) {
391 ret = PTR_ERR(frpl);
392 goto destroy_pool;
393 }
394 d->frpl = frpl;
395 list_add_tail(&d->entry, &pool->free_list);
396 }
397
398out:
399 return pool;
400
401destroy_pool:
402 srp_destroy_fr_pool(pool);
403
404err:
405 pool = ERR_PTR(ret);
406 goto out;
407}
408
409/**
410 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
411 * @pool: Pool to obtain descriptor from.
412 */
413static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
414{
415 struct srp_fr_desc *d = NULL;
416 unsigned long flags;
417
418 spin_lock_irqsave(&pool->lock, flags);
419 if (!list_empty(&pool->free_list)) {
420 d = list_first_entry(&pool->free_list, typeof(*d), entry);
421 list_del(&d->entry);
422 }
423 spin_unlock_irqrestore(&pool->lock, flags);
424
425 return d;
426}
427
428/**
429 * srp_fr_pool_put() - put an FR descriptor back in the free list
430 * @pool: Pool the descriptor was allocated from.
431 * @desc: Pointer to an array of fast registration descriptor pointers.
432 * @n: Number of descriptors to put back.
433 *
434 * Note: The caller must already have queued an invalidation request for
435 * desc->mr->rkey before calling this function.
436 */
437static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
438 int n)
439{
440 unsigned long flags;
441 int i;
442
443 spin_lock_irqsave(&pool->lock, flags);
444 for (i = 0; i < n; i++)
445 list_add(&desc[i]->entry, &pool->free_list);
446 spin_unlock_irqrestore(&pool->lock, flags);
447}
448
/* Create a fast registration pool sized for the SCSI host's queue depth. */
static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;

	return srp_create_fr_pool(dev->dev, dev->pd,
				  target->scsi_host->can_queue,
				  dev->max_pages_per_mr);
}
457
Bart Van Assche7dad6b22014-10-21 18:00:35 +0200458/**
459 * srp_destroy_qp() - destroy an RDMA queue pair
460 * @ch: SRP RDMA channel.
461 *
462 * Change a queue pair into the error state and wait until all receive
463 * completions have been processed before destroying it. This avoids that
464 * the receive completion handler can access the queue pair while it is
465 * being destroyed.
466 */
467static void srp_destroy_qp(struct srp_rdma_ch *ch)
468{
Bart Van Assche7dad6b22014-10-21 18:00:35 +0200469 static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
470 static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID };
471 struct ib_recv_wr *bad_wr;
472 int ret;
473
474 /* Destroying a QP and reusing ch->done is only safe if not connected */
Bart Van Asschec014c8c2015-05-18 13:23:57 +0200475 WARN_ON_ONCE(ch->connected);
Bart Van Assche7dad6b22014-10-21 18:00:35 +0200476
477 ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
478 WARN_ONCE(ret, "ib_cm_init_qp_attr() returned %d\n", ret);
479 if (ret)
480 goto out;
481
482 init_completion(&ch->done);
483 ret = ib_post_recv(ch->qp, &wr, &bad_wr);
484 WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
485 if (ret == 0)
486 wait_for_completion(&ch->done);
487
488out:
489 ib_destroy_qp(ch->qp);
490}
491
/*
 * (Re)create the IB resources of an RDMA channel: receive and send CQs,
 * the RC QP, and a memory registration pool (fast registration or FMR,
 * depending on the device capabilities). New resources are fully created
 * before any existing ones are torn down, so on failure @ch keeps its old
 * resources untouched. Returns 0 on success or a negative errno.
 */
static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	struct ib_fmr_pool *fmr_pool = NULL;
	struct srp_fr_pool *fr_pool = NULL;
	/* Fast registration needs an extra send WR per request. */
	const int m = 1 + dev->use_fast_reg;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	/* + 1 for SRP_LAST_WR_ID */
	recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
			       target->queue_size + 1, ch->comp_vector);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
			       m * target->queue_size, ch->comp_vector);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler = srp_qp_event;
	init_attr->cap.max_send_wr = m * target->queue_size;
	init_attr->cap.max_recv_wr = target->queue_size + 1;
	init_attr->cap.max_recv_sge = 1;
	init_attr->cap.max_send_sge = 1;
	init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	init_attr->qp_type = IB_QPT_RC;
	init_attr->send_cq = send_cq;
	init_attr->recv_cq = recv_cq;

	qp = ib_create_qp(dev->pd, init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, qp);
	if (ret)
		goto err_qp;

	if (dev->use_fast_reg && dev->has_fr) {
		fr_pool = srp_alloc_fr_pool(target);
		if (IS_ERR(fr_pool)) {
			ret = PTR_ERR(fr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
		ch->fr_pool = fr_pool;
	} else if (!dev->use_fast_reg && dev->has_fmr) {
		fmr_pool = srp_alloc_fmr_pool(target);
		if (IS_ERR(fmr_pool)) {
			ret = PTR_ERR(fmr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FMR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
		ch->fmr_pool = fmr_pool;
	}

	/* Everything new is in place; now retire the old QP and CQs. */
	if (ch->qp)
		srp_destroy_qp(ch);
	if (ch->recv_cq)
		ib_destroy_cq(ch->recv_cq);
	if (ch->send_cq)
		ib_destroy_cq(ch->send_cq);

	ch->qp = qp;
	ch->recv_cq = recv_cq;
	ch->send_cq = send_cq;

	kfree(init_attr);
	return 0;

err_qp:
	ib_destroy_qp(qp);

err_send_cq:
	ib_destroy_cq(send_cq);

err_recv_cq:
	ib_destroy_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}
596
/*
 * Release all IB resources of @ch: CM ID, registration pools, QP, CQs and
 * the receive/transmit IU rings. Safe to call more than once.
 *
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
			   struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	int i;

	/* ch->target is cleared below on first teardown; nothing left to do. */
	if (!ch->target)
		return;

	if (ch->cm_id) {
		ib_destroy_cm_id(ch->cm_id);
		ch->cm_id = NULL;
	}

	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */
	if (!ch->qp)
		return;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
	} else {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
	}
	srp_destroy_qp(ch);
	ib_destroy_cq(ch->send_cq);
	ib_destroy_cq(ch->recv_cq);

	/*
	 * Avoid that the SCSI error handler tries to use this channel after
	 * it has been freed. The SCSI error handler can namely continue
	 * trying to perform recovery actions after scsi_remove_host()
	 * returned.
	 */
	ch->target = NULL;

	ch->qp = NULL;
	ch->send_cq = ch->recv_cq = NULL;

	if (ch->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->rx_ring[i]);
		kfree(ch->rx_ring);
		ch->rx_ring = NULL;
	}
	if (ch->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->tx_ring[i]);
		kfree(ch->tx_ring);
		ch->tx_ring = NULL;
	}
}
654
655static void srp_path_rec_completion(int status,
656 struct ib_sa_path_rec *pathrec,
Bart Van Assche509c07b2014-10-30 14:48:30 +0100657 void *ch_ptr)
Roland Dreieraef9ec32005-11-02 14:07:13 -0800658{
Bart Van Assche509c07b2014-10-30 14:48:30 +0100659 struct srp_rdma_ch *ch = ch_ptr;
660 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -0800661
Bart Van Assche509c07b2014-10-30 14:48:30 +0100662 ch->status = status;
Roland Dreieraef9ec32005-11-02 14:07:13 -0800663 if (status)
David Dillow7aa54bd2008-01-07 18:23:41 -0500664 shost_printk(KERN_ERR, target->scsi_host,
665 PFX "Got failed path rec status %d\n", status);
Roland Dreieraef9ec32005-11-02 14:07:13 -0800666 else
Bart Van Assche509c07b2014-10-30 14:48:30 +0100667 ch->path = *pathrec;
668 complete(&ch->done);
Roland Dreieraef9ec32005-11-02 14:07:13 -0800669}
670
/*
 * Issue an SA path record query for @ch and wait (interruptibly) for the
 * completion callback. Returns 0 on success, a negative errno if the query
 * could not be started or the wait was interrupted, or the query status.
 */
static int srp_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	ch->path.numb_path = 1;

	init_completion(&ch->done);

	ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
					       target->srp_host->srp_dev->dev,
					       target->srp_host->port,
					       &ch->path,
					       IB_SA_PATH_REC_SERVICE_ID |
					       IB_SA_PATH_REC_DGID |
					       IB_SA_PATH_REC_SGID |
					       IB_SA_PATH_REC_NUMB_PATH |
					       IB_SA_PATH_REC_PKEY,
					       SRP_PATH_REC_TIMEOUT_MS,
					       GFP_KERNEL,
					       srp_path_rec_completion,
					       ch, &ch->path_query);
	if (ch->path_query_id < 0)
		return ch->path_query_id;

	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		return ret;

	if (ch->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return ch->status;
}
706
Bart Van Assched92c0da2014-10-06 17:14:36 +0200707static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
Roland Dreieraef9ec32005-11-02 14:07:13 -0800708{
Bart Van Assche509c07b2014-10-30 14:48:30 +0100709 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -0800710 struct {
711 struct ib_cm_req_param param;
712 struct srp_login_req priv;
713 } *req = NULL;
714 int status;
715
716 req = kzalloc(sizeof *req, GFP_KERNEL);
717 if (!req)
718 return -ENOMEM;
719
Bart Van Assche509c07b2014-10-30 14:48:30 +0100720 req->param.primary_path = &ch->path;
Roland Dreieraef9ec32005-11-02 14:07:13 -0800721 req->param.alternate_path = NULL;
722 req->param.service_id = target->service_id;
Bart Van Assche509c07b2014-10-30 14:48:30 +0100723 req->param.qp_num = ch->qp->qp_num;
724 req->param.qp_type = ch->qp->qp_type;
Roland Dreieraef9ec32005-11-02 14:07:13 -0800725 req->param.private_data = &req->priv;
726 req->param.private_data_len = sizeof req->priv;
727 req->param.flow_control = 1;
728
729 get_random_bytes(&req->param.starting_psn, 4);
730 req->param.starting_psn &= 0xffffff;
731
732 /*
733 * Pick some arbitrary defaults here; we could make these
734 * module parameters if anyone cared about setting them.
735 */
736 req->param.responder_resources = 4;
737 req->param.remote_cm_response_timeout = 20;
738 req->param.local_cm_response_timeout = 20;
Vu Pham7bb312e2013-10-26 14:31:27 +0200739 req->param.retry_count = target->tl_retry_count;
Roland Dreieraef9ec32005-11-02 14:07:13 -0800740 req->param.rnr_retry_count = 7;
741 req->param.max_cm_retries = 15;
742
743 req->priv.opcode = SRP_LOGIN_REQ;
744 req->priv.tag = 0;
David Dillow49248642011-01-14 18:23:24 -0500745 req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
Roland Dreieraef9ec32005-11-02 14:07:13 -0800746 req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
747 SRP_BUF_FORMAT_INDIRECT);
Bart Van Assched92c0da2014-10-06 17:14:36 +0200748 req->priv.req_flags = (multich ? SRP_MULTICHAN_MULTI :
749 SRP_MULTICHAN_SINGLE);
Ramachandra K0c0450db2006-06-17 20:37:38 -0700750 /*
Roland Dreier3cd96562006-09-22 15:22:46 -0700751 * In the published SRP specification (draft rev. 16a), the
Ramachandra K0c0450db2006-06-17 20:37:38 -0700752 * port identifier format is 8 bytes of ID extension followed
753 * by 8 bytes of GUID. Older drafts put the two halves in the
754 * opposite order, so that the GUID comes first.
755 *
756 * Targets conforming to these obsolete drafts can be
757 * recognized by the I/O Class they report.
758 */
759 if (target->io_class == SRP_REV10_IB_IO_CLASS) {
760 memcpy(req->priv.initiator_port_id,
Bart Van Assche747fe002014-10-30 14:48:05 +0100761 &target->sgid.global.interface_id, 8);
Ramachandra K0c0450db2006-06-17 20:37:38 -0700762 memcpy(req->priv.initiator_port_id + 8,
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +0200763 &target->initiator_ext, 8);
Ramachandra K0c0450db2006-06-17 20:37:38 -0700764 memcpy(req->priv.target_port_id, &target->ioc_guid, 8);
765 memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
766 } else {
767 memcpy(req->priv.initiator_port_id,
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +0200768 &target->initiator_ext, 8);
769 memcpy(req->priv.initiator_port_id + 8,
Bart Van Assche747fe002014-10-30 14:48:05 +0100770 &target->sgid.global.interface_id, 8);
Ramachandra K0c0450db2006-06-17 20:37:38 -0700771 memcpy(req->priv.target_port_id, &target->id_ext, 8);
772 memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
773 }
774
Roland Dreieraef9ec32005-11-02 14:07:13 -0800775 /*
776 * Topspin/Cisco SRP targets will reject our login unless we
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +0200777 * zero out the first 8 bytes of our initiator port ID and set
778 * the second 8 bytes to the local node GUID.
Roland Dreieraef9ec32005-11-02 14:07:13 -0800779 */
Roland Dreier5d7cbfd2007-08-03 10:45:18 -0700780 if (srp_target_is_topspin(target)) {
David Dillow7aa54bd2008-01-07 18:23:41 -0500781 shost_printk(KERN_DEBUG, target->scsi_host,
782 PFX "Topspin/Cisco initiator port ID workaround "
783 "activated for target GUID %016llx\n",
Bart Van Assche45c37ca2015-05-18 13:25:10 +0200784 be64_to_cpu(target->ioc_guid));
Roland Dreieraef9ec32005-11-02 14:07:13 -0800785 memset(req->priv.initiator_port_id, 0, 8);
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +0200786 memcpy(req->priv.initiator_port_id + 8,
Greg Kroah-Hartman05321932008-03-06 00:13:36 +0100787 &target->srp_host->srp_dev->dev->node_guid, 8);
Roland Dreieraef9ec32005-11-02 14:07:13 -0800788 }
Roland Dreieraef9ec32005-11-02 14:07:13 -0800789
Bart Van Assche509c07b2014-10-30 14:48:30 +0100790 status = ib_send_cm_req(ch->cm_id, &req->param);
Roland Dreieraef9ec32005-11-02 14:07:13 -0800791
792 kfree(req);
793
794 return status;
795}
796
Bart Van Asscheef6c49d2011-12-26 16:49:18 +0000797static bool srp_queue_remove_work(struct srp_target_port *target)
798{
799 bool changed = false;
800
801 spin_lock_irq(&target->lock);
802 if (target->state != SRP_TARGET_REMOVED) {
803 target->state = SRP_TARGET_REMOVED;
804 changed = true;
805 }
806 spin_unlock_irq(&target->lock);
807
808 if (changed)
Bart Van Asschebcc05912014-07-09 15:57:26 +0200809 queue_work(srp_remove_wq, &target->remove_work);
Bart Van Asscheef6c49d2011-12-26 16:49:18 +0000810
811 return changed;
812}
813
Roland Dreieraef9ec32005-11-02 14:07:13 -0800814static void srp_disconnect_target(struct srp_target_port *target)
815{
Bart Van Assched92c0da2014-10-06 17:14:36 +0200816 struct srp_rdma_ch *ch;
817 int i;
Bart Van Assche509c07b2014-10-30 14:48:30 +0100818
Bart Van Asschec014c8c2015-05-18 13:23:57 +0200819 /* XXX should send SRP_I_LOGOUT request */
Roland Dreieraef9ec32005-11-02 14:07:13 -0800820
Bart Van Asschec014c8c2015-05-18 13:23:57 +0200821 for (i = 0; i < target->ch_count; i++) {
822 ch = &target->ch[i];
823 ch->connected = false;
824 if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
825 shost_printk(KERN_DEBUG, target->scsi_host,
826 PFX "Sending CM DREQ failed\n");
Bart Van Assche294c8752011-12-25 12:18:12 +0000827 }
Roland Dreiere6581052006-05-17 09:13:21 -0700828 }
Roland Dreieraef9ec32005-11-02 14:07:13 -0800829}
830
Bart Van Assche509c07b2014-10-30 14:48:30 +0100831static void srp_free_req_data(struct srp_target_port *target,
832 struct srp_rdma_ch *ch)
David Dillow8f26c9f2011-01-14 19:45:50 -0500833{
Bart Van Assche5cfb1782014-05-20 15:08:34 +0200834 struct srp_device *dev = target->srp_host->srp_dev;
835 struct ib_device *ibdev = dev->dev;
David Dillow8f26c9f2011-01-14 19:45:50 -0500836 struct srp_request *req;
837 int i;
838
Bart Van Assche47513cf2015-05-18 13:25:54 +0200839 if (!ch->req_ring)
Bart Van Assche4d73f952013-10-26 14:40:37 +0200840 return;
841
842 for (i = 0; i < target->req_ring_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +0100843 req = &ch->req_ring[i];
Bart Van Assche5cfb1782014-05-20 15:08:34 +0200844 if (dev->use_fast_reg)
845 kfree(req->fr_list);
846 else
847 kfree(req->fmr_list);
David Dillow8f26c9f2011-01-14 19:45:50 -0500848 kfree(req->map_page);
David Dillowc07d4242011-01-16 13:57:10 -0500849 if (req->indirect_dma_addr) {
850 ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
851 target->indirect_size,
852 DMA_TO_DEVICE);
853 }
854 kfree(req->indirect_desc);
David Dillow8f26c9f2011-01-14 19:45:50 -0500855 }
Bart Van Assche4d73f952013-10-26 14:40:37 +0200856
Bart Van Assche509c07b2014-10-30 14:48:30 +0100857 kfree(ch->req_ring);
858 ch->req_ring = NULL;
David Dillow8f26c9f2011-01-14 19:45:50 -0500859}
860
Bart Van Assche509c07b2014-10-30 14:48:30 +0100861static int srp_alloc_req_data(struct srp_rdma_ch *ch)
Bart Van Asscheb81d00b2013-10-26 14:38:47 +0200862{
Bart Van Assche509c07b2014-10-30 14:48:30 +0100863 struct srp_target_port *target = ch->target;
Bart Van Asscheb81d00b2013-10-26 14:38:47 +0200864 struct srp_device *srp_dev = target->srp_host->srp_dev;
865 struct ib_device *ibdev = srp_dev->dev;
866 struct srp_request *req;
Bart Van Assche5cfb1782014-05-20 15:08:34 +0200867 void *mr_list;
Bart Van Asscheb81d00b2013-10-26 14:38:47 +0200868 dma_addr_t dma_addr;
869 int i, ret = -ENOMEM;
870
Bart Van Assche509c07b2014-10-30 14:48:30 +0100871 ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
872 GFP_KERNEL);
873 if (!ch->req_ring)
Bart Van Assche4d73f952013-10-26 14:40:37 +0200874 goto out;
875
876 for (i = 0; i < target->req_ring_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +0100877 req = &ch->req_ring[i];
Bart Van Assche5cfb1782014-05-20 15:08:34 +0200878 mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
879 GFP_KERNEL);
880 if (!mr_list)
881 goto out;
882 if (srp_dev->use_fast_reg)
883 req->fr_list = mr_list;
884 else
885 req->fmr_list = mr_list;
Bart Van Assche52ede082014-05-20 15:07:45 +0200886 req->map_page = kmalloc(srp_dev->max_pages_per_mr *
Bart Van Assched1b42892014-05-20 15:07:20 +0200887 sizeof(void *), GFP_KERNEL);
Bart Van Assche5cfb1782014-05-20 15:08:34 +0200888 if (!req->map_page)
889 goto out;
Bart Van Asscheb81d00b2013-10-26 14:38:47 +0200890 req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
Bart Van Assche5cfb1782014-05-20 15:08:34 +0200891 if (!req->indirect_desc)
Bart Van Asscheb81d00b2013-10-26 14:38:47 +0200892 goto out;
893
894 dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
895 target->indirect_size,
896 DMA_TO_DEVICE);
897 if (ib_dma_mapping_error(ibdev, dma_addr))
898 goto out;
899
900 req->indirect_dma_addr = dma_addr;
Bart Van Asscheb81d00b2013-10-26 14:38:47 +0200901 }
902 ret = 0;
903
904out:
905 return ret;
906}
907
Bart Van Assche683b1592012-01-14 12:40:44 +0000908/**
909 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
910 * @shost: SCSI host whose attributes to remove from sysfs.
911 *
912 * Note: Any attributes defined in the host template and that did not exist
913 * before invocation of this function will be ignored.
914 */
915static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
916{
917 struct device_attribute **attr;
918
919 for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
920 device_remove_file(&shost->shost_dev, *attr);
921}
922
Bart Van Asscheee12d6a2011-12-25 19:41:07 +0000923static void srp_remove_target(struct srp_target_port *target)
924{
Bart Van Assched92c0da2014-10-06 17:14:36 +0200925 struct srp_rdma_ch *ch;
926 int i;
Bart Van Assche509c07b2014-10-30 14:48:30 +0100927
Bart Van Asscheef6c49d2011-12-26 16:49:18 +0000928 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
929
Bart Van Asscheee12d6a2011-12-25 19:41:07 +0000930 srp_del_scsi_host_attr(target->scsi_host);
Bart Van Assche9dd69a62013-10-26 14:32:30 +0200931 srp_rport_get(target->rport);
Bart Van Asscheee12d6a2011-12-25 19:41:07 +0000932 srp_remove_host(target->scsi_host);
933 scsi_remove_host(target->scsi_host);
Bart Van Assche93079162013-12-11 17:06:14 +0100934 srp_stop_rport_timers(target->rport);
Bart Van Asscheef6c49d2011-12-26 16:49:18 +0000935 srp_disconnect_target(target);
Bart Van Assched92c0da2014-10-06 17:14:36 +0200936 for (i = 0; i < target->ch_count; i++) {
937 ch = &target->ch[i];
938 srp_free_ch_ib(target, ch);
939 }
Bart Van Asschec1120f82013-10-26 14:35:08 +0200940 cancel_work_sync(&target->tl_err_work);
Bart Van Assche9dd69a62013-10-26 14:32:30 +0200941 srp_rport_put(target->rport);
Bart Van Assched92c0da2014-10-06 17:14:36 +0200942 for (i = 0; i < target->ch_count; i++) {
943 ch = &target->ch[i];
944 srp_free_req_data(target, ch);
945 }
946 kfree(target->ch);
947 target->ch = NULL;
Vu Pham65d7dd22013-10-10 13:50:29 +0200948
949 spin_lock(&target->srp_host->target_lock);
950 list_del(&target->list);
951 spin_unlock(&target->srp_host->target_lock);
952
Bart Van Asscheee12d6a2011-12-25 19:41:07 +0000953 scsi_host_put(target->scsi_host);
954}
955
David Howellsc4028952006-11-22 14:57:56 +0000956static void srp_remove_work(struct work_struct *work)
Roland Dreieraef9ec32005-11-02 14:07:13 -0800957{
David Howellsc4028952006-11-22 14:57:56 +0000958 struct srp_target_port *target =
Bart Van Asscheef6c49d2011-12-26 16:49:18 +0000959 container_of(work, struct srp_target_port, remove_work);
Roland Dreieraef9ec32005-11-02 14:07:13 -0800960
Bart Van Asscheef6c49d2011-12-26 16:49:18 +0000961 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
Roland Dreieraef9ec32005-11-02 14:07:13 -0800962
Bart Van Assche96fc2482013-06-28 14:51:26 +0200963 srp_remove_target(target);
Roland Dreieraef9ec32005-11-02 14:07:13 -0800964}
965
Bart Van Asschedc1bdbd2011-09-16 20:41:13 +0200966static void srp_rport_delete(struct srp_rport *rport)
967{
968 struct srp_target_port *target = rport->lld_data;
969
970 srp_queue_remove_work(target);
971}
972
Bart Van Asschec014c8c2015-05-18 13:23:57 +0200973/**
974 * srp_connected_ch() - number of connected channels
975 * @target: SRP target port.
976 */
977static int srp_connected_ch(struct srp_target_port *target)
978{
979 int i, c = 0;
980
981 for (i = 0; i < target->ch_count; i++)
982 c += target->ch[i].connected;
983
984 return c;
985}
986
Bart Van Assched92c0da2014-10-06 17:14:36 +0200987static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
Roland Dreieraef9ec32005-11-02 14:07:13 -0800988{
Bart Van Assche509c07b2014-10-30 14:48:30 +0100989 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -0800990 int ret;
991
Bart Van Asschec014c8c2015-05-18 13:23:57 +0200992 WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);
Bart Van Assche294c8752011-12-25 12:18:12 +0000993
Bart Van Assche509c07b2014-10-30 14:48:30 +0100994 ret = srp_lookup_path(ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -0800995 if (ret)
996 return ret;
997
998 while (1) {
Bart Van Assche509c07b2014-10-30 14:48:30 +0100999 init_completion(&ch->done);
Bart Van Assched92c0da2014-10-06 17:14:36 +02001000 ret = srp_send_req(ch, multich);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001001 if (ret)
1002 return ret;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001003 ret = wait_for_completion_interruptible(&ch->done);
Bart Van Asschea702adc2014-03-14 13:53:10 +01001004 if (ret < 0)
1005 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001006
1007 /*
1008 * The CM event handling code will set status to
1009 * SRP_PORT_REDIRECT if we get a port redirect REJ
1010 * back, or SRP_DLID_REDIRECT if we get a lid/qp
1011 * redirect REJ back.
1012 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01001013 switch (ch->status) {
Roland Dreieraef9ec32005-11-02 14:07:13 -08001014 case 0:
Bart Van Asschec014c8c2015-05-18 13:23:57 +02001015 ch->connected = true;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001016 return 0;
1017
1018 case SRP_PORT_REDIRECT:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001019 ret = srp_lookup_path(ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001020 if (ret)
1021 return ret;
1022 break;
1023
1024 case SRP_DLID_REDIRECT:
1025 break;
1026
David Dillow9fe4bcf2008-01-08 17:08:52 -05001027 case SRP_STALE_CONN:
David Dillow9fe4bcf2008-01-08 17:08:52 -05001028 shost_printk(KERN_ERR, target->scsi_host, PFX
Bart Van Assche205619f2014-10-30 14:46:55 +01001029 "giving up on stale connection\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01001030 ch->status = -ECONNRESET;
1031 return ch->status;
David Dillow9fe4bcf2008-01-08 17:08:52 -05001032
Roland Dreieraef9ec32005-11-02 14:07:13 -08001033 default:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001034 return ch->status;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001035 }
1036 }
1037}
1038
Bart Van Assche509c07b2014-10-30 14:48:30 +01001039static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001040{
1041 struct ib_send_wr *bad_wr;
1042 struct ib_send_wr wr = {
1043 .opcode = IB_WR_LOCAL_INV,
1044 .wr_id = LOCAL_INV_WR_ID_MASK,
1045 .next = NULL,
1046 .num_sge = 0,
1047 .send_flags = 0,
1048 .ex.invalidate_rkey = rkey,
1049 };
1050
Bart Van Assche509c07b2014-10-30 14:48:30 +01001051 return ib_post_send(ch->qp, &wr, &bad_wr);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001052}
1053
Roland Dreierd945e1d2006-05-09 10:50:28 -07001054static void srp_unmap_data(struct scsi_cmnd *scmnd,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001055 struct srp_rdma_ch *ch,
Roland Dreierd945e1d2006-05-09 10:50:28 -07001056 struct srp_request *req)
1057{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001058 struct srp_target_port *target = ch->target;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001059 struct srp_device *dev = target->srp_host->srp_dev;
1060 struct ib_device *ibdev = dev->dev;
1061 int i, res;
David Dillow8f26c9f2011-01-14 19:45:50 -05001062
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001063 if (!scsi_sglist(scmnd) ||
Roland Dreierd945e1d2006-05-09 10:50:28 -07001064 (scmnd->sc_data_direction != DMA_TO_DEVICE &&
1065 scmnd->sc_data_direction != DMA_FROM_DEVICE))
1066 return;
1067
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001068 if (dev->use_fast_reg) {
1069 struct srp_fr_desc **pfr;
1070
1071 for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001072 res = srp_inv_rkey(ch, (*pfr)->mr->rkey);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001073 if (res < 0) {
1074 shost_printk(KERN_ERR, target->scsi_host, PFX
1075 "Queueing INV WR for rkey %#x failed (%d)\n",
1076 (*pfr)->mr->rkey, res);
1077 queue_work(system_long_wq,
1078 &target->tl_err_work);
1079 }
1080 }
1081 if (req->nmdesc)
Bart Van Assche509c07b2014-10-30 14:48:30 +01001082 srp_fr_pool_put(ch->fr_pool, req->fr_list,
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001083 req->nmdesc);
1084 } else {
1085 struct ib_pool_fmr **pfmr;
1086
1087 for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
1088 ib_fmr_pool_unmap(*pfmr);
1089 }
Roland Dreierf5358a12006-06-17 20:37:29 -07001090
David Dillow8f26c9f2011-01-14 19:45:50 -05001091 ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
1092 scmnd->sc_data_direction);
Roland Dreierd945e1d2006-05-09 10:50:28 -07001093}
1094
Bart Van Assche22032992012-08-14 13:18:53 +00001095/**
1096 * srp_claim_req - Take ownership of the scmnd associated with a request.
Bart Van Assche509c07b2014-10-30 14:48:30 +01001097 * @ch: SRP RDMA channel.
Bart Van Assche22032992012-08-14 13:18:53 +00001098 * @req: SRP request.
Bart Van Asscheb3fe6282014-03-14 13:54:11 +01001099 * @sdev: If not NULL, only take ownership for this SCSI device.
Bart Van Assche22032992012-08-14 13:18:53 +00001100 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
1101 * ownership of @req->scmnd if it equals @scmnd.
1102 *
1103 * Return value:
1104 * Either NULL or a pointer to the SCSI command the caller became owner of.
1105 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01001106static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
Bart Van Assche22032992012-08-14 13:18:53 +00001107 struct srp_request *req,
Bart Van Asscheb3fe6282014-03-14 13:54:11 +01001108 struct scsi_device *sdev,
Bart Van Assche22032992012-08-14 13:18:53 +00001109 struct scsi_cmnd *scmnd)
Ishai Rabinovitz526b4ca2006-06-17 20:37:38 -07001110{
Bart Van Assche94a91742010-11-26 14:50:09 -05001111 unsigned long flags;
1112
Bart Van Assche509c07b2014-10-30 14:48:30 +01001113 spin_lock_irqsave(&ch->lock, flags);
Bart Van Asscheb3fe6282014-03-14 13:54:11 +01001114 if (req->scmnd &&
1115 (!sdev || req->scmnd->device == sdev) &&
1116 (!scmnd || req->scmnd == scmnd)) {
Bart Van Assche22032992012-08-14 13:18:53 +00001117 scmnd = req->scmnd;
1118 req->scmnd = NULL;
Bart Van Assche22032992012-08-14 13:18:53 +00001119 } else {
1120 scmnd = NULL;
1121 }
Bart Van Assche509c07b2014-10-30 14:48:30 +01001122 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche22032992012-08-14 13:18:53 +00001123
1124 return scmnd;
1125}
1126
1127/**
1128 * srp_free_req() - Unmap data and add request to the free request list.
Bart Van Assche509c07b2014-10-30 14:48:30 +01001129 * @ch: SRP RDMA channel.
Bart Van Asscheaf246632014-05-20 15:04:21 +02001130 * @req: Request to be freed.
1131 * @scmnd: SCSI command associated with @req.
1132 * @req_lim_delta: Amount to be added to @target->req_lim.
Bart Van Assche22032992012-08-14 13:18:53 +00001133 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01001134static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
1135 struct scsi_cmnd *scmnd, s32 req_lim_delta)
Bart Van Assche22032992012-08-14 13:18:53 +00001136{
1137 unsigned long flags;
1138
Bart Van Assche509c07b2014-10-30 14:48:30 +01001139 srp_unmap_data(scmnd, ch, req);
Bart Van Assche22032992012-08-14 13:18:53 +00001140
Bart Van Assche509c07b2014-10-30 14:48:30 +01001141 spin_lock_irqsave(&ch->lock, flags);
1142 ch->req_lim += req_lim_delta;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001143 spin_unlock_irqrestore(&ch->lock, flags);
Ishai Rabinovitz526b4ca2006-06-17 20:37:38 -07001144}
1145
Bart Van Assche509c07b2014-10-30 14:48:30 +01001146static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
1147 struct scsi_device *sdev, int result)
Ishai Rabinovitz526b4ca2006-06-17 20:37:38 -07001148{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001149 struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);
Bart Van Assche22032992012-08-14 13:18:53 +00001150
1151 if (scmnd) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001152 srp_free_req(ch, req, scmnd, 0);
Bart Van Asscheed9b2262013-10-26 14:34:27 +02001153 scmnd->result = result;
Bart Van Assche22032992012-08-14 13:18:53 +00001154 scmnd->scsi_done(scmnd);
Bart Van Assche22032992012-08-14 13:18:53 +00001155 }
Ishai Rabinovitz526b4ca2006-06-17 20:37:38 -07001156}
1157
Bart Van Asscheed9b2262013-10-26 14:34:27 +02001158static void srp_terminate_io(struct srp_rport *rport)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001159{
Bart Van Asscheed9b2262013-10-26 14:34:27 +02001160 struct srp_target_port *target = rport->lld_data;
Bart Van Assched92c0da2014-10-06 17:14:36 +02001161 struct srp_rdma_ch *ch;
Bart Van Asscheb3fe6282014-03-14 13:54:11 +01001162 struct Scsi_Host *shost = target->scsi_host;
1163 struct scsi_device *sdev;
Bart Van Assched92c0da2014-10-06 17:14:36 +02001164 int i, j;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001165
Bart Van Asscheb3fe6282014-03-14 13:54:11 +01001166 /*
1167 * Invoking srp_terminate_io() while srp_queuecommand() is running
1168 * is not safe. Hence the warning statement below.
1169 */
1170 shost_for_each_device(sdev, shost)
1171 WARN_ON_ONCE(sdev->request_queue->request_fn_active);
1172
Bart Van Assched92c0da2014-10-06 17:14:36 +02001173 for (i = 0; i < target->ch_count; i++) {
1174 ch = &target->ch[i];
Bart Van Assche509c07b2014-10-30 14:48:30 +01001175
Bart Van Assched92c0da2014-10-06 17:14:36 +02001176 for (j = 0; j < target->req_ring_size; ++j) {
1177 struct srp_request *req = &ch->req_ring[j];
1178
1179 srp_finish_req(ch, req, NULL,
1180 DID_TRANSPORT_FAILFAST << 16);
1181 }
Bart Van Asscheed9b2262013-10-26 14:34:27 +02001182 }
1183}
1184
1185/*
1186 * It is up to the caller to ensure that srp_rport_reconnect() calls are
1187 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
1188 * srp_reset_device() or srp_reset_host() calls will occur while this function
1189 * is in progress. One way to realize that is not to call this function
1190 * directly but to call srp_reconnect_rport() instead since that last function
1191 * serializes calls of this function via rport->mutex and also blocks
1192 * srp_queuecommand() calls before invoking this function.
1193 */
1194static int srp_rport_reconnect(struct srp_rport *rport)
1195{
1196 struct srp_target_port *target = rport->lld_data;
Bart Van Assched92c0da2014-10-06 17:14:36 +02001197 struct srp_rdma_ch *ch;
1198 int i, j, ret = 0;
1199 bool multich = false;
Bart Van Assche09be70a2012-03-17 17:18:54 +00001200
Roland Dreieraef9ec32005-11-02 14:07:13 -08001201 srp_disconnect_target(target);
Bart Van Assche34aa6542014-10-30 14:47:22 +01001202
1203 if (target->state == SRP_TARGET_SCANNING)
1204 return -ENODEV;
1205
Roland Dreieraef9ec32005-11-02 14:07:13 -08001206 /*
Bart Van Asschec7c4e7f2013-02-21 17:19:04 +00001207 * Now get a new local CM ID so that we avoid confusing the target in
1208 * case things are really fouled up. Doing so also ensures that all CM
1209 * callbacks will have finished before a new QP is allocated.
Roland Dreieraef9ec32005-11-02 14:07:13 -08001210 */
Bart Van Assched92c0da2014-10-06 17:14:36 +02001211 for (i = 0; i < target->ch_count; i++) {
1212 ch = &target->ch[i];
Bart Van Assched92c0da2014-10-06 17:14:36 +02001213 ret += srp_new_cm_id(ch);
Bart Van Assche536ae142010-11-26 13:58:27 -05001214 }
Bart Van Assched92c0da2014-10-06 17:14:36 +02001215 for (i = 0; i < target->ch_count; i++) {
1216 ch = &target->ch[i];
Bart Van Assched92c0da2014-10-06 17:14:36 +02001217 for (j = 0; j < target->req_ring_size; ++j) {
1218 struct srp_request *req = &ch->req_ring[j];
Roland Dreieraef9ec32005-11-02 14:07:13 -08001219
Bart Van Assched92c0da2014-10-06 17:14:36 +02001220 srp_finish_req(ch, req, NULL, DID_RESET << 16);
1221 }
1222 }
1223 for (i = 0; i < target->ch_count; i++) {
1224 ch = &target->ch[i];
Bart Van Assched92c0da2014-10-06 17:14:36 +02001225 /*
1226 * Whether or not creating a new CM ID succeeded, create a new
1227 * QP. This guarantees that all completion callback function
1228 * invocations have finished before request resetting starts.
1229 */
1230 ret += srp_create_ch_ib(ch);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001231
Bart Van Assched92c0da2014-10-06 17:14:36 +02001232 INIT_LIST_HEAD(&ch->free_tx);
1233 for (j = 0; j < target->queue_size; ++j)
1234 list_add(&ch->tx_ring[j]->list, &ch->free_tx);
1235 }
Bart Van Assche8de9fe32015-05-18 13:23:36 +02001236
1237 target->qp_in_error = false;
1238
Bart Van Assched92c0da2014-10-06 17:14:36 +02001239 for (i = 0; i < target->ch_count; i++) {
1240 ch = &target->ch[i];
Bart Van Asschebbac5cc2015-05-18 13:26:17 +02001241 if (ret)
Bart Van Assched92c0da2014-10-06 17:14:36 +02001242 break;
Bart Van Assched92c0da2014-10-06 17:14:36 +02001243 ret = srp_connect_ch(ch, multich);
1244 multich = true;
1245 }
Bart Van Assche09be70a2012-03-17 17:18:54 +00001246
Bart Van Asscheed9b2262013-10-26 14:34:27 +02001247 if (ret == 0)
1248 shost_printk(KERN_INFO, target->scsi_host,
1249 PFX "reconnect succeeded\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001250
1251 return ret;
1252}
1253
David Dillow8f26c9f2011-01-14 19:45:50 -05001254static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
1255 unsigned int dma_len, u32 rkey)
Roland Dreierf5358a12006-06-17 20:37:29 -07001256{
David Dillow8f26c9f2011-01-14 19:45:50 -05001257 struct srp_direct_buf *desc = state->desc;
1258
1259 desc->va = cpu_to_be64(dma_addr);
1260 desc->key = cpu_to_be32(rkey);
1261 desc->len = cpu_to_be32(dma_len);
1262
1263 state->total_len += dma_len;
1264 state->desc++;
1265 state->ndesc++;
1266}
1267
1268static int srp_map_finish_fmr(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001269 struct srp_rdma_ch *ch)
David Dillow8f26c9f2011-01-14 19:45:50 -05001270{
David Dillow8f26c9f2011-01-14 19:45:50 -05001271 struct ib_pool_fmr *fmr;
Roland Dreierf5358a12006-06-17 20:37:29 -07001272 u64 io_addr = 0;
David Dillow8f26c9f2011-01-14 19:45:50 -05001273
Bart Van Assche509c07b2014-10-30 14:48:30 +01001274 fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
David Dillow8f26c9f2011-01-14 19:45:50 -05001275 state->npages, io_addr);
1276 if (IS_ERR(fmr))
1277 return PTR_ERR(fmr);
1278
1279 *state->next_fmr++ = fmr;
Bart Van Assche52ede082014-05-20 15:07:45 +02001280 state->nmdesc++;
David Dillow8f26c9f2011-01-14 19:45:50 -05001281
Bart Van Assche52ede082014-05-20 15:07:45 +02001282 srp_map_desc(state, 0, state->dma_len, fmr->fmr->rkey);
Bart Van Assche539dde62014-05-20 15:05:46 +02001283
David Dillow8f26c9f2011-01-14 19:45:50 -05001284 return 0;
1285}
1286
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001287static int srp_map_finish_fr(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001288 struct srp_rdma_ch *ch)
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001289{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001290 struct srp_target_port *target = ch->target;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001291 struct srp_device *dev = target->srp_host->srp_dev;
1292 struct ib_send_wr *bad_wr;
1293 struct ib_send_wr wr;
1294 struct srp_fr_desc *desc;
1295 u32 rkey;
1296
Bart Van Assche509c07b2014-10-30 14:48:30 +01001297 desc = srp_fr_pool_get(ch->fr_pool);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001298 if (!desc)
1299 return -ENOMEM;
1300
1301 rkey = ib_inc_rkey(desc->mr->rkey);
1302 ib_update_fast_reg_key(desc->mr, rkey);
1303
1304 memcpy(desc->frpl->page_list, state->pages,
1305 sizeof(state->pages[0]) * state->npages);
1306
1307 memset(&wr, 0, sizeof(wr));
1308 wr.opcode = IB_WR_FAST_REG_MR;
1309 wr.wr_id = FAST_REG_WR_ID_MASK;
1310 wr.wr.fast_reg.iova_start = state->base_dma_addr;
1311 wr.wr.fast_reg.page_list = desc->frpl;
1312 wr.wr.fast_reg.page_list_len = state->npages;
1313 wr.wr.fast_reg.page_shift = ilog2(dev->mr_page_size);
1314 wr.wr.fast_reg.length = state->dma_len;
1315 wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
1316 IB_ACCESS_REMOTE_READ |
1317 IB_ACCESS_REMOTE_WRITE);
1318 wr.wr.fast_reg.rkey = desc->mr->lkey;
1319
1320 *state->next_fr++ = desc;
1321 state->nmdesc++;
1322
1323 srp_map_desc(state, state->base_dma_addr, state->dma_len,
1324 desc->mr->rkey);
1325
Bart Van Assche509c07b2014-10-30 14:48:30 +01001326 return ib_post_send(ch->qp, &wr, &bad_wr);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001327}
1328
Bart Van Assche539dde62014-05-20 15:05:46 +02001329static int srp_finish_mapping(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001330 struct srp_rdma_ch *ch)
Bart Van Assche539dde62014-05-20 15:05:46 +02001331{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001332 struct srp_target_port *target = ch->target;
Bart Van Assche539dde62014-05-20 15:05:46 +02001333 int ret = 0;
1334
1335 if (state->npages == 0)
1336 return 0;
1337
Bart Van Asscheb1b88542014-05-20 15:06:41 +02001338 if (state->npages == 1 && !register_always)
Bart Van Assche52ede082014-05-20 15:07:45 +02001339 srp_map_desc(state, state->base_dma_addr, state->dma_len,
Bart Van Assche539dde62014-05-20 15:05:46 +02001340 target->rkey);
1341 else
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001342 ret = target->srp_host->srp_dev->use_fast_reg ?
Bart Van Assche509c07b2014-10-30 14:48:30 +01001343 srp_map_finish_fr(state, ch) :
1344 srp_map_finish_fmr(state, ch);
Bart Van Assche539dde62014-05-20 15:05:46 +02001345
1346 if (ret == 0) {
1347 state->npages = 0;
Bart Van Assche52ede082014-05-20 15:07:45 +02001348 state->dma_len = 0;
Bart Van Assche539dde62014-05-20 15:05:46 +02001349 }
1350
1351 return ret;
1352}
1353
David Dillow8f26c9f2011-01-14 19:45:50 -05001354static void srp_map_update_start(struct srp_map_state *state,
1355 struct scatterlist *sg, int sg_index,
1356 dma_addr_t dma_addr)
1357{
1358 state->unmapped_sg = sg;
1359 state->unmapped_index = sg_index;
1360 state->unmapped_addr = dma_addr;
1361}
1362
1363static int srp_map_sg_entry(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001364 struct srp_rdma_ch *ch,
David Dillow8f26c9f2011-01-14 19:45:50 -05001365 struct scatterlist *sg, int sg_index,
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001366 bool use_mr)
David Dillow8f26c9f2011-01-14 19:45:50 -05001367{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001368 struct srp_target_port *target = ch->target;
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01001369 struct srp_device *dev = target->srp_host->srp_dev;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001370 struct ib_device *ibdev = dev->dev;
David Dillow8f26c9f2011-01-14 19:45:50 -05001371 dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
1372 unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
1373 unsigned int len;
1374 int ret;
Roland Dreierf5358a12006-06-17 20:37:29 -07001375
David Dillow8f26c9f2011-01-14 19:45:50 -05001376 if (!dma_len)
1377 return 0;
Roland Dreierf5358a12006-06-17 20:37:29 -07001378
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001379 if (!use_mr) {
1380 /*
1381 * Once we're in direct map mode for a request, we don't
1382 * go back to FMR or FR mode, so no need to update anything
David Dillow8f26c9f2011-01-14 19:45:50 -05001383 * other than the descriptor.
1384 */
1385 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1386 return 0;
1387 }
Ishai Rabinovitz559ce8f2006-08-03 10:35:43 -07001388
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001389 /*
1390 * Since not all RDMA HW drivers support non-zero page offsets for
1391 * FMR, if we start at an offset into a page, don't merge into the
1392 * current FMR mapping. Finish it out, and use the kernel's MR for
1393 * this sg entry.
David Dillow8f26c9f2011-01-14 19:45:50 -05001394 */
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001395 if ((!dev->use_fast_reg && dma_addr & ~dev->mr_page_mask) ||
1396 dma_len > dev->mr_max_size) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001397 ret = srp_finish_mapping(state, ch);
David Dillow8f26c9f2011-01-14 19:45:50 -05001398 if (ret)
1399 return ret;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001400
David Dillow8f26c9f2011-01-14 19:45:50 -05001401 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1402 srp_map_update_start(state, NULL, 0, 0);
1403 return 0;
1404 }
1405
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001406 /*
1407 * If this is the first sg that will be mapped via FMR or via FR, save
1408 * our position. We need to know the first unmapped entry, its index,
1409 * and the first unmapped address within that entry to be able to
1410 * restart mapping after an error.
David Dillow8f26c9f2011-01-14 19:45:50 -05001411 */
1412 if (!state->unmapped_sg)
1413 srp_map_update_start(state, sg, sg_index, dma_addr);
1414
1415 while (dma_len) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001416 unsigned offset = dma_addr & ~dev->mr_page_mask;
1417 if (state->npages == dev->max_pages_per_mr || offset != 0) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001418 ret = srp_finish_mapping(state, ch);
David Dillow8f26c9f2011-01-14 19:45:50 -05001419 if (ret)
1420 return ret;
1421
1422 srp_map_update_start(state, sg, sg_index, dma_addr);
Roland Dreierf5358a12006-06-17 20:37:29 -07001423 }
1424
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001425 len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
David Dillow8f26c9f2011-01-14 19:45:50 -05001426
1427 if (!state->npages)
1428 state->base_dma_addr = dma_addr;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001429 state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
Bart Van Assche52ede082014-05-20 15:07:45 +02001430 state->dma_len += len;
David Dillow8f26c9f2011-01-14 19:45:50 -05001431 dma_addr += len;
1432 dma_len -= len;
Roland Dreierf5358a12006-06-17 20:37:29 -07001433 }
1434
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001435 /*
1436 * If the last entry of the MR wasn't a full page, then we need to
David Dillow8f26c9f2011-01-14 19:45:50 -05001437 * close it out and start a new one -- we can only merge at page
1438 * boundries.
1439 */
Roland Dreierf5358a12006-06-17 20:37:29 -07001440 ret = 0;
Bart Van Assche52ede082014-05-20 15:07:45 +02001441 if (len != dev->mr_page_size) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001442 ret = srp_finish_mapping(state, ch);
David Dillow8f26c9f2011-01-14 19:45:50 -05001443 if (!ret)
1444 srp_map_update_start(state, NULL, 0, 0);
1445 }
Roland Dreierf5358a12006-06-17 20:37:29 -07001446 return ret;
1447}
1448
/*
 * Map the @count entries of scatterlist @scat for request @req, using FMR
 * or FR memory registration when a pool is available and falling back to
 * the target's global rkey for entries that cannot be registered.
 *
 * Note: the backtrack label lives *inside* the for_each_sg() loop body;
 * the goto after the loop deliberately jumps back into the loop to redo
 * everything from the first unmapped entry without memory registration.
 * This function always returns 0.
 */
static int srp_map_sg(struct srp_map_state *state, struct srp_rdma_ch *ch,
		      struct srp_request *req, struct scatterlist *scat,
		      int count)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct scatterlist *sg;
	int i;
	bool use_mr;

	state->desc = req->indirect_desc;
	state->pages = req->map_page;
	if (dev->use_fast_reg) {
		state->next_fr = req->fr_list;
		/* Only attempt registration if the channel has a pool. */
		use_mr = !!ch->fr_pool;
	} else {
		state->next_fmr = req->fmr_list;
		use_mr = !!ch->fmr_pool;
	}

	for_each_sg(scat, sg, count, i) {
		if (srp_map_sg_entry(state, ch, sg, i, use_mr)) {
			/*
			 * Memory registration failed, so backtrack to the
			 * first unmapped entry and continue on without using
			 * memory registration.
			 */
			dma_addr_t dma_addr;
			unsigned int dma_len;

backtrack:
			sg = state->unmapped_sg;
			i = state->unmapped_index;

			/* Restart partway into the entry if needed. */
			dma_addr = ib_sg_dma_address(ibdev, sg);
			dma_len = ib_sg_dma_len(ibdev, sg);
			dma_len -= (state->unmapped_addr - dma_addr);
			dma_addr = state->unmapped_addr;
			use_mr = false;
			srp_map_desc(state, dma_addr, dma_len, target->rkey);
		}
	}

	/* Flush any partially built MR; on failure redo it unregistered. */
	if (use_mr && srp_finish_mapping(state, ch))
		goto backtrack;

	req->nmdesc = state->nmdesc;

	return 0;
}
1500
/*
 * Build the data descriptor portion of the SRP_CMD IU for @scmnd.
 *
 * Returns the total length in bytes of the SRP command IU (header plus
 * direct or indirect descriptors) on success, or a negative errno
 * (-EINVAL for an unsupported data direction, -EIO on DMA-mapping or
 * descriptor-fit failure).
 */
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
			struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct scatterlist *scat;
	struct srp_cmd *cmd = req->cmd->buf;
	int len, nents, count;
	struct srp_device *dev;
	struct ib_device *ibdev;
	struct srp_map_state state;
	struct srp_indirect_buf *indirect_hdr;
	u32 table_len;
	u8 fmt;

	/* No data phase: the bare SRP_CMD header is all that is needed. */
	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
		return sizeof (struct srp_cmd);

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled data direction %d\n",
			     scmnd->sc_data_direction);
		return -EINVAL;
	}

	nents = scsi_sg_count(scmnd);
	scat  = scsi_sglist(scmnd);

	dev = target->srp_host->srp_dev;
	ibdev = dev->dev;

	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
	if (unlikely(count == 0))
		return -EIO;

	/* Assume the direct-descriptor format until proven otherwise. */
	fmt = SRP_DATA_DESC_DIRECT;
	len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);

	if (count == 1 && !register_always) {
		/*
		 * The midlayer only generated a single gather/scatter
		 * entry, or DMA mapping coalesced everything to a
		 * single entry. So a direct descriptor along with
		 * the DMA MR suffices.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
		buf->key = cpu_to_be32(target->rkey);
		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));

		req->nmdesc = 0;
		goto map_complete;
	}

	/*
	 * We have more than one scatter/gather entry, so build our indirect
	 * descriptor table, trying to merge as many entries as we can.
	 */
	indirect_hdr = (void *) cmd->add_data;

	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
				   target->indirect_size, DMA_TO_DEVICE);

	memset(&state, 0, sizeof(state));
	srp_map_sg(&state, ch, req, scat, count);

	/* We've mapped the request, now pull as much of the indirect
	 * descriptor table as we can into the command buffer. If this
	 * target is not using an external indirect table, we are
	 * guaranteed to fit into the command, as the SCSI layer won't
	 * give us more S/G entries than we allow.
	 */
	if (state.ndesc == 1) {
		/*
		 * Memory registration collapsed the sg-list into one entry,
		 * so use a direct descriptor.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		*buf = req->indirect_desc[0];
		goto map_complete;
	}

	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
		     !target->allow_ext_sg)) {
		shost_printk(KERN_ERR, target->scsi_host,
			     "Could not fit S/G list into SRP_CMD\n");
		return -EIO;
	}

	/* Only the first cmd_sg_cnt descriptors travel inside the IU. */
	count = min(state.ndesc, target->cmd_sg_cnt);
	table_len = state.ndesc * sizeof (struct srp_direct_buf);

	fmt = SRP_DATA_DESC_INDIRECT;
	len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
	len += count * sizeof (struct srp_direct_buf);

	memcpy(indirect_hdr->desc_list, req->indirect_desc,
	       count * sizeof (struct srp_direct_buf));

	/* The full table stays in host memory; the target can RDMA-read it. */
	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
	indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
	indirect_hdr->len = cpu_to_be32(state.total_len);

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->data_out_desc_cnt = count;
	else
		cmd->data_in_desc_cnt = count;

	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
				      DMA_TO_DEVICE);

map_complete:
	/* Data-out descriptors live in the high nibble of buf_fmt. */
	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;
}
1623
David Dillow05a1d752010-10-08 14:48:14 -04001624/*
Bart Van Assche76c75b22010-11-26 14:37:47 -05001625 * Return an IU and possible credit to the free pool
1626 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01001627static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
Bart Van Assche76c75b22010-11-26 14:37:47 -05001628 enum srp_iu_type iu_type)
1629{
1630 unsigned long flags;
1631
Bart Van Assche509c07b2014-10-30 14:48:30 +01001632 spin_lock_irqsave(&ch->lock, flags);
1633 list_add(&iu->list, &ch->free_tx);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001634 if (iu_type != SRP_IU_RSP)
Bart Van Assche509c07b2014-10-30 14:48:30 +01001635 ++ch->req_lim;
1636 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001637}
1638
1639/*
Bart Van Assche509c07b2014-10-30 14:48:30 +01001640 * Must be called with ch->lock held to protect req_lim and free_tx.
Bart Van Asschee9684672010-11-26 15:08:38 -05001641 * If IU is not sent, it must be returned using srp_put_tx_iu().
David Dillow05a1d752010-10-08 14:48:14 -04001642 *
1643 * Note:
1644 * An upper limit for the number of allocated information units for each
1645 * request type is:
1646 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1647 * more than Scsi_Host.can_queue requests.
1648 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1649 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1650 * one unanswered SRP request to an initiator.
1651 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01001652static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
David Dillow05a1d752010-10-08 14:48:14 -04001653 enum srp_iu_type iu_type)
1654{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001655 struct srp_target_port *target = ch->target;
David Dillow05a1d752010-10-08 14:48:14 -04001656 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1657 struct srp_iu *iu;
1658
Bart Van Assche509c07b2014-10-30 14:48:30 +01001659 srp_send_completion(ch->send_cq, ch);
David Dillow05a1d752010-10-08 14:48:14 -04001660
Bart Van Assche509c07b2014-10-30 14:48:30 +01001661 if (list_empty(&ch->free_tx))
David Dillow05a1d752010-10-08 14:48:14 -04001662 return NULL;
1663
1664 /* Initiator responses to target requests do not consume credits */
Bart Van Assche76c75b22010-11-26 14:37:47 -05001665 if (iu_type != SRP_IU_RSP) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001666 if (ch->req_lim <= rsv) {
Bart Van Assche76c75b22010-11-26 14:37:47 -05001667 ++target->zero_req_lim;
1668 return NULL;
1669 }
1670
Bart Van Assche509c07b2014-10-30 14:48:30 +01001671 --ch->req_lim;
David Dillow05a1d752010-10-08 14:48:14 -04001672 }
1673
Bart Van Assche509c07b2014-10-30 14:48:30 +01001674 iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001675 list_del(&iu->list);
David Dillow05a1d752010-10-08 14:48:14 -04001676 return iu;
1677}
1678
Bart Van Assche509c07b2014-10-30 14:48:30 +01001679static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
David Dillow05a1d752010-10-08 14:48:14 -04001680{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001681 struct srp_target_port *target = ch->target;
David Dillow05a1d752010-10-08 14:48:14 -04001682 struct ib_sge list;
1683 struct ib_send_wr wr, *bad_wr;
David Dillow05a1d752010-10-08 14:48:14 -04001684
1685 list.addr = iu->dma;
1686 list.length = len;
David Dillow9af76272010-11-26 15:34:46 -05001687 list.lkey = target->lkey;
David Dillow05a1d752010-10-08 14:48:14 -04001688
1689 wr.next = NULL;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001690 wr.wr_id = (uintptr_t) iu;
David Dillow05a1d752010-10-08 14:48:14 -04001691 wr.sg_list = &list;
1692 wr.num_sge = 1;
1693 wr.opcode = IB_WR_SEND;
1694 wr.send_flags = IB_SEND_SIGNALED;
1695
Bart Van Assche509c07b2014-10-30 14:48:30 +01001696 return ib_post_send(ch->qp, &wr, &bad_wr);
David Dillow05a1d752010-10-08 14:48:14 -04001697}
1698
Bart Van Assche509c07b2014-10-30 14:48:30 +01001699static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
Bart Van Asschec996bb42010-07-30 10:59:05 +00001700{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001701 struct srp_target_port *target = ch->target;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001702 struct ib_recv_wr wr, *bad_wr;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001703 struct ib_sge list;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001704
1705 list.addr = iu->dma;
1706 list.length = iu->size;
David Dillow9af76272010-11-26 15:34:46 -05001707 list.lkey = target->lkey;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001708
1709 wr.next = NULL;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001710 wr.wr_id = (uintptr_t) iu;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001711 wr.sg_list = &list;
1712 wr.num_sge = 1;
1713
Bart Van Assche509c07b2014-10-30 14:48:30 +01001714 return ib_post_recv(ch->qp, &wr, &bad_wr);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001715}
1716
/*
 * Process an SRP_RSP information unit.  Task-management responses update
 * ch->tsk_mgmt_status and complete ch->tsk_mgmt_done; normal command
 * responses are matched to their scsi_cmnd by tag and completed back to
 * the SCSI midlayer.  In all paths the request-limit delta advertised by
 * the target is credited to ch->req_lim (under ch->lock).
 */
static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
{
	struct srp_target_port *target = ch->target;
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		spin_lock_irqsave(&ch->lock, flags);
		ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
		spin_unlock_irqrestore(&ch->lock, flags);

		/* -1 means "no status byte present in the response data". */
		ch->tsk_mgmt_status = -1;
		if (be32_to_cpu(rsp->resp_data_len) >= 4)
			ch->tsk_mgmt_status = rsp->data[3];
		complete(&ch->tsk_mgmt_done);
	} else {
		scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
		if (scmnd) {
			req = (void *)scmnd->host_scribble;
			/* Take ownership; NULL if already claimed elsewhere. */
			scmnd = srp_claim_req(ch, req, NULL, scmnd);
		}
		if (!scmnd) {
			shost_printk(KERN_ERR, target->scsi_host,
				     "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
				     rsp->tag, ch - target->ch, ch->qp->qp_num);

			/* Still honor the credit delta from the target. */
			spin_lock_irqsave(&ch->lock, flags);
			ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
			spin_unlock_irqrestore(&ch->lock, flags);

			return;
		}
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			/* Sense data follows the response data in rsp->data. */
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		/* Under/overflow residuals: negative residual on overflow. */
		if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));

		srp_free_req(ch, req, scmnd,
			     be32_to_cpu(rsp->req_lim_delta));

		scmnd->host_scribble = NULL;
		scmnd->scsi_done(scmnd);
	}
}
1775
Bart Van Assche509c07b2014-10-30 14:48:30 +01001776static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
David Dillowbb125882010-10-08 14:40:47 -04001777 void *rsp, int len)
1778{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001779 struct srp_target_port *target = ch->target;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001780 struct ib_device *dev = target->srp_host->srp_dev->dev;
David Dillowbb125882010-10-08 14:40:47 -04001781 unsigned long flags;
1782 struct srp_iu *iu;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001783 int err;
David Dillowbb125882010-10-08 14:40:47 -04001784
Bart Van Assche509c07b2014-10-30 14:48:30 +01001785 spin_lock_irqsave(&ch->lock, flags);
1786 ch->req_lim += req_delta;
1787 iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1788 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001789
David Dillowbb125882010-10-08 14:40:47 -04001790 if (!iu) {
1791 shost_printk(KERN_ERR, target->scsi_host, PFX
1792 "no IU available to send response\n");
Bart Van Assche76c75b22010-11-26 14:37:47 -05001793 return 1;
David Dillowbb125882010-10-08 14:40:47 -04001794 }
1795
1796 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1797 memcpy(iu->buf, rsp, len);
1798 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1799
Bart Van Assche509c07b2014-10-30 14:48:30 +01001800 err = srp_post_send(ch, iu, len);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001801 if (err) {
David Dillowbb125882010-10-08 14:40:47 -04001802 shost_printk(KERN_ERR, target->scsi_host, PFX
1803 "unable to post response: %d\n", err);
Bart Van Assche509c07b2014-10-30 14:48:30 +01001804 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001805 }
David Dillowbb125882010-10-08 14:40:47 -04001806
David Dillowbb125882010-10-08 14:40:47 -04001807 return err;
1808}
1809
Bart Van Assche509c07b2014-10-30 14:48:30 +01001810static void srp_process_cred_req(struct srp_rdma_ch *ch,
David Dillowbb125882010-10-08 14:40:47 -04001811 struct srp_cred_req *req)
1812{
1813 struct srp_cred_rsp rsp = {
1814 .opcode = SRP_CRED_RSP,
1815 .tag = req->tag,
1816 };
1817 s32 delta = be32_to_cpu(req->req_lim_delta);
1818
Bart Van Assche509c07b2014-10-30 14:48:30 +01001819 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1820 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
David Dillowbb125882010-10-08 14:40:47 -04001821 "problems processing SRP_CRED_REQ\n");
1822}
1823
Bart Van Assche509c07b2014-10-30 14:48:30 +01001824static void srp_process_aer_req(struct srp_rdma_ch *ch,
David Dillowbb125882010-10-08 14:40:47 -04001825 struct srp_aer_req *req)
1826{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001827 struct srp_target_port *target = ch->target;
David Dillowbb125882010-10-08 14:40:47 -04001828 struct srp_aer_rsp rsp = {
1829 .opcode = SRP_AER_RSP,
1830 .tag = req->tag,
1831 };
1832 s32 delta = be32_to_cpu(req->req_lim_delta);
1833
1834 shost_printk(KERN_ERR, target->scsi_host, PFX
Bart Van Assche985aa492015-05-18 13:27:14 +02001835 "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
David Dillowbb125882010-10-08 14:40:47 -04001836
Bart Van Assche509c07b2014-10-30 14:48:30 +01001837 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
David Dillowbb125882010-10-08 14:40:47 -04001838 shost_printk(KERN_ERR, target->scsi_host, PFX
1839 "problems processing SRP_AER_REQ\n");
1840}
1841
/*
 * Handle one successfully received information unit: dispatch on the SRP
 * opcode at the start of the IU payload, then repost the receive buffer.
 */
static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc)
{
	struct srp_target_port *target = ch->target;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	/* The wr_id of a receive completion is the IU pointer (see srp_post_recv()). */
	struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
	int res;
	u8 opcode;

	/* Make the DMA'd payload visible to the CPU before reading it. */
	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
				   DMA_FROM_DEVICE);

	opcode = *(u8 *) iu->buf;

	/* Compiled-out debug hex dump; flip the condition to enable. */
	if (0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "recv completion, opcode 0x%02x\n", opcode);
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
			       iu->buf, wc->byte_len, true);
	}

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(ch, iu->buf);
		break;

	case SRP_CRED_REQ:
		srp_process_cred_req(ch, iu->buf);
		break;

	case SRP_AER_REQ:
		srp_process_aer_req(ch, iu->buf);
		break;

	case SRP_T_LOGOUT:
		/* XXX Handle target logout */
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Got target logout request\n");
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
		break;
	}

	/* Hand the buffer back to the device and repost it for the next IU. */
	ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
				      DMA_FROM_DEVICE);

	res = srp_post_recv(ch, iu);
	if (res != 0)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Recv failed with error code %d\n", res);
}
1895
Bart Van Asschec1120f82013-10-26 14:35:08 +02001896/**
1897 * srp_tl_err_work() - handle a transport layer error
Bart Van Asscheaf246632014-05-20 15:04:21 +02001898 * @work: Work structure embedded in an SRP target port.
Bart Van Asschec1120f82013-10-26 14:35:08 +02001899 *
1900 * Note: This function may get invoked before the rport has been created,
1901 * hence the target->rport test.
1902 */
1903static void srp_tl_err_work(struct work_struct *work)
1904{
1905 struct srp_target_port *target;
1906
1907 target = container_of(work, struct srp_target_port, tl_err_work);
1908 if (target->rport)
1909 srp_start_tl_fail_timers(target->rport);
1910}
1911
/*
 * Common handler for failed work completions on either CQ.
 * @wr_id:     work request ID of the failed completion; its bits encode
 *             the request kind (SRP_LAST_WR_ID sentinel, LOCAL_INV /
 *             FAST_REG masks, otherwise an IU pointer).
 * @wc_status: IB completion status.
 * @send_err:  true for send-CQ completions, false for receive-CQ ones.
 *
 * The first error on a connected channel is logged and kicks off the
 * transport-layer failure timers; target->qp_in_error is then latched so
 * subsequent errors are silent.
 */
static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
			      bool send_err, struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;

	/* Sentinel posted during QP teardown: just signal the waiter. */
	if (wr_id == SRP_LAST_WR_ID) {
		complete(&ch->done);
		return;
	}

	/* Report only the first error while the channel is still connected. */
	if (ch->connected && !target->qp_in_error) {
		if (wr_id & LOCAL_INV_WR_ID_MASK) {
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "LOCAL_INV failed with status %d\n",
				     wc_status);
		} else if (wr_id & FAST_REG_WR_ID_MASK) {
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "FAST_REG_MR failed status %d\n",
				     wc_status);
		} else {
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "failed %s status %d for iu %p\n",
				     send_err ? "send" : "receive",
				     wc_status, (void *)(uintptr_t)wr_id);
		}
		queue_work(system_long_wq, &target->tl_err_work);
	}
	target->qp_in_error = true;
}
1941
Bart Van Assche509c07b2014-10-30 14:48:30 +01001942static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001943{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001944 struct srp_rdma_ch *ch = ch_ptr;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001945 struct ib_wc wc;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001946
1947 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
1948 while (ib_poll_cq(cq, 1, &wc) > 0) {
Bart Van Assche948d1e82011-09-03 09:25:42 +02001949 if (likely(wc.status == IB_WC_SUCCESS)) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001950 srp_handle_recv(ch, &wc);
Bart Van Assche948d1e82011-09-03 09:25:42 +02001951 } else {
Bart Van Assche7dad6b22014-10-21 18:00:35 +02001952 srp_handle_qp_err(wc.wr_id, wc.status, false, ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001953 }
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001954 }
1955}
1956
Bart Van Assche509c07b2014-10-30 14:48:30 +01001957static void srp_send_completion(struct ib_cq *cq, void *ch_ptr)
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001958{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001959 struct srp_rdma_ch *ch = ch_ptr;
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001960 struct ib_wc wc;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001961 struct srp_iu *iu;
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001962
1963 while (ib_poll_cq(cq, 1, &wc) > 0) {
Bart Van Assche948d1e82011-09-03 09:25:42 +02001964 if (likely(wc.status == IB_WC_SUCCESS)) {
1965 iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001966 list_add(&iu->list, &ch->free_tx);
Bart Van Assche948d1e82011-09-03 09:25:42 +02001967 } else {
Bart Van Assche7dad6b22014-10-21 18:00:35 +02001968 srp_handle_qp_err(wc.wr_id, wc.status, true, ch);
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001969 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001970 }
1971}
1972
Bart Van Assche76c75b22010-11-26 14:37:47 -05001973static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001974{
Bart Van Assche76c75b22010-11-26 14:37:47 -05001975 struct srp_target_port *target = host_to_target(shost);
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001976 struct srp_rport *rport = target->rport;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001977 struct srp_rdma_ch *ch;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001978 struct srp_request *req;
1979 struct srp_iu *iu;
1980 struct srp_cmd *cmd;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001981 struct ib_device *dev;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001982 unsigned long flags;
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02001983 u32 tag;
1984 u16 idx;
Bart Van Assched1b42892014-05-20 15:07:20 +02001985 int len, ret;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001986 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
1987
1988 /*
1989 * The SCSI EH thread is the only context from which srp_queuecommand()
1990 * can get invoked for blocked devices (SDEV_BLOCK /
1991 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
1992 * locking the rport mutex if invoked from inside the SCSI EH.
1993 */
1994 if (in_scsi_eh)
1995 mutex_lock(&rport->mutex);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001996
Bart Van Assched1b42892014-05-20 15:07:20 +02001997 scmnd->result = srp_chkready(target->rport);
1998 if (unlikely(scmnd->result))
1999 goto err;
Bart Van Assche2ce19e72013-02-21 17:20:00 +00002000
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002001 WARN_ON_ONCE(scmnd->request->tag < 0);
2002 tag = blk_mq_unique_tag(scmnd->request);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002003 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002004 idx = blk_mq_unique_tag_to_tag(tag);
2005 WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
2006 dev_name(&shost->shost_gendev), tag, idx,
2007 target->req_ring_size);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002008
2009 spin_lock_irqsave(&ch->lock, flags);
2010 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002011 spin_unlock_irqrestore(&ch->lock, flags);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002012
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002013 if (!iu)
2014 goto err;
2015
2016 req = &ch->req_ring[idx];
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002017 dev = target->srp_host->srp_dev->dev;
David Dillow49248642011-01-14 18:23:24 -05002018 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08002019 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002020
David Dillowf8b6e312010-11-26 13:02:21 -05002021 scmnd->host_scribble = (void *) req;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002022
2023 cmd = iu->buf;
2024 memset(cmd, 0, sizeof *cmd);
2025
2026 cmd->opcode = SRP_CMD;
Bart Van Assche985aa492015-05-18 13:27:14 +02002027 int_to_scsilun(scmnd->device->lun, &cmd->lun);
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002028 cmd->tag = tag;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002029 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2030
Roland Dreieraef9ec32005-11-02 14:07:13 -08002031 req->scmnd = scmnd;
2032 req->cmd = iu;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002033
Bart Van Assche509c07b2014-10-30 14:48:30 +01002034 len = srp_map_data(scmnd, ch, req);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002035 if (len < 0) {
David Dillow7aa54bd2008-01-07 18:23:41 -05002036 shost_printk(KERN_ERR, target->scsi_host,
Bart Van Assched1b42892014-05-20 15:07:20 +02002037 PFX "Failed to map data (%d)\n", len);
2038 /*
2039 * If we ran out of memory descriptors (-ENOMEM) because an
2040 * application is queuing many requests with more than
Bart Van Assche52ede082014-05-20 15:07:45 +02002041 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
Bart Van Assched1b42892014-05-20 15:07:20 +02002042 * to reduce queue depth temporarily.
2043 */
2044 scmnd->result = len == -ENOMEM ?
2045 DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
Bart Van Assche76c75b22010-11-26 14:37:47 -05002046 goto err_iu;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002047 }
2048
David Dillow49248642011-01-14 18:23:24 -05002049 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08002050 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002051
Bart Van Assche509c07b2014-10-30 14:48:30 +01002052 if (srp_post_send(ch, iu, len)) {
David Dillow7aa54bd2008-01-07 18:23:41 -05002053 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002054 goto err_unmap;
2055 }
2056
Bart Van Assched1b42892014-05-20 15:07:20 +02002057 ret = 0;
2058
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002059unlock_rport:
2060 if (in_scsi_eh)
2061 mutex_unlock(&rport->mutex);
2062
Bart Van Assched1b42892014-05-20 15:07:20 +02002063 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002064
2065err_unmap:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002066 srp_unmap_data(scmnd, ch, req);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002067
Bart Van Assche76c75b22010-11-26 14:37:47 -05002068err_iu:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002069 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
Bart Van Assche76c75b22010-11-26 14:37:47 -05002070
Bart Van Assche024ca902014-05-20 15:03:49 +02002071 /*
2072 * Avoid that the loops that iterate over the request ring can
2073 * encounter a dangling SCSI command pointer.
2074 */
2075 req->scmnd = NULL;
2076
Bart Van Assched1b42892014-05-20 15:07:20 +02002077err:
2078 if (scmnd->result) {
2079 scmnd->scsi_done(scmnd);
2080 ret = 0;
2081 } else {
2082 ret = SCSI_MLQUEUE_HOST_BUSY;
2083 }
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002084
Bart Van Assched1b42892014-05-20 15:07:20 +02002085 goto unlock_rport;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002086}
2087
Bart Van Assche4d73f952013-10-26 14:40:37 +02002088/*
2089 * Note: the resources allocated in this function are freed in
Bart Van Assche509c07b2014-10-30 14:48:30 +01002090 * srp_free_ch_ib().
Bart Van Assche4d73f952013-10-26 14:40:37 +02002091 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01002092static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002093{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002094 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002095 int i;
2096
Bart Van Assche509c07b2014-10-30 14:48:30 +01002097 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2098 GFP_KERNEL);
2099 if (!ch->rx_ring)
Bart Van Assche4d73f952013-10-26 14:40:37 +02002100 goto err_no_ring;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002101 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2102 GFP_KERNEL);
2103 if (!ch->tx_ring)
Bart Van Assche4d73f952013-10-26 14:40:37 +02002104 goto err_no_ring;
2105
2106 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002107 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2108 ch->max_ti_iu_len,
2109 GFP_KERNEL, DMA_FROM_DEVICE);
2110 if (!ch->rx_ring[i])
Roland Dreieraef9ec32005-11-02 14:07:13 -08002111 goto err;
2112 }
2113
Bart Van Assche4d73f952013-10-26 14:40:37 +02002114 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002115 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2116 target->max_iu_len,
2117 GFP_KERNEL, DMA_TO_DEVICE);
2118 if (!ch->tx_ring[i])
Roland Dreieraef9ec32005-11-02 14:07:13 -08002119 goto err;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05002120
Bart Van Assche509c07b2014-10-30 14:48:30 +01002121 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002122 }
2123
2124 return 0;
2125
2126err:
Bart Van Assche4d73f952013-10-26 14:40:37 +02002127 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002128 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2129 srp_free_iu(target->srp_host, ch->tx_ring[i]);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002130 }
2131
Bart Van Assche4d73f952013-10-26 14:40:37 +02002132
2133err_no_ring:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002134 kfree(ch->tx_ring);
2135 ch->tx_ring = NULL;
2136 kfree(ch->rx_ring);
2137 ch->rx_ring = NULL;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002138
2139 return -ENOMEM;
2140}
2141
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002142static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2143{
2144 uint64_t T_tr_ns, max_compl_time_ms;
2145 uint32_t rq_tmo_jiffies;
2146
2147 /*
2148 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2149 * table 91), both the QP timeout and the retry count have to be set
2150 * for RC QP's during the RTR to RTS transition.
2151 */
2152 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2153 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2154
2155 /*
2156 * Set target->rq_tmo_jiffies to one second more than the largest time
2157 * it can take before an error completion is generated. See also
2158 * C9-140..142 in the IBTA spec for more information about how to
2159 * convert the QP Local ACK Timeout value to nanoseconds.
2160 */
2161 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2162 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2163 do_div(max_compl_time_ms, NSEC_PER_MSEC);
2164 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2165
2166 return rq_tmo_jiffies;
2167}
2168
David Dillow961e0be2011-01-14 17:32:07 -05002169static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2170 struct srp_login_rsp *lrsp,
Bart Van Assche509c07b2014-10-30 14:48:30 +01002171 struct srp_rdma_ch *ch)
David Dillow961e0be2011-01-14 17:32:07 -05002172{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002173 struct srp_target_port *target = ch->target;
David Dillow961e0be2011-01-14 17:32:07 -05002174 struct ib_qp_attr *qp_attr = NULL;
2175 int attr_mask = 0;
2176 int ret;
2177 int i;
2178
2179 if (lrsp->opcode == SRP_LOGIN_RSP) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002180 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2181 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
David Dillow961e0be2011-01-14 17:32:07 -05002182
2183 /*
2184 * Reserve credits for task management so we don't
2185 * bounce requests back to the SCSI mid-layer.
2186 */
2187 target->scsi_host->can_queue
Bart Van Assche509c07b2014-10-30 14:48:30 +01002188 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
David Dillow961e0be2011-01-14 17:32:07 -05002189 target->scsi_host->can_queue);
Bart Van Assche4d73f952013-10-26 14:40:37 +02002190 target->scsi_host->cmd_per_lun
2191 = min_t(int, target->scsi_host->can_queue,
2192 target->scsi_host->cmd_per_lun);
David Dillow961e0be2011-01-14 17:32:07 -05002193 } else {
2194 shost_printk(KERN_WARNING, target->scsi_host,
2195 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2196 ret = -ECONNRESET;
2197 goto error;
2198 }
2199
Bart Van Assche509c07b2014-10-30 14:48:30 +01002200 if (!ch->rx_ring) {
2201 ret = srp_alloc_iu_bufs(ch);
David Dillow961e0be2011-01-14 17:32:07 -05002202 if (ret)
2203 goto error;
2204 }
2205
2206 ret = -ENOMEM;
2207 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
2208 if (!qp_attr)
2209 goto error;
2210
2211 qp_attr->qp_state = IB_QPS_RTR;
2212 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2213 if (ret)
2214 goto error_free;
2215
Bart Van Assche509c07b2014-10-30 14:48:30 +01002216 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
David Dillow961e0be2011-01-14 17:32:07 -05002217 if (ret)
2218 goto error_free;
2219
Bart Van Assche4d73f952013-10-26 14:40:37 +02002220 for (i = 0; i < target->queue_size; i++) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002221 struct srp_iu *iu = ch->rx_ring[i];
2222
2223 ret = srp_post_recv(ch, iu);
David Dillow961e0be2011-01-14 17:32:07 -05002224 if (ret)
2225 goto error_free;
2226 }
2227
2228 qp_attr->qp_state = IB_QPS_RTS;
2229 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2230 if (ret)
2231 goto error_free;
2232
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002233 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2234
Bart Van Assche509c07b2014-10-30 14:48:30 +01002235 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
David Dillow961e0be2011-01-14 17:32:07 -05002236 if (ret)
2237 goto error_free;
2238
2239 ret = ib_send_cm_rtu(cm_id, NULL, 0);
2240
2241error_free:
2242 kfree(qp_attr);
2243
2244error:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002245 ch->status = ret;
David Dillow961e0be2011-01-14 17:32:07 -05002246}
2247
Roland Dreieraef9ec32005-11-02 14:07:13 -08002248static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2249 struct ib_cm_event *event,
Bart Van Assche509c07b2014-10-30 14:48:30 +01002250 struct srp_rdma_ch *ch)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002251{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002252 struct srp_target_port *target = ch->target;
David Dillow7aa54bd2008-01-07 18:23:41 -05002253 struct Scsi_Host *shost = target->scsi_host;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002254 struct ib_class_port_info *cpi;
2255 int opcode;
2256
2257 switch (event->param.rej_rcvd.reason) {
2258 case IB_CM_REJ_PORT_CM_REDIRECT:
2259 cpi = event->param.rej_rcvd.ari;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002260 ch->path.dlid = cpi->redirect_lid;
2261 ch->path.pkey = cpi->redirect_pkey;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002262 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002263 memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002264
Bart Van Assche509c07b2014-10-30 14:48:30 +01002265 ch->status = ch->path.dlid ?
Roland Dreieraef9ec32005-11-02 14:07:13 -08002266 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2267 break;
2268
2269 case IB_CM_REJ_PORT_REDIRECT:
Roland Dreier5d7cbfd2007-08-03 10:45:18 -07002270 if (srp_target_is_topspin(target)) {
Roland Dreieraef9ec32005-11-02 14:07:13 -08002271 /*
2272 * Topspin/Cisco SRP gateways incorrectly send
2273 * reject reason code 25 when they mean 24
2274 * (port redirect).
2275 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01002276 memcpy(ch->path.dgid.raw,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002277 event->param.rej_rcvd.ari, 16);
2278
David Dillow7aa54bd2008-01-07 18:23:41 -05002279 shost_printk(KERN_DEBUG, shost,
2280 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
Bart Van Assche509c07b2014-10-30 14:48:30 +01002281 be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2282 be64_to_cpu(ch->path.dgid.global.interface_id));
Roland Dreieraef9ec32005-11-02 14:07:13 -08002283
Bart Van Assche509c07b2014-10-30 14:48:30 +01002284 ch->status = SRP_PORT_REDIRECT;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002285 } else {
David Dillow7aa54bd2008-01-07 18:23:41 -05002286 shost_printk(KERN_WARNING, shost,
2287 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002288 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002289 }
2290 break;
2291
2292 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
David Dillow7aa54bd2008-01-07 18:23:41 -05002293 shost_printk(KERN_WARNING, shost,
2294 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002295 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002296 break;
2297
2298 case IB_CM_REJ_CONSUMER_DEFINED:
2299 opcode = *(u8 *) event->private_data;
2300 if (opcode == SRP_LOGIN_REJ) {
2301 struct srp_login_rej *rej = event->private_data;
2302 u32 reason = be32_to_cpu(rej->reason);
2303
2304 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
David Dillow7aa54bd2008-01-07 18:23:41 -05002305 shost_printk(KERN_WARNING, shost,
2306 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002307 else
Bart Van Asschee7ffde02014-03-14 13:52:21 +01002308 shost_printk(KERN_WARNING, shost, PFX
2309 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
Bart Van Assche747fe002014-10-30 14:48:05 +01002310 target->sgid.raw,
2311 target->orig_dgid.raw, reason);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002312 } else
David Dillow7aa54bd2008-01-07 18:23:41 -05002313 shost_printk(KERN_WARNING, shost,
2314 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2315 " opcode 0x%02x\n", opcode);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002316 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002317 break;
2318
David Dillow9fe4bcf2008-01-08 17:08:52 -05002319 case IB_CM_REJ_STALE_CONN:
2320 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002321 ch->status = SRP_STALE_CONN;
David Dillow9fe4bcf2008-01-08 17:08:52 -05002322 break;
2323
Roland Dreieraef9ec32005-11-02 14:07:13 -08002324 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05002325 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2326 event->param.rej_rcvd.reason);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002327 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002328 }
2329}
2330
/*
 * InfiniBand CM event callback for an SRP RDMA channel (cm_id->context
 * points at the srp_rdma_ch). Events that end a connection attempt set
 * ch->status and complete ch->done so the connecting thread can resume;
 * a DREQ from the target marks the channel disconnected and schedules
 * the transport-error worker.
 */
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct srp_rdma_ch *ch = cm_id->context;
	struct srp_target_port *target = ch->target;
	int comp = 0;	/* set when a waiter on ch->done must be woken */

	switch (event->event) {
	case IB_CM_REQ_ERROR:
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Sending CM REQ failed\n");
		comp = 1;
		ch->status = -ECONNRESET;
		break;

	case IB_CM_REP_RECEIVED:
		comp = 1;
		/* srp_cm_rep_handler() stores the outcome in ch->status. */
		srp_cm_rep_handler(cm_id, event->private_data, ch);
		break;

	case IB_CM_REJ_RECEIVED:
		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
		comp = 1;

		srp_cm_rej_handler(cm_id, event, ch);
		break;

	case IB_CM_DREQ_RECEIVED:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "DREQ received - connection closed\n");
		ch->connected = false;
		if (ib_send_cm_drep(cm_id, NULL, 0))
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "Sending CM DREP failed\n");
		/* Let the TL error worker handle the reconnect. */
		queue_work(system_long_wq, &target->tl_err_work);
		break;

	case IB_CM_TIMEWAIT_EXIT:
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "connection closed\n");
		comp = 1;

		ch->status = 0;
		break;

	case IB_CM_MRA_RECEIVED:
	case IB_CM_DREQ_ERROR:
	case IB_CM_DREP_RECEIVED:
		/* Nothing to do for these events. */
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	if (comp)
		complete(&ch->done);

	return 0;
}
2391
Jack Wang71444b92013-11-07 11:37:37 +01002392/**
Jack Wang71444b92013-11-07 11:37:37 +01002393 * srp_change_queue_depth - setting device queue depth
2394 * @sdev: scsi device struct
2395 * @qdepth: requested queue depth
Jack Wang71444b92013-11-07 11:37:37 +01002396 *
2397 * Returns queue depth.
2398 */
2399static int
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01002400srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
Jack Wang71444b92013-11-07 11:37:37 +01002401{
Christoph Hellwigc40ecc12014-11-13 14:25:11 +01002402 if (!sdev->tagged_supported)
Christoph Hellwig1e6f2412014-11-13 14:27:41 +01002403 qdepth = 1;
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01002404 return scsi_change_queue_depth(sdev, qdepth);
Jack Wang71444b92013-11-07 11:37:37 +01002405}
2406
Bart Van Assche985aa492015-05-18 13:27:14 +02002407static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
2408 u8 func)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002409{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002410 struct srp_target_port *target = ch->target;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002411 struct srp_rport *rport = target->rport;
David Dillow19081f32010-10-18 08:54:49 -04002412 struct ib_device *dev = target->srp_host->srp_dev->dev;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002413 struct srp_iu *iu;
2414 struct srp_tsk_mgmt *tsk_mgmt;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002415
Bart Van Asschec014c8c2015-05-18 13:23:57 +02002416 if (!ch->connected || target->qp_in_error)
Bart Van Assche3780d1f2013-02-21 17:18:00 +00002417 return -1;
2418
Bart Van Assche509c07b2014-10-30 14:48:30 +01002419 init_completion(&ch->tsk_mgmt_done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002420
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002421 /*
Bart Van Assche509c07b2014-10-30 14:48:30 +01002422 * Lock the rport mutex to avoid that srp_create_ch_ib() is
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002423 * invoked while a task management function is being sent.
2424 */
2425 mutex_lock(&rport->mutex);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002426 spin_lock_irq(&ch->lock);
2427 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2428 spin_unlock_irq(&ch->lock);
Bart Van Assche76c75b22010-11-26 14:37:47 -05002429
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002430 if (!iu) {
2431 mutex_unlock(&rport->mutex);
2432
Bart Van Assche76c75b22010-11-26 14:37:47 -05002433 return -1;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002434 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002435
David Dillow19081f32010-10-18 08:54:49 -04002436 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2437 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002438 tsk_mgmt = iu->buf;
2439 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2440
2441 tsk_mgmt->opcode = SRP_TSK_MGMT;
Bart Van Assche985aa492015-05-18 13:27:14 +02002442 int_to_scsilun(lun, &tsk_mgmt->lun);
David Dillowf8b6e312010-11-26 13:02:21 -05002443 tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002444 tsk_mgmt->tsk_mgmt_func = func;
David Dillowf8b6e312010-11-26 13:02:21 -05002445 tsk_mgmt->task_tag = req_tag;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002446
David Dillow19081f32010-10-18 08:54:49 -04002447 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2448 DMA_TO_DEVICE);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002449 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2450 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002451 mutex_unlock(&rport->mutex);
2452
Bart Van Assche76c75b22010-11-26 14:37:47 -05002453 return -1;
2454 }
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002455 mutex_unlock(&rport->mutex);
Roland Dreierd945e1d2006-05-09 10:50:28 -07002456
Bart Van Assche509c07b2014-10-30 14:48:30 +01002457 if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002458 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
Roland Dreierd945e1d2006-05-09 10:50:28 -07002459 return -1;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002460
Roland Dreierd945e1d2006-05-09 10:50:28 -07002461 return 0;
Roland Dreierd945e1d2006-05-09 10:50:28 -07002462}
2463
Roland Dreieraef9ec32005-11-02 14:07:13 -08002464static int srp_abort(struct scsi_cmnd *scmnd)
2465{
Roland Dreierd945e1d2006-05-09 10:50:28 -07002466 struct srp_target_port *target = host_to_target(scmnd->device->host);
David Dillowf8b6e312010-11-26 13:02:21 -05002467 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002468 u32 tag;
Bart Van Assched92c0da2014-10-06 17:14:36 +02002469 u16 ch_idx;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002470 struct srp_rdma_ch *ch;
Bart Van Assche086f44f2013-06-12 15:23:04 +02002471 int ret;
Roland Dreierd945e1d2006-05-09 10:50:28 -07002472
David Dillow7aa54bd2008-01-07 18:23:41 -05002473 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002474
Bart Van Assched92c0da2014-10-06 17:14:36 +02002475 if (!req)
Bart Van Assche99b66972013-10-10 13:52:33 +02002476 return SUCCESS;
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002477 tag = blk_mq_unique_tag(scmnd->request);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002478 ch_idx = blk_mq_unique_tag_to_hwq(tag);
2479 if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2480 return SUCCESS;
2481 ch = &target->ch[ch_idx];
2482 if (!srp_claim_req(ch, req, NULL, scmnd))
2483 return SUCCESS;
2484 shost_printk(KERN_ERR, target->scsi_host,
2485 "Sending SRP abort for tag %#x\n", tag);
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002486 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
Bart Van Assche80d5e8a2013-07-10 17:36:35 +02002487 SRP_TSK_ABORT_TASK) == 0)
Bart Van Assche086f44f2013-06-12 15:23:04 +02002488 ret = SUCCESS;
Bart Van Asscheed9b2262013-10-26 14:34:27 +02002489 else if (target->rport->state == SRP_RPORT_LOST)
Bart Van Assche99e1c132013-06-28 14:49:58 +02002490 ret = FAST_IO_FAIL;
Bart Van Assche086f44f2013-06-12 15:23:04 +02002491 else
2492 ret = FAILED;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002493 srp_free_req(ch, req, scmnd, 0);
Bart Van Assche22032992012-08-14 13:18:53 +00002494 scmnd->result = DID_ABORT << 16;
Bart Van Assched8536672012-08-24 10:29:11 +00002495 scmnd->scsi_done(scmnd);
Roland Dreierd945e1d2006-05-09 10:50:28 -07002496
Bart Van Assche086f44f2013-06-12 15:23:04 +02002497 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002498}
2499
2500static int srp_reset_device(struct scsi_cmnd *scmnd)
2501{
Roland Dreierd945e1d2006-05-09 10:50:28 -07002502 struct srp_target_port *target = host_to_target(scmnd->device->host);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002503 struct srp_rdma_ch *ch;
Bart Van Assche536ae142010-11-26 13:58:27 -05002504 int i;
Roland Dreierd945e1d2006-05-09 10:50:28 -07002505
David Dillow7aa54bd2008-01-07 18:23:41 -05002506 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002507
Bart Van Assched92c0da2014-10-06 17:14:36 +02002508 ch = &target->ch[0];
Bart Van Assche509c07b2014-10-30 14:48:30 +01002509 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
David Dillowf8b6e312010-11-26 13:02:21 -05002510 SRP_TSK_LUN_RESET))
Roland Dreierd945e1d2006-05-09 10:50:28 -07002511 return FAILED;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002512 if (ch->tsk_mgmt_status)
Roland Dreierd945e1d2006-05-09 10:50:28 -07002513 return FAILED;
2514
Bart Van Assched92c0da2014-10-06 17:14:36 +02002515 for (i = 0; i < target->ch_count; i++) {
2516 ch = &target->ch[i];
2517 for (i = 0; i < target->req_ring_size; ++i) {
2518 struct srp_request *req = &ch->req_ring[i];
Bart Van Assche509c07b2014-10-30 14:48:30 +01002519
Bart Van Assched92c0da2014-10-06 17:14:36 +02002520 srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2521 }
Bart Van Assche536ae142010-11-26 13:58:27 -05002522 }
Roland Dreierd945e1d2006-05-09 10:50:28 -07002523
Roland Dreierd945e1d2006-05-09 10:50:28 -07002524 return SUCCESS;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002525}
2526
2527static int srp_reset_host(struct scsi_cmnd *scmnd)
2528{
2529 struct srp_target_port *target = host_to_target(scmnd->device->host);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002530
David Dillow7aa54bd2008-01-07 18:23:41 -05002531 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002532
Bart Van Asscheed9b2262013-10-26 14:34:27 +02002533 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002534}
2535
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002536static int srp_slave_configure(struct scsi_device *sdev)
2537{
2538 struct Scsi_Host *shost = sdev->host;
2539 struct srp_target_port *target = host_to_target(shost);
2540 struct request_queue *q = sdev->request_queue;
2541 unsigned long timeout;
2542
2543 if (sdev->type == TYPE_DISK) {
2544 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2545 blk_queue_rq_timeout(q, timeout);
2546 }
2547
2548 return 0;
2549}
2550
Tony Jonesee959b02008-02-22 00:13:36 +01002551static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2552 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002553{
Tony Jonesee959b02008-02-22 00:13:36 +01002554 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002555
Bart Van Assche45c37ca2015-05-18 13:25:10 +02002556 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002557}
2558
Tony Jonesee959b02008-02-22 00:13:36 +01002559static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2560 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002561{
Tony Jonesee959b02008-02-22 00:13:36 +01002562 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002563
Bart Van Assche45c37ca2015-05-18 13:25:10 +02002564 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002565}
2566
Tony Jonesee959b02008-02-22 00:13:36 +01002567static ssize_t show_service_id(struct device *dev,
2568 struct device_attribute *attr, char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002569{
Tony Jonesee959b02008-02-22 00:13:36 +01002570 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002571
Bart Van Assche45c37ca2015-05-18 13:25:10 +02002572 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002573}
2574
Tony Jonesee959b02008-02-22 00:13:36 +01002575static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2576 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002577{
Tony Jonesee959b02008-02-22 00:13:36 +01002578 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002579
Bart Van Assche747fe002014-10-30 14:48:05 +01002580 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002581}
2582
Bart Van Assche848b3082013-10-26 14:38:12 +02002583static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2584 char *buf)
2585{
2586 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2587
Bart Van Assche747fe002014-10-30 14:48:05 +01002588 return sprintf(buf, "%pI6\n", target->sgid.raw);
Bart Van Assche848b3082013-10-26 14:38:12 +02002589}
2590
Tony Jonesee959b02008-02-22 00:13:36 +01002591static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2592 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002593{
Tony Jonesee959b02008-02-22 00:13:36 +01002594 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Bart Van Assched92c0da2014-10-06 17:14:36 +02002595 struct srp_rdma_ch *ch = &target->ch[0];
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002596
Bart Van Assche509c07b2014-10-30 14:48:30 +01002597 return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002598}
2599
Tony Jonesee959b02008-02-22 00:13:36 +01002600static ssize_t show_orig_dgid(struct device *dev,
2601 struct device_attribute *attr, char *buf)
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002602{
Tony Jonesee959b02008-02-22 00:13:36 +01002603 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002604
Bart Van Assche747fe002014-10-30 14:48:05 +01002605 return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002606}
2607
Bart Van Assche89de7482010-08-03 14:08:45 +00002608static ssize_t show_req_lim(struct device *dev,
2609 struct device_attribute *attr, char *buf)
2610{
2611 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Bart Van Assched92c0da2014-10-06 17:14:36 +02002612 struct srp_rdma_ch *ch;
2613 int i, req_lim = INT_MAX;
Bart Van Assche89de7482010-08-03 14:08:45 +00002614
Bart Van Assched92c0da2014-10-06 17:14:36 +02002615 for (i = 0; i < target->ch_count; i++) {
2616 ch = &target->ch[i];
2617 req_lim = min(req_lim, ch->req_lim);
2618 }
2619 return sprintf(buf, "%d\n", req_lim);
Bart Van Assche89de7482010-08-03 14:08:45 +00002620}
2621
Tony Jonesee959b02008-02-22 00:13:36 +01002622static ssize_t show_zero_req_lim(struct device *dev,
2623 struct device_attribute *attr, char *buf)
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002624{
Tony Jonesee959b02008-02-22 00:13:36 +01002625 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002626
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002627 return sprintf(buf, "%d\n", target->zero_req_lim);
2628}
2629
Tony Jonesee959b02008-02-22 00:13:36 +01002630static ssize_t show_local_ib_port(struct device *dev,
2631 struct device_attribute *attr, char *buf)
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002632{
Tony Jonesee959b02008-02-22 00:13:36 +01002633 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002634
2635 return sprintf(buf, "%d\n", target->srp_host->port);
2636}
2637
Tony Jonesee959b02008-02-22 00:13:36 +01002638static ssize_t show_local_ib_device(struct device *dev,
2639 struct device_attribute *attr, char *buf)
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002640{
Tony Jonesee959b02008-02-22 00:13:36 +01002641 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002642
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002643 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002644}
2645
Bart Van Assched92c0da2014-10-06 17:14:36 +02002646static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2647 char *buf)
2648{
2649 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2650
2651 return sprintf(buf, "%d\n", target->ch_count);
2652}
2653
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002654static ssize_t show_comp_vector(struct device *dev,
2655 struct device_attribute *attr, char *buf)
2656{
2657 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2658
2659 return sprintf(buf, "%d\n", target->comp_vector);
2660}
2661
Vu Pham7bb312e2013-10-26 14:31:27 +02002662static ssize_t show_tl_retry_count(struct device *dev,
2663 struct device_attribute *attr, char *buf)
2664{
2665 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2666
2667 return sprintf(buf, "%d\n", target->tl_retry_count);
2668}
2669
David Dillow49248642011-01-14 18:23:24 -05002670static ssize_t show_cmd_sg_entries(struct device *dev,
2671 struct device_attribute *attr, char *buf)
2672{
2673 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2674
2675 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2676}
2677
David Dillowc07d4242011-01-16 13:57:10 -05002678static ssize_t show_allow_ext_sg(struct device *dev,
2679 struct device_attribute *attr, char *buf)
2680{
2681 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2682
2683 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2684}
2685
/*
 * Per-target sysfs attributes.  All are read-only (S_IRUGO) and backed by
 * the show_* handlers above.
 */
static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002702
/*
 * NULL-terminated attribute table hooked into srp_template.shost_attrs so
 * that every SRP SCSI host exports these files.
 */
static struct device_attribute *srp_host_attrs[] = {
	&dev_attr_id_ext,
	&dev_attr_ioc_guid,
	&dev_attr_service_id,
	&dev_attr_pkey,
	&dev_attr_sgid,
	&dev_attr_dgid,
	&dev_attr_orig_dgid,
	&dev_attr_req_lim,
	&dev_attr_zero_req_lim,
	&dev_attr_local_ib_port,
	&dev_attr_local_ib_device,
	&dev_attr_ch_count,
	&dev_attr_comp_vector,
	&dev_attr_tl_retry_count,
	&dev_attr_cmd_sg_entries,
	&dev_attr_allow_ext_sg,
	NULL
};
2722
/*
 * SCSI host template for SRP initiator hosts.  can_queue/cmd_per_lun are
 * defaults only; srp_parse_options() may override them per target.
 */
static struct scsi_host_template srp_template = {
	.module				= THIS_MODULE,
	.name				= "InfiniBand SRP initiator",
	.proc_name			= DRV_NAME,
	.slave_configure		= srp_slave_configure,
	.info				= srp_target_info,
	.queuecommand			= srp_queuecommand,
	.change_queue_depth             = srp_change_queue_depth,
	.eh_abort_handler		= srp_abort,
	.eh_device_reset_handler	= srp_reset_device,
	.eh_host_reset_handler		= srp_reset_host,
	/* Skip the bus-settle delay after error recovery. */
	.skip_settle_delay		= true,
	.sg_tablesize			= SRP_DEF_SG_TABLESIZE,
	.can_queue			= SRP_DEFAULT_CMD_SQ_SIZE,
	.this_id			= -1,
	.cmd_per_lun			= SRP_DEFAULT_CMD_SQ_SIZE,
	.use_clustering			= ENABLE_CLUSTERING,
	.shost_attrs			= srp_host_attrs,
	.use_blk_tags			= 1,
	.track_queue_depth		= 1,
};
2744
Bart Van Assche34aa6542014-10-30 14:47:22 +01002745static int srp_sdev_count(struct Scsi_Host *host)
2746{
2747 struct scsi_device *sdev;
2748 int c = 0;
2749
2750 shost_for_each_device(sdev, host)
2751 c++;
2752
2753 return c;
2754}
2755
/*
 * Register @target as a SCSI host and transport rport, add it to @host's
 * target list and scan it for LUNs.  The target starts in state
 * SRP_TARGET_SCANNING and is promoted to SRP_TARGET_LIVE only if it is
 * still in the scanning state after the scan (i.e. removal has not been
 * triggered concurrently).  Returns 0 or a negative errno.
 */
static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
	struct srp_rport_identifiers ids;
	struct srp_rport *rport;

	target->state = SRP_TARGET_SCANNING;
	sprintf(target->target_name, "SRP.T10:%016llX",
		be64_to_cpu(target->id_ext));

	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
		return -ENODEV;

	/* rport port_id = 64-bit id_ext followed by 64-bit ioc_guid. */
	memcpy(ids.port_id, &target->id_ext, 8);
	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
	ids.roles = SRP_RPORT_ROLE_TARGET;
	rport = srp_rport_add(target->scsi_host, &ids);
	if (IS_ERR(rport)) {
		scsi_remove_host(target->scsi_host);
		return PTR_ERR(rport);
	}

	rport->lld_data = target;
	target->rport = rport;

	spin_lock(&host->target_lock);
	list_add_tail(&target->list, &host->target_list);
	spin_unlock(&host->target_lock);

	scsi_scan_target(&target->scsi_host->shost_gendev,
			 0, target->scsi_id, SCAN_WILD_CARD, 0);

	/*
	 * If any channel dropped or a QP error occurred during the scan,
	 * schedule removal instead of going live.
	 */
	if (srp_connected_ch(target) < target->ch_count ||
	    target->qp_in_error) {
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "SCSI scan failed - removing SCSI host\n");
		srp_queue_remove_work(target);
		goto out;
	}

	pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n",
		 dev_name(&target->scsi_host->shost_gendev),
		 srp_sdev_count(target->scsi_host));

	/* Only go live if no concurrent removal changed the state. */
	spin_lock_irq(&target->lock);
	if (target->state == SRP_TARGET_SCANNING)
		target->state = SRP_TARGET_LIVE;
	spin_unlock_irq(&target->lock);

out:
	return 0;
}
2807
Tony Jonesee959b02008-02-22 00:13:36 +01002808static void srp_release_dev(struct device *dev)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002809{
2810 struct srp_host *host =
Tony Jonesee959b02008-02-22 00:13:36 +01002811 container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002812
2813 complete(&host->released);
2814}
2815
/* Device class for srp_host devices (/sys/class/infiniband_srp). */
static struct class srp_class = {
	.name    = "infiniband_srp",
	.dev_release = srp_release_dev
};
2820
Bart Van Assche96fc2482013-06-28 14:51:26 +02002821/**
2822 * srp_conn_unique() - check whether the connection to a target is unique
Bart Van Asscheaf246632014-05-20 15:04:21 +02002823 * @host: SRP host.
2824 * @target: SRP target port.
Bart Van Assche96fc2482013-06-28 14:51:26 +02002825 */
2826static bool srp_conn_unique(struct srp_host *host,
2827 struct srp_target_port *target)
2828{
2829 struct srp_target_port *t;
2830 bool ret = false;
2831
2832 if (target->state == SRP_TARGET_REMOVED)
2833 goto out;
2834
2835 ret = true;
2836
2837 spin_lock(&host->target_lock);
2838 list_for_each_entry(t, &host->target_list, list) {
2839 if (t != target &&
2840 target->id_ext == t->id_ext &&
2841 target->ioc_guid == t->ioc_guid &&
2842 target->initiator_ext == t->initiator_ext) {
2843 ret = false;
2844 break;
2845 }
2846 }
2847 spin_unlock(&host->target_lock);
2848
2849out:
2850 return ret;
2851}
2852
/*
 * Target ports are added by writing
 *
 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
 *     pkey=<P_Key>,service_id=<service ID>
 *
 * to the add_target sysfs attribute.
 */
enum {
	SRP_OPT_ERR		= 0,
	SRP_OPT_ID_EXT		= 1 << 0,
	SRP_OPT_IOC_GUID	= 1 << 1,
	SRP_OPT_DGID		= 1 << 2,
	SRP_OPT_PKEY		= 1 << 3,
	SRP_OPT_SERVICE_ID	= 1 << 4,
	SRP_OPT_MAX_SECT	= 1 << 5,
	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
	SRP_OPT_IO_CLASS	= 1 << 7,
	SRP_OPT_INITIATOR_EXT	= 1 << 8,
	SRP_OPT_CMD_SG_ENTRIES	= 1 << 9,
	SRP_OPT_ALLOW_EXT_SG	= 1 << 10,
	SRP_OPT_SG_TABLESIZE	= 1 << 11,
	SRP_OPT_COMP_VECTOR	= 1 << 12,
	SRP_OPT_TL_RETRY_COUNT	= 1 << 13,
	SRP_OPT_QUEUE_SIZE	= 1 << 14,
	/* Mask of the options that are mandatory in an add_target string. */
	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
				   SRP_OPT_IOC_GUID	|
				   SRP_OPT_DGID		|
				   SRP_OPT_PKEY		|
				   SRP_OPT_SERVICE_ID),
};
2884
/* match_token() table mapping add_target option names to SRP_OPT_* bits. */
static const match_table_t srp_opt_tokens = {
	{ SRP_OPT_ID_EXT,		"id_ext=%s"		},
	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s"		},
	{ SRP_OPT_DGID,			"dgid=%s"		},
	{ SRP_OPT_PKEY,			"pkey=%x"		},
	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
	{ SRP_OPT_MAX_SECT,		"max_sect=%d"		},
	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d"	},
	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u"	},
	{ SRP_OPT_ALLOW_EXT_SG,		"allow_ext_sg=%u"	},
	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u"	},
	{ SRP_OPT_COMP_VECTOR,		"comp_vector=%u"	},
	{ SRP_OPT_TL_RETRY_COUNT,	"tl_retry_count=%u"	},
	{ SRP_OPT_QUEUE_SIZE,		"queue_size=%d"		},
	{ SRP_OPT_ERR,			NULL			}
};
2903
/*
 * Parse an add_target option string (comma/newline separated key=value
 * pairs, see srp_opt_tokens) and fill in *target accordingly.
 *
 * Returns 0 on success; -ENOMEM on allocation failure; -EINVAL for a
 * malformed or out-of-range value or when a mandatory option from
 * SRP_OPT_ALL is missing.
 */
static int srp_parse_options(const char *buf, struct srp_target_port *target)
{
	char *options, *sep_opt;
	char *p;
	char dgid[3];	/* holds one two-hex-digit byte plus '\0' */
	substring_t args[MAX_OPT_ARGS];
	int opt_mask = 0;
	int token;
	int ret = -EINVAL;
	int i;

	/* Work on a private copy since strsep() modifies the string. */
	options = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	sep_opt = options;
	while ((p = strsep(&sep_opt, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, srp_opt_tokens, args);
		opt_mask |= token;

		switch (token) {
		case SRP_OPT_ID_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_IOC_GUID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_DGID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			/* The destination GID must be exactly 32 hex digits. */
			if (strlen(p) != 32) {
				pr_warn("bad dest GID parameter '%s'\n", p);
				kfree(p);
				goto out;
			}

			/* Convert two hex digits at a time into raw bytes. */
			for (i = 0; i < 16; ++i) {
				strlcpy(dgid, p + i * 2, sizeof(dgid));
				if (sscanf(dgid, "%hhx",
					   &target->orig_dgid.raw[i]) < 1) {
					ret = -EINVAL;
					kfree(p);
					goto out;
				}
			}
			kfree(p);
			break;

		case SRP_OPT_PKEY:
			if (match_hex(args, &token)) {
				pr_warn("bad P_Key parameter '%s'\n", p);
				goto out;
			}
			target->pkey = cpu_to_be16(token);
			break;

		case SRP_OPT_SERVICE_ID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_MAX_SECT:
			if (match_int(args, &token)) {
				pr_warn("bad max sect parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->max_sectors = token;
			break;

		case SRP_OPT_QUEUE_SIZE:
			if (match_int(args, &token) || token < 1) {
				pr_warn("bad queue_size parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->can_queue = token;
			/* Reserve extra slots for RSP and task management. */
			target->queue_size = token + SRP_RSP_SQ_SIZE +
					     SRP_TSK_MGMT_SQ_SIZE;
			/* An explicit max_cmd_per_lun option wins over this. */
			if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
				target->scsi_host->cmd_per_lun = token;
			break;

		case SRP_OPT_MAX_CMD_PER_LUN:
			if (match_int(args, &token) || token < 1) {
				pr_warn("bad max cmd_per_lun parameter '%s'\n",
					p);
				goto out;
			}
			target->scsi_host->cmd_per_lun = token;
			break;

		case SRP_OPT_IO_CLASS:
			if (match_hex(args, &token)) {
				pr_warn("bad IO class parameter '%s'\n", p);
				goto out;
			}
			/* Only the rev 10 and rev 16a IO classes are valid. */
			if (token != SRP_REV10_IB_IO_CLASS &&
			    token != SRP_REV16A_IB_IO_CLASS) {
				pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
					token, SRP_REV10_IB_IO_CLASS,
					SRP_REV16A_IB_IO_CLASS);
				goto out;
			}
			target->io_class = token;
			break;

		case SRP_OPT_INITIATOR_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_CMD_SG_ENTRIES:
			if (match_int(args, &token) || token < 1 || token > 255) {
				pr_warn("bad max cmd_sg_entries parameter '%s'\n",
					p);
				goto out;
			}
			target->cmd_sg_cnt = token;
			break;

		case SRP_OPT_ALLOW_EXT_SG:
			if (match_int(args, &token)) {
				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
				goto out;
			}
			target->allow_ext_sg = !!token;
			break;

		case SRP_OPT_SG_TABLESIZE:
			if (match_int(args, &token) || token < 1 ||
					token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
				pr_warn("bad max sg_tablesize parameter '%s'\n",
					p);
				goto out;
			}
			target->sg_tablesize = token;
			break;

		case SRP_OPT_COMP_VECTOR:
			if (match_int(args, &token) || token < 0) {
				pr_warn("bad comp_vector parameter '%s'\n", p);
				goto out;
			}
			target->comp_vector = token;
			break;

		case SRP_OPT_TL_RETRY_COUNT:
			if (match_int(args, &token) || token < 2 || token > 7) {
				pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
					p);
				goto out;
			}
			target->tl_retry_count = token;
			break;

		default:
			pr_warn("unknown parameter or missing value '%s' in target creation request\n",
				p);
			goto out;
		}
	}

	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
		ret = 0;
	else
		/* Report each mandatory option that was not supplied. */
		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
			    !(srp_opt_tokens[i].token & opt_mask))
				pr_warn("target creation request is missing parameter '%s'\n",
					srp_opt_tokens[i].pattern);

	/* Warn (but do not fail) about an inconsistent combination. */
	if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
	    && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
		pr_warn("cmd_per_lun = %d > queue_size = %d\n",
			target->scsi_host->cmd_per_lun,
			target->scsi_host->can_queue);

out:
	kfree(options);
	return ret;
}
3114
/*
 * add_target sysfs store handler: parse the option string in @buf,
 * allocate a SCSI host plus one RDMA channel per selected CPU, connect
 * the channels and register the target.  Returns @count on success or a
 * negative errno.  Serialized per srp_host via host->add_target_mutex.
 */
static ssize_t srp_create_target(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);
	struct Scsi_Host *target_host;
	struct srp_target_port *target;
	struct srp_rdma_ch *ch;
	struct srp_device *srp_dev = host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	int ret, node_idx, node, cpu, i;
	bool multich = false;

	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
	if (!target_host)
		return -ENOMEM;

	target_host->transportt  = ib_srp_transport_template;
	target_host->max_channel = 0;
	target_host->max_id      = 1;
	target_host->max_lun     = -1LL;
	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;

	target = host_to_target(target_host);

	/* Defaults; srp_parse_options() below may override several. */
	target->io_class	= SRP_REV16A_IB_IO_CLASS;
	target->scsi_host	= target_host;
	target->srp_host	= host;
	target->lkey		= host->srp_dev->mr->lkey;
	target->rkey		= host->srp_dev->mr->rkey;
	target->cmd_sg_cnt	= cmd_sg_entries;
	target->sg_tablesize	= indirect_sg_entries ? : cmd_sg_entries;
	target->allow_ext_sg	= allow_ext_sg;
	target->tl_retry_count	= 7;
	target->queue_size	= SRP_DEFAULT_QUEUE_SIZE;

	/*
	 * Avoid that the SCSI host can be removed by srp_remove_target()
	 * before this function returns.
	 */
	scsi_host_get(target->scsi_host);

	mutex_lock(&host->add_target_mutex);

	ret = srp_parse_options(buf, target);
	if (ret)
		goto out;

	ret = scsi_init_shared_tag_map(target_host, target_host->can_queue);
	if (ret)
		goto out;

	target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;

	/* Refuse a duplicate connection to the same target port. */
	if (!srp_conn_unique(target->srp_host, target)) {
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
			     be64_to_cpu(target->id_ext),
			     be64_to_cpu(target->ioc_guid),
			     be64_to_cpu(target->initiator_ext));
		ret = -EEXIST;
		goto out;
	}

	/* Without FMR/FR and external indirect descriptors, cap the S/G list. */
	if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
	    target->cmd_sg_cnt < target->sg_tablesize) {
		pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
		target->sg_tablesize = target->cmd_sg_cnt;
	}

	target_host->sg_tablesize = target->sg_tablesize;
	target->indirect_size = target->sg_tablesize *
				sizeof (struct srp_direct_buf);
	target->max_iu_len = sizeof (struct srp_cmd) +
			     sizeof (struct srp_indirect_buf) +
			     target->cmd_sg_cnt * sizeof (struct srp_direct_buf);

	INIT_WORK(&target->tl_err_work, srp_tl_err_work);
	INIT_WORK(&target->remove_work, srp_remove_work);
	spin_lock_init(&target->lock);
	ret = ib_query_gid(ibdev, host->port, 0, &target->sgid);
	if (ret)
		goto out;

	ret = -ENOMEM;
	/*
	 * Channel count: at least one per NUMA node, bounded by the
	 * ch_count module parameter (if set), the number of completion
	 * vectors and the number of online CPUs.
	 */
	target->ch_count = max_t(unsigned, num_online_nodes(),
				 min(ch_count ? :
				     min(4 * num_online_nodes(),
					 ibdev->num_comp_vectors),
				     num_online_cpus()));
	target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
			     GFP_KERNEL);
	if (!target->ch)
		goto out;

	/* Spread channels and completion vectors evenly over NUMA nodes. */
	node_idx = 0;
	for_each_online_node(node) {
		const int ch_start = (node_idx * target->ch_count /
				      num_online_nodes());
		const int ch_end = ((node_idx + 1) * target->ch_count /
				    num_online_nodes());
		const int cv_start = (node_idx * ibdev->num_comp_vectors /
				      num_online_nodes() + target->comp_vector)
				     % ibdev->num_comp_vectors;
		const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
				    num_online_nodes() + target->comp_vector)
				   % ibdev->num_comp_vectors;
		int cpu_idx = 0;

		for_each_online_cpu(cpu) {
			if (cpu_to_node(cpu) != node)
				continue;
			if (ch_start + cpu_idx >= ch_end)
				continue;
			ch = &target->ch[ch_start + cpu_idx];
			ch->target = target;
			ch->comp_vector = cv_start == cv_end ? cv_start :
				cv_start + cpu_idx % (cv_end - cv_start);
			spin_lock_init(&ch->lock);
			INIT_LIST_HEAD(&ch->free_tx);
			ret = srp_new_cm_id(ch);
			if (ret)
				goto err_disconnect;

			ret = srp_create_ch_ib(ch);
			if (ret)
				goto err_disconnect;

			ret = srp_alloc_req_data(ch);
			if (ret)
				goto err_disconnect;

			ret = srp_connect_ch(ch, multich);
			if (ret) {
				shost_printk(KERN_ERR, target->scsi_host,
					     PFX "Connection %d/%d failed\n",
					     ch_start + cpu_idx,
					     target->ch_count);
				/*
				 * Failing the very first channel is fatal;
				 * otherwise continue with the channels that
				 * did connect.
				 */
				if (node_idx == 0 && cpu_idx == 0) {
					goto err_disconnect;
				} else {
					srp_free_ch_ib(target, ch);
					srp_free_req_data(target, ch);
					target->ch_count = ch - target->ch;
					break;
				}
			}

			multich = true;
			cpu_idx++;
		}
		node_idx++;
	}

	target->scsi_host->nr_hw_queues = target->ch_count;

	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

	if (target->state != SRP_TARGET_REMOVED) {
		shost_printk(KERN_DEBUG, target->scsi_host, PFX
			     "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
			     be64_to_cpu(target->id_ext),
			     be64_to_cpu(target->ioc_guid),
			     be16_to_cpu(target->pkey),
			     be64_to_cpu(target->service_id),
			     target->sgid.raw, target->orig_dgid.raw);
	}

	ret = count;

out:
	mutex_unlock(&host->add_target_mutex);

	/* Drop the reference taken near the top of this function. */
	scsi_host_put(target->scsi_host);

	return ret;

err_disconnect:
	srp_disconnect_target(target);

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
		srp_free_req_data(target, ch);
	}

	kfree(target->ch);
	goto out;
}
3308
/* Write-only srp_host attribute: writing a login string creates a target. */
static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003310
Tony Jonesee959b02008-02-22 00:13:36 +01003311static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3312 char *buf)
Roland Dreieraef9ec32005-11-02 14:07:13 -08003313{
Tony Jonesee959b02008-02-22 00:13:36 +01003314 struct srp_host *host = container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003315
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01003316 return sprintf(buf, "%s\n", host->srp_dev->dev->name);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003317}
3318
/* Read-only srp_host attribute: underlying IB device name. */
static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003320
Tony Jonesee959b02008-02-22 00:13:36 +01003321static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3322 char *buf)
Roland Dreieraef9ec32005-11-02 14:07:13 -08003323{
Tony Jonesee959b02008-02-22 00:13:36 +01003324 struct srp_host *host = container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003325
3326 return sprintf(buf, "%d\n", host->port);
3327}
3328
/* Read-only srp_host attribute: IB port number. */
static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003330
Roland Dreierf5358a12006-06-17 20:37:29 -07003331static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
Roland Dreieraef9ec32005-11-02 14:07:13 -08003332{
3333 struct srp_host *host;
3334
3335 host = kzalloc(sizeof *host, GFP_KERNEL);
3336 if (!host)
3337 return NULL;
3338
3339 INIT_LIST_HEAD(&host->target_list);
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07003340 spin_lock_init(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003341 init_completion(&host->released);
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003342 mutex_init(&host->add_target_mutex);
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01003343 host->srp_dev = device;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003344 host->port = port;
3345
Tony Jonesee959b02008-02-22 00:13:36 +01003346 host->dev.class = &srp_class;
3347 host->dev.parent = device->dev->dma_device;
Kay Sieversd927e382009-01-06 10:44:39 -08003348 dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003349
Tony Jonesee959b02008-02-22 00:13:36 +01003350 if (device_register(&host->dev))
Roland Dreierf5358a12006-06-17 20:37:29 -07003351 goto free_host;
Tony Jonesee959b02008-02-22 00:13:36 +01003352 if (device_create_file(&host->dev, &dev_attr_add_target))
Roland Dreieraef9ec32005-11-02 14:07:13 -08003353 goto err_class;
Tony Jonesee959b02008-02-22 00:13:36 +01003354 if (device_create_file(&host->dev, &dev_attr_ibdev))
Roland Dreieraef9ec32005-11-02 14:07:13 -08003355 goto err_class;
Tony Jonesee959b02008-02-22 00:13:36 +01003356 if (device_create_file(&host->dev, &dev_attr_port))
Roland Dreieraef9ec32005-11-02 14:07:13 -08003357 goto err_class;
3358
3359 return host;
3360
3361err_class:
Tony Jonesee959b02008-02-22 00:13:36 +01003362 device_unregister(&host->dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003363
Roland Dreierf5358a12006-06-17 20:37:29 -07003364free_host:
Roland Dreieraef9ec32005-11-02 14:07:13 -08003365 kfree(host);
3366
3367 return NULL;
3368}
3369
/*
 * IB client "add" callback: invoked once for every RDMA device known to
 * the IB core.  Queries the device attributes, derives the memory
 * registration geometry shared by all targets on this device, allocates
 * a PD and a global DMA MR, and creates one srp_host per physical port.
 * On any failure the function cleans up and returns silently (the
 * callback has no return value).
 */
static void srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct ib_device_attr *dev_attr;
	struct srp_host *host;
	int mr_page_shift, s, e, p;
	u64 max_pages_per_mr;

	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
	if (!dev_attr)
		return;

	if (ib_query_device(device, dev_attr)) {
		pr_warn("Query device failed for %s\n", device->name);
		goto free_attr;
	}

	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
	if (!srp_dev)
		goto free_attr;

	/*
	 * FMR support is inferred from the presence of all four FMR verbs;
	 * fast registration (FR) from the MEM_MGT_EXTENSIONS capability bit.
	 */
	srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
			    device->map_phys_fmr && device->unmap_fmr);
	srp_dev->has_fr = (dev_attr->device_cap_flags &
			   IB_DEVICE_MEM_MGT_EXTENSIONS);
	if (!srp_dev->has_fmr && !srp_dev->has_fr)
		dev_warn(&device->dev, "neither FMR nor FR is supported\n");

	/* FR wins over FMR when both exist and the prefer_fr module
	 * parameter is set, or when FMR is absent. */
	srp_dev->use_fast_reg = (srp_dev->has_fr &&
				 (!srp_dev->has_fmr || prefer_fr));

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 4096 bytes. We're unlikely to build large sglists
	 * out of smaller entries.
	 */
	mr_page_shift		= max(12, ffs(dev_attr->page_size_cap) - 1);
	srp_dev->mr_page_size	= 1 << mr_page_shift;
	srp_dev->mr_page_mask	= ~((u64) srp_dev->mr_page_size - 1);
	/* Cap pages-per-MR by both the HCA's max MR size and SRP_MAX_PAGES_PER_MR. */
	max_pages_per_mr	= dev_attr->max_mr_size;
	do_div(max_pages_per_mr, srp_dev->mr_page_size);
	srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
					  max_pages_per_mr);
	if (srp_dev->use_fast_reg) {
		/* FR work requests carry a page list with its own HW limit. */
		srp_dev->max_pages_per_mr =
			min_t(u32, srp_dev->max_pages_per_mr,
			      dev_attr->max_fast_reg_page_list_len);
	}
	srp_dev->mr_max_size	= srp_dev->mr_page_size *
				   srp_dev->max_pages_per_mr;
	pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
		 device->name, mr_page_shift, dev_attr->max_mr_size,
		 dev_attr->max_fast_reg_page_list_len,
		 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);

	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd  = ib_alloc_pd(device);
	if (IS_ERR(srp_dev->pd))
		goto free_dev;

	/* Global DMA MR used for local buffers and, without FMR/FR, RDMA. */
	srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
				    IB_ACCESS_LOCAL_WRITE |
				    IB_ACCESS_REMOTE_READ |
				    IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(srp_dev->mr))
		goto err_pd;

	/* Switches expose a single port numbered 0; HCAs use 1..phys_port_cnt. */
	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);

	/* Success path: dev_attr is only needed during setup. */
	goto free_attr;

err_pd:
	ib_dealloc_pd(srp_dev->pd);

free_dev:
	kfree(srp_dev);

free_attr:
	kfree(dev_attr);
}
3466
/*
 * IB client "remove" callback: undo srp_add_one() for @device.  For each
 * registered port: remove the sysfs host (blocking new target creation),
 * queue removal of every existing target port, wait for all removal work
 * to finish, then free the host.  Finally release the global MR and PD.
 */
static void srp_remove_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	struct srp_target_port *target;

	srp_dev = ib_get_client_data(device, &srp_client);
	if (!srp_dev)
		return;

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		device_unregister(&host->dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Remove all target ports.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list)
			srp_queue_remove_work(target);
		spin_unlock(&host->target_lock);

		/*
		 * Wait for tl_err and target port removal tasks.
		 */
		flush_workqueue(system_long_wq);
		flush_workqueue(srp_remove_wq);

		/* Safe to free: host->released completed and all work flushed. */
		kfree(host);
	}

	ib_dereg_mr(srp_dev->mr);
	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}
3507
/*
 * Callbacks and tunables handed to the SRP transport class.  The
 * *_tmo/reconnect_delay members point at this driver's module
 * parameters so the transport layer sees load-time tuning.
 */
static struct srp_function_template ib_srp_transport_functions = {
	.has_rport_state	 = true,
	.reset_timer_if_blocked	 = true,
	.reconnect_delay	 = &srp_reconnect_delay,
	.fast_io_fail_tmo	 = &srp_fast_io_fail_tmo,
	.dev_loss_tmo		 = &srp_dev_loss_tmo,
	.reconnect		 = srp_rport_reconnect,
	.rport_delete		 = srp_rport_delete,
	.terminate_rport_io	 = srp_terminate_io,
};
3518
Roland Dreieraef9ec32005-11-02 14:07:13 -08003519static int __init srp_init_module(void)
3520{
3521 int ret;
3522
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05003523 BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
Bart Van Asschedd5e6e32010-08-30 19:27:20 +00003524
David Dillow49248642011-01-14 18:23:24 -05003525 if (srp_sg_tablesize) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003526 pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
David Dillow49248642011-01-14 18:23:24 -05003527 if (!cmd_sg_entries)
3528 cmd_sg_entries = srp_sg_tablesize;
3529 }
3530
3531 if (!cmd_sg_entries)
3532 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
3533
3534 if (cmd_sg_entries > 255) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003535 pr_warn("Clamping cmd_sg_entries to 255\n");
David Dillow49248642011-01-14 18:23:24 -05003536 cmd_sg_entries = 255;
David Dillow1e89a192008-04-16 21:01:12 -07003537 }
3538
David Dillowc07d4242011-01-16 13:57:10 -05003539 if (!indirect_sg_entries)
3540 indirect_sg_entries = cmd_sg_entries;
3541 else if (indirect_sg_entries < cmd_sg_entries) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003542 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
3543 cmd_sg_entries);
David Dillowc07d4242011-01-16 13:57:10 -05003544 indirect_sg_entries = cmd_sg_entries;
3545 }
3546
Bart Van Asschebcc05912014-07-09 15:57:26 +02003547 srp_remove_wq = create_workqueue("srp_remove");
Wei Yongjunda05be22014-08-14 08:56:22 +08003548 if (!srp_remove_wq) {
3549 ret = -ENOMEM;
Bart Van Asschebcc05912014-07-09 15:57:26 +02003550 goto out;
3551 }
3552
3553 ret = -ENOMEM;
FUJITA Tomonori32368222007-06-27 16:33:12 +09003554 ib_srp_transport_template =
3555 srp_attach_transport(&ib_srp_transport_functions);
3556 if (!ib_srp_transport_template)
Bart Van Asschebcc05912014-07-09 15:57:26 +02003557 goto destroy_wq;
FUJITA Tomonori32368222007-06-27 16:33:12 +09003558
Roland Dreieraef9ec32005-11-02 14:07:13 -08003559 ret = class_register(&srp_class);
3560 if (ret) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003561 pr_err("couldn't register class infiniband_srp\n");
Bart Van Asschebcc05912014-07-09 15:57:26 +02003562 goto release_tr;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003563 }
3564
Michael S. Tsirkinc1a0b232006-08-21 16:40:12 -07003565 ib_sa_register_client(&srp_sa_client);
3566
Roland Dreieraef9ec32005-11-02 14:07:13 -08003567 ret = ib_register_client(&srp_client);
3568 if (ret) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003569 pr_err("couldn't register IB client\n");
Bart Van Asschebcc05912014-07-09 15:57:26 +02003570 goto unreg_sa;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003571 }
3572
Bart Van Asschebcc05912014-07-09 15:57:26 +02003573out:
3574 return ret;
3575
3576unreg_sa:
3577 ib_sa_unregister_client(&srp_sa_client);
3578 class_unregister(&srp_class);
3579
3580release_tr:
3581 srp_release_transport(ib_srp_transport_template);
3582
3583destroy_wq:
3584 destroy_workqueue(srp_remove_wq);
3585 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003586}
3587
/* Module exit: tear down in the reverse order of srp_init_module(). */
static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);
	srp_release_transport(ib_srp_transport_template);
	destroy_workqueue(srp_remove_wq);
}
3596
/* Register module entry/exit points. */
module_init(srp_init_module);
module_exit(srp_cleanup_module);