/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <rdma/ib_cache.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"1.0"
#define DRV_RELDATE	"July 1, 2013"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_INFO(release_date, DRV_RELDATE);

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool prefer_fr;
static bool register_always;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(prefer_fr, bool, 0444);
MODULE_PARM_DESC(prefer_fr,
		 "Whether to use fast registration if both FMR and fast registration are supported");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
		 "Use memory registration even for contiguous memory regions");

static struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");

static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr);
static void srp_send_completion(struct ib_cq *cq, void *ch_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

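/**
 * srp_tmo_get() - show a timeout module parameter as a string
 * @buffer: Where to write the parameter value.
 * @kp: Kernel parameter being read.
 *
 * Prints the timeout in seconds, or "off" if the timeout is negative
 * and hence disabled.
 */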
static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sprintf(buffer, "%d", tmo);
	else
		return sprintf(buffer, "off");
}

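/**
 * srp_tmo_set() - parse and validate a timeout module parameter
 * @val: New parameter value; "off" selects -1, i.e. disabled.
 * @kp: Kernel parameter being set.
 *
 * The three SRP timeouts (reconnect_delay, fast_io_fail_tmo and
 * dev_loss_tmo) are validated against each other via srp_tmo_valid()
 * before the new value is stored.
 */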
static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	if (strncmp(val, "off", 3) != 0) {
		res = kstrtoint(val, 0, &tmo);
		if (res)
			goto out;
	} else {
		tmo = -1;
	}
	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}

static struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

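/**
 * srp_alloc_iu() - allocate and DMA-map an information unit
 * @host: SRP host that owns the IU.
 * @size: Size of the IU buffer in bytes.
 * @gfp_mask: Allocation flags.
 * @direction: DMA data direction for the buffer.
 *
 * Returns a pointer to the new IU, or NULL on failure.
 */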
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %d\n", event->event);
}

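/**
 * srp_init_qp() - transition a queue pair into the INIT state
 * @target: SRP target port.
 * @qp: Queue pair to initialize.
 *
 * Looks up the P_Key index for the target port and enables remote
 * read and write access on the queue pair.
 */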
static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

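/**
 * srp_new_cm_id() - create a new CM ID for a channel
 * @ch: SRP RDMA channel.
 *
 * Allocates a fresh IB CM ID, destroys the old one (if any) and
 * reinitializes the channel's path record fields from the target port.
 */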
static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, ch);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (ch->cm_id)
		ib_destroy_cm_id(ch->cm_id);
	ch->cm_id = new_cm_id;
	ch->path.sgid = target->sgid;
	ch->path.dgid = target->orig_dgid;
	ch->path.pkey = target->pkey;
	ch->path.service_id = target->service_id;

	return 0;
}

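/**
 * srp_alloc_fmr_pool() - allocate an FMR pool for a target port
 * @pool: SRP target port the pool is allocated for.
 *
 * The pool is sized after the SCSI host queue depth, with one quarter
 * of the pool used as dirty watermark.
 */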
static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_fmr_pool_param fmr_param;

	memset(&fmr_param, 0, sizeof(fmr_param));
	fmr_param.pool_size	    = target->scsi_host->can_queue;
	fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
	fmr_param.cache		    = 1;
	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
	fmr_param.page_shift	    = ilog2(dev->mr_page_size);
	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_WRITE |
				       IB_ACCESS_REMOTE_READ);

	return ib_create_fmr_pool(dev->pd, &fmr_param);
}

/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
	int i;
	struct srp_fr_desc *d;

	if (!pool)
		return;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		if (d->frpl)
			ib_free_fast_reg_page_list(d->frpl);
		if (d->mr)
			ib_dereg_mr(d->mr);
	}
	kfree(pool);
}

/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
					      struct ib_pd *pd, int pool_size,
					      int max_page_list_len)
{
	struct srp_fr_pool *pool;
	struct srp_fr_desc *d;
	struct ib_mr *mr;
	struct ib_fast_reg_page_list *frpl;
	int i, ret = -EINVAL;

	if (pool_size <= 0)
		goto err;
	ret = -ENOMEM;
	pool = kzalloc(sizeof(struct srp_fr_pool) +
		       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
	if (!pool)
		goto err;
	pool->size = pool_size;
	pool->max_page_list_len = max_page_list_len;
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free_list);

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		mr = ib_alloc_fast_reg_mr(pd, max_page_list_len);
		if (IS_ERR(mr)) {
			ret = PTR_ERR(mr);
			goto destroy_pool;
		}
		d->mr = mr;
		frpl = ib_alloc_fast_reg_page_list(device, max_page_list_len);
		if (IS_ERR(frpl)) {
			ret = PTR_ERR(frpl);
			goto destroy_pool;
		}
		d->frpl = frpl;
		list_add_tail(&d->entry, &pool->free_list);
	}

out:
	return pool;

destroy_pool:
	srp_destroy_fr_pool(pool);

err:
	pool = ERR_PTR(ret);
	goto out;
}

/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
	struct srp_fr_desc *d = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->free_list)) {
		d = list_first_entry(&pool->free_list, typeof(*d), entry);
		list_del(&d->entry);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return d;
}

/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
			    int n)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < n; i++)
		list_add(&desc[i]->entry, &pool->free_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;

	return srp_create_fr_pool(dev->dev, dev->pd,
				  target->scsi_host->can_queue,
				  dev->max_pages_per_mr);
}

/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @ch: SRP RDMA channel.
 *
 * Change a queue pair into the error state and wait until all receive
 * completions have been processed before destroying it. This prevents
 * the receive completion handler from accessing the queue pair while it
 * is being destroyed.
 */
static void srp_destroy_qp(struct srp_rdma_ch *ch)
{
	static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID };
	struct ib_recv_wr *bad_wr;
	int ret;

	/* Destroying a QP and reusing ch->done is only safe if not connected */
	WARN_ON_ONCE(ch->connected);

	ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
	WARN_ONCE(ret, "ib_modify_qp() returned %d\n", ret);
	if (ret)
		goto out;

	init_completion(&ch->done);
	ret = ib_post_recv(ch->qp, &wr, &bad_wr);
	WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
	if (ret == 0)
		wait_for_completion(&ch->done);

out:
	ib_destroy_qp(ch->qp);
}

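/**
 * srp_create_ch_ib() - create the IB resources of an RDMA channel
 * @ch: SRP RDMA channel.
 *
 * Allocates receive and send completion queues, a queue pair and,
 * depending on the HCA capabilities, either a fast registration pool
 * or an FMR pool. Resources that already exist are replaced.
 */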
static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	struct ib_fmr_pool *fmr_pool = NULL;
	struct srp_fr_pool *fr_pool = NULL;
	const int m = 1 + dev->use_fast_reg;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	/* + 1 for SRP_LAST_WR_ID */
	recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
			       target->queue_size + 1, ch->comp_vector);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
			       m * target->queue_size, ch->comp_vector);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = m * target->queue_size;
	init_attr->cap.max_recv_wr     = target->queue_size + 1;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = 1;
	init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = send_cq;
	init_attr->recv_cq             = recv_cq;

	qp = ib_create_qp(dev->pd, init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, qp);
	if (ret)
		goto err_qp;

	if (dev->use_fast_reg && dev->has_fr) {
		fr_pool = srp_alloc_fr_pool(target);
		if (IS_ERR(fr_pool)) {
			ret = PTR_ERR(fr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
		ch->fr_pool = fr_pool;
	} else if (!dev->use_fast_reg && dev->has_fmr) {
		fmr_pool = srp_alloc_fmr_pool(target);
		if (IS_ERR(fmr_pool)) {
			ret = PTR_ERR(fmr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FMR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
		ch->fmr_pool = fmr_pool;
	}

	if (ch->qp)
		srp_destroy_qp(ch);
	if (ch->recv_cq)
		ib_destroy_cq(ch->recv_cq);
	if (ch->send_cq)
		ib_destroy_cq(ch->send_cq);

	ch->qp = qp;
	ch->recv_cq = recv_cq;
	ch->send_cq = send_cq;

	kfree(init_attr);
	return 0;

err_qp:
	ib_destroy_qp(qp);

err_send_cq:
	ib_destroy_cq(send_cq);

err_recv_cq:
	ib_destroy_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}

/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
			   struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	int i;

	if (!ch->target)
		return;

	if (ch->cm_id) {
		ib_destroy_cm_id(ch->cm_id);
		ch->cm_id = NULL;
	}

	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() did not, return. */
	if (!ch->qp)
		return;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
	} else {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
	}
	srp_destroy_qp(ch);
	ib_destroy_cq(ch->send_cq);
	ib_destroy_cq(ch->recv_cq);

	/*
	 * Clear ch->target so that the SCSI error handler does not use this
	 * channel after it has been freed. The SCSI error handler may keep
	 * trying to perform recovery actions after scsi_remove_host() has
	 * returned.
	 */
	ch->target = NULL;

	ch->qp = NULL;
	ch->send_cq = ch->recv_cq = NULL;

	if (ch->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->rx_ring[i]);
		kfree(ch->rx_ring);
		ch->rx_ring = NULL;
	}
	if (ch->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->tx_ring[i]);
		kfree(ch->tx_ring);
		ch->tx_ring = NULL;
	}
}

static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *ch_ptr)
{
	struct srp_rdma_ch *ch = ch_ptr;
	struct srp_target_port *target = ch->target;

	ch->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		ch->path = *pathrec;
	complete(&ch->done);
}

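/**
 * srp_lookup_path() - look up an IB path record for a channel
 * @ch: SRP RDMA channel.
 *
 * Issues an SA path record query and waits for its completion. The
 * result is stored in ch->path by srp_path_rec_completion().
 */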
static int srp_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	ch->path.numb_path = 1;

	init_completion(&ch->done);

	ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
					       target->srp_host->srp_dev->dev,
					       target->srp_host->port,
					       &ch->path,
					       IB_SA_PATH_REC_SERVICE_ID |
					       IB_SA_PATH_REC_DGID	 |
					       IB_SA_PATH_REC_SGID	 |
					       IB_SA_PATH_REC_NUMB_PATH	 |
					       IB_SA_PATH_REC_PKEY,
					       SRP_PATH_REC_TIMEOUT_MS,
					       GFP_KERNEL,
					       srp_path_rec_completion,
					       ch, &ch->path_query);
	if (ch->path_query_id < 0)
		return ch->path_query_id;

	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		return ret;

	if (ch->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return ch->status;
}

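/**
 * srp_send_req() - send an SRP_LOGIN_REQ through the IB CM
 * @ch: SRP RDMA channel.
 * @multich: Whether to set the SRP_MULTICHAN_MULTI flag, i.e. whether
 *	this login is for an additional channel of an existing target.
 */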
static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path	      = &ch->path;
	req->param.alternate_path     = NULL;
	req->param.service_id	      = target->service_id;
	req->param.qp_num	      = ch->qp->qp_num;
	req->param.qp_type	      = ch->qp->qp_type;
	req->param.private_data	      = &req->priv;
	req->param.private_data_len   = sizeof req->priv;
	req->param.flow_control	      = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn	     &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count		      = target->tl_retry_count;
	req->param.rnr_retry_count	      = 7;
	req->param.max_cm_retries	      = 15;

	req->priv.opcode	= SRP_LOGIN_REQ;
	req->priv.tag		= 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	req->priv.req_flags	= (multich ? SRP_MULTICHAN_MULTI :
				   SRP_MULTICHAN_SINGLE);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(ch->cm_id, &req->param);

	kfree(req);

	return status;
}

static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(srp_remove_wq, &target->remove_work);

	return changed;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	/* XXX should send SRP_I_LOGOUT request */

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ch->connected = false;
		if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "Sending CM DREQ failed\n");
		}
	}
}

static void srp_free_req_data(struct srp_target_port *target,
			      struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct srp_request *req;
	int i;

	if (!ch->target || !ch->req_ring)
		return;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		if (dev->use_fast_reg)
			kfree(req->fr_list);
		else
			kfree(req->fmr_list);
		kfree(req->map_page);
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}

	kfree(ch->req_ring);
	ch->req_ring = NULL;
}

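/**
 * srp_alloc_req_data() - allocate the per-request data of a channel
 * @ch: SRP RDMA channel.
 *
 * Allocates the request ring along with the memory registration lists,
 * page arrays and DMA-mapped indirect descriptor buffers of each request.
 */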
static int srp_alloc_req_data(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *srp_dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	struct srp_request *req;
	void *mr_list;
	dma_addr_t dma_addr;
	int i, ret = -ENOMEM;

	ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
			       GFP_KERNEL);
	if (!ch->req_ring)
		goto out;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
				  GFP_KERNEL);
		if (!mr_list)
			goto out;
		if (srp_dev->use_fast_reg)
			req->fr_list = mr_list;
		else
			req->fmr_list = mr_list;
		req->map_page = kmalloc(srp_dev->max_pages_per_mr *
					sizeof(void *), GFP_KERNEL);
		if (!req->map_page)
			goto out;
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->indirect_desc)
			goto out;

		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr))
			goto out;

		req->indirect_dma_addr = dma_addr;
	}
	ret = 0;

out:
	return ret;
}

/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template and that did not exist
 * before invocation of this function will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	struct device_attribute **attr;

	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
		device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_rport_get(target->rport);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_stop_rport_timers(target->rport);
	srp_disconnect_target(target);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
	}
	cancel_work_sync(&target->tl_err_work);
	srp_rport_put(target->rport);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_req_data(target, ch);
	}
	kfree(target->ch);
	target->ch = NULL;

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_host_put(target->scsi_host);
}

static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_remove_target(target);
}

static void srp_rport_delete(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
}

/**
 * srp_connected_ch() - number of connected channels
 * @target: SRP target port.
 */
static int srp_connected_ch(struct srp_target_port *target)
{
	int i, c = 0;

	for (i = 0; i < target->ch_count; i++)
		c += target->ch[i].connected;

	return c;
}

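/**
 * srp_connect_ch() - establish an SRP login on a channel
 * @ch: SRP RDMA channel.
 * @multich: Whether this channel is an additional channel of a target
 *	that already has a connected channel.
 *
 * Performs the path lookup and retries the login as long as the target
 * answers with a port, LID or QP redirect.
 */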
static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	int ret;

	WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);

	ret = srp_lookup_path(ch);
	if (ret)
		return ret;

	while (1) {
		init_completion(&ch->done);
		ret = srp_send_req(ch, multich);
		if (ret)
			return ret;
		ret = wait_for_completion_interruptible(&ch->done);
		if (ret < 0)
			return ret;

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (ch->status) {
		case 0:
			ch->connected = true;
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(ch);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "giving up on stale connection\n");
			ch->status = -ECONNRESET;
			return ch->status;

		default:
			return ch->status;
		}
	}
}

static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
{
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr = {
		.opcode		    = IB_WR_LOCAL_INV,
		.wr_id		    = LOCAL_INV_WR_ID_MASK,
		.next		    = NULL,
		.num_sge	    = 0,
		.send_flags	    = 0,
		.ex.invalidate_rkey = rkey,
	};

	return ib_post_send(ch->qp, &wr, &bad_wr);
}

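/**
 * srp_unmap_data() - unmap the memory registered for a SCSI command
 * @scmnd: SCSI command whose data buffers are to be unmapped.
 * @ch:    SRP RDMA channel the command was submitted on.
 * @req:   SRP request associated with @scmnd.
 *
 * For fast registration an invalidation work request is queued for every
 * memory region; for FMR the mappings are simply released back to the pool.
 */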
static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_rdma_ch *ch,
			   struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	int i, res;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (dev->use_fast_reg) {
		struct srp_fr_desc **pfr;

		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
			res = srp_inv_rkey(ch, (*pfr)->mr->rkey);
			if (res < 0) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "Queueing INV WR for rkey %#x failed (%d)\n",
					     (*pfr)->mr->rkey, res);
				queue_work(system_long_wq,
					   &target->tl_err_work);
			}
		}
		if (req->nmdesc)
			srp_fr_pool_put(ch->fr_pool, req->fr_list,
					req->nmdesc);
	} else {
		struct ib_pool_fmr **pfmr;

		for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
			ib_fmr_pool_unmap(*pfmr);
	}

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}

/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @ch: SRP RDMA channel.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
				       struct srp_request *req,
				       struct scsi_device *sdev,
				       struct scsi_cmnd *scmnd)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	if (req->scmnd &&
	    (!sdev || req->scmnd->device == sdev) &&
	    (!scmnd || req->scmnd == scmnd)) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	} else {
		scmnd = NULL;
	}
	spin_unlock_irqrestore(&ch->lock, flags);

	return scmnd;
}

1128 * srp_free_req() - Unmap data and add request to the free request list.
Bart Van Assche509c07b2014-10-30 14:48:30 +01001129 * @ch: SRP RDMA channel.
Bart Van Asscheaf246632014-05-20 15:04:21 +02001130 * @req: Request to be freed.
1131 * @scmnd: SCSI command associated with @req.
1132 * @req_lim_delta: Amount to be added to @target->req_lim.
Bart Van Assche22032992012-08-14 13:18:53 +00001133 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01001134static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
1135 struct scsi_cmnd *scmnd, s32 req_lim_delta)
Bart Van Assche22032992012-08-14 13:18:53 +00001136{
1137 unsigned long flags;
1138
Bart Van Assche509c07b2014-10-30 14:48:30 +01001139 srp_unmap_data(scmnd, ch, req);
Bart Van Assche22032992012-08-14 13:18:53 +00001140
Bart Van Assche509c07b2014-10-30 14:48:30 +01001141 spin_lock_irqsave(&ch->lock, flags);
1142 ch->req_lim += req_lim_delta;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001143 spin_unlock_irqrestore(&ch->lock, flags);
Ishai Rabinovitz526b4ca2006-06-17 20:37:38 -07001144}
1145
Bart Van Assche509c07b2014-10-30 14:48:30 +01001146static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
1147 struct scsi_device *sdev, int result)
Ishai Rabinovitz526b4ca2006-06-17 20:37:38 -07001148{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001149 struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);
Bart Van Assche22032992012-08-14 13:18:53 +00001150
1151 if (scmnd) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001152 srp_free_req(ch, req, scmnd, 0);
Bart Van Asscheed9b2262013-10-26 14:34:27 +02001153 scmnd->result = result;
Bart Van Assche22032992012-08-14 13:18:53 +00001154 scmnd->scsi_done(scmnd);
Bart Van Assche22032992012-08-14 13:18:53 +00001155 }
Ishai Rabinovitz526b4ca2006-06-17 20:37:38 -07001156}
1157
static void srp_terminate_io(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	struct Scsi_Host *shost = target->scsi_host;
	struct scsi_device *sdev;
	int i, j;

	/*
	 * Invoking srp_terminate_io() while srp_queuecommand() is running
	 * is not safe. Hence the warning statement below.
	 */
	shost_for_each_device(sdev, shost)
		WARN_ON_ONCE(sdev->request_queue->request_fn_active);

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];

		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL,
				       DID_TRANSPORT_FAILFAST << 16);
		}
	}
}

/*
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to guarantee this is not to call this function
 * directly but to call srp_reconnect_rport() instead, since that function
 * serializes calls of this function via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
 */
static int srp_rport_reconnect(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	int i, j, ret = 0;
	bool multich = false;

	srp_disconnect_target(target);

	if (target->state == SRP_TARGET_SCANNING)
		return -ENODEV;

	/*
	 * Now get a new local CM ID so that we avoid confusing the target in
	 * case things are really fouled up. Doing so also ensures that all CM
	 * callbacks will have finished before a new QP is allocated.
	 */
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		if (!ch->target)
			break;
		ret += srp_new_cm_id(ch);
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		if (!ch->target)
			break;
		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL, DID_RESET << 16);
		}
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		if (!ch->target)
			break;
		/*
		 * Whether or not creating a new CM ID succeeded, create a new
		 * QP. This guarantees that all completion callback function
		 * invocations have finished before request resetting starts.
		 */
		ret += srp_create_ch_ib(ch);

		INIT_LIST_HEAD(&ch->free_tx);
		for (j = 0; j < target->queue_size; ++j)
			list_add(&ch->tx_ring[j]->list, &ch->free_tx);
	}

	target->qp_in_error = false;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		if (ret || !ch->target)
			break;
		ret = srp_connect_ch(ch, multich);
		multich = true;
	}

	if (ret == 0)
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "reconnect succeeded\n");

	return ret;
}

static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
			 unsigned int dma_len, u32 rkey)
{
	struct srp_direct_buf *desc = state->desc;

	desc->va = cpu_to_be64(dma_addr);
	desc->key = cpu_to_be32(rkey);
	desc->len = cpu_to_be32(dma_len);

	state->total_len += dma_len;
	state->desc++;
	state->ndesc++;
}

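/**
 * srp_map_finish_fmr() - map the accumulated page list through an FMR
 * @state: SRP scatter/gather mapping state.
 * @ch:    SRP RDMA channel.
 *
 * Maps the pages gathered in @state via the channel's FMR pool and
 * records the resulting memory descriptor.
 */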
1274static int srp_map_finish_fmr(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001275 struct srp_rdma_ch *ch)
David Dillow8f26c9f2011-01-14 19:45:50 -05001276{
David Dillow8f26c9f2011-01-14 19:45:50 -05001277 struct ib_pool_fmr *fmr;
Roland Dreierf5358a12006-06-17 20:37:29 -07001278 u64 io_addr = 0;
David Dillow8f26c9f2011-01-14 19:45:50 -05001279
Bart Van Assche509c07b2014-10-30 14:48:30 +01001280 fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
David Dillow8f26c9f2011-01-14 19:45:50 -05001281 state->npages, io_addr);
1282 if (IS_ERR(fmr))
1283 return PTR_ERR(fmr);
1284
1285 *state->next_fmr++ = fmr;
Bart Van Assche52ede082014-05-20 15:07:45 +02001286 state->nmdesc++;
David Dillow8f26c9f2011-01-14 19:45:50 -05001287
Bart Van Assche52ede082014-05-20 15:07:45 +02001288 srp_map_desc(state, 0, state->dma_len, fmr->fmr->rkey);
Bart Van Assche539dde62014-05-20 15:05:46 +02001289
David Dillow8f26c9f2011-01-14 19:45:50 -05001290 return 0;
1291}
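
/*
 * Added note on the descriptor built above: ib_fmr_pool_map_phys() is
 * called with io_addr 0, so the mapping starts at I/O virtual address 0
 * and srp_map_desc(state, 0, ...) hands the target a descriptor whose va
 * field is simply the offset into the FMR mapping.
 */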

static int srp_map_finish_fr(struct srp_map_state *state,
			     struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr;
	struct srp_fr_desc *desc;
	u32 rkey;

	desc = srp_fr_pool_get(ch->fr_pool);
	if (!desc)
		return -ENOMEM;

	rkey = ib_inc_rkey(desc->mr->rkey);
	ib_update_fast_reg_key(desc->mr, rkey);

	memcpy(desc->frpl->page_list, state->pages,
	       sizeof(state->pages[0]) * state->npages);

	memset(&wr, 0, sizeof(wr));
	wr.opcode = IB_WR_FAST_REG_MR;
	wr.wr_id = FAST_REG_WR_ID_MASK;
	wr.wr.fast_reg.iova_start = state->base_dma_addr;
	wr.wr.fast_reg.page_list = desc->frpl;
	wr.wr.fast_reg.page_list_len = state->npages;
	wr.wr.fast_reg.page_shift = ilog2(dev->mr_page_size);
	wr.wr.fast_reg.length = state->dma_len;
	wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_READ |
				       IB_ACCESS_REMOTE_WRITE);
	wr.wr.fast_reg.rkey = desc->mr->lkey;

	*state->next_fr++ = desc;
	state->nmdesc++;

	srp_map_desc(state, state->base_dma_addr, state->dma_len,
		     desc->mr->rkey);

	return ib_post_send(ch->qp, &wr, &bad_wr);
}

static int srp_finish_mapping(struct srp_map_state *state,
			      struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret = 0;

	if (state->npages == 0)
		return 0;

	if (state->npages == 1 && !register_always)
		srp_map_desc(state, state->base_dma_addr, state->dma_len,
			     target->rkey);
	else
		ret = target->srp_host->srp_dev->use_fast_reg ?
			srp_map_finish_fr(state, ch) :
			srp_map_finish_fmr(state, ch);

	if (ret == 0) {
		state->npages = 0;
		state->dma_len = 0;
	}

	return ret;
}

static void srp_map_update_start(struct srp_map_state *state,
				 struct scatterlist *sg, int sg_index,
				 dma_addr_t dma_addr)
{
	state->unmapped_sg = sg;
	state->unmapped_index = sg_index;
	state->unmapped_addr = dma_addr;
}

static int srp_map_sg_entry(struct srp_map_state *state,
			    struct srp_rdma_ch *ch,
			    struct scatterlist *sg, int sg_index,
			    bool use_mr)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
	unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
	unsigned int len;
	int ret;

	if (!dma_len)
		return 0;

	if (!use_mr) {
		/*
		 * Once we're in direct map mode for a request, we don't
		 * go back to FMR or FR mode, so no need to update anything
		 * other than the descriptor.
		 */
		srp_map_desc(state, dma_addr, dma_len, target->rkey);
		return 0;
	}

	/*
	 * Since not all RDMA HW drivers support non-zero page offsets for
	 * FMR, if we start at an offset into a page, don't merge into the
	 * current FMR mapping. Finish it out, and use the kernel's MR for
	 * this sg entry.
	 */
	if ((!dev->use_fast_reg && dma_addr & ~dev->mr_page_mask) ||
	    dma_len > dev->mr_max_size) {
		ret = srp_finish_mapping(state, ch);
		if (ret)
			return ret;

		srp_map_desc(state, dma_addr, dma_len, target->rkey);
		srp_map_update_start(state, NULL, 0, 0);
		return 0;
	}

	/*
	 * If this is the first sg that will be mapped via FMR or via FR, save
	 * our position. We need to know the first unmapped entry, its index,
	 * and the first unmapped address within that entry to be able to
	 * restart mapping after an error.
	 */
	if (!state->unmapped_sg)
		srp_map_update_start(state, sg, sg_index, dma_addr);

	while (dma_len) {
		unsigned offset = dma_addr & ~dev->mr_page_mask;
		if (state->npages == dev->max_pages_per_mr || offset != 0) {
			ret = srp_finish_mapping(state, ch);
			if (ret)
				return ret;

			srp_map_update_start(state, sg, sg_index, dma_addr);
		}

		len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);

		if (!state->npages)
			state->base_dma_addr = dma_addr;
		state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
		state->dma_len += len;
		dma_addr += len;
		dma_len -= len;
	}

	/*
	 * If the last entry of the MR wasn't a full page, then we need to
	 * close it out and start a new one -- we can only merge at page
	 * boundaries.
	 */
	ret = 0;
	if (len != dev->mr_page_size) {
		ret = srp_finish_mapping(state, ch);
		if (!ret)
			srp_map_update_start(state, NULL, 0, 0);
	}
	return ret;
}
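
/*
 * Worked example for the splitting loop above, assuming a 4 KiB
 * mr_page_size: a 10240-byte sg entry that starts page aligned (offset 0)
 * is recorded as the chunks [addr, 4096), [addr + 4096, 4096) and
 * [addr + 8192, 2048). Since the final chunk (len == 2048) is not a full
 * page, the closing test fires and the mapping is finished, so the next
 * sg entry starts a fresh memory region.
 */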

static int srp_map_sg(struct srp_map_state *state, struct srp_rdma_ch *ch,
		      struct srp_request *req, struct scatterlist *scat,
		      int count)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct scatterlist *sg;
	int i;
	bool use_mr;

	state->desc = req->indirect_desc;
	state->pages = req->map_page;
	if (dev->use_fast_reg) {
		state->next_fr = req->fr_list;
		use_mr = !!ch->fr_pool;
	} else {
		state->next_fmr = req->fmr_list;
		use_mr = !!ch->fmr_pool;
	}

	for_each_sg(scat, sg, count, i) {
		if (srp_map_sg_entry(state, ch, sg, i, use_mr)) {
			/*
			 * Memory registration failed, so backtrack to the
			 * first unmapped entry and continue on without using
			 * memory registration.
			 */
			dma_addr_t dma_addr;
			unsigned int dma_len;

backtrack:
			sg = state->unmapped_sg;
			i = state->unmapped_index;

			dma_addr = ib_sg_dma_address(ibdev, sg);
			dma_len = ib_sg_dma_len(ibdev, sg);
			dma_len -= (state->unmapped_addr - dma_addr);
			dma_addr = state->unmapped_addr;
			use_mr = false;
			srp_map_desc(state, dma_addr, dma_len, target->rkey);
		}
	}

	if (use_mr && srp_finish_mapping(state, ch))
		goto backtrack;

	req->nmdesc = state->nmdesc;

	return 0;
}
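
/*
 * Added note on the backtrack label above: it sits inside the
 * for_each_sg() body, so the final 'goto backtrack' jumps back into the
 * loop. 'sg' and 'i' are rewound to the first entry not yet covered by a
 * memory region, that entry is emitted as a direct descriptor, and the
 * loop then continues with use_mr == false, i.e. every remaining entry is
 * also mapped without memory registration.
 */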

static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
			struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct scatterlist *scat;
	struct srp_cmd *cmd = req->cmd->buf;
	int len, nents, count;
	struct srp_device *dev;
	struct ib_device *ibdev;
	struct srp_map_state state;
	struct srp_indirect_buf *indirect_hdr;
	u32 table_len;
	u8 fmt;

	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
		return sizeof (struct srp_cmd);

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled data direction %d\n",
			     scmnd->sc_data_direction);
		return -EINVAL;
	}

	nents = scsi_sg_count(scmnd);
	scat = scsi_sglist(scmnd);

	dev = target->srp_host->srp_dev;
	ibdev = dev->dev;

	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
	if (unlikely(count == 0))
		return -EIO;

	fmt = SRP_DATA_DESC_DIRECT;
	len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);

	if (count == 1 && !register_always) {
		/*
		 * The midlayer only generated a single gather/scatter
		 * entry, or DMA mapping coalesced everything to a
		 * single entry. So a direct descriptor along with
		 * the DMA MR suffices.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
		buf->key = cpu_to_be32(target->rkey);
		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));

		req->nmdesc = 0;
		goto map_complete;
	}

	/*
	 * We have more than one scatter/gather entry, so build our indirect
	 * descriptor table, trying to merge as many entries as we can.
	 */
	indirect_hdr = (void *) cmd->add_data;

	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
				   target->indirect_size, DMA_TO_DEVICE);

	memset(&state, 0, sizeof(state));
	srp_map_sg(&state, ch, req, scat, count);

	/*
	 * We've mapped the request, now pull as much of the indirect
	 * descriptor table as we can into the command buffer. If this
	 * target is not using an external indirect table, we are
	 * guaranteed to fit into the command, as the SCSI layer won't
	 * give us more S/G entries than we allow.
	 */
	if (state.ndesc == 1) {
		/*
		 * Memory registration collapsed the sg-list into one entry,
		 * so use a direct descriptor.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		*buf = req->indirect_desc[0];
		goto map_complete;
	}

	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
		     !target->allow_ext_sg)) {
		shost_printk(KERN_ERR, target->scsi_host,
			     "Could not fit S/G list into SRP_CMD\n");
		return -EIO;
	}

	count = min(state.ndesc, target->cmd_sg_cnt);
	table_len = state.ndesc * sizeof (struct srp_direct_buf);

	fmt = SRP_DATA_DESC_INDIRECT;
	len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
	len += count * sizeof (struct srp_direct_buf);

	memcpy(indirect_hdr->desc_list, req->indirect_desc,
	       count * sizeof (struct srp_direct_buf));

	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
	indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
	indirect_hdr->len = cpu_to_be32(state.total_len);

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->data_out_desc_cnt = count;
	else
		cmd->data_in_desc_cnt = count;

	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
				      DMA_TO_DEVICE);

map_complete:
	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;
}
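
/*
 * Size bookkeeping for the return value of srp_map_data() (added note): a
 * direct descriptor adds one 16-byte struct srp_direct_buf to the
 * command, while the indirect format adds a struct srp_indirect_buf plus
 * one 16-byte descriptor for each of the 'count' entries kept inside the
 * command itself; descriptors beyond target->cmd_sg_cnt stay in the
 * externally referenced table at req->indirect_dma_addr.
 */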

/*
 * Return an IU to the free pool and, unless the IU is an SRP response,
 * also return a credit.
 */
static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
			  enum srp_iu_type iu_type)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	list_add(&iu->list, &ch->free_tx);
	if (iu_type != SRP_IU_RSP)
		++ch->req_lim;
	spin_unlock_irqrestore(&ch->lock, flags);
}

/*
 * Must be called with ch->lock held to protect req_lim and free_tx.
 * If IU is not sent, it must be returned using srp_put_tx_iu().
 *
 * Note:
 * An upper limit for the number of allocated information units for each
 * request type is:
 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
 *   more than Scsi_Host.can_queue requests.
 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
 *   one unanswered SRP request to an initiator.
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
				      enum srp_iu_type iu_type)
{
	struct srp_target_port *target = ch->target;
	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
	struct srp_iu *iu;

	srp_send_completion(ch->send_cq, ch);

	if (list_empty(&ch->free_tx))
		return NULL;

	/* Initiator responses to target requests do not consume credits */
	if (iu_type != SRP_IU_RSP) {
		if (ch->req_lim <= rsv) {
			++target->zero_req_lim;
			return NULL;
		}

		--ch->req_lim;
	}

	iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
	list_del(&iu->list);
	return iu;
}
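
/*
 * Credit example for the check above (added note): with
 * SRP_TSK_MGMT_SQ_SIZE credits held in reserve, a request for an
 * SRP_IU_CMD IU fails once req_lim drops to the reserve level, while a
 * task-management request (rsv == 0) can still consume the remaining
 * credits. This is what keeps aborts and resets possible when the target
 * has stopped granting credits for normal commands.
 */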

static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
{
	struct srp_target_port *target = ch->target;
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;

	list.addr = iu->dma;
	list.length = len;
	list.lkey = target->lkey;

	wr.next = NULL;
	wr.wr_id = (uintptr_t) iu;
	wr.sg_list = &list;
	wr.num_sge = 1;
	wr.opcode = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	return ib_post_send(ch->qp, &wr, &bad_wr);
}
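
/*
 * Added note: every send is posted with a single SGE and
 * IB_SEND_SIGNALED, so each information unit generates exactly one send
 * completion; that is what allows srp_send_completion() to recycle the IU
 * onto ch->free_tx by looking only at wc.wr_id.
 */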

static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
{
	struct srp_target_port *target = ch->target;
	struct ib_recv_wr wr, *bad_wr;
	struct ib_sge list;

	list.addr = iu->dma;
	list.length = iu->size;
	list.lkey = target->lkey;

	wr.next = NULL;
	wr.wr_id = (uintptr_t) iu;
	wr.sg_list = &list;
	wr.num_sge = 1;

	return ib_post_recv(ch->qp, &wr, &bad_wr);
}
1722
Bart Van Assche509c07b2014-10-30 14:48:30 +01001723static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001724{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001725 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001726 struct srp_request *req;
1727 struct scsi_cmnd *scmnd;
1728 unsigned long flags;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001729
Roland Dreieraef9ec32005-11-02 14:07:13 -08001730 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001731 spin_lock_irqsave(&ch->lock, flags);
1732 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1733 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche94a91742010-11-26 14:50:09 -05001734
Bart Van Assche509c07b2014-10-30 14:48:30 +01001735 ch->tsk_mgmt_status = -1;
David Dillowf8b6e312010-11-26 13:02:21 -05001736 if (be32_to_cpu(rsp->resp_data_len) >= 4)
Bart Van Assche509c07b2014-10-30 14:48:30 +01001737 ch->tsk_mgmt_status = rsp->data[3];
1738 complete(&ch->tsk_mgmt_done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001739 } else {
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02001740 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1741 if (scmnd) {
1742 req = (void *)scmnd->host_scribble;
1743 scmnd = srp_claim_req(ch, req, NULL, scmnd);
1744 }
Bart Van Assche22032992012-08-14 13:18:53 +00001745 if (!scmnd) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001746 shost_printk(KERN_ERR, target->scsi_host,
Bart Van Assched92c0da2014-10-06 17:14:36 +02001747 "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1748 rsp->tag, ch - target->ch, ch->qp->qp_num);
Bart Van Assche22032992012-08-14 13:18:53 +00001749
Bart Van Assche509c07b2014-10-30 14:48:30 +01001750 spin_lock_irqsave(&ch->lock, flags);
1751 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1752 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche22032992012-08-14 13:18:53 +00001753
1754 return;
1755 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001756 scmnd->result = rsp->status;
1757
1758 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1759 memcpy(scmnd->sense_buffer, rsp->data +
1760 be32_to_cpu(rsp->resp_data_len),
1761 min_t(int, be32_to_cpu(rsp->sense_data_len),
1762 SCSI_SENSE_BUFFERSIZE));
1763 }
1764
Bart Van Asschee7145312014-07-09 15:57:51 +02001765 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001766 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
Bart Van Asschee7145312014-07-09 15:57:51 +02001767 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1768 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1769 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1770 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1771 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1772 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
Roland Dreieraef9ec32005-11-02 14:07:13 -08001773
Bart Van Assche509c07b2014-10-30 14:48:30 +01001774 srp_free_req(ch, req, scmnd,
Bart Van Assche22032992012-08-14 13:18:53 +00001775 be32_to_cpu(rsp->req_lim_delta));
1776
David Dillowf8b6e312010-11-26 13:02:21 -05001777 scmnd->host_scribble = NULL;
1778 scmnd->scsi_done(scmnd);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001779 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001780}

static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
			       void *rsp, int len)
{
	struct srp_target_port *target = ch->target;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	unsigned long flags;
	struct srp_iu *iu;
	int err;

	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_delta;
	iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
	spin_unlock_irqrestore(&ch->lock, flags);

	if (!iu) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "no IU available to send response\n");
		return 1;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
	memcpy(iu->buf, rsp, len);
	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);

	err = srp_post_send(ch, iu, len);
	if (err) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "unable to post response: %d\n", err);
		srp_put_tx_iu(ch, iu, SRP_IU_RSP);
	}

	return err;
}

static void srp_process_cred_req(struct srp_rdma_ch *ch,
				 struct srp_cred_req *req)
{
	struct srp_cred_rsp rsp = {
		.opcode = SRP_CRED_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
		shost_printk(KERN_ERR, ch->target->scsi_host, PFX
			     "problems processing SRP_CRED_REQ\n");
}

static void srp_process_aer_req(struct srp_rdma_ch *ch,
				struct srp_aer_req *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_aer_rsp rsp = {
		.opcode = SRP_AER_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	shost_printk(KERN_ERR, target->scsi_host, PFX
		     "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));

	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_AER_REQ\n");
}

static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc)
{
	struct srp_target_port *target = ch->target;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
	int res;
	u8 opcode;

	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
				   DMA_FROM_DEVICE);

	opcode = *(u8 *) iu->buf;

	if (0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "recv completion, opcode 0x%02x\n", opcode);
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
			       iu->buf, wc->byte_len, true);
	}

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(ch, iu->buf);
		break;

	case SRP_CRED_REQ:
		srp_process_cred_req(ch, iu->buf);
		break;

	case SRP_AER_REQ:
		srp_process_aer_req(ch, iu->buf);
		break;

	case SRP_T_LOGOUT:
		/* XXX Handle target logout */
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Got target logout request\n");
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
		break;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
				      DMA_FROM_DEVICE);

	res = srp_post_recv(ch, iu);
	if (res != 0)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Recv failed with error code %d\n", res);
}

/**
 * srp_tl_err_work() - handle a transport layer error
 * @work: Work structure embedded in an SRP target port.
 *
 * Note: This function may get invoked before the rport has been created,
 * hence the target->rport test.
 */
static void srp_tl_err_work(struct work_struct *work)
{
	struct srp_target_port *target;

	target = container_of(work, struct srp_target_port, tl_err_work);
	if (target->rport)
		srp_start_tl_fail_timers(target->rport);
}

static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
			      bool send_err, struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;

	if (wr_id == SRP_LAST_WR_ID) {
		complete(&ch->done);
		return;
	}

	if (ch->connected && !target->qp_in_error) {
		if (wr_id & LOCAL_INV_WR_ID_MASK) {
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "LOCAL_INV failed with status %d\n",
				     wc_status);
		} else if (wr_id & FAST_REG_WR_ID_MASK) {
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "FAST_REG_MR failed status %d\n",
				     wc_status);
		} else {
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "failed %s status %d for iu %p\n",
				     send_err ? "send" : "receive",
				     wc_status, (void *)(uintptr_t)wr_id);
		}
		queue_work(system_long_wq, &target->tl_err_work);
	}
	target->qp_in_error = true;
}

static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr)
{
	struct srp_rdma_ch *ch = ch_ptr;
	struct ib_wc wc;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (likely(wc.status == IB_WC_SUCCESS)) {
			srp_handle_recv(ch, &wc);
		} else {
			srp_handle_qp_err(wc.wr_id, wc.status, false, ch);
		}
	}
}

static void srp_send_completion(struct ib_cq *cq, void *ch_ptr)
{
	struct srp_rdma_ch *ch = ch_ptr;
	struct ib_wc wc;
	struct srp_iu *iu;

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (likely(wc.status == IB_WC_SUCCESS)) {
			iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
			list_add(&iu->list, &ch->free_tx);
		} else {
			srp_handle_qp_err(wc.wr_id, wc.status, true, ch);
		}
	}
}

static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(shost);
	struct srp_rport *rport = target->rport;
	struct srp_rdma_ch *ch;
	struct srp_request *req;
	struct srp_iu *iu;
	struct srp_cmd *cmd;
	struct ib_device *dev;
	unsigned long flags;
	u32 tag;
	u16 idx;
	int len, ret;
	const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;

	/*
	 * The SCSI EH thread is the only context from which srp_queuecommand()
	 * can get invoked for blocked devices (SDEV_BLOCK /
	 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
	 * locking the rport mutex if invoked from inside the SCSI EH.
	 */
	if (in_scsi_eh)
		mutex_lock(&rport->mutex);

	scmnd->result = srp_chkready(target->rport);
	if (unlikely(scmnd->result))
		goto err;

	WARN_ON_ONCE(scmnd->request->tag < 0);
	tag = blk_mq_unique_tag(scmnd->request);
	ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
	idx = blk_mq_unique_tag_to_tag(tag);
	WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
		  dev_name(&shost->shost_gendev), tag, idx,
		  target->req_ring_size);

	spin_lock_irqsave(&ch->lock, flags);
	iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
	spin_unlock_irqrestore(&ch->lock, flags);

	if (!iu)
		goto err;

	req = &ch->req_ring[idx];
	dev = target->srp_host->srp_dev->dev;
	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
				   DMA_TO_DEVICE);

	scmnd->host_scribble = (void *) req;

	cmd = iu->buf;
	memset(cmd, 0, sizeof *cmd);

	cmd->opcode = SRP_CMD;
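	/*
	 * Added note on the LUN conversion below: shifting the LUN into the
	 * top 16 bits before the cpu_to_be64() conversion places it in the
	 * first level of the 8-byte SCSI LUN structure, which matches SAM
	 * peripheral device addressing for LUNs below 256.
	 */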
	cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
	cmd->tag = tag;
	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);

	req->scmnd = scmnd;
	req->cmd = iu;

	len = srp_map_data(scmnd, ch, req);
	if (len < 0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Failed to map data (%d)\n", len);
		/*
		 * If we ran out of memory descriptors (-ENOMEM) because an
		 * application is queuing many requests with more than
		 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
		 * to reduce the queue depth temporarily.
		 */
		scmnd->result = len == -ENOMEM ?
			DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
		goto err_iu;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
				      DMA_TO_DEVICE);

	if (srp_post_send(ch, iu, len)) {
		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
		goto err_unmap;
	}

	ret = 0;

unlock_rport:
	if (in_scsi_eh)
		mutex_unlock(&rport->mutex);

	return ret;

err_unmap:
	srp_unmap_data(scmnd, ch, req);

err_iu:
	srp_put_tx_iu(ch, iu, SRP_IU_CMD);

	/*
	 * Prevent the loops that iterate over the request ring from
	 * encountering a dangling SCSI command pointer.
	 */
	req->scmnd = NULL;

err:
	if (scmnd->result) {
		scmnd->scsi_done(scmnd);
		ret = 0;
	} else {
		ret = SCSI_MLQUEUE_HOST_BUSY;
	}

	goto unlock_rport;
}

/*
 * Note: the resources allocated in this function are freed in
 * srp_free_ch_ib().
 */
static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int i;

	ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
			      GFP_KERNEL);
	if (!ch->rx_ring)
		goto err_no_ring;
	ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
			      GFP_KERNEL);
	if (!ch->tx_ring)
		goto err_no_ring;

	for (i = 0; i < target->queue_size; ++i) {
		ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
					      ch->max_ti_iu_len,
					      GFP_KERNEL, DMA_FROM_DEVICE);
		if (!ch->rx_ring[i])
			goto err;
	}

	for (i = 0; i < target->queue_size; ++i) {
		ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
					      target->max_iu_len,
					      GFP_KERNEL, DMA_TO_DEVICE);
		if (!ch->tx_ring[i])
			goto err;

		list_add(&ch->tx_ring[i]->list, &ch->free_tx);
	}

	return 0;

err:
	for (i = 0; i < target->queue_size; ++i) {
		srp_free_iu(target->srp_host, ch->rx_ring[i]);
		srp_free_iu(target->srp_host, ch->tx_ring[i]);
	}

err_no_ring:
	kfree(ch->tx_ring);
	ch->tx_ring = NULL;
	kfree(ch->rx_ring);
	ch->rx_ring = NULL;

	return -ENOMEM;
}

static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
{
	uint64_t T_tr_ns, max_compl_time_ms;
	uint32_t rq_tmo_jiffies;

	/*
	 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
	 * table 91), both the QP timeout and the retry count have to be set
	 * for RC QP's during the RTR to RTS transition.
	 */
	WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
		     (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));

	/*
	 * Set target->rq_tmo_jiffies to one second more than the largest time
	 * it can take before an error completion is generated. See also
	 * C9-140..142 in the IBTA spec for more information about how to
	 * convert the QP Local ACK Timeout value to nanoseconds.
	 */
	T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
	max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
	do_div(max_compl_time_ms, NSEC_PER_MSEC);
	rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);

	return rq_tmo_jiffies;
}
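
/*
 * Worked example for the computation above, assuming qp_attr->timeout ==
 * 14 and retry_cnt == 7: T_tr_ns = 4096 * 2^14 ns ~= 67.1 ms, the
 * worst-case completion time is 7 * 4 * 67.1 ms ~= 1879 ms, and
 * rq_tmo_jiffies therefore corresponds to roughly 2.9 seconds.
 */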

static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
			       struct srp_login_rsp *lrsp,
			       struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_qp_attr *qp_attr = NULL;
	int attr_mask = 0;
	int ret;
	int i;

	if (lrsp->opcode == SRP_LOGIN_RSP) {
		ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
		ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);

		/*
		 * Reserve credits for task management so we don't
		 * bounce requests back to the SCSI mid-layer.
		 */
		target->scsi_host->can_queue
			= min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
			      target->scsi_host->can_queue);
		target->scsi_host->cmd_per_lun
			= min_t(int, target->scsi_host->can_queue,
				target->scsi_host->cmd_per_lun);
	} else {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
		ret = -ECONNRESET;
		goto error;
	}

	if (!ch->rx_ring) {
		ret = srp_alloc_iu_bufs(ch);
		if (ret)
			goto error;
	}

	ret = -ENOMEM;
	qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
	if (!qp_attr)
		goto error;

	qp_attr->qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	if (ret)
		goto error_free;

	ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
	if (ret)
		goto error_free;

	for (i = 0; i < target->queue_size; i++) {
		struct srp_iu *iu = ch->rx_ring[i];

		ret = srp_post_recv(ch, iu);
		if (ret)
			goto error_free;
	}

	qp_attr->qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	if (ret)
		goto error_free;

	target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);

	ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
	if (ret)
		goto error_free;

	ret = ib_send_cm_rtu(cm_id, NULL, 0);

error_free:
	kfree(qp_attr);

error:
	ch->status = ret;
}
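
/*
 * Added note on the ordering above: all receive buffers are posted while
 * the QP is still in the RTR state, before the RTU is sent. The target
 * may start sending as soon as it has processed the RTU, so posting the
 * receives first guarantees that the first information unit from the
 * target finds a receive buffer already posted.
 */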

static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event,
			       struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct Scsi_Host *shost = target->scsi_host;
	struct ib_class_port_info *cpi;
	int opcode;

	switch (event->param.rej_rcvd.reason) {
	case IB_CM_REJ_PORT_CM_REDIRECT:
		cpi = event->param.rej_rcvd.ari;
		ch->path.dlid = cpi->redirect_lid;
		ch->path.pkey = cpi->redirect_pkey;
		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
		memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);

		ch->status = ch->path.dlid ?
			SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
		break;

	case IB_CM_REJ_PORT_REDIRECT:
		if (srp_target_is_topspin(target)) {
			/*
			 * Topspin/Cisco SRP gateways incorrectly send
			 * reject reason code 25 when they mean 24
			 * (port redirect).
			 */
			memcpy(ch->path.dgid.raw,
			       event->param.rej_rcvd.ari, 16);

			shost_printk(KERN_DEBUG, shost,
				     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
				     be64_to_cpu(ch->path.dgid.global.subnet_prefix),
				     be64_to_cpu(ch->path.dgid.global.interface_id));

			ch->status = SRP_PORT_REDIRECT;
		} else {
			shost_printk(KERN_WARNING, shost,
				     " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
			ch->status = -ECONNRESET;
		}
		break;

	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
		shost_printk(KERN_WARNING, shost,
			     " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
		ch->status = -ECONNRESET;
		break;

	case IB_CM_REJ_CONSUMER_DEFINED:
		opcode = *(u8 *) event->private_data;
		if (opcode == SRP_LOGIN_REJ) {
			struct srp_login_rej *rej = event->private_data;
			u32 reason = be32_to_cpu(rej->reason);

			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
				shost_printk(KERN_WARNING, shost,
					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
			else
				shost_printk(KERN_WARNING, shost, PFX
					     "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
					     target->sgid.raw,
					     target->orig_dgid.raw, reason);
		} else
			shost_printk(KERN_WARNING, shost,
				     " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
				     " opcode 0x%02x\n", opcode);
		ch->status = -ECONNRESET;
		break;

	case IB_CM_REJ_STALE_CONN:
		shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
		ch->status = SRP_STALE_CONN;
		break;

	default:
		shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
			     event->param.rej_rcvd.reason);
		ch->status = -ECONNRESET;
	}
}

static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct srp_rdma_ch *ch = cm_id->context;
	struct srp_target_port *target = ch->target;
	int comp = 0;

	switch (event->event) {
	case IB_CM_REQ_ERROR:
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Sending CM REQ failed\n");
		comp = 1;
		ch->status = -ECONNRESET;
		break;

	case IB_CM_REP_RECEIVED:
		comp = 1;
		srp_cm_rep_handler(cm_id, event->private_data, ch);
		break;

	case IB_CM_REJ_RECEIVED:
		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
		comp = 1;

		srp_cm_rej_handler(cm_id, event, ch);
		break;

	case IB_CM_DREQ_RECEIVED:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "DREQ received - connection closed\n");
		ch->connected = false;
		if (ib_send_cm_drep(cm_id, NULL, 0))
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "Sending CM DREP failed\n");
		queue_work(system_long_wq, &target->tl_err_work);
		break;

	case IB_CM_TIMEWAIT_EXIT:
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "connection closed\n");
		comp = 1;

		ch->status = 0;
		break;

	case IB_CM_MRA_RECEIVED:
	case IB_CM_DREQ_ERROR:
	case IB_CM_DREP_RECEIVED:
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	if (comp)
		complete(&ch->done);

	return 0;
}

/**
 * srp_change_queue_depth - set the device queue depth
 * @sdev: scsi device struct
 * @qdepth: requested queue depth
 *
 * Returns the new queue depth.
 */
static int
srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	if (!sdev->tagged_supported)
		qdepth = 1;
	return scsi_change_queue_depth(sdev, qdepth);
}
2412
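/*
 * Send an SRP task management function and wait up to
 * SRP_ABORT_TIMEOUT_MS for the target's response. The rport mutex is
 * held while the request is posted so that srp_create_ch_ib() cannot
 * recreate the channel resources concurrently. Returns 0 on success
 * and -1 on failure.
 */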
static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag,
			     unsigned int lun, u8 func)
{
	struct srp_target_port *target = ch->target;
	struct srp_rport *rport = target->rport;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu;
	struct srp_tsk_mgmt *tsk_mgmt;

	if (!ch->connected || target->qp_in_error)
		return -1;

	init_completion(&ch->tsk_mgmt_done);

	/*
	 * Lock the rport mutex to prevent srp_create_ch_ib() from being
	 * invoked while a task management function is being sent.
	 */
	mutex_lock(&rport->mutex);
	spin_lock_irq(&ch->lock);
	iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
	spin_unlock_irq(&ch->lock);

	if (!iu) {
		mutex_unlock(&rport->mutex);

		return -1;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
				   DMA_TO_DEVICE);
	tsk_mgmt = iu->buf;
	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);

	tsk_mgmt->opcode	= SRP_TSK_MGMT;
	tsk_mgmt->lun		= cpu_to_be64((u64) lun << 48);
	tsk_mgmt->tag		= req_tag | SRP_TAG_TSK_MGMT;
	tsk_mgmt->tsk_mgmt_func = func;
	tsk_mgmt->task_tag	= req_tag;

	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
				      DMA_TO_DEVICE);
	if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
		srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
		mutex_unlock(&rport->mutex);

		return -1;
	}
	mutex_unlock(&rport->mutex);

	if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
		return -1;

	return 0;
}

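/*
 * SCSI error-handling abort callback. The owning channel is derived
 * from the block layer tag; if the request can still be claimed, an
 * SRP_TSK_ABORT_TASK task management function is sent to the target.
 */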
static int srp_abort(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
	u32 tag;
	u16 ch_idx;
	struct srp_rdma_ch *ch;
	int ret;

	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");

	if (!req)
		return SUCCESS;
	tag = blk_mq_unique_tag(scmnd->request);
	ch_idx = blk_mq_unique_tag_to_hwq(tag);
	if (WARN_ON_ONCE(ch_idx >= target->ch_count))
		return SUCCESS;
	ch = &target->ch[ch_idx];
	if (!srp_claim_req(ch, req, NULL, scmnd))
		return SUCCESS;
	shost_printk(KERN_ERR, target->scsi_host,
		     "Sending SRP abort for tag %#x\n", tag);
	if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
			      SRP_TSK_ABORT_TASK) == 0)
		ret = SUCCESS;
	else if (target->rport->state == SRP_RPORT_LOST)
		ret = FAST_IO_FAIL;
	else
		ret = FAILED;
	srp_free_req(ch, req, scmnd, 0);
	scmnd->result = DID_ABORT << 16;
	scmnd->scsi_done(scmnd);

	return ret;
}

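/*
 * SCSI error-handling device reset callback: sends SRP_TSK_LUN_RESET on
 * the first channel and, if the target accepted it, finishes all
 * outstanding requests for this device on every channel with DID_RESET.
 */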
static int srp_reset_device(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_rdma_ch *ch;
	int i, j;

	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");

	ch = &target->ch[0];
	if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
			      SRP_TSK_LUN_RESET))
		return FAILED;
	if (ch->tsk_mgmt_status)
		return FAILED;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
		}
	}

	return SUCCESS;
}

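/*
 * SCSI error-handling host reset callback: delegates to the SRP
 * transport layer, which disconnects and reconnects the rport.
 */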
static int srp_reset_host(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);

	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");

	return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
}

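/*
 * Raise the block layer request timeout for disk devices to at least
 * 30 seconds and at least target->rq_tmo_jiffies.
 */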
static int srp_slave_configure(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct srp_target_port *target = host_to_target(shost);
	struct request_queue *q = sdev->request_queue;
	unsigned long timeout;

	if (sdev->type == TYPE_DISK) {
		timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
		blk_queue_rq_timeout(q, timeout);
	}

	return 0;
}

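/*
 * Read-only sysfs attributes, exported per SCSI host through
 * srp_host_attrs[] below.
 */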
static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
}

static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
}

static ssize_t show_service_id(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
}

static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
}

static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%pI6\n", target->sgid.raw);
}

static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));
	struct srp_rdma_ch *ch = &target->ch[0];

	return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
}

static ssize_t show_orig_dgid(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
}

static ssize_t show_req_lim(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));
	struct srp_rdma_ch *ch;
	int i, req_lim = INT_MAX;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		req_lim = min(req_lim, ch->req_lim);
	}
	return sprintf(buf, "%d\n", req_lim);
}

static ssize_t show_zero_req_lim(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->zero_req_lim);
}

static ssize_t show_local_ib_port(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->srp_host->port);
}

static ssize_t show_local_ib_device(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
}

static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->ch_count);
}

static ssize_t show_comp_vector(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->comp_vector);
}

static ssize_t show_tl_retry_count(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->tl_retry_count);
}

static ssize_t show_cmd_sg_entries(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%u\n", target->cmd_sg_cnt);
}

static ssize_t show_allow_ext_sg(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
}

static DEVICE_ATTR(id_ext,          S_IRUGO, show_id_ext,          NULL);
static DEVICE_ATTR(ioc_guid,        S_IRUGO, show_ioc_guid,        NULL);
static DEVICE_ATTR(service_id,      S_IRUGO, show_service_id,      NULL);
static DEVICE_ATTR(pkey,            S_IRUGO, show_pkey,            NULL);
static DEVICE_ATTR(sgid,            S_IRUGO, show_sgid,            NULL);
static DEVICE_ATTR(dgid,            S_IRUGO, show_dgid,            NULL);
static DEVICE_ATTR(orig_dgid,       S_IRUGO, show_orig_dgid,       NULL);
static DEVICE_ATTR(req_lim,         S_IRUGO, show_req_lim,         NULL);
static DEVICE_ATTR(zero_req_lim,    S_IRUGO, show_zero_req_lim,    NULL);
static DEVICE_ATTR(local_ib_port,   S_IRUGO, show_local_ib_port,   NULL);
static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
static DEVICE_ATTR(ch_count,        S_IRUGO, show_ch_count,        NULL);
static DEVICE_ATTR(comp_vector,     S_IRUGO, show_comp_vector,     NULL);
static DEVICE_ATTR(tl_retry_count,  S_IRUGO, show_tl_retry_count,  NULL);
static DEVICE_ATTR(cmd_sg_entries,  S_IRUGO, show_cmd_sg_entries,  NULL);
static DEVICE_ATTR(allow_ext_sg,    S_IRUGO, show_allow_ext_sg,    NULL);

static struct device_attribute *srp_host_attrs[] = {
	&dev_attr_id_ext,
	&dev_attr_ioc_guid,
	&dev_attr_service_id,
	&dev_attr_pkey,
	&dev_attr_sgid,
	&dev_attr_dgid,
	&dev_attr_orig_dgid,
	&dev_attr_req_lim,
	&dev_attr_zero_req_lim,
	&dev_attr_local_ib_port,
	&dev_attr_local_ib_device,
	&dev_attr_ch_count,
	&dev_attr_comp_vector,
	&dev_attr_tl_retry_count,
	&dev_attr_cmd_sg_entries,
	&dev_attr_allow_ext_sg,
	NULL
};

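/*
 * SCSI host template for the SRP initiator. can_queue and cmd_per_lun
 * are defaults; both can be overridden per target via the queue_size
 * and max_cmd_per_lun login string parameters.
 */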
static struct scsi_host_template srp_template = {
	.module                  = THIS_MODULE,
	.name                    = "InfiniBand SRP initiator",
	.proc_name               = DRV_NAME,
	.slave_configure         = srp_slave_configure,
	.info                    = srp_target_info,
	.queuecommand            = srp_queuecommand,
	.change_queue_depth      = srp_change_queue_depth,
	.eh_abort_handler        = srp_abort,
	.eh_device_reset_handler = srp_reset_device,
	.eh_host_reset_handler   = srp_reset_host,
	.skip_settle_delay       = true,
	.sg_tablesize            = SRP_DEF_SG_TABLESIZE,
	.can_queue               = SRP_DEFAULT_CMD_SQ_SIZE,
	.this_id                 = -1,
	.cmd_per_lun             = SRP_DEFAULT_CMD_SQ_SIZE,
	.use_clustering          = ENABLE_CLUSTERING,
	.shost_attrs             = srp_host_attrs,
	.use_blk_tags            = 1,
	.track_queue_depth       = 1,
};

static int srp_sdev_count(struct Scsi_Host *host)
{
	struct scsi_device *sdev;
	int c = 0;

	shost_for_each_device(sdev, host)
		c++;

	return c;
}

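/*
 * Register the SCSI host, add an rport and scan for LUNs. The target
 * starts out in SRP_TARGET_SCANNING state and is only moved to
 * SRP_TARGET_LIVE if the scan finished with all channels still
 * connected.
 */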
static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
	struct srp_rport_identifiers ids;
	struct srp_rport *rport;

	target->state = SRP_TARGET_SCANNING;
	sprintf(target->target_name, "SRP.T10:%016llX",
		be64_to_cpu(target->id_ext));

	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
		return -ENODEV;

	memcpy(ids.port_id, &target->id_ext, 8);
	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
	ids.roles = SRP_RPORT_ROLE_TARGET;
	rport = srp_rport_add(target->scsi_host, &ids);
	if (IS_ERR(rport)) {
		scsi_remove_host(target->scsi_host);
		return PTR_ERR(rport);
	}

	rport->lld_data = target;
	target->rport = rport;

	spin_lock(&host->target_lock);
	list_add_tail(&target->list, &host->target_list);
	spin_unlock(&host->target_lock);

	scsi_scan_target(&target->scsi_host->shost_gendev,
			 0, target->scsi_id, SCAN_WILD_CARD, 0);

	if (srp_connected_ch(target) < target->ch_count ||
	    target->qp_in_error) {
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "SCSI scan failed - removing SCSI host\n");
		srp_queue_remove_work(target);
		goto out;
	}

	pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n",
		 dev_name(&target->scsi_host->shost_gendev),
		 srp_sdev_count(target->scsi_host));

	spin_lock_irq(&target->lock);
	if (target->state == SRP_TARGET_SCANNING)
		target->state = SRP_TARGET_LIVE;
	spin_unlock_irq(&target->lock);

out:
	return 0;
}

static void srp_release_dev(struct device *dev)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);

	complete(&host->released);
}

static struct class srp_class = {
	.name        = "infiniband_srp",
	.dev_release = srp_release_dev
};

/**
 * srp_conn_unique() - check whether the connection to a target is unique
 * @host:   SRP host.
 * @target: SRP target port.
 */
static bool srp_conn_unique(struct srp_host *host,
			    struct srp_target_port *target)
{
	struct srp_target_port *t;
	bool ret = false;

	if (target->state == SRP_TARGET_REMOVED)
		goto out;

	ret = true;

	spin_lock(&host->target_lock);
	list_for_each_entry(t, &host->target_list, list) {
		if (t != target &&
		    target->id_ext == t->id_ext &&
		    target->ioc_guid == t->ioc_guid &&
		    target->initiator_ext == t->initiator_ext) {
			ret = false;
			break;
		}
	}
	spin_unlock(&host->target_lock);

out:
	return ret;
}

/*
 * Target ports are added by writing
 *
 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
 *     pkey=<P_Key>,service_id=<service ID>
 *
 * to the add_target sysfs attribute.
 */
enum {
	SRP_OPT_ERR		= 0,
	SRP_OPT_ID_EXT		= 1 << 0,
	SRP_OPT_IOC_GUID	= 1 << 1,
	SRP_OPT_DGID		= 1 << 2,
	SRP_OPT_PKEY		= 1 << 3,
	SRP_OPT_SERVICE_ID	= 1 << 4,
	SRP_OPT_MAX_SECT	= 1 << 5,
	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
	SRP_OPT_IO_CLASS	= 1 << 7,
	SRP_OPT_INITIATOR_EXT	= 1 << 8,
	SRP_OPT_CMD_SG_ENTRIES	= 1 << 9,
	SRP_OPT_ALLOW_EXT_SG	= 1 << 10,
	SRP_OPT_SG_TABLESIZE	= 1 << 11,
	SRP_OPT_COMP_VECTOR	= 1 << 12,
	SRP_OPT_TL_RETRY_COUNT	= 1 << 13,
	SRP_OPT_QUEUE_SIZE	= 1 << 14,
	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
				   SRP_OPT_IOC_GUID	|
				   SRP_OPT_DGID		|
				   SRP_OPT_PKEY		|
				   SRP_OPT_SERVICE_ID),
};

static const match_table_t srp_opt_tokens = {
	{ SRP_OPT_ID_EXT,		"id_ext=%s"		},
	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s"		},
	{ SRP_OPT_DGID,			"dgid=%s"		},
	{ SRP_OPT_PKEY,			"pkey=%x"		},
	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
	{ SRP_OPT_MAX_SECT,		"max_sect=%d"		},
	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d"	},
	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u"	},
	{ SRP_OPT_ALLOW_EXT_SG,		"allow_ext_sg=%u"	},
	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u"	},
	{ SRP_OPT_COMP_VECTOR,		"comp_vector=%u"	},
	{ SRP_OPT_TL_RETRY_COUNT,	"tl_retry_count=%u"	},
	{ SRP_OPT_QUEUE_SIZE,		"queue_size=%d"		},
	{ SRP_OPT_ERR,			NULL			}
};

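/*
 * Parse a comma-separated add_target parameter string into *target.
 * Returns 0 if all mandatory parameters (SRP_OPT_ALL) were present and
 * valid, and a negative error code otherwise.
 */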
static int srp_parse_options(const char *buf, struct srp_target_port *target)
{
	char *options, *sep_opt;
	char *p;
	char dgid[3];
	substring_t args[MAX_OPT_ARGS];
	int opt_mask = 0;
	int token;
	int ret = -EINVAL;
	int i;

	options = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	sep_opt = options;
	while ((p = strsep(&sep_opt, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, srp_opt_tokens, args);
		opt_mask |= token;

		switch (token) {
		case SRP_OPT_ID_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_IOC_GUID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_DGID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(p) != 32) {
				pr_warn("bad dest GID parameter '%s'\n", p);
				kfree(p);
				goto out;
			}

			for (i = 0; i < 16; ++i) {
				strlcpy(dgid, p + i * 2, sizeof(dgid));
				if (sscanf(dgid, "%hhx",
					   &target->orig_dgid.raw[i]) < 1) {
					ret = -EINVAL;
					kfree(p);
					goto out;
				}
			}
			kfree(p);
			break;

		case SRP_OPT_PKEY:
			if (match_hex(args, &token)) {
				pr_warn("bad P_Key parameter '%s'\n", p);
				goto out;
			}
			target->pkey = cpu_to_be16(token);
			break;

		case SRP_OPT_SERVICE_ID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_MAX_SECT:
			if (match_int(args, &token)) {
				pr_warn("bad max sect parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->max_sectors = token;
			break;

		case SRP_OPT_QUEUE_SIZE:
			if (match_int(args, &token) || token < 1) {
				pr_warn("bad queue_size parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->can_queue = token;
			target->queue_size = token + SRP_RSP_SQ_SIZE +
					     SRP_TSK_MGMT_SQ_SIZE;
			if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
				target->scsi_host->cmd_per_lun = token;
			break;

		case SRP_OPT_MAX_CMD_PER_LUN:
			if (match_int(args, &token) || token < 1) {
				pr_warn("bad max cmd_per_lun parameter '%s'\n",
					p);
				goto out;
			}
			target->scsi_host->cmd_per_lun = token;
			break;

		case SRP_OPT_IO_CLASS:
			if (match_hex(args, &token)) {
				pr_warn("bad IO class parameter '%s'\n", p);
				goto out;
			}
			if (token != SRP_REV10_IB_IO_CLASS &&
			    token != SRP_REV16A_IB_IO_CLASS) {
				pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
					token, SRP_REV10_IB_IO_CLASS,
					SRP_REV16A_IB_IO_CLASS);
				goto out;
			}
			target->io_class = token;
			break;

		case SRP_OPT_INITIATOR_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_CMD_SG_ENTRIES:
			if (match_int(args, &token) || token < 1 || token > 255) {
				pr_warn("bad max cmd_sg_entries parameter '%s'\n",
					p);
				goto out;
			}
			target->cmd_sg_cnt = token;
			break;

		case SRP_OPT_ALLOW_EXT_SG:
			if (match_int(args, &token)) {
				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
				goto out;
			}
			target->allow_ext_sg = !!token;
			break;

		case SRP_OPT_SG_TABLESIZE:
			if (match_int(args, &token) || token < 1 ||
			    token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
				pr_warn("bad max sg_tablesize parameter '%s'\n",
					p);
				goto out;
			}
			target->sg_tablesize = token;
			break;

		case SRP_OPT_COMP_VECTOR:
			if (match_int(args, &token) || token < 0) {
				pr_warn("bad comp_vector parameter '%s'\n", p);
				goto out;
			}
			target->comp_vector = token;
			break;

		case SRP_OPT_TL_RETRY_COUNT:
			if (match_int(args, &token) || token < 2 || token > 7) {
				pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
					p);
				goto out;
			}
			target->tl_retry_count = token;
			break;

		default:
			pr_warn("unknown parameter or missing value '%s' in target creation request\n",
				p);
			goto out;
		}
	}

	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
		ret = 0;
	else
		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
			    !(srp_opt_tokens[i].token & opt_mask))
				pr_warn("target creation request is missing parameter '%s'\n",
					srp_opt_tokens[i].pattern);

	if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
	    && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
		pr_warn("cmd_per_lun = %d > queue_size = %d\n",
			target->scsi_host->cmd_per_lun,
			target->scsi_host->can_queue);

out:
	kfree(options);
	return ret;
}

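/*
 * Handler for writes to the add_target sysfs attribute: parses the
 * parameter string, allocates a SCSI host and one or more RDMA channels
 * (spread over NUMA nodes and completion vectors), connects each
 * channel and registers the new target port.
 */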
static ssize_t srp_create_target(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);
	struct Scsi_Host *target_host;
	struct srp_target_port *target;
	struct srp_rdma_ch *ch;
	struct srp_device *srp_dev = host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	int ret, node_idx, node, cpu, i;
	bool multich = false;

	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
	if (!target_host)
		return -ENOMEM;

	target_host->transportt  = ib_srp_transport_template;
	target_host->max_channel = 0;
	target_host->max_id      = 1;
	target_host->max_lun     = SRP_MAX_LUN;
	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;

	target = host_to_target(target_host);

	target->io_class	= SRP_REV16A_IB_IO_CLASS;
	target->scsi_host	= target_host;
	target->srp_host	= host;
	target->lkey		= host->srp_dev->mr->lkey;
	target->rkey		= host->srp_dev->mr->rkey;
	target->cmd_sg_cnt	= cmd_sg_entries;
	target->sg_tablesize	= indirect_sg_entries ? : cmd_sg_entries;
	target->allow_ext_sg	= allow_ext_sg;
	target->tl_retry_count	= 7;
	target->queue_size	= SRP_DEFAULT_QUEUE_SIZE;

	/*
	 * Avoid that the SCSI host can be removed by srp_remove_target()
	 * before this function returns.
	 */
	scsi_host_get(target->scsi_host);

	mutex_lock(&host->add_target_mutex);

	ret = srp_parse_options(buf, target);
	if (ret)
		goto out;

	ret = scsi_init_shared_tag_map(target_host, target_host->can_queue);
	if (ret)
		goto out;

	target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;

	if (!srp_conn_unique(target->srp_host, target)) {
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
			     be64_to_cpu(target->id_ext),
			     be64_to_cpu(target->ioc_guid),
			     be64_to_cpu(target->initiator_ext));
		ret = -EEXIST;
		goto out;
	}

	if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
	    target->cmd_sg_cnt < target->sg_tablesize) {
		pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
		target->sg_tablesize = target->cmd_sg_cnt;
	}

	target_host->sg_tablesize = target->sg_tablesize;
	target->indirect_size = target->sg_tablesize *
				sizeof (struct srp_direct_buf);
	target->max_iu_len = sizeof (struct srp_cmd) +
			     sizeof (struct srp_indirect_buf) +
			     target->cmd_sg_cnt * sizeof (struct srp_direct_buf);

	INIT_WORK(&target->tl_err_work, srp_tl_err_work);
	INIT_WORK(&target->remove_work, srp_remove_work);
	spin_lock_init(&target->lock);
	ret = ib_query_gid(ibdev, host->port, 0, &target->sgid);
	if (ret)
		goto out;

	ret = -ENOMEM;
	target->ch_count = max_t(unsigned, num_online_nodes(),
				 min(ch_count ? :
				     min(4 * num_online_nodes(),
					 ibdev->num_comp_vectors),
				     num_online_cpus()));
	target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
			     GFP_KERNEL);
	if (!target->ch)
		goto out;

	node_idx = 0;
	for_each_online_node(node) {
		const int ch_start = (node_idx * target->ch_count /
				      num_online_nodes());
		const int ch_end = ((node_idx + 1) * target->ch_count /
				    num_online_nodes());
		const int cv_start = (node_idx * ibdev->num_comp_vectors /
				      num_online_nodes() + target->comp_vector)
				     % ibdev->num_comp_vectors;
		const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
				    num_online_nodes() + target->comp_vector)
				   % ibdev->num_comp_vectors;
		int cpu_idx = 0;

		for_each_online_cpu(cpu) {
			if (cpu_to_node(cpu) != node)
				continue;
			if (ch_start + cpu_idx >= ch_end)
				continue;
			ch = &target->ch[ch_start + cpu_idx];
			ch->target = target;
			ch->comp_vector = cv_start == cv_end ? cv_start :
				cv_start + cpu_idx % (cv_end - cv_start);
			spin_lock_init(&ch->lock);
			INIT_LIST_HEAD(&ch->free_tx);
			ret = srp_new_cm_id(ch);
			if (ret)
				goto err_disconnect;

			ret = srp_create_ch_ib(ch);
			if (ret)
				goto err_disconnect;

			ret = srp_alloc_req_data(ch);
			if (ret)
				goto err_disconnect;

			ret = srp_connect_ch(ch, multich);
			if (ret) {
				shost_printk(KERN_ERR, target->scsi_host,
					     PFX "Connection %d/%d failed\n",
					     ch_start + cpu_idx,
					     target->ch_count);
				if (node_idx == 0 && cpu_idx == 0) {
					goto err_disconnect;
				} else {
					srp_free_ch_ib(target, ch);
					srp_free_req_data(target, ch);
					target->ch_count = ch - target->ch;
					break;
				}
			}

			multich = true;
			cpu_idx++;
		}
		node_idx++;
	}

	target->scsi_host->nr_hw_queues = target->ch_count;

	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

	if (target->state != SRP_TARGET_REMOVED) {
		shost_printk(KERN_DEBUG, target->scsi_host, PFX
			     "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
			     be64_to_cpu(target->id_ext),
			     be64_to_cpu(target->ioc_guid),
			     be16_to_cpu(target->pkey),
			     be64_to_cpu(target->service_id),
			     target->sgid.raw, target->orig_dgid.raw);
	}

	ret = count;

out:
	mutex_unlock(&host->add_target_mutex);

	scsi_host_put(target->scsi_host);

	return ret;

err_disconnect:
	srp_disconnect_target(target);

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
		srp_free_req_data(target, ch);
	}

	kfree(target->ch);
	goto out;
}

static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);

static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%s\n", host->srp_dev->dev->name);
}

static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%d\n", host->port);
}

static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);

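/*
 * Allocate a struct srp_host for one port of an IB device and register
 * its class device together with the add_target, ibdev and port
 * attributes.
 */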
static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	spin_lock_init(&host->target_lock);
	init_completion(&host->released);
	mutex_init(&host->add_target_mutex);
	host->srp_dev = device;
	host->port = port;

	host->dev.class = &srp_class;
	host->dev.parent = device->dev->dma_device;
	dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);

	if (device_register(&host->dev))
		goto free_host;
	if (device_create_file(&host->dev, &dev_attr_add_target))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_ibdev))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_port))
		goto err_class;

	return host;

err_class:
	device_unregister(&host->dev);

free_host:
	kfree(host);

	return NULL;
}

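/*
 * IB client callback, invoked for each HCA: queries the device
 * capabilities (FMR and fast registration support, page sizes),
 * allocates a PD and a DMA MR shared by all ports of the device, and
 * adds one srp_host per port.
 */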
static void srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct ib_device_attr *dev_attr;
	struct srp_host *host;
	int mr_page_shift, s, e, p;
	u64 max_pages_per_mr;

	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
	if (!dev_attr)
		return;

	if (ib_query_device(device, dev_attr)) {
		pr_warn("Query device failed for %s\n", device->name);
		goto free_attr;
	}

	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
	if (!srp_dev)
		goto free_attr;

	srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
			    device->map_phys_fmr && device->unmap_fmr);
	srp_dev->has_fr = (dev_attr->device_cap_flags &
			   IB_DEVICE_MEM_MGT_EXTENSIONS);
	if (!srp_dev->has_fmr && !srp_dev->has_fr)
		dev_warn(&device->dev, "neither FMR nor FR is supported\n");

	srp_dev->use_fast_reg = (srp_dev->has_fr &&
				 (!srp_dev->has_fmr || prefer_fr));

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 4096 bytes. We're unlikely to build large sglists
	 * out of smaller entries.
	 */
	mr_page_shift		= max(12, ffs(dev_attr->page_size_cap) - 1);
	srp_dev->mr_page_size	= 1 << mr_page_shift;
	srp_dev->mr_page_mask	= ~((u64) srp_dev->mr_page_size - 1);
	max_pages_per_mr	= dev_attr->max_mr_size;
	do_div(max_pages_per_mr, srp_dev->mr_page_size);
	srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
					  max_pages_per_mr);
	if (srp_dev->use_fast_reg) {
		srp_dev->max_pages_per_mr =
			min_t(u32, srp_dev->max_pages_per_mr,
			      dev_attr->max_fast_reg_page_list_len);
	}
	srp_dev->mr_max_size	= srp_dev->mr_page_size *
				  srp_dev->max_pages_per_mr;
	pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
		 device->name, mr_page_shift, dev_attr->max_mr_size,
		 dev_attr->max_fast_reg_page_list_len,
		 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);

	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd  = ib_alloc_pd(device);
	if (IS_ERR(srp_dev->pd))
		goto free_dev;

	srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
				    IB_ACCESS_LOCAL_WRITE |
				    IB_ACCESS_REMOTE_READ |
				    IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(srp_dev->mr))
		goto err_pd;

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);

	goto free_attr;

err_pd:
	ib_dealloc_pd(srp_dev->pd);

free_dev:
	kfree(srp_dev);

free_attr:
	kfree(dev_attr);
}

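/*
 * IB client removal callback: unregisters all srp_hosts of the device,
 * schedules removal of every target port and waits for the removal
 * work to finish before freeing the MR, PD and device structure.
 */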
static void srp_remove_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	struct srp_target_port *target;

	srp_dev = ib_get_client_data(device, &srp_client);
	if (!srp_dev)
		return;

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		device_unregister(&host->dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Remove all target ports.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list)
			srp_queue_remove_work(target);
		spin_unlock(&host->target_lock);

		/*
		 * Wait for tl_err and target port removal tasks.
		 */
		flush_workqueue(system_long_wq);
		flush_workqueue(srp_remove_wq);

		kfree(host);
	}

	ib_dereg_mr(srp_dev->mr);
	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}

static struct srp_function_template ib_srp_transport_functions = {
	.has_rport_state	 = true,
	.reset_timer_if_blocked	 = true,
	.reconnect_delay	 = &srp_reconnect_delay,
	.fast_io_fail_tmo	 = &srp_fast_io_fail_tmo,
	.dev_loss_tmo		 = &srp_dev_loss_tmo,
	.reconnect		 = srp_rport_reconnect,
	.rport_delete		 = srp_rport_delete,
	.terminate_rport_io	 = srp_terminate_io,
};

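/*
 * Module initialization: validate the scatter/gather module parameters,
 * then register the removal workqueue, SRP transport template, sysfs
 * class, SA client and IB client, unwinding in reverse order on error.
 */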
static int __init srp_init_module(void)
{
	int ret;

	BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));

	if (srp_sg_tablesize) {
		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
		if (!cmd_sg_entries)
			cmd_sg_entries = srp_sg_tablesize;
	}

	if (!cmd_sg_entries)
		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;

	if (cmd_sg_entries > 255) {
		pr_warn("Clamping cmd_sg_entries to 255\n");
		cmd_sg_entries = 255;
	}

	if (!indirect_sg_entries)
		indirect_sg_entries = cmd_sg_entries;
	else if (indirect_sg_entries < cmd_sg_entries) {
		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
			cmd_sg_entries);
		indirect_sg_entries = cmd_sg_entries;
	}

	srp_remove_wq = create_workqueue("srp_remove");
	if (!srp_remove_wq) {
		ret = -ENOMEM;
		goto out;
	}

	ret = -ENOMEM;
	ib_srp_transport_template =
		srp_attach_transport(&ib_srp_transport_functions);
	if (!ib_srp_transport_template)
		goto destroy_wq;

	ret = class_register(&srp_class);
	if (ret) {
		pr_err("couldn't register class infiniband_srp\n");
		goto release_tr;
	}

	ib_sa_register_client(&srp_sa_client);

	ret = ib_register_client(&srp_client);
	if (ret) {
		pr_err("couldn't register IB client\n");
		goto unreg_sa;
	}

out:
	return ret;

unreg_sa:
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);

release_tr:
	srp_release_transport(ib_srp_transport_template);

destroy_wq:
	destroy_workqueue(srp_remove_wq);
	goto out;
}

static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);
	srp_release_transport(ib_srp_transport_template);
	destroy_workqueue(srp_remove_wq);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);