/*
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <rdma/ib_cache.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME        "ib_srp"
#define PFX             DRV_NAME ": "
#define DRV_VERSION     "1.0"
#define DRV_RELDATE     "July 1, 2013"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
                   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool prefer_fr;
static bool register_always;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
                 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
                 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
                 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
                 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(prefer_fr, bool, 0444);
MODULE_PARM_DESC(prefer_fr,
                 "Whether to use fast registration if both FMR and fast registration are supported");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
                 "Use memory registration even for contiguous memory regions");

static struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
                S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
                S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
                 "Number of seconds between the observation of a transport"
                 " layer error and failing all I/O. \"off\" means that this"
                 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
                S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
                 "Maximum number of seconds that the SRP transport should"
                 " insulate transport layer errors. After this time has been"
                 " exceeded the SCSI host is removed. Should be"
                 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
                 " if fast_io_fail_tmo has not been set. \"off\" means that"
                 " this functionality is disabled.");

static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
                 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr);
static void srp_send_completion(struct ib_cq *cq, void *ch_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
        .name   = "srp",
        .add    = srp_add_one,
        .remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

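/*
 * Note: the reconnect_delay, fast_io_fail_tmo and dev_loss_tmo parameters
 * above share the get/set handlers below. A negative value in the backing
 * variable encodes "off"; srp_tmo_valid() cross-checks the three values
 * against each other before a new value is accepted.
 */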
static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
        int tmo = *(int *)kp->arg;

        if (tmo >= 0)
                return sprintf(buffer, "%d", tmo);
        else
                return sprintf(buffer, "off");
}

static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
        int tmo, res;

        if (strncmp(val, "off", 3) != 0) {
                res = kstrtoint(val, 0, &tmo);
                if (res)
                        goto out;
        } else {
                tmo = -1;
        }
        if (kp->arg == &srp_reconnect_delay)
                res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
                                    srp_dev_loss_tmo);
        else if (kp->arg == &srp_fast_io_fail_tmo)
                res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
        else
                res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
                                    tmo);
        if (res)
                goto out;
        *(int *)kp->arg = tmo;

out:
        return res;
}

static struct kernel_param_ops srp_tmo_ops = {
        .get = srp_tmo_get,
        .set = srp_tmo_set,
};

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
        return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
        return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
        static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
        static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

        return topspin_workarounds &&
                (!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
                 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

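/*
 * Allocate an SRP information unit (IU): a message buffer that is
 * DMA-mapped once here and then reused for transfers until srp_free_iu().
 */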
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
                                   gfp_t gfp_mask,
                                   enum dma_data_direction direction)
{
        struct srp_iu *iu;

        iu = kmalloc(sizeof *iu, gfp_mask);
        if (!iu)
                goto out;

        iu->buf = kzalloc(size, gfp_mask);
        if (!iu->buf)
                goto out_free_iu;

        iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
                                    direction);
        if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
                goto out_free_buf;

        iu->size      = size;
        iu->direction = direction;

        return iu;

out_free_buf:
        kfree(iu->buf);
out_free_iu:
        kfree(iu);
out:
        return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
        if (!iu)
                return;

        ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
                            iu->direction);
        kfree(iu->buf);
        kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
        pr_debug("QP event %s (%d)\n",
                 ib_event_msg(event->event), event->event);
}

static int srp_init_qp(struct srp_target_port *target,
                       struct ib_qp *qp)
{
        struct ib_qp_attr *attr;
        int ret;

        attr = kmalloc(sizeof *attr, GFP_KERNEL);
        if (!attr)
                return -ENOMEM;

        ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
                                  target->srp_host->port,
                                  be16_to_cpu(target->pkey),
                                  &attr->pkey_index);
        if (ret)
                goto out;

        attr->qp_state        = IB_QPS_INIT;
        attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
                                 IB_ACCESS_REMOTE_WRITE);
        attr->port_num        = target->srp_host->port;

        ret = ib_modify_qp(qp, attr,
                           IB_QP_STATE        |
                           IB_QP_PKEY_INDEX   |
                           IB_QP_ACCESS_FLAGS |
                           IB_QP_PORT);

out:
        kfree(attr);
        return ret;
}

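/*
 * Replace the connection manager ID of @ch, destroying any previous one,
 * and reset the path record fields from the target port parameters.
 */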
static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
        struct srp_target_port *target = ch->target;
        struct ib_cm_id *new_cm_id;

        new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
                                    srp_cm_handler, ch);
        if (IS_ERR(new_cm_id))
                return PTR_ERR(new_cm_id);

        if (ch->cm_id)
                ib_destroy_cm_id(ch->cm_id);
        ch->cm_id = new_cm_id;
        ch->path.sgid = target->sgid;
        ch->path.dgid = target->orig_dgid;
        ch->path.pkey = target->pkey;
        ch->path.service_id = target->service_id;

        return 0;
}

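/*
 * The FMR pool is sized to the SCSI host queue depth; a flush of dirty
 * (unmapped but not yet invalidated) entries is triggered once a quarter
 * of the pool is dirty.
 */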
static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
{
        struct srp_device *dev = target->srp_host->srp_dev;
        struct ib_fmr_pool_param fmr_param;

        memset(&fmr_param, 0, sizeof(fmr_param));
        fmr_param.pool_size         = target->scsi_host->can_queue;
        fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
        fmr_param.cache             = 1;
        fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
        fmr_param.page_shift        = ilog2(dev->mr_page_size);
        fmr_param.access            = (IB_ACCESS_LOCAL_WRITE |
                                       IB_ACCESS_REMOTE_WRITE |
                                       IB_ACCESS_REMOTE_READ);

        return ib_create_fmr_pool(dev->pd, &fmr_param);
}

/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
        int i;
        struct srp_fr_desc *d;

        if (!pool)
                return;

        for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
                if (d->frpl)
                        ib_free_fast_reg_page_list(d->frpl);
                if (d->mr)
                        ib_dereg_mr(d->mr);
        }
        kfree(pool);
}

/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
                                              struct ib_pd *pd, int pool_size,
                                              int max_page_list_len)
{
        struct srp_fr_pool *pool;
        struct srp_fr_desc *d;
        struct ib_mr *mr;
        struct ib_fast_reg_page_list *frpl;
        int i, ret = -EINVAL;

        if (pool_size <= 0)
                goto err;
        ret = -ENOMEM;
        pool = kzalloc(sizeof(struct srp_fr_pool) +
                       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
        if (!pool)
                goto err;
        pool->size = pool_size;
        pool->max_page_list_len = max_page_list_len;
        spin_lock_init(&pool->lock);
        INIT_LIST_HEAD(&pool->free_list);

        for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
                mr = ib_alloc_fast_reg_mr(pd, max_page_list_len);
                if (IS_ERR(mr)) {
                        ret = PTR_ERR(mr);
                        goto destroy_pool;
                }
                d->mr = mr;
                frpl = ib_alloc_fast_reg_page_list(device, max_page_list_len);
                if (IS_ERR(frpl)) {
                        ret = PTR_ERR(frpl);
                        goto destroy_pool;
                }
                d->frpl = frpl;
                list_add_tail(&d->entry, &pool->free_list);
        }

out:
        return pool;

destroy_pool:
        srp_destroy_fr_pool(pool);

err:
        pool = ERR_PTR(ret);
        goto out;
}

/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
        struct srp_fr_desc *d = NULL;
        unsigned long flags;

        spin_lock_irqsave(&pool->lock, flags);
        if (!list_empty(&pool->free_list)) {
                d = list_first_entry(&pool->free_list, typeof(*d), entry);
                list_del(&d->entry);
        }
        spin_unlock_irqrestore(&pool->lock, flags);

        return d;
}

/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
                            int n)
{
        unsigned long flags;
        int i;

        spin_lock_irqsave(&pool->lock, flags);
        for (i = 0; i < n; i++)
                list_add(&desc[i]->entry, &pool->free_list);
        spin_unlock_irqrestore(&pool->lock, flags);
}

static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
        struct srp_device *dev = target->srp_host->srp_dev;

        return srp_create_fr_pool(dev->dev, dev->pd,
                                  target->scsi_host->can_queue,
                                  dev->max_pages_per_mr);
}

/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @ch: SRP RDMA channel.
 *
 * Change a queue pair into the error state and wait until all receive
 * completions have been processed before destroying it. This prevents
 * the receive completion handler from accessing the queue pair while it
 * is being destroyed.
 */
static void srp_destroy_qp(struct srp_rdma_ch *ch)
{
        struct srp_target_port *target = ch->target;
        static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
        static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID };
        struct ib_recv_wr *bad_wr;
        int ret;

        /* Destroying a QP and reusing ch->done is only safe if not connected */
        WARN_ON_ONCE(target->connected);

        ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
        WARN_ONCE(ret, "ib_modify_qp() returned %d\n", ret);
        if (ret)
                goto out;

        init_completion(&ch->done);
        ret = ib_post_recv(ch->qp, &wr, &bad_wr);
        WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
        if (ret == 0)
                wait_for_completion(&ch->done);

out:
        ib_destroy_qp(ch->qp);
}

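/*
 * Create the completion queues and the queue pair for @ch, replacing any
 * existing ones. With fast registration enabled the send queue is sized to
 * twice the queue depth so that the registration work requests posted by
 * srp_map_finish_fr() have room next to the regular send requests.
 */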
static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
        struct srp_target_port *target = ch->target;
        struct srp_device *dev = target->srp_host->srp_dev;
        struct ib_qp_init_attr *init_attr;
        struct ib_cq *recv_cq, *send_cq;
        struct ib_qp *qp;
        struct ib_fmr_pool *fmr_pool = NULL;
        struct srp_fr_pool *fr_pool = NULL;
        const int m = 1 + dev->use_fast_reg;
        int ret;

        init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
        if (!init_attr)
                return -ENOMEM;

        /* + 1 for SRP_LAST_WR_ID */
        recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
                               target->queue_size + 1, ch->comp_vector);
        if (IS_ERR(recv_cq)) {
                ret = PTR_ERR(recv_cq);
                goto err;
        }

        send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
                               m * target->queue_size, ch->comp_vector);
        if (IS_ERR(send_cq)) {
                ret = PTR_ERR(send_cq);
                goto err_recv_cq;
        }

        ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);

        init_attr->event_handler    = srp_qp_event;
        init_attr->cap.max_send_wr  = m * target->queue_size;
        init_attr->cap.max_recv_wr  = target->queue_size + 1;
        init_attr->cap.max_recv_sge = 1;
        init_attr->cap.max_send_sge = 1;
        init_attr->sq_sig_type      = IB_SIGNAL_REQ_WR;
        init_attr->qp_type          = IB_QPT_RC;
        init_attr->send_cq          = send_cq;
        init_attr->recv_cq          = recv_cq;

        qp = ib_create_qp(dev->pd, init_attr);
        if (IS_ERR(qp)) {
                ret = PTR_ERR(qp);
                goto err_send_cq;
        }

        ret = srp_init_qp(target, qp);
        if (ret)
                goto err_qp;

        if (dev->use_fast_reg && dev->has_fr) {
                fr_pool = srp_alloc_fr_pool(target);
                if (IS_ERR(fr_pool)) {
                        ret = PTR_ERR(fr_pool);
                        shost_printk(KERN_WARNING, target->scsi_host, PFX
                                     "FR pool allocation failed (%d)\n", ret);
                        goto err_qp;
                }
                if (ch->fr_pool)
                        srp_destroy_fr_pool(ch->fr_pool);
                ch->fr_pool = fr_pool;
        } else if (!dev->use_fast_reg && dev->has_fmr) {
                fmr_pool = srp_alloc_fmr_pool(target);
                if (IS_ERR(fmr_pool)) {
                        ret = PTR_ERR(fmr_pool);
                        shost_printk(KERN_WARNING, target->scsi_host, PFX
                                     "FMR pool allocation failed (%d)\n", ret);
                        goto err_qp;
                }
                if (ch->fmr_pool)
                        ib_destroy_fmr_pool(ch->fmr_pool);
                ch->fmr_pool = fmr_pool;
        }

        if (ch->qp)
                srp_destroy_qp(ch);
        if (ch->recv_cq)
                ib_destroy_cq(ch->recv_cq);
        if (ch->send_cq)
                ib_destroy_cq(ch->send_cq);

        ch->qp = qp;
        ch->recv_cq = recv_cq;
        ch->send_cq = send_cq;

        kfree(init_attr);
        return 0;

err_qp:
        ib_destroy_qp(qp);

err_send_cq:
        ib_destroy_cq(send_cq);

err_recv_cq:
        ib_destroy_cq(recv_cq);

err:
        kfree(init_attr);
        return ret;
}

/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
                           struct srp_rdma_ch *ch)
{
        struct srp_device *dev = target->srp_host->srp_dev;
        int i;

        if (!ch->target)
                return;

        if (ch->cm_id) {
                ib_destroy_cm_id(ch->cm_id);
                ch->cm_id = NULL;
        }

        /* If srp_new_cm_id() succeeded but srp_create_ch_ib() did not, return. */
        if (!ch->qp)
                return;

        if (dev->use_fast_reg) {
                if (ch->fr_pool)
                        srp_destroy_fr_pool(ch->fr_pool);
        } else {
                if (ch->fmr_pool)
                        ib_destroy_fmr_pool(ch->fmr_pool);
        }
        srp_destroy_qp(ch);
        ib_destroy_cq(ch->send_cq);
        ib_destroy_cq(ch->recv_cq);

        /*
         * Prevent the SCSI error handler from using this channel after it
         * has been freed: the error handler may continue attempting
         * recovery actions after scsi_remove_host() has returned.
         */
        ch->target = NULL;

        ch->qp = NULL;
        ch->send_cq = ch->recv_cq = NULL;

        if (ch->rx_ring) {
                for (i = 0; i < target->queue_size; ++i)
                        srp_free_iu(target->srp_host, ch->rx_ring[i]);
                kfree(ch->rx_ring);
                ch->rx_ring = NULL;
        }
        if (ch->tx_ring) {
                for (i = 0; i < target->queue_size; ++i)
                        srp_free_iu(target->srp_host, ch->tx_ring[i]);
                kfree(ch->tx_ring);
                ch->tx_ring = NULL;
        }
}

static void srp_path_rec_completion(int status,
                                    struct ib_sa_path_rec *pathrec,
                                    void *ch_ptr)
{
        struct srp_rdma_ch *ch = ch_ptr;
        struct srp_target_port *target = ch->target;

        ch->status = status;
        if (status)
                shost_printk(KERN_ERR, target->scsi_host,
                             PFX "Got failed path rec status %d\n", status);
        else
                ch->path = *pathrec;
        complete(&ch->done);
}

static int srp_lookup_path(struct srp_rdma_ch *ch)
{
        struct srp_target_port *target = ch->target;
        int ret;

        ch->path.numb_path = 1;

        init_completion(&ch->done);

        ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
                                               target->srp_host->srp_dev->dev,
                                               target->srp_host->port,
                                               &ch->path,
                                               IB_SA_PATH_REC_SERVICE_ID |
                                               IB_SA_PATH_REC_DGID       |
                                               IB_SA_PATH_REC_SGID       |
                                               IB_SA_PATH_REC_NUMB_PATH  |
                                               IB_SA_PATH_REC_PKEY,
                                               SRP_PATH_REC_TIMEOUT_MS,
                                               GFP_KERNEL,
                                               srp_path_rec_completion,
                                               ch, &ch->path_query);
        if (ch->path_query_id < 0)
                return ch->path_query_id;

        ret = wait_for_completion_interruptible(&ch->done);
        if (ret < 0)
                return ret;

        if (ch->status < 0)
                shost_printk(KERN_WARNING, target->scsi_host,
                             PFX "Path record query failed\n");

        return ch->status;
}

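/*
 * Build and send an SRP_LOGIN_REQ. When @multich is true the request sets
 * SRP_MULTICHAN_MULTI, which (per the SRP specification) asks the target to
 * keep any existing RDMA channels for this initiator alive instead of
 * terminating them; logins for second and subsequent channels rely on this.
 */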
static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
{
        struct srp_target_port *target = ch->target;
        struct {
                struct ib_cm_req_param param;
                struct srp_login_req   priv;
        } *req = NULL;
        int status;

        req = kzalloc(sizeof *req, GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        req->param.primary_path     = &ch->path;
        req->param.alternate_path   = NULL;
        req->param.service_id       = target->service_id;
        req->param.qp_num           = ch->qp->qp_num;
        req->param.qp_type          = ch->qp->qp_type;
        req->param.private_data     = &req->priv;
        req->param.private_data_len = sizeof req->priv;
        req->param.flow_control     = 1;

        get_random_bytes(&req->param.starting_psn, 4);
        req->param.starting_psn    &= 0xffffff;

        /*
         * Pick some arbitrary defaults here; we could make these
         * module parameters if anyone cared about setting them.
         */
        req->param.responder_resources        = 4;
        req->param.remote_cm_response_timeout = 20;
        req->param.local_cm_response_timeout  = 20;
        req->param.retry_count                = target->tl_retry_count;
        req->param.rnr_retry_count            = 7;
        req->param.max_cm_retries             = 15;

        req->priv.opcode        = SRP_LOGIN_REQ;
        req->priv.tag           = 0;
        req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
        req->priv.req_buf_fmt   = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
                                              SRP_BUF_FORMAT_INDIRECT);
        req->priv.req_flags     = (multich ? SRP_MULTICHAN_MULTI :
                                   SRP_MULTICHAN_SINGLE);
        /*
         * In the published SRP specification (draft rev. 16a), the
         * port identifier format is 8 bytes of ID extension followed
         * by 8 bytes of GUID. Older drafts put the two halves in the
         * opposite order, so that the GUID comes first.
         *
         * Targets conforming to these obsolete drafts can be
         * recognized by the I/O Class they report.
         */
        if (target->io_class == SRP_REV10_IB_IO_CLASS) {
                memcpy(req->priv.initiator_port_id,
                       &target->sgid.global.interface_id, 8);
                memcpy(req->priv.initiator_port_id + 8,
                       &target->initiator_ext, 8);
                memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
                memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
        } else {
                memcpy(req->priv.initiator_port_id,
                       &target->initiator_ext, 8);
                memcpy(req->priv.initiator_port_id + 8,
                       &target->sgid.global.interface_id, 8);
                memcpy(req->priv.target_port_id,     &target->id_ext, 8);
                memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
        }

        /*
         * Topspin/Cisco SRP targets will reject our login unless we
         * zero out the first 8 bytes of our initiator port ID and set
         * the second 8 bytes to the local node GUID.
         */
        if (srp_target_is_topspin(target)) {
                shost_printk(KERN_DEBUG, target->scsi_host,
                             PFX "Topspin/Cisco initiator port ID workaround "
                             "activated for target GUID %016llx\n",
                             (unsigned long long) be64_to_cpu(target->ioc_guid));
                memset(req->priv.initiator_port_id, 0, 8);
                memcpy(req->priv.initiator_port_id + 8,
                       &target->srp_host->srp_dev->dev->node_guid, 8);
        }

        status = ib_send_cm_req(ch->cm_id, &req->param);

        kfree(req);

        return status;
}

static bool srp_queue_remove_work(struct srp_target_port *target)
{
        bool changed = false;

        spin_lock_irq(&target->lock);
        if (target->state != SRP_TARGET_REMOVED) {
                target->state = SRP_TARGET_REMOVED;
                changed = true;
        }
        spin_unlock_irq(&target->lock);

        if (changed)
                queue_work(srp_remove_wq, &target->remove_work);

        return changed;
}

static bool srp_change_conn_state(struct srp_target_port *target,
                                  bool connected)
{
        bool changed = false;

        spin_lock_irq(&target->lock);
        if (target->connected != connected) {
                target->connected = connected;
                changed = true;
        }
        spin_unlock_irq(&target->lock);

        return changed;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
        struct srp_rdma_ch *ch;
        int i;

        if (srp_change_conn_state(target, false)) {
                /* XXX should send SRP_I_LOGOUT request */

                for (i = 0; i < target->ch_count; i++) {
                        ch = &target->ch[i];
                        if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
                                shost_printk(KERN_DEBUG, target->scsi_host,
                                             PFX "Sending CM DREQ failed\n");
                        }
                }
        }
}

static void srp_free_req_data(struct srp_target_port *target,
                              struct srp_rdma_ch *ch)
{
        struct srp_device *dev = target->srp_host->srp_dev;
        struct ib_device *ibdev = dev->dev;
        struct srp_request *req;
        int i;

        if (!ch->target || !ch->req_ring)
                return;

        for (i = 0; i < target->req_ring_size; ++i) {
                req = &ch->req_ring[i];
                if (dev->use_fast_reg)
                        kfree(req->fr_list);
                else
                        kfree(req->fmr_list);
                kfree(req->map_page);
                if (req->indirect_dma_addr) {
                        ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
                                            target->indirect_size,
                                            DMA_TO_DEVICE);
                }
                kfree(req->indirect_desc);
        }

        kfree(ch->req_ring);
        ch->req_ring = NULL;
}

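/*
 * Allocate the request ring of @ch, including the per-request memory
 * registration descriptor lists and a DMA-mapped buffer for the indirect
 * data descriptors of each request.
 */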
static int srp_alloc_req_data(struct srp_rdma_ch *ch)
{
        struct srp_target_port *target = ch->target;
        struct srp_device *srp_dev = target->srp_host->srp_dev;
        struct ib_device *ibdev = srp_dev->dev;
        struct srp_request *req;
        void *mr_list;
        dma_addr_t dma_addr;
        int i, ret = -ENOMEM;

        ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
                               GFP_KERNEL);
        if (!ch->req_ring)
                goto out;

        for (i = 0; i < target->req_ring_size; ++i) {
                req = &ch->req_ring[i];
                mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
                                  GFP_KERNEL);
                if (!mr_list)
                        goto out;
                if (srp_dev->use_fast_reg)
                        req->fr_list = mr_list;
                else
                        req->fmr_list = mr_list;
                req->map_page = kmalloc(srp_dev->max_pages_per_mr *
                                        sizeof(void *), GFP_KERNEL);
                if (!req->map_page)
                        goto out;
                req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
                if (!req->indirect_desc)
                        goto out;

                dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
                                             target->indirect_size,
                                             DMA_TO_DEVICE);
                if (ib_dma_mapping_error(ibdev, dma_addr))
                        goto out;

                req->indirect_dma_addr = dma_addr;
        }
        ret = 0;

out:
        return ret;
}

/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template that did not exist
 * before this function was invoked will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
        struct device_attribute **attr;

        for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
                device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_target(struct srp_target_port *target)
{
        struct srp_rdma_ch *ch;
        int i;

        WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

        srp_del_scsi_host_attr(target->scsi_host);
        srp_rport_get(target->rport);
        srp_remove_host(target->scsi_host);
        scsi_remove_host(target->scsi_host);
        srp_stop_rport_timers(target->rport);
        srp_disconnect_target(target);
        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
                srp_free_ch_ib(target, ch);
        }
        cancel_work_sync(&target->tl_err_work);
        srp_rport_put(target->rport);
        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
                srp_free_req_data(target, ch);
        }
        kfree(target->ch);
        target->ch = NULL;

        spin_lock(&target->srp_host->target_lock);
        list_del(&target->list);
        spin_unlock(&target->srp_host->target_lock);

        scsi_host_put(target->scsi_host);
}

static void srp_remove_work(struct work_struct *work)
{
        struct srp_target_port *target =
                container_of(work, struct srp_target_port, remove_work);

        WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

        srp_remove_target(target);
}

static void srp_rport_delete(struct srp_rport *rport)
{
        struct srp_target_port *target = rport->lld_data;

        srp_queue_remove_work(target);
}

static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
{
        struct srp_target_port *target = ch->target;
        int ret;

        WARN_ON_ONCE(!multich && target->connected);

        target->qp_in_error = false;

        ret = srp_lookup_path(ch);
        if (ret)
                return ret;

        while (1) {
                init_completion(&ch->done);
                ret = srp_send_req(ch, multich);
                if (ret)
                        return ret;
                ret = wait_for_completion_interruptible(&ch->done);
                if (ret < 0)
                        return ret;

                /*
                 * The CM event handling code will set status to
                 * SRP_PORT_REDIRECT if we get a port redirect REJ
                 * back, or SRP_DLID_REDIRECT if we get a lid/qp
                 * redirect REJ back.
                 */
                switch (ch->status) {
                case 0:
                        srp_change_conn_state(target, true);
                        return 0;

                case SRP_PORT_REDIRECT:
                        ret = srp_lookup_path(ch);
                        if (ret)
                                return ret;
                        break;

                case SRP_DLID_REDIRECT:
                        break;

                case SRP_STALE_CONN:
                        shost_printk(KERN_ERR, target->scsi_host, PFX
                                     "giving up on stale connection\n");
                        ch->status = -ECONNRESET;
                        return ch->status;

                default:
                        return ch->status;
                }
        }
}

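/* Post a local invalidation work request for @rkey on the channel QP. */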
static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
{
        struct ib_send_wr *bad_wr;
        struct ib_send_wr wr = {
                .opcode             = IB_WR_LOCAL_INV,
                .wr_id              = LOCAL_INV_WR_ID_MASK,
                .next               = NULL,
                .num_sge            = 0,
                .send_flags         = 0,
                .ex.invalidate_rkey = rkey,
        };

        return ib_post_send(ch->qp, &wr, &bad_wr);
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
                           struct srp_rdma_ch *ch,
                           struct srp_request *req)
{
        struct srp_target_port *target = ch->target;
        struct srp_device *dev = target->srp_host->srp_dev;
        struct ib_device *ibdev = dev->dev;
        int i, res;

        if (!scsi_sglist(scmnd) ||
            (scmnd->sc_data_direction != DMA_TO_DEVICE &&
             scmnd->sc_data_direction != DMA_FROM_DEVICE))
                return;

        if (dev->use_fast_reg) {
                struct srp_fr_desc **pfr;

                for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
                        res = srp_inv_rkey(ch, (*pfr)->mr->rkey);
                        if (res < 0) {
                                shost_printk(KERN_ERR, target->scsi_host, PFX
                                             "Queueing INV WR for rkey %#x failed (%d)\n",
                                             (*pfr)->mr->rkey, res);
                                queue_work(system_long_wq,
                                           &target->tl_err_work);
                        }
                }
                if (req->nmdesc)
                        srp_fr_pool_put(ch->fr_pool, req->fr_list,
                                        req->nmdesc);
        } else {
                struct ib_pool_fmr **pfmr;

                for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
                        ib_fmr_pool_unmap(*pfmr);
        }

        ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
                        scmnd->sc_data_direction);
}

/**
 * srp_claim_req() - Take ownership of the scmnd associated with a request.
 * @ch:    SRP RDMA channel.
 * @req:   SRP request.
 * @sdev:  If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
                                       struct srp_request *req,
                                       struct scsi_device *sdev,
                                       struct scsi_cmnd *scmnd)
{
        unsigned long flags;

        spin_lock_irqsave(&ch->lock, flags);
        if (req->scmnd &&
            (!sdev || req->scmnd->device == sdev) &&
            (!scmnd || req->scmnd == scmnd)) {
                scmnd = req->scmnd;
                req->scmnd = NULL;
        } else {
                scmnd = NULL;
        }
        spin_unlock_irqrestore(&ch->lock, flags);

        return scmnd;
}

/**
 * srp_free_req() - Unmap data and adjust the channel request limit.
 * @ch:            SRP RDMA channel.
 * @req:           Request to be freed.
 * @scmnd:         SCSI command associated with @req.
 * @req_lim_delta: Amount to be added to @ch->req_lim.
 */
static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
                         struct scsi_cmnd *scmnd, s32 req_lim_delta)
{
        unsigned long flags;

        srp_unmap_data(scmnd, ch, req);

        spin_lock_irqsave(&ch->lock, flags);
        ch->req_lim += req_lim_delta;
        spin_unlock_irqrestore(&ch->lock, flags);
}

static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
                           struct scsi_device *sdev, int result)
{
        struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);

        if (scmnd) {
                srp_free_req(ch, req, scmnd, 0);
                scmnd->result = result;
                scmnd->scsi_done(scmnd);
        }
}

static void srp_terminate_io(struct srp_rport *rport)
{
        struct srp_target_port *target = rport->lld_data;
        struct srp_rdma_ch *ch;
        struct Scsi_Host *shost = target->scsi_host;
        struct scsi_device *sdev;
        int i, j;

        /*
         * Invoking srp_terminate_io() while srp_queuecommand() is running
         * is not safe. Hence the warning statement below.
         */
        shost_for_each_device(sdev, shost)
                WARN_ON_ONCE(sdev->request_queue->request_fn_active);

        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];

                for (j = 0; j < target->req_ring_size; ++j) {
                        struct srp_request *req = &ch->req_ring[j];

                        srp_finish_req(ch, req, NULL,
                                       DID_TRANSPORT_FAILFAST << 16);
                }
        }
}

/*
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to ensure that is not to call this function
 * directly but to call srp_reconnect_rport() instead, since that function
 * serializes calls of this function via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
 */
static int srp_rport_reconnect(struct srp_rport *rport)
{
        struct srp_target_port *target = rport->lld_data;
        struct srp_rdma_ch *ch;
        int i, j, ret = 0;
        bool multich = false;

        srp_disconnect_target(target);

        if (target->state == SRP_TARGET_SCANNING)
                return -ENODEV;

        /*
         * Now get a new local CM ID so that we avoid confusing the target in
         * case things are really fouled up. Doing so also ensures that all CM
         * callbacks will have finished before a new QP is allocated.
         */
        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
                if (!ch->target)
                        break;
                ret += srp_new_cm_id(ch);
        }
        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
                if (!ch->target)
                        break;
                for (j = 0; j < target->req_ring_size; ++j) {
                        struct srp_request *req = &ch->req_ring[j];

                        srp_finish_req(ch, req, NULL, DID_RESET << 16);
                }
        }
        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
                if (!ch->target)
                        break;
                /*
                 * Whether or not creating a new CM ID succeeded, create a new
                 * QP. This guarantees that all completion callback function
                 * invocations have finished before request resetting starts.
                 */
                ret += srp_create_ch_ib(ch);

                INIT_LIST_HEAD(&ch->free_tx);
                for (j = 0; j < target->queue_size; ++j)
                        list_add(&ch->tx_ring[j]->list, &ch->free_tx);
        }
        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
                if (ret || !ch->target) {
                        if (i > 1)
                                ret = 0;
                        break;
                }
                ret = srp_connect_ch(ch, multich);
                multich = true;
        }

        if (ret == 0)
                shost_printk(KERN_INFO, target->scsi_host,
                             PFX "reconnect succeeded\n");

        return ret;
}

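/*
 * Append one SRP direct buffer descriptor (address, rkey, length) to the
 * descriptor list tracked in @state.
 */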
static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
                         unsigned int dma_len, u32 rkey)
{
        struct srp_direct_buf *desc = state->desc;

        desc->va = cpu_to_be64(dma_addr);
        desc->key = cpu_to_be32(rkey);
        desc->len = cpu_to_be32(dma_len);

        state->total_len += dma_len;
        state->desc++;
        state->ndesc++;
}

static int srp_map_finish_fmr(struct srp_map_state *state,
                              struct srp_rdma_ch *ch)
{
        struct ib_pool_fmr *fmr;
        u64 io_addr = 0;

        fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
                                   state->npages, io_addr);
        if (IS_ERR(fmr))
                return PTR_ERR(fmr);

        *state->next_fmr++ = fmr;
        state->nmdesc++;

        srp_map_desc(state, 0, state->dma_len, fmr->fmr->rkey);

        return 0;
}

Bart Van Assche5cfb1782014-05-20 15:08:34 +02001298static int srp_map_finish_fr(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001299 struct srp_rdma_ch *ch)
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001300{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001301 struct srp_target_port *target = ch->target;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001302 struct srp_device *dev = target->srp_host->srp_dev;
1303 struct ib_send_wr *bad_wr;
1304 struct ib_send_wr wr;
1305 struct srp_fr_desc *desc;
1306 u32 rkey;
1307
Bart Van Assche509c07b2014-10-30 14:48:30 +01001308 desc = srp_fr_pool_get(ch->fr_pool);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001309 if (!desc)
1310 return -ENOMEM;
1311
1312 rkey = ib_inc_rkey(desc->mr->rkey);
1313 ib_update_fast_reg_key(desc->mr, rkey);
1314
1315 memcpy(desc->frpl->page_list, state->pages,
1316 sizeof(state->pages[0]) * state->npages);
1317
1318 memset(&wr, 0, sizeof(wr));
1319 wr.opcode = IB_WR_FAST_REG_MR;
1320 wr.wr_id = FAST_REG_WR_ID_MASK;
1321 wr.wr.fast_reg.iova_start = state->base_dma_addr;
1322 wr.wr.fast_reg.page_list = desc->frpl;
1323 wr.wr.fast_reg.page_list_len = state->npages;
1324 wr.wr.fast_reg.page_shift = ilog2(dev->mr_page_size);
1325 wr.wr.fast_reg.length = state->dma_len;
1326 wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
1327 IB_ACCESS_REMOTE_READ |
1328 IB_ACCESS_REMOTE_WRITE);
1329 wr.wr.fast_reg.rkey = desc->mr->lkey;
1330
1331 *state->next_fr++ = desc;
1332 state->nmdesc++;
1333
1334 srp_map_desc(state, state->base_dma_addr, state->dma_len,
1335 desc->mr->rkey);
1336
Bart Van Assche509c07b2014-10-30 14:48:30 +01001337 return ib_post_send(ch->qp, &wr, &bad_wr);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001338}
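/*
 * Note on the fast registration path above: ib_inc_rkey() advances only
 * the 8-bit key portion of the rkey, so a stale key left over from a
 * previous registration of the same MR no longer grants access, and
 * IB_WR_FAST_REG_MR binds the freshly bumped key to the page list that
 * was just copied. Because the work request is posted on the same QP as
 * the data transfer, the HCA processes the registration before any later
 * send that advertises the new rkey.
 */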
1339
Bart Van Assche539dde62014-05-20 15:05:46 +02001340static int srp_finish_mapping(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001341 struct srp_rdma_ch *ch)
Bart Van Assche539dde62014-05-20 15:05:46 +02001342{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001343 struct srp_target_port *target = ch->target;
Bart Van Assche539dde62014-05-20 15:05:46 +02001344 int ret = 0;
1345
1346 if (state->npages == 0)
1347 return 0;
1348
Bart Van Asscheb1b88542014-05-20 15:06:41 +02001349 if (state->npages == 1 && !register_always)
Bart Van Assche52ede082014-05-20 15:07:45 +02001350 srp_map_desc(state, state->base_dma_addr, state->dma_len,
Bart Van Assche539dde62014-05-20 15:05:46 +02001351 target->rkey);
1352 else
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001353 ret = target->srp_host->srp_dev->use_fast_reg ?
Bart Van Assche509c07b2014-10-30 14:48:30 +01001354 srp_map_finish_fr(state, ch) :
1355 srp_map_finish_fmr(state, ch);
Bart Van Assche539dde62014-05-20 15:05:46 +02001356
1357 if (ret == 0) {
1358 state->npages = 0;
Bart Van Assche52ede082014-05-20 15:07:45 +02001359 state->dma_len = 0;
Bart Van Assche539dde62014-05-20 15:05:46 +02001360 }
1361
1362 return ret;
1363}
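/*
 * Decision summary for srp_finish_mapping(): a single-page mapping with
 * register_always off is emitted as a direct descriptor under the global
 * rkey, which costs no registration work request; everything else goes
 * through fast registration or the FMR pool, whichever the device was
 * set up to use.
 */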
1364
David Dillow8f26c9f2011-01-14 19:45:50 -05001365static void srp_map_update_start(struct srp_map_state *state,
1366 struct scatterlist *sg, int sg_index,
1367 dma_addr_t dma_addr)
1368{
1369 state->unmapped_sg = sg;
1370 state->unmapped_index = sg_index;
1371 state->unmapped_addr = dma_addr;
1372}
1373
1374static int srp_map_sg_entry(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001375 struct srp_rdma_ch *ch,
David Dillow8f26c9f2011-01-14 19:45:50 -05001376 struct scatterlist *sg, int sg_index,
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001377 bool use_mr)
David Dillow8f26c9f2011-01-14 19:45:50 -05001378{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001379 struct srp_target_port *target = ch->target;
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01001380 struct srp_device *dev = target->srp_host->srp_dev;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001381 struct ib_device *ibdev = dev->dev;
David Dillow8f26c9f2011-01-14 19:45:50 -05001382 dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
1383 unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
1384 unsigned int len;
1385 int ret;
Roland Dreierf5358a12006-06-17 20:37:29 -07001386
David Dillow8f26c9f2011-01-14 19:45:50 -05001387 if (!dma_len)
1388 return 0;
Roland Dreierf5358a12006-06-17 20:37:29 -07001389
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001390 if (!use_mr) {
1391 /*
1392 * Once we're in direct map mode for a request, we don't
1393 * go back to FMR or FR mode, so no need to update anything
David Dillow8f26c9f2011-01-14 19:45:50 -05001394 * other than the descriptor.
1395 */
1396 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1397 return 0;
1398 }
Ishai Rabinovitz559ce8f2006-08-03 10:35:43 -07001399
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001400 /*
1401 * Since not all RDMA HW drivers support non-zero page offsets for
1402 * FMR, if we start at an offset into a page, don't merge into the
1403 * current FMR mapping. Finish it out, and use the kernel's MR for
1404 * this sg entry.
David Dillow8f26c9f2011-01-14 19:45:50 -05001405 */
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001406 if ((!dev->use_fast_reg && dma_addr & ~dev->mr_page_mask) ||
1407 dma_len > dev->mr_max_size) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001408 ret = srp_finish_mapping(state, ch);
David Dillow8f26c9f2011-01-14 19:45:50 -05001409 if (ret)
1410 return ret;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001411
David Dillow8f26c9f2011-01-14 19:45:50 -05001412 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1413 srp_map_update_start(state, NULL, 0, 0);
1414 return 0;
1415 }
1416
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001417 /*
1418 * If this is the first sg that will be mapped via FMR or via FR, save
1419 * our position. We need to know the first unmapped entry, its index,
1420 * and the first unmapped address within that entry to be able to
1421 * restart mapping after an error.
David Dillow8f26c9f2011-01-14 19:45:50 -05001422 */
1423 if (!state->unmapped_sg)
1424 srp_map_update_start(state, sg, sg_index, dma_addr);
1425
1426 while (dma_len) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001427 unsigned offset = dma_addr & ~dev->mr_page_mask;
1428 if (state->npages == dev->max_pages_per_mr || offset != 0) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001429 ret = srp_finish_mapping(state, ch);
David Dillow8f26c9f2011-01-14 19:45:50 -05001430 if (ret)
1431 return ret;
1432
1433 srp_map_update_start(state, sg, sg_index, dma_addr);
Roland Dreierf5358a12006-06-17 20:37:29 -07001434 }
1435
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001436 len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
David Dillow8f26c9f2011-01-14 19:45:50 -05001437
1438 if (!state->npages)
1439 state->base_dma_addr = dma_addr;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001440 state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
Bart Van Assche52ede082014-05-20 15:07:45 +02001441 state->dma_len += len;
David Dillow8f26c9f2011-01-14 19:45:50 -05001442 dma_addr += len;
1443 dma_len -= len;
Roland Dreierf5358a12006-06-17 20:37:29 -07001444 }
1445
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001446 /*
1447 * If the last entry of the MR wasn't a full page, then we need to
David Dillow8f26c9f2011-01-14 19:45:50 -05001448 * close it out and start a new one -- we can only merge at page
1449	 * boundaries.
1450 */
Roland Dreierf5358a12006-06-17 20:37:29 -07001451 ret = 0;
Bart Van Assche52ede082014-05-20 15:07:45 +02001452 if (len != dev->mr_page_size) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001453 ret = srp_finish_mapping(state, ch);
David Dillow8f26c9f2011-01-14 19:45:50 -05001454 if (!ret)
1455 srp_map_update_start(state, NULL, 0, 0);
1456 }
Roland Dreierf5358a12006-06-17 20:37:29 -07001457 return ret;
1458}
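/*
 * Worked example for the loop above, with hypothetical numbers: given
 * mr_page_size = 4096, an s/g entry with dma_addr = 0x10000 and
 * dma_len = 10240 is consumed in three steps:
 *
 *	step 1: len = 4096, pages[0] = 0x10000
 *	step 2: len = 4096, pages[1] = 0x11000
 *	step 3: len = 2048, pages[2] = 0x12000
 *
 * Since the final chunk (2048 bytes) is not a full page, the closing test
 * calls srp_finish_mapping(): merging with the next s/g entry is only
 * possible at a page boundary.
 */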
1459
Bart Van Assche509c07b2014-10-30 14:48:30 +01001460static int srp_map_sg(struct srp_map_state *state, struct srp_rdma_ch *ch,
1461 struct srp_request *req, struct scatterlist *scat,
1462 int count)
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001463{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001464 struct srp_target_port *target = ch->target;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001465 struct srp_device *dev = target->srp_host->srp_dev;
1466 struct ib_device *ibdev = dev->dev;
1467 struct scatterlist *sg;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001468 int i;
1469 bool use_mr;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001470
1471 state->desc = req->indirect_desc;
1472 state->pages = req->map_page;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001473 if (dev->use_fast_reg) {
1474 state->next_fr = req->fr_list;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001475 use_mr = !!ch->fr_pool;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001476 } else {
1477 state->next_fmr = req->fmr_list;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001478 use_mr = !!ch->fmr_pool;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001479 }
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001480
1481 for_each_sg(scat, sg, count, i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001482 if (srp_map_sg_entry(state, ch, sg, i, use_mr)) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001483 /*
1484 * Memory registration failed, so backtrack to the
1485 * first unmapped entry and continue on without using
1486 * memory registration.
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001487 */
1488 dma_addr_t dma_addr;
1489 unsigned int dma_len;
1490
1491backtrack:
1492 sg = state->unmapped_sg;
1493 i = state->unmapped_index;
1494
1495 dma_addr = ib_sg_dma_address(ibdev, sg);
1496 dma_len = ib_sg_dma_len(ibdev, sg);
1497 dma_len -= (state->unmapped_addr - dma_addr);
1498 dma_addr = state->unmapped_addr;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001499 use_mr = false;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001500 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1501 }
1502 }
1503
Bart Van Assche509c07b2014-10-30 14:48:30 +01001504 if (use_mr && srp_finish_mapping(state, ch))
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001505 goto backtrack;
1506
Bart Van Assche52ede082014-05-20 15:07:45 +02001507 req->nmdesc = state->nmdesc;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001508
1509 return 0;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001510}
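/*
 * The backtrack label above is why srp_map_update_start() records the
 * first unmapped s/g entry: when a registration attempt fails, mapping
 * restarts at that entry with use_mr cleared, so every remaining byte is
 * emitted as a plain direct descriptor under the global rkey.
 */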
1511
Bart Van Assche509c07b2014-10-30 14:48:30 +01001512static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
Roland Dreieraef9ec32005-11-02 14:07:13 -08001513 struct srp_request *req)
1514{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001515 struct srp_target_port *target = ch->target;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001516 struct scatterlist *scat;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001517 struct srp_cmd *cmd = req->cmd->buf;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001518 int len, nents, count;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001519 struct srp_device *dev;
1520 struct ib_device *ibdev;
David Dillow8f26c9f2011-01-14 19:45:50 -05001521 struct srp_map_state state;
1522 struct srp_indirect_buf *indirect_hdr;
David Dillow8f26c9f2011-01-14 19:45:50 -05001523 u32 table_len;
1524 u8 fmt;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001525
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001526 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001527 return sizeof (struct srp_cmd);
1528
1529 if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1530 scmnd->sc_data_direction != DMA_TO_DEVICE) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001531 shost_printk(KERN_WARNING, target->scsi_host,
1532 PFX "Unhandled data direction %d\n",
1533 scmnd->sc_data_direction);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001534 return -EINVAL;
1535 }
1536
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001537 nents = scsi_sg_count(scmnd);
1538 scat = scsi_sglist(scmnd);
Roland Dreiercf368712006-03-24 15:47:26 -08001539
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01001540 dev = target->srp_host->srp_dev;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001541 ibdev = dev->dev;
1542
1543 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
David Dillow8f26c9f2011-01-14 19:45:50 -05001544 if (unlikely(count == 0))
1545 return -EIO;
Roland Dreierf5358a12006-06-17 20:37:29 -07001546
1547 fmt = SRP_DATA_DESC_DIRECT;
1548 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
Roland Dreiercf368712006-03-24 15:47:26 -08001549
Bart Van Asscheb1b88542014-05-20 15:06:41 +02001550 if (count == 1 && !register_always) {
Roland Dreierf5358a12006-06-17 20:37:29 -07001551 /*
1552 * The midlayer only generated a single gather/scatter
1553 * entry, or DMA mapping coalesced everything to a
1554 * single entry. So a direct descriptor along with
1555 * the DMA MR suffices.
1556 */
Roland Dreieraef9ec32005-11-02 14:07:13 -08001557 struct srp_direct_buf *buf = (void *) cmd->add_data;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001558
Ralph Campbell85507bc2006-12-12 14:30:55 -08001559 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
David Dillow9af76272010-11-26 15:34:46 -05001560 buf->key = cpu_to_be32(target->rkey);
Ralph Campbell85507bc2006-12-12 14:30:55 -08001561 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
Roland Dreiercf368712006-03-24 15:47:26 -08001562
Bart Van Assche52ede082014-05-20 15:07:45 +02001563 req->nmdesc = 0;
David Dillow8f26c9f2011-01-14 19:45:50 -05001564 goto map_complete;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001565 }
1566
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001567 /*
1568 * We have more than one scatter/gather entry, so build our indirect
1569 * descriptor table, trying to merge as many entries as we can.
David Dillow8f26c9f2011-01-14 19:45:50 -05001570 */
1571 indirect_hdr = (void *) cmd->add_data;
1572
David Dillowc07d4242011-01-16 13:57:10 -05001573 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1574 target->indirect_size, DMA_TO_DEVICE);
1575
David Dillow8f26c9f2011-01-14 19:45:50 -05001576 memset(&state, 0, sizeof(state));
Bart Van Assche509c07b2014-10-30 14:48:30 +01001577 srp_map_sg(&state, ch, req, scat, count);
David Dillow8f26c9f2011-01-14 19:45:50 -05001578
David Dillowc07d4242011-01-16 13:57:10 -05001579 /* We've mapped the request, now pull as much of the indirect
1580 * descriptor table as we can into the command buffer. If this
1581 * target is not using an external indirect table, we are
1582 * guaranteed to fit into the command, as the SCSI layer won't
1583 * give us more S/G entries than we allow.
David Dillow8f26c9f2011-01-14 19:45:50 -05001584 */
David Dillow8f26c9f2011-01-14 19:45:50 -05001585 if (state.ndesc == 1) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001586 /*
1587 * Memory registration collapsed the sg-list into one entry,
David Dillow8f26c9f2011-01-14 19:45:50 -05001588 * so use a direct descriptor.
1589 */
1590 struct srp_direct_buf *buf = (void *) cmd->add_data;
1591
David Dillowc07d4242011-01-16 13:57:10 -05001592 *buf = req->indirect_desc[0];
David Dillow8f26c9f2011-01-14 19:45:50 -05001593 goto map_complete;
1594 }
1595
David Dillowc07d4242011-01-16 13:57:10 -05001596 if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1597 !target->allow_ext_sg)) {
1598 shost_printk(KERN_ERR, target->scsi_host,
1599 "Could not fit S/G list into SRP_CMD\n");
1600 return -EIO;
1601 }
1602
1603 count = min(state.ndesc, target->cmd_sg_cnt);
David Dillow8f26c9f2011-01-14 19:45:50 -05001604 table_len = state.ndesc * sizeof (struct srp_direct_buf);
1605
1606 fmt = SRP_DATA_DESC_INDIRECT;
1607 len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
David Dillowc07d4242011-01-16 13:57:10 -05001608 len += count * sizeof (struct srp_direct_buf);
David Dillow8f26c9f2011-01-14 19:45:50 -05001609
David Dillowc07d4242011-01-16 13:57:10 -05001610 memcpy(indirect_hdr->desc_list, req->indirect_desc,
1611 count * sizeof (struct srp_direct_buf));
David Dillow8f26c9f2011-01-14 19:45:50 -05001612
David Dillowc07d4242011-01-16 13:57:10 -05001613 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
David Dillow8f26c9f2011-01-14 19:45:50 -05001614 indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
1615 indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1616 indirect_hdr->len = cpu_to_be32(state.total_len);
1617
1618 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
David Dillowc07d4242011-01-16 13:57:10 -05001619 cmd->data_out_desc_cnt = count;
David Dillow8f26c9f2011-01-14 19:45:50 -05001620 else
David Dillowc07d4242011-01-16 13:57:10 -05001621 cmd->data_in_desc_cnt = count;
1622
1623 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1624 DMA_TO_DEVICE);
David Dillow8f26c9f2011-01-14 19:45:50 -05001625
1626map_complete:
Roland Dreieraef9ec32005-11-02 14:07:13 -08001627 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1628 cmd->buf_fmt = fmt << 4;
1629 else
1630 cmd->buf_fmt = fmt;
1631
Roland Dreieraef9ec32005-11-02 14:07:13 -08001632 return len;
1633}
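/*
 * The value returned by srp_map_data() is the number of bytes of the
 * SRP_CMD information unit that are actually in use: sizeof(struct
 * srp_cmd) plus either a single direct descriptor or an indirect header
 * followed by at most cmd_sg_cnt direct descriptors. The caller feeds
 * this length straight into srp_post_send().
 */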
1634
David Dillow05a1d752010-10-08 14:48:14 -04001635/*
Bart Van Assche76c75b22010-11-26 14:37:47 -05001636 * Return an IU and possibly a credit to the free pool
1637 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01001638static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
Bart Van Assche76c75b22010-11-26 14:37:47 -05001639 enum srp_iu_type iu_type)
1640{
1641 unsigned long flags;
1642
Bart Van Assche509c07b2014-10-30 14:48:30 +01001643 spin_lock_irqsave(&ch->lock, flags);
1644 list_add(&iu->list, &ch->free_tx);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001645 if (iu_type != SRP_IU_RSP)
Bart Van Assche509c07b2014-10-30 14:48:30 +01001646 ++ch->req_lim;
1647 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001648}
1649
1650/*
Bart Van Assche509c07b2014-10-30 14:48:30 +01001651 * Must be called with ch->lock held to protect req_lim and free_tx.
Bart Van Asschee9684672010-11-26 15:08:38 -05001652 * If IU is not sent, it must be returned using srp_put_tx_iu().
David Dillow05a1d752010-10-08 14:48:14 -04001653 *
1654 * Note:
1655 * An upper limit for the number of allocated information units for each
1656 * request type is:
1657 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1658 * more than Scsi_Host.can_queue requests.
1659 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1660 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1661 * one unanswered SRP request to an initiator.
1662 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01001663static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
David Dillow05a1d752010-10-08 14:48:14 -04001664 enum srp_iu_type iu_type)
1665{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001666 struct srp_target_port *target = ch->target;
David Dillow05a1d752010-10-08 14:48:14 -04001667 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1668 struct srp_iu *iu;
1669
Bart Van Assche509c07b2014-10-30 14:48:30 +01001670 srp_send_completion(ch->send_cq, ch);
David Dillow05a1d752010-10-08 14:48:14 -04001671
Bart Van Assche509c07b2014-10-30 14:48:30 +01001672 if (list_empty(&ch->free_tx))
David Dillow05a1d752010-10-08 14:48:14 -04001673 return NULL;
1674
1675 /* Initiator responses to target requests do not consume credits */
Bart Van Assche76c75b22010-11-26 14:37:47 -05001676 if (iu_type != SRP_IU_RSP) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001677 if (ch->req_lim <= rsv) {
Bart Van Assche76c75b22010-11-26 14:37:47 -05001678 ++target->zero_req_lim;
1679 return NULL;
1680 }
1681
Bart Van Assche509c07b2014-10-30 14:48:30 +01001682 --ch->req_lim;
David Dillow05a1d752010-10-08 14:48:14 -04001683 }
1684
Bart Van Assche509c07b2014-10-30 14:48:30 +01001685 iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001686 list_del(&iu->list);
David Dillow05a1d752010-10-08 14:48:14 -04001687 return iu;
1688}
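/*
 * Illustrative credit accounting, assuming SRP_TSK_MGMT_SQ_SIZE == 1 for
 * the example: with req_lim == 2, an SRP_IU_CMD allocation succeeds and
 * leaves req_lim == 1; the next SRP_IU_CMD request fails because it would
 * eat into the task management reserve, while an SRP_IU_TSK_MGMT request
 * may still consume that last credit and an SRP_IU_RSP request needs no
 * credit at all.
 */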
1689
Bart Van Assche509c07b2014-10-30 14:48:30 +01001690static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
David Dillow05a1d752010-10-08 14:48:14 -04001691{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001692 struct srp_target_port *target = ch->target;
David Dillow05a1d752010-10-08 14:48:14 -04001693 struct ib_sge list;
1694 struct ib_send_wr wr, *bad_wr;
David Dillow05a1d752010-10-08 14:48:14 -04001695
1696 list.addr = iu->dma;
1697 list.length = len;
David Dillow9af76272010-11-26 15:34:46 -05001698 list.lkey = target->lkey;
David Dillow05a1d752010-10-08 14:48:14 -04001699
1700 wr.next = NULL;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001701 wr.wr_id = (uintptr_t) iu;
David Dillow05a1d752010-10-08 14:48:14 -04001702 wr.sg_list = &list;
1703 wr.num_sge = 1;
1704 wr.opcode = IB_WR_SEND;
1705 wr.send_flags = IB_SEND_SIGNALED;
1706
Bart Van Assche509c07b2014-10-30 14:48:30 +01001707 return ib_post_send(ch->qp, &wr, &bad_wr);
David Dillow05a1d752010-10-08 14:48:14 -04001708}
1709
Bart Van Assche509c07b2014-10-30 14:48:30 +01001710static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
Bart Van Asschec996bb42010-07-30 10:59:05 +00001711{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001712 struct srp_target_port *target = ch->target;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001713 struct ib_recv_wr wr, *bad_wr;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001714 struct ib_sge list;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001715
1716 list.addr = iu->dma;
1717 list.length = iu->size;
David Dillow9af76272010-11-26 15:34:46 -05001718 list.lkey = target->lkey;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001719
1720 wr.next = NULL;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001721 wr.wr_id = (uintptr_t) iu;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001722 wr.sg_list = &list;
1723 wr.num_sge = 1;
1724
Bart Van Assche509c07b2014-10-30 14:48:30 +01001725 return ib_post_recv(ch->qp, &wr, &bad_wr);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001726}
1727
Bart Van Assche509c07b2014-10-30 14:48:30 +01001728static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001729{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001730 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001731 struct srp_request *req;
1732 struct scsi_cmnd *scmnd;
1733 unsigned long flags;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001734
Roland Dreieraef9ec32005-11-02 14:07:13 -08001735 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001736 spin_lock_irqsave(&ch->lock, flags);
1737 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1738 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche94a91742010-11-26 14:50:09 -05001739
Bart Van Assche509c07b2014-10-30 14:48:30 +01001740 ch->tsk_mgmt_status = -1;
David Dillowf8b6e312010-11-26 13:02:21 -05001741 if (be32_to_cpu(rsp->resp_data_len) >= 4)
Bart Van Assche509c07b2014-10-30 14:48:30 +01001742 ch->tsk_mgmt_status = rsp->data[3];
1743 complete(&ch->tsk_mgmt_done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001744 } else {
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02001745 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1746 if (scmnd) {
1747 req = (void *)scmnd->host_scribble;
1748 scmnd = srp_claim_req(ch, req, NULL, scmnd);
1749 }
Bart Van Assche22032992012-08-14 13:18:53 +00001750 if (!scmnd) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001751 shost_printk(KERN_ERR, target->scsi_host,
Bart Van Assched92c0da2014-10-06 17:14:36 +02001752 "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1753 rsp->tag, ch - target->ch, ch->qp->qp_num);
Bart Van Assche22032992012-08-14 13:18:53 +00001754
Bart Van Assche509c07b2014-10-30 14:48:30 +01001755 spin_lock_irqsave(&ch->lock, flags);
1756 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1757 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche22032992012-08-14 13:18:53 +00001758
1759 return;
1760 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001761 scmnd->result = rsp->status;
1762
1763 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1764 memcpy(scmnd->sense_buffer, rsp->data +
1765 be32_to_cpu(rsp->resp_data_len),
1766 min_t(int, be32_to_cpu(rsp->sense_data_len),
1767 SCSI_SENSE_BUFFERSIZE));
1768 }
1769
Bart Van Asschee7145312014-07-09 15:57:51 +02001770 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001771 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
Bart Van Asschee7145312014-07-09 15:57:51 +02001772 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1773 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1774 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1775 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1776 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1777 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
Roland Dreieraef9ec32005-11-02 14:07:13 -08001778
Bart Van Assche509c07b2014-10-30 14:48:30 +01001779 srp_free_req(ch, req, scmnd,
Bart Van Assche22032992012-08-14 13:18:53 +00001780 be32_to_cpu(rsp->req_lim_delta));
1781
David Dillowf8b6e312010-11-26 13:02:21 -05001782 scmnd->host_scribble = NULL;
1783 scmnd->scsi_done(scmnd);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001784 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001785}
1786
Bart Van Assche509c07b2014-10-30 14:48:30 +01001787static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
David Dillowbb125882010-10-08 14:40:47 -04001788 void *rsp, int len)
1789{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001790 struct srp_target_port *target = ch->target;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001791 struct ib_device *dev = target->srp_host->srp_dev->dev;
David Dillowbb125882010-10-08 14:40:47 -04001792 unsigned long flags;
1793 struct srp_iu *iu;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001794 int err;
David Dillowbb125882010-10-08 14:40:47 -04001795
Bart Van Assche509c07b2014-10-30 14:48:30 +01001796 spin_lock_irqsave(&ch->lock, flags);
1797 ch->req_lim += req_delta;
1798 iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1799 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001800
David Dillowbb125882010-10-08 14:40:47 -04001801 if (!iu) {
1802 shost_printk(KERN_ERR, target->scsi_host, PFX
1803 "no IU available to send response\n");
Bart Van Assche76c75b22010-11-26 14:37:47 -05001804 return 1;
David Dillowbb125882010-10-08 14:40:47 -04001805 }
1806
1807 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1808 memcpy(iu->buf, rsp, len);
1809 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1810
Bart Van Assche509c07b2014-10-30 14:48:30 +01001811 err = srp_post_send(ch, iu, len);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001812 if (err) {
David Dillowbb125882010-10-08 14:40:47 -04001813 shost_printk(KERN_ERR, target->scsi_host, PFX
1814 "unable to post response: %d\n", err);
Bart Van Assche509c07b2014-10-30 14:48:30 +01001815 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001816 }
David Dillowbb125882010-10-08 14:40:47 -04001817
David Dillowbb125882010-10-08 14:40:47 -04001818 return err;
1819}
1820
Bart Van Assche509c07b2014-10-30 14:48:30 +01001821static void srp_process_cred_req(struct srp_rdma_ch *ch,
David Dillowbb125882010-10-08 14:40:47 -04001822 struct srp_cred_req *req)
1823{
1824 struct srp_cred_rsp rsp = {
1825 .opcode = SRP_CRED_RSP,
1826 .tag = req->tag,
1827 };
1828 s32 delta = be32_to_cpu(req->req_lim_delta);
1829
Bart Van Assche509c07b2014-10-30 14:48:30 +01001830 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1831 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
David Dillowbb125882010-10-08 14:40:47 -04001832 "problems processing SRP_CRED_REQ\n");
1833}
1834
Bart Van Assche509c07b2014-10-30 14:48:30 +01001835static void srp_process_aer_req(struct srp_rdma_ch *ch,
David Dillowbb125882010-10-08 14:40:47 -04001836 struct srp_aer_req *req)
1837{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001838 struct srp_target_port *target = ch->target;
David Dillowbb125882010-10-08 14:40:47 -04001839 struct srp_aer_rsp rsp = {
1840 .opcode = SRP_AER_RSP,
1841 .tag = req->tag,
1842 };
1843 s32 delta = be32_to_cpu(req->req_lim_delta);
1844
1845 shost_printk(KERN_ERR, target->scsi_host, PFX
1846 "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));
1847
Bart Van Assche509c07b2014-10-30 14:48:30 +01001848 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
David Dillowbb125882010-10-08 14:40:47 -04001849 shost_printk(KERN_ERR, target->scsi_host, PFX
1850 "problems processing SRP_AER_REQ\n");
1851}
1852
Bart Van Assche509c07b2014-10-30 14:48:30 +01001853static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001854{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001855 struct srp_target_port *target = ch->target;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001856 struct ib_device *dev = target->srp_host->srp_dev->dev;
Roland Dreier737b94e2011-05-23 11:30:04 -07001857 struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001858 int res;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001859 u8 opcode;
1860
Bart Van Assche509c07b2014-10-30 14:48:30 +01001861 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08001862 DMA_FROM_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001863
1864 opcode = *(u8 *) iu->buf;
1865
1866 if (0) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001867 shost_printk(KERN_ERR, target->scsi_host,
1868 PFX "recv completion, opcode 0x%02x\n", opcode);
Bart Van Assche7a700812010-07-29 15:56:37 +00001869 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
1870 iu->buf, wc->byte_len, true);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001871 }
1872
1873 switch (opcode) {
1874 case SRP_RSP:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001875 srp_process_rsp(ch, iu->buf);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001876 break;
1877
David Dillowbb125882010-10-08 14:40:47 -04001878 case SRP_CRED_REQ:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001879 srp_process_cred_req(ch, iu->buf);
David Dillowbb125882010-10-08 14:40:47 -04001880 break;
1881
1882 case SRP_AER_REQ:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001883 srp_process_aer_req(ch, iu->buf);
David Dillowbb125882010-10-08 14:40:47 -04001884 break;
1885
Roland Dreieraef9ec32005-11-02 14:07:13 -08001886 case SRP_T_LOGOUT:
1887 /* XXX Handle target logout */
David Dillow7aa54bd2008-01-07 18:23:41 -05001888 shost_printk(KERN_WARNING, target->scsi_host,
1889 PFX "Got target logout request\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001890 break;
1891
1892 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05001893 shost_printk(KERN_WARNING, target->scsi_host,
1894 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001895 break;
1896 }
1897
Bart Van Assche509c07b2014-10-30 14:48:30 +01001898 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08001899 DMA_FROM_DEVICE);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001900
Bart Van Assche509c07b2014-10-30 14:48:30 +01001901 res = srp_post_recv(ch, iu);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001902 if (res != 0)
1903 shost_printk(KERN_ERR, target->scsi_host,
1904 PFX "Recv failed with error code %d\n", res);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001905}
1906
Bart Van Asschec1120f82013-10-26 14:35:08 +02001907/**
1908 * srp_tl_err_work() - handle a transport layer error
Bart Van Asscheaf246632014-05-20 15:04:21 +02001909 * @work: Work structure embedded in an SRP target port.
Bart Van Asschec1120f82013-10-26 14:35:08 +02001910 *
1911 * Note: This function may get invoked before the rport has been created,
1912 * hence the target->rport test.
1913 */
1914static void srp_tl_err_work(struct work_struct *work)
1915{
1916 struct srp_target_port *target;
1917
1918 target = container_of(work, struct srp_target_port, tl_err_work);
1919 if (target->rport)
1920 srp_start_tl_fail_timers(target->rport);
1921}
1922
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001923static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
Bart Van Assche7dad6b22014-10-21 18:00:35 +02001924 bool send_err, struct srp_rdma_ch *ch)
Bart Van Assche948d1e82011-09-03 09:25:42 +02001925{
Bart Van Assche7dad6b22014-10-21 18:00:35 +02001926 struct srp_target_port *target = ch->target;
1927
1928 if (wr_id == SRP_LAST_WR_ID) {
1929 complete(&ch->done);
1930 return;
1931 }
1932
Bart Van Assche294c8752011-12-25 12:18:12 +00001933 if (target->connected && !target->qp_in_error) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001934 if (wr_id & LOCAL_INV_WR_ID_MASK) {
1935 shost_printk(KERN_ERR, target->scsi_host, PFX
Sagi Grimberg57363d92015-05-18 13:40:29 +03001936 "LOCAL_INV failed with status %s (%d)\n",
1937 ib_wc_status_msg(wc_status), wc_status);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001938 } else if (wr_id & FAST_REG_WR_ID_MASK) {
1939 shost_printk(KERN_ERR, target->scsi_host, PFX
Sagi Grimberg57363d92015-05-18 13:40:29 +03001940 "FAST_REG_MR failed status %s (%d)\n",
1941 ib_wc_status_msg(wc_status), wc_status);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001942 } else {
1943 shost_printk(KERN_ERR, target->scsi_host,
Sagi Grimberg57363d92015-05-18 13:40:29 +03001944 PFX "failed %s status %s (%d) for iu %p\n",
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001945 send_err ? "send" : "receive",
Sagi Grimberg57363d92015-05-18 13:40:29 +03001946 ib_wc_status_msg(wc_status), wc_status,
1947 (void *)(uintptr_t)wr_id);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001948 }
Bart Van Asschec1120f82013-10-26 14:35:08 +02001949 queue_work(system_long_wq, &target->tl_err_work);
Bart Van Assche4f0af692012-11-26 11:16:40 +01001950 }
Bart Van Assche948d1e82011-09-03 09:25:42 +02001951 target->qp_in_error = true;
1952}
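/*
 * The wr_id tests above rely on ib_srp encoding the request type in the
 * 64-bit wr_id itself: SRP_LAST_WR_ID marks the queue drain request,
 * LOCAL_INV_WR_ID_MASK and FAST_REG_WR_ID_MASK flag memory registration
 * work requests, and any other value is the address of the srp_iu that
 * was posted.
 */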
1953
Bart Van Assche509c07b2014-10-30 14:48:30 +01001954static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001955{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001956 struct srp_rdma_ch *ch = ch_ptr;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001957 struct ib_wc wc;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001958
1959 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
1960 while (ib_poll_cq(cq, 1, &wc) > 0) {
Bart Van Assche948d1e82011-09-03 09:25:42 +02001961 if (likely(wc.status == IB_WC_SUCCESS)) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001962 srp_handle_recv(ch, &wc);
Bart Van Assche948d1e82011-09-03 09:25:42 +02001963 } else {
Bart Van Assche7dad6b22014-10-21 18:00:35 +02001964 srp_handle_qp_err(wc.wr_id, wc.status, false, ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001965 }
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001966 }
1967}
1968
Bart Van Assche509c07b2014-10-30 14:48:30 +01001969static void srp_send_completion(struct ib_cq *cq, void *ch_ptr)
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001970{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001971 struct srp_rdma_ch *ch = ch_ptr;
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001972 struct ib_wc wc;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001973 struct srp_iu *iu;
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001974
1975 while (ib_poll_cq(cq, 1, &wc) > 0) {
Bart Van Assche948d1e82011-09-03 09:25:42 +02001976 if (likely(wc.status == IB_WC_SUCCESS)) {
1977 iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001978 list_add(&iu->list, &ch->free_tx);
Bart Van Assche948d1e82011-09-03 09:25:42 +02001979 } else {
Bart Van Assche7dad6b22014-10-21 18:00:35 +02001980 srp_handle_qp_err(wc.wr_id, wc.status, true, ch);
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001981 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001982 }
1983}
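/*
 * Design note: unlike srp_recv_completion(), srp_send_completion() does
 * not re-arm its CQ with ib_req_notify_cq(). Send completions are reaped
 * opportunistically instead -- __srp_get_tx_iu() polls the send CQ before
 * checking free_tx -- which saves one interrupt per transmitted
 * information unit.
 */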
1984
Bart Van Assche76c75b22010-11-26 14:37:47 -05001985static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001986{
Bart Van Assche76c75b22010-11-26 14:37:47 -05001987 struct srp_target_port *target = host_to_target(shost);
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001988 struct srp_rport *rport = target->rport;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001989 struct srp_rdma_ch *ch;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001990 struct srp_request *req;
1991 struct srp_iu *iu;
1992 struct srp_cmd *cmd;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001993 struct ib_device *dev;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001994 unsigned long flags;
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02001995 u32 tag;
1996 u16 idx;
Bart Van Assched1b42892014-05-20 15:07:20 +02001997 int len, ret;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001998 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
1999
2000 /*
2001 * The SCSI EH thread is the only context from which srp_queuecommand()
2002 * can get invoked for blocked devices (SDEV_BLOCK /
2003 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
2004 * locking the rport mutex if invoked from inside the SCSI EH.
2005 */
2006 if (in_scsi_eh)
2007 mutex_lock(&rport->mutex);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002008
Bart Van Assched1b42892014-05-20 15:07:20 +02002009 scmnd->result = srp_chkready(target->rport);
2010 if (unlikely(scmnd->result))
2011 goto err;
Bart Van Assche2ce19e72013-02-21 17:20:00 +00002012
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002013 WARN_ON_ONCE(scmnd->request->tag < 0);
2014 tag = blk_mq_unique_tag(scmnd->request);
Bart Van Assched92c0da2014-10-06 17:14:36 +02002015 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002016 idx = blk_mq_unique_tag_to_tag(tag);
2017 WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
2018 dev_name(&shost->shost_gendev), tag, idx,
2019 target->req_ring_size);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002020
2021 spin_lock_irqsave(&ch->lock, flags);
2022 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002023 spin_unlock_irqrestore(&ch->lock, flags);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002024
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002025 if (!iu)
2026 goto err;
2027
2028 req = &ch->req_ring[idx];
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002029 dev = target->srp_host->srp_dev->dev;
David Dillow49248642011-01-14 18:23:24 -05002030 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08002031 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002032
David Dillowf8b6e312010-11-26 13:02:21 -05002033 scmnd->host_scribble = (void *) req;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002034
2035 cmd = iu->buf;
2036 memset(cmd, 0, sizeof *cmd);
2037
2038 cmd->opcode = SRP_CMD;
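	/*
	 * SRP carries the LUN in SAM format: shifting the single-level
	 * SCSI LUN into the top 16 bits of the big-endian 64-bit field
	 * encodes simple peripheral device addressing for LUNs below 256.
	 */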
2039 cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
Bart Van Assche77f2c1a2014-10-02 15:29:25 +02002040 cmd->tag = tag;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002041 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2042
Roland Dreieraef9ec32005-11-02 14:07:13 -08002043 req->scmnd = scmnd;
2044 req->cmd = iu;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002045
Bart Van Assche509c07b2014-10-30 14:48:30 +01002046 len = srp_map_data(scmnd, ch, req);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002047 if (len < 0) {
David Dillow7aa54bd2008-01-07 18:23:41 -05002048 shost_printk(KERN_ERR, target->scsi_host,
Bart Van Assched1b42892014-05-20 15:07:20 +02002049 PFX "Failed to map data (%d)\n", len);
2050 /*
2051 * If we ran out of memory descriptors (-ENOMEM) because an
2052 * application is queuing many requests with more than
Bart Van Assche52ede082014-05-20 15:07:45 +02002053 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
Bart Van Assched1b42892014-05-20 15:07:20 +02002054 * to reduce queue depth temporarily.
2055 */
2056 scmnd->result = len == -ENOMEM ?
2057 DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
Bart Van Assche76c75b22010-11-26 14:37:47 -05002058 goto err_iu;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002059 }
2060
David Dillow49248642011-01-14 18:23:24 -05002061 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08002062 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002063
Bart Van Assche509c07b2014-10-30 14:48:30 +01002064 if (srp_post_send(ch, iu, len)) {
David Dillow7aa54bd2008-01-07 18:23:41 -05002065 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002066 goto err_unmap;
2067 }
2068
Bart Van Assched1b42892014-05-20 15:07:20 +02002069 ret = 0;
2070
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002071unlock_rport:
2072 if (in_scsi_eh)
2073 mutex_unlock(&rport->mutex);
2074
Bart Van Assched1b42892014-05-20 15:07:20 +02002075 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002076
2077err_unmap:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002078 srp_unmap_data(scmnd, ch, req);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002079
Bart Van Assche76c75b22010-11-26 14:37:47 -05002080err_iu:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002081 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
Bart Van Assche76c75b22010-11-26 14:37:47 -05002082
Bart Van Assche024ca902014-05-20 15:03:49 +02002083 /*
2084	 * Ensure that the loops that iterate over the request ring never
2085	 * encounter a dangling SCSI command pointer.
2086 */
2087 req->scmnd = NULL;
2088
Bart Van Assched1b42892014-05-20 15:07:20 +02002089err:
2090 if (scmnd->result) {
2091 scmnd->scsi_done(scmnd);
2092 ret = 0;
2093 } else {
2094 ret = SCSI_MLQUEUE_HOST_BUSY;
2095 }
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002096
Bart Van Assched1b42892014-05-20 15:07:20 +02002097 goto unlock_rport;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002098}
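/*
 * Routing note for srp_queuecommand(): blk_mq_unique_tag() packs both the
 * hardware queue index and the per-queue tag into one 32-bit value, so
 * the upper bits select the RDMA channel and the lower bits index
 * ch->req_ring directly; no separate lookup table is needed.
 */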
2099
Bart Van Assche4d73f952013-10-26 14:40:37 +02002100/*
2101 * Note: the resources allocated in this function are freed in
Bart Van Assche509c07b2014-10-30 14:48:30 +01002102 * srp_free_ch_ib().
Bart Van Assche4d73f952013-10-26 14:40:37 +02002103 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01002104static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002105{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002106 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002107 int i;
2108
Bart Van Assche509c07b2014-10-30 14:48:30 +01002109 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2110 GFP_KERNEL);
2111 if (!ch->rx_ring)
Bart Van Assche4d73f952013-10-26 14:40:37 +02002112 goto err_no_ring;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002113 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2114 GFP_KERNEL);
2115 if (!ch->tx_ring)
Bart Van Assche4d73f952013-10-26 14:40:37 +02002116 goto err_no_ring;
2117
2118 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002119 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2120 ch->max_ti_iu_len,
2121 GFP_KERNEL, DMA_FROM_DEVICE);
2122 if (!ch->rx_ring[i])
Roland Dreieraef9ec32005-11-02 14:07:13 -08002123 goto err;
2124 }
2125
Bart Van Assche4d73f952013-10-26 14:40:37 +02002126 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002127 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2128 target->max_iu_len,
2129 GFP_KERNEL, DMA_TO_DEVICE);
2130 if (!ch->tx_ring[i])
Roland Dreieraef9ec32005-11-02 14:07:13 -08002131 goto err;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05002132
Bart Van Assche509c07b2014-10-30 14:48:30 +01002133 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002134 }
2135
2136 return 0;
2137
2138err:
Bart Van Assche4d73f952013-10-26 14:40:37 +02002139 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002140 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2141 srp_free_iu(target->srp_host, ch->tx_ring[i]);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002142 }
2143
Bart Van Assche4d73f952013-10-26 14:40:37 +02002144
2145err_no_ring:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002146 kfree(ch->tx_ring);
2147 ch->tx_ring = NULL;
2148 kfree(ch->rx_ring);
2149 ch->rx_ring = NULL;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002150
2151 return -ENOMEM;
2152}
2153
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002154static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2155{
2156 uint64_t T_tr_ns, max_compl_time_ms;
2157 uint32_t rq_tmo_jiffies;
2158
2159 /*
2160 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2161 * table 91), both the QP timeout and the retry count have to be set
2162	 * for RC QPs during the RTR to RTS transition.
2163 */
2164 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2165 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2166
2167 /*
2168 * Set target->rq_tmo_jiffies to one second more than the largest time
2169 * it can take before an error completion is generated. See also
2170 * C9-140..142 in the IBTA spec for more information about how to
2171 * convert the QP Local ACK Timeout value to nanoseconds.
2172 */
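	/*
	 * Worked example with hypothetical values: for timeout = 14 and
	 * retry_cnt = 7, T_tr_ns = 4096 * 2^14 ns, roughly 67.1 ms, the
	 * worst-case completion time is 7 * 4 * 67.1 ms, roughly 1879 ms,
	 * and the function returns msecs_to_jiffies(2879).
	 */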
2173 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2174 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2175 do_div(max_compl_time_ms, NSEC_PER_MSEC);
2176 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2177
2178 return rq_tmo_jiffies;
2179}
2180
David Dillow961e0be2011-01-14 17:32:07 -05002181static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2182 struct srp_login_rsp *lrsp,
Bart Van Assche509c07b2014-10-30 14:48:30 +01002183 struct srp_rdma_ch *ch)
David Dillow961e0be2011-01-14 17:32:07 -05002184{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002185 struct srp_target_port *target = ch->target;
David Dillow961e0be2011-01-14 17:32:07 -05002186 struct ib_qp_attr *qp_attr = NULL;
2187 int attr_mask = 0;
2188 int ret;
2189 int i;
2190
2191 if (lrsp->opcode == SRP_LOGIN_RSP) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002192 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2193 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
David Dillow961e0be2011-01-14 17:32:07 -05002194
2195 /*
2196 * Reserve credits for task management so we don't
2197 * bounce requests back to the SCSI mid-layer.
2198 */
2199 target->scsi_host->can_queue
Bart Van Assche509c07b2014-10-30 14:48:30 +01002200 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
David Dillow961e0be2011-01-14 17:32:07 -05002201 target->scsi_host->can_queue);
Bart Van Assche4d73f952013-10-26 14:40:37 +02002202 target->scsi_host->cmd_per_lun
2203 = min_t(int, target->scsi_host->can_queue,
2204 target->scsi_host->cmd_per_lun);
David Dillow961e0be2011-01-14 17:32:07 -05002205 } else {
2206 shost_printk(KERN_WARNING, target->scsi_host,
2207 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2208 ret = -ECONNRESET;
2209 goto error;
2210 }
2211
Bart Van Assche509c07b2014-10-30 14:48:30 +01002212 if (!ch->rx_ring) {
2213 ret = srp_alloc_iu_bufs(ch);
David Dillow961e0be2011-01-14 17:32:07 -05002214 if (ret)
2215 goto error;
2216 }
2217
2218 ret = -ENOMEM;
2219 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
2220 if (!qp_attr)
2221 goto error;
2222
2223 qp_attr->qp_state = IB_QPS_RTR;
2224 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2225 if (ret)
2226 goto error_free;
2227
Bart Van Assche509c07b2014-10-30 14:48:30 +01002228 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
David Dillow961e0be2011-01-14 17:32:07 -05002229 if (ret)
2230 goto error_free;
2231
Bart Van Assche4d73f952013-10-26 14:40:37 +02002232 for (i = 0; i < target->queue_size; i++) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002233 struct srp_iu *iu = ch->rx_ring[i];
2234
2235 ret = srp_post_recv(ch, iu);
David Dillow961e0be2011-01-14 17:32:07 -05002236 if (ret)
2237 goto error_free;
2238 }
2239
2240 qp_attr->qp_state = IB_QPS_RTS;
2241 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2242 if (ret)
2243 goto error_free;
2244
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002245 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2246
Bart Van Assche509c07b2014-10-30 14:48:30 +01002247 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
David Dillow961e0be2011-01-14 17:32:07 -05002248 if (ret)
2249 goto error_free;
2250
2251 ret = ib_send_cm_rtu(cm_id, NULL, 0);
2252
2253error_free:
2254 kfree(qp_attr);
2255
2256error:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002257 ch->status = ret;
David Dillow961e0be2011-01-14 17:32:07 -05002258}
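/*
 * Connection bring-up above follows the usual IB CM sequence: move the QP
 * to RTR, post one receive for every rx_ring slot so that no incoming IU
 * can be dropped, move the QP to RTS, and only then send the RTU so that
 * the target may start transmitting.
 */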
2259
Roland Dreieraef9ec32005-11-02 14:07:13 -08002260static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2261 struct ib_cm_event *event,
Bart Van Assche509c07b2014-10-30 14:48:30 +01002262 struct srp_rdma_ch *ch)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002263{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002264 struct srp_target_port *target = ch->target;
David Dillow7aa54bd2008-01-07 18:23:41 -05002265 struct Scsi_Host *shost = target->scsi_host;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002266 struct ib_class_port_info *cpi;
2267 int opcode;
2268
2269 switch (event->param.rej_rcvd.reason) {
2270 case IB_CM_REJ_PORT_CM_REDIRECT:
2271 cpi = event->param.rej_rcvd.ari;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002272 ch->path.dlid = cpi->redirect_lid;
2273 ch->path.pkey = cpi->redirect_pkey;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002274 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002275 memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002276
Bart Van Assche509c07b2014-10-30 14:48:30 +01002277 ch->status = ch->path.dlid ?
Roland Dreieraef9ec32005-11-02 14:07:13 -08002278 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2279 break;
2280
2281 case IB_CM_REJ_PORT_REDIRECT:
Roland Dreier5d7cbfd2007-08-03 10:45:18 -07002282 if (srp_target_is_topspin(target)) {
Roland Dreieraef9ec32005-11-02 14:07:13 -08002283 /*
2284 * Topspin/Cisco SRP gateways incorrectly send
2285 * reject reason code 25 when they mean 24
2286 * (port redirect).
2287 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01002288 memcpy(ch->path.dgid.raw,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002289 event->param.rej_rcvd.ari, 16);
2290
David Dillow7aa54bd2008-01-07 18:23:41 -05002291 shost_printk(KERN_DEBUG, shost,
2292 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
Bart Van Assche509c07b2014-10-30 14:48:30 +01002293 be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2294 be64_to_cpu(ch->path.dgid.global.interface_id));
Roland Dreieraef9ec32005-11-02 14:07:13 -08002295
Bart Van Assche509c07b2014-10-30 14:48:30 +01002296 ch->status = SRP_PORT_REDIRECT;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002297 } else {
David Dillow7aa54bd2008-01-07 18:23:41 -05002298 shost_printk(KERN_WARNING, shost,
2299 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002300 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002301 }
2302 break;
2303
2304 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
David Dillow7aa54bd2008-01-07 18:23:41 -05002305 shost_printk(KERN_WARNING, shost,
2306 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002307 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002308 break;
2309
2310 case IB_CM_REJ_CONSUMER_DEFINED:
2311 opcode = *(u8 *) event->private_data;
2312 if (opcode == SRP_LOGIN_REJ) {
2313 struct srp_login_rej *rej = event->private_data;
2314 u32 reason = be32_to_cpu(rej->reason);
2315
2316 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
David Dillow7aa54bd2008-01-07 18:23:41 -05002317 shost_printk(KERN_WARNING, shost,
2318 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002319 else
Bart Van Asschee7ffde02014-03-14 13:52:21 +01002320 shost_printk(KERN_WARNING, shost, PFX
2321 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
Bart Van Assche747fe002014-10-30 14:48:05 +01002322 target->sgid.raw,
2323 target->orig_dgid.raw, reason);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002324 } else
David Dillow7aa54bd2008-01-07 18:23:41 -05002325 shost_printk(KERN_WARNING, shost,
2326 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2327 " opcode 0x%02x\n", opcode);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002328 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002329 break;
2330
David Dillow9fe4bcf2008-01-08 17:08:52 -05002331 case IB_CM_REJ_STALE_CONN:
2332 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002333 ch->status = SRP_STALE_CONN;
David Dillow9fe4bcf2008-01-08 17:08:52 -05002334 break;
2335
Roland Dreieraef9ec32005-11-02 14:07:13 -08002336 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05002337 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2338 event->param.rej_rcvd.reason);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002339 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002340 }
2341}
2342
2343static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2344{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002345 struct srp_rdma_ch *ch = cm_id->context;
2346 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002347 int comp = 0;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002348
2349 switch (event->event) {
2350 case IB_CM_REQ_ERROR:
David Dillow7aa54bd2008-01-07 18:23:41 -05002351 shost_printk(KERN_DEBUG, target->scsi_host,
2352 PFX "Sending CM REQ failed\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002353 comp = 1;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002354 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002355 break;
2356
2357 case IB_CM_REP_RECEIVED:
2358 comp = 1;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002359 srp_cm_rep_handler(cm_id, event->private_data, ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002360 break;
2361
2362 case IB_CM_REJ_RECEIVED:
David Dillow7aa54bd2008-01-07 18:23:41 -05002363 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002364 comp = 1;
2365
Bart Van Assche509c07b2014-10-30 14:48:30 +01002366 srp_cm_rej_handler(cm_id, event, ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002367 break;
2368
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07002369 case IB_CM_DREQ_RECEIVED:
David Dillow7aa54bd2008-01-07 18:23:41 -05002370 shost_printk(KERN_WARNING, target->scsi_host,
2371 PFX "DREQ received - connection closed\n");
Bart Van Assche294c8752011-12-25 12:18:12 +00002372 srp_change_conn_state(target, false);
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07002373 if (ib_send_cm_drep(cm_id, NULL, 0))
David Dillow7aa54bd2008-01-07 18:23:41 -05002374 shost_printk(KERN_ERR, target->scsi_host,
2375 PFX "Sending CM DREP failed\n");
Bart Van Asschec1120f82013-10-26 14:35:08 +02002376 queue_work(system_long_wq, &target->tl_err_work);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002377 break;
2378
2379 case IB_CM_TIMEWAIT_EXIT:
David Dillow7aa54bd2008-01-07 18:23:41 -05002380 shost_printk(KERN_ERR, target->scsi_host,
2381 PFX "connection closed\n");
Bart Van Asscheac72d762014-03-14 13:53:40 +01002382 comp = 1;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002383
Bart Van Assche509c07b2014-10-30 14:48:30 +01002384 ch->status = 0;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002385 break;
2386
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07002387 case IB_CM_MRA_RECEIVED:
2388 case IB_CM_DREQ_ERROR:
2389 case IB_CM_DREP_RECEIVED:
2390 break;
2391
Roland Dreieraef9ec32005-11-02 14:07:13 -08002392 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05002393 shost_printk(KERN_WARNING, target->scsi_host,
2394 PFX "Unhandled CM event %d\n", event->event);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002395 break;
2396 }
2397
2398 if (comp)
Bart Van Assche509c07b2014-10-30 14:48:30 +01002399 complete(&ch->done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002400
Roland Dreieraef9ec32005-11-02 14:07:13 -08002401 return 0;
2402}
2403
Jack Wang71444b92013-11-07 11:37:37 +01002404/**
Jack Wang71444b92013-11-07 11:37:37 +01002405 * srp_change_queue_depth - set the device queue depth
2406 * @sdev: scsi device struct
2407 * @qdepth: requested queue depth
Jack Wang71444b92013-11-07 11:37:37 +01002408 *
2409 * Returns queue depth.
2410 */
2411static int
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01002412srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
Jack Wang71444b92013-11-07 11:37:37 +01002413{
Christoph Hellwigc40ecc12014-11-13 14:25:11 +01002414 if (!sdev->tagged_supported)
Christoph Hellwig1e6f2412014-11-13 14:27:41 +01002415 qdepth = 1;
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01002416 return scsi_change_queue_depth(sdev, qdepth);
Jack Wang71444b92013-11-07 11:37:37 +01002417}
2418
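/**
 * srp_send_tsk_mgmt() - send an SRP task management function request
 * @ch: RDMA channel on which to send the request.
 * @req_tag: tag of the request the task management function applies to.
 * @lun: logical unit number.
 * @func: SRP_TSK_* task management function code.
 *
 * Builds a SRP_TSK_MGMT information unit, posts it on @ch and waits up to
 * SRP_ABORT_TIMEOUT_MS for the target to answer. Returns 0 on success and
 * -1 on failure or timeout.
 */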
static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag,
			     unsigned int lun, u8 func)
{
	struct srp_target_port *target = ch->target;
	struct srp_rport *rport = target->rport;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu;
	struct srp_tsk_mgmt *tsk_mgmt;

	if (!target->connected || target->qp_in_error)
		return -1;

	init_completion(&ch->tsk_mgmt_done);

	/*
	 * Lock the rport mutex to avoid that srp_create_ch_ib() is
	 * invoked while a task management function is being sent.
	 */
	mutex_lock(&rport->mutex);
	spin_lock_irq(&ch->lock);
	iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
	spin_unlock_irq(&ch->lock);

	if (!iu) {
		mutex_unlock(&rport->mutex);

		return -1;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
				   DMA_TO_DEVICE);
	tsk_mgmt = iu->buf;
	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);

	tsk_mgmt->opcode	= SRP_TSK_MGMT;
	tsk_mgmt->lun		= cpu_to_be64((u64) lun << 48);
	tsk_mgmt->tag		= req_tag | SRP_TAG_TSK_MGMT;
	tsk_mgmt->tsk_mgmt_func = func;
	tsk_mgmt->task_tag	= req_tag;

	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
				      DMA_TO_DEVICE);
	if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
		srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
		mutex_unlock(&rport->mutex);

		return -1;
	}
	mutex_unlock(&rport->mutex);

	if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
		return -1;

	return 0;
}

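/*
 * SCSI error handler callback: abort a single command. The blk-mq unique
 * tag encodes the hardware queue number, which is used to look up the RDMA
 * channel the command was submitted on.
 */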
static int srp_abort(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
	u32 tag;
	u16 ch_idx;
	struct srp_rdma_ch *ch;
	int ret;

	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");

	if (!req)
		return SUCCESS;
	tag = blk_mq_unique_tag(scmnd->request);
	ch_idx = blk_mq_unique_tag_to_hwq(tag);
	if (WARN_ON_ONCE(ch_idx >= target->ch_count))
		return SUCCESS;
	ch = &target->ch[ch_idx];
	if (!srp_claim_req(ch, req, NULL, scmnd))
		return SUCCESS;
	shost_printk(KERN_ERR, target->scsi_host,
		     "Sending SRP abort for tag %#x\n", tag);
	if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
			      SRP_TSK_ABORT_TASK) == 0)
		ret = SUCCESS;
	else if (target->rport->state == SRP_RPORT_LOST)
		ret = FAST_IO_FAIL;
	else
		ret = FAILED;
	srp_free_req(ch, req, scmnd, 0);
	scmnd->result = DID_ABORT << 16;
	scmnd->scsi_done(scmnd);

	return ret;
}

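/*
 * SCSI error handler callback: reset a LUN. A single LUN RESET task
 * management request is sent on the first channel; on success all
 * outstanding requests on every channel are finished with DID_RESET.
 */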
static int srp_reset_device(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_rdma_ch *ch;
	int i, j;

	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");

	ch = &target->ch[0];
	if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
			      SRP_TSK_LUN_RESET))
		return FAILED;
	if (ch->tsk_mgmt_status)
		return FAILED;

	/*
	 * Use a separate index for the inner loop so that iterating over
	 * the request ring does not clobber the channel counter.
	 */
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
		}
	}

	return SUCCESS;
}

static int srp_reset_host(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);

	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");

	return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
}

static int srp_slave_configure(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct srp_target_port *target = host_to_target(shost);
	struct request_queue *q = sdev->request_queue;
	unsigned long timeout;

	if (sdev->type == TYPE_DISK) {
		timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
		blk_queue_rq_timeout(q, timeout);
	}

	return 0;
}

static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->id_ext));
}

static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->ioc_guid));
}

static ssize_t show_service_id(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->service_id));
}

static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
}

static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%pI6\n", target->sgid.raw);
}

static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));
	struct srp_rdma_ch *ch = &target->ch[0];

	return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
}

static ssize_t show_orig_dgid(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
}

static ssize_t show_req_lim(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));
	struct srp_rdma_ch *ch;
	int i, req_lim = INT_MAX;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		req_lim = min(req_lim, ch->req_lim);
	}
	return sprintf(buf, "%d\n", req_lim);
}

static ssize_t show_zero_req_lim(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->zero_req_lim);
}

static ssize_t show_local_ib_port(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->srp_host->port);
}

static ssize_t show_local_ib_device(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
}

static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->ch_count);
}

static ssize_t show_comp_vector(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->comp_vector);
}

static ssize_t show_tl_retry_count(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->tl_retry_count);
}

static ssize_t show_cmd_sg_entries(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%u\n", target->cmd_sg_cnt);
}

static ssize_t show_allow_ext_sg(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
}

static DEVICE_ATTR(id_ext,	    S_IRUGO, show_id_ext,	    NULL);
static DEVICE_ATTR(ioc_guid,	    S_IRUGO, show_ioc_guid,	    NULL);
static DEVICE_ATTR(service_id,	    S_IRUGO, show_service_id,	    NULL);
static DEVICE_ATTR(pkey,	    S_IRUGO, show_pkey,		    NULL);
static DEVICE_ATTR(sgid,	    S_IRUGO, show_sgid,		    NULL);
static DEVICE_ATTR(dgid,	    S_IRUGO, show_dgid,		    NULL);
static DEVICE_ATTR(orig_dgid,	    S_IRUGO, show_orig_dgid,	    NULL);
static DEVICE_ATTR(req_lim,	    S_IRUGO, show_req_lim,	    NULL);
static DEVICE_ATTR(zero_req_lim,    S_IRUGO, show_zero_req_lim,	    NULL);
static DEVICE_ATTR(local_ib_port,   S_IRUGO, show_local_ib_port,    NULL);
static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device,  NULL);
static DEVICE_ATTR(ch_count,	    S_IRUGO, show_ch_count,	    NULL);
static DEVICE_ATTR(comp_vector,	    S_IRUGO, show_comp_vector,	    NULL);
static DEVICE_ATTR(tl_retry_count,  S_IRUGO, show_tl_retry_count,   NULL);
static DEVICE_ATTR(cmd_sg_entries,  S_IRUGO, show_cmd_sg_entries,   NULL);
static DEVICE_ATTR(allow_ext_sg,    S_IRUGO, show_allow_ext_sg,	    NULL);

static struct device_attribute *srp_host_attrs[] = {
	&dev_attr_id_ext,
	&dev_attr_ioc_guid,
	&dev_attr_service_id,
	&dev_attr_pkey,
	&dev_attr_sgid,
	&dev_attr_dgid,
	&dev_attr_orig_dgid,
	&dev_attr_req_lim,
	&dev_attr_zero_req_lim,
	&dev_attr_local_ib_port,
	&dev_attr_local_ib_device,
	&dev_attr_ch_count,
	&dev_attr_comp_vector,
	&dev_attr_tl_retry_count,
	&dev_attr_cmd_sg_entries,
	&dev_attr_allow_ext_sg,
	NULL
};

static struct scsi_host_template srp_template = {
	.module				= THIS_MODULE,
	.name				= "InfiniBand SRP initiator",
	.proc_name			= DRV_NAME,
	.slave_configure		= srp_slave_configure,
	.info				= srp_target_info,
	.queuecommand			= srp_queuecommand,
	.change_queue_depth		= srp_change_queue_depth,
	.eh_abort_handler		= srp_abort,
	.eh_device_reset_handler	= srp_reset_device,
	.eh_host_reset_handler		= srp_reset_host,
	.skip_settle_delay		= true,
	.sg_tablesize			= SRP_DEF_SG_TABLESIZE,
	.can_queue			= SRP_DEFAULT_CMD_SQ_SIZE,
	.this_id			= -1,
	.cmd_per_lun			= SRP_DEFAULT_CMD_SQ_SIZE,
	.use_clustering			= ENABLE_CLUSTERING,
	.shost_attrs			= srp_host_attrs,
	.use_blk_tags			= 1,
	.track_queue_depth		= 1,
};

static int srp_sdev_count(struct Scsi_Host *host)
{
	struct scsi_device *sdev;
	int c = 0;

	shost_for_each_device(sdev, host)
		c++;

	return c;
}

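/*
 * Register the SCSI host and SRP rport for @target and scan for LUNs.
 * The target starts out in the SRP_TARGET_SCANNING state; if the
 * connection is lost during the scan the target is queued for removal,
 * otherwise it transitions to SRP_TARGET_LIVE.
 */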
static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
	struct srp_rport_identifiers ids;
	struct srp_rport *rport;

	target->state = SRP_TARGET_SCANNING;
	sprintf(target->target_name, "SRP.T10:%016llX",
		(unsigned long long) be64_to_cpu(target->id_ext));

	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
		return -ENODEV;

	memcpy(ids.port_id, &target->id_ext, 8);
	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
	ids.roles = SRP_RPORT_ROLE_TARGET;
	rport = srp_rport_add(target->scsi_host, &ids);
	if (IS_ERR(rport)) {
		scsi_remove_host(target->scsi_host);
		return PTR_ERR(rport);
	}

	rport->lld_data = target;
	target->rport = rport;

	spin_lock(&host->target_lock);
	list_add_tail(&target->list, &host->target_list);
	spin_unlock(&host->target_lock);

	scsi_scan_target(&target->scsi_host->shost_gendev,
			 0, target->scsi_id, SCAN_WILD_CARD, 0);

	if (!target->connected || target->qp_in_error) {
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "SCSI scan failed - removing SCSI host\n");
		srp_queue_remove_work(target);
		goto out;
	}

	pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n",
		 dev_name(&target->scsi_host->shost_gendev),
		 srp_sdev_count(target->scsi_host));

	spin_lock_irq(&target->lock);
	if (target->state == SRP_TARGET_SCANNING)
		target->state = SRP_TARGET_LIVE;
	spin_unlock_irq(&target->lock);

out:
	return 0;
}

static void srp_release_dev(struct device *dev)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);

	complete(&host->released);
}

static struct class srp_class = {
	.name    = "infiniband_srp",
	.dev_release = srp_release_dev
};

/**
 * srp_conn_unique() - check whether the connection to a target is unique
 * @host:   SRP host.
 * @target: SRP target port.
 */
static bool srp_conn_unique(struct srp_host *host,
			    struct srp_target_port *target)
{
	struct srp_target_port *t;
	bool ret = false;

	if (target->state == SRP_TARGET_REMOVED)
		goto out;

	ret = true;

	spin_lock(&host->target_lock);
	list_for_each_entry(t, &host->target_list, list) {
		if (t != target &&
		    target->id_ext == t->id_ext &&
		    target->ioc_guid == t->ioc_guid &&
		    target->initiator_ext == t->initiator_ext) {
			ret = false;
			break;
		}
	}
	spin_unlock(&host->target_lock);

out:
	return ret;
}

/*
 * Target ports are added by writing
 *
 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
 *     pkey=<P_Key>,service_id=<service ID>
 *
 * to the add_target sysfs attribute.
 */
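/*
 * Example, with illustrative values; the sysfs directory name follows
 * the "srp-<device>-<port>" pattern used by srp_add_port() below:
 *
 *   echo "id_ext=200100066b0001d0,ioc_guid=00066b0001d00018,\
 *         dgid=fe800000000000000002c90300000fd5,pkey=ffff,\
 *         service_id=0002c90300000fd4" > \
 *         /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 */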
enum {
	SRP_OPT_ERR		= 0,
	SRP_OPT_ID_EXT		= 1 << 0,
	SRP_OPT_IOC_GUID	= 1 << 1,
	SRP_OPT_DGID		= 1 << 2,
	SRP_OPT_PKEY		= 1 << 3,
	SRP_OPT_SERVICE_ID	= 1 << 4,
	SRP_OPT_MAX_SECT	= 1 << 5,
	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
	SRP_OPT_IO_CLASS	= 1 << 7,
	SRP_OPT_INITIATOR_EXT	= 1 << 8,
	SRP_OPT_CMD_SG_ENTRIES	= 1 << 9,
	SRP_OPT_ALLOW_EXT_SG	= 1 << 10,
	SRP_OPT_SG_TABLESIZE	= 1 << 11,
	SRP_OPT_COMP_VECTOR	= 1 << 12,
	SRP_OPT_TL_RETRY_COUNT	= 1 << 13,
	SRP_OPT_QUEUE_SIZE	= 1 << 14,
	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
				   SRP_OPT_IOC_GUID	|
				   SRP_OPT_DGID		|
				   SRP_OPT_PKEY		|
				   SRP_OPT_SERVICE_ID),
};

static const match_table_t srp_opt_tokens = {
	{ SRP_OPT_ID_EXT,		"id_ext=%s"		},
	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s"		},
	{ SRP_OPT_DGID,			"dgid=%s"		},
	{ SRP_OPT_PKEY,			"pkey=%x"		},
	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
	{ SRP_OPT_MAX_SECT,		"max_sect=%d"		},
	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d"	},
	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u"	},
	{ SRP_OPT_ALLOW_EXT_SG,		"allow_ext_sg=%u"	},
	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u"	},
	{ SRP_OPT_COMP_VECTOR,		"comp_vector=%u"	},
	{ SRP_OPT_TL_RETRY_COUNT,	"tl_retry_count=%u"	},
	{ SRP_OPT_QUEUE_SIZE,		"queue_size=%d"		},
	{ SRP_OPT_ERR,			NULL			}
};

static int srp_parse_options(const char *buf, struct srp_target_port *target)
{
	char *options, *sep_opt;
	char *p;
	char dgid[3];
	substring_t args[MAX_OPT_ARGS];
	int opt_mask = 0;
	int token;
	int ret = -EINVAL;
	int i;

	options = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	sep_opt = options;
	while ((p = strsep(&sep_opt, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, srp_opt_tokens, args);
		opt_mask |= token;

		switch (token) {
		case SRP_OPT_ID_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_IOC_GUID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_DGID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(p) != 32) {
				pr_warn("bad dest GID parameter '%s'\n", p);
				kfree(p);
				goto out;
			}

			for (i = 0; i < 16; ++i) {
				strlcpy(dgid, p + i * 2, sizeof(dgid));
				if (sscanf(dgid, "%hhx",
					   &target->orig_dgid.raw[i]) < 1) {
					ret = -EINVAL;
					kfree(p);
					goto out;
				}
			}
			kfree(p);
			break;

		case SRP_OPT_PKEY:
			if (match_hex(args, &token)) {
				pr_warn("bad P_Key parameter '%s'\n", p);
				goto out;
			}
			target->pkey = cpu_to_be16(token);
			break;

		case SRP_OPT_SERVICE_ID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_MAX_SECT:
			if (match_int(args, &token)) {
				pr_warn("bad max sect parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->max_sectors = token;
			break;

		case SRP_OPT_QUEUE_SIZE:
			if (match_int(args, &token) || token < 1) {
				pr_warn("bad queue_size parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->can_queue = token;
			target->queue_size = token + SRP_RSP_SQ_SIZE +
					     SRP_TSK_MGMT_SQ_SIZE;
			if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
				target->scsi_host->cmd_per_lun = token;
			break;

		case SRP_OPT_MAX_CMD_PER_LUN:
			if (match_int(args, &token) || token < 1) {
				pr_warn("bad max cmd_per_lun parameter '%s'\n",
					p);
				goto out;
			}
			target->scsi_host->cmd_per_lun = token;
			break;

		case SRP_OPT_IO_CLASS:
			if (match_hex(args, &token)) {
				pr_warn("bad IO class parameter '%s'\n", p);
				goto out;
			}
			if (token != SRP_REV10_IB_IO_CLASS &&
			    token != SRP_REV16A_IB_IO_CLASS) {
				pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
					token, SRP_REV10_IB_IO_CLASS,
					SRP_REV16A_IB_IO_CLASS);
				goto out;
			}
			target->io_class = token;
			break;

		case SRP_OPT_INITIATOR_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_CMD_SG_ENTRIES:
			if (match_int(args, &token) || token < 1 || token > 255) {
				pr_warn("bad max cmd_sg_entries parameter '%s'\n",
					p);
				goto out;
			}
			target->cmd_sg_cnt = token;
			break;

		case SRP_OPT_ALLOW_EXT_SG:
			if (match_int(args, &token)) {
				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
				goto out;
			}
			target->allow_ext_sg = !!token;
			break;

		case SRP_OPT_SG_TABLESIZE:
			if (match_int(args, &token) || token < 1 ||
			    token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
				pr_warn("bad max sg_tablesize parameter '%s'\n",
					p);
				goto out;
			}
			target->sg_tablesize = token;
			break;

		case SRP_OPT_COMP_VECTOR:
			if (match_int(args, &token) || token < 0) {
				pr_warn("bad comp_vector parameter '%s'\n", p);
				goto out;
			}
			target->comp_vector = token;
			break;

		case SRP_OPT_TL_RETRY_COUNT:
			if (match_int(args, &token) || token < 2 || token > 7) {
				pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
					p);
				goto out;
			}
			target->tl_retry_count = token;
			break;

		default:
			pr_warn("unknown parameter or missing value '%s' in target creation request\n",
				p);
			goto out;
		}
	}

	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
		ret = 0;
	else
		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
			    !(srp_opt_tokens[i].token & opt_mask))
				pr_warn("target creation request is missing parameter '%s'\n",
					srp_opt_tokens[i].pattern);

	if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
	    && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
		pr_warn("cmd_per_lun = %d > queue_size = %d\n",
			target->scsi_host->cmd_per_lun,
			target->scsi_host->can_queue);

out:
	kfree(options);
	return ret;
}

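/*
 * Handler for writes to the add_target sysfs attribute: parses the option
 * string, allocates one or more RDMA channels and connects them to the
 * target port. Returns the number of bytes consumed on success or a
 * negative error code.
 */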
static ssize_t srp_create_target(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);
	struct Scsi_Host *target_host;
	struct srp_target_port *target;
	struct srp_rdma_ch *ch;
	struct srp_device *srp_dev = host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	int ret, node_idx, node, cpu, i;
	bool multich = false;

	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
	if (!target_host)
		return -ENOMEM;

	target_host->transportt  = ib_srp_transport_template;
	target_host->max_channel = 0;
	target_host->max_id      = 1;
	target_host->max_lun     = SRP_MAX_LUN;
	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;

	target = host_to_target(target_host);

	target->io_class	= SRP_REV16A_IB_IO_CLASS;
	target->scsi_host	= target_host;
	target->srp_host	= host;
	target->lkey		= host->srp_dev->mr->lkey;
	target->rkey		= host->srp_dev->mr->rkey;
	target->cmd_sg_cnt	= cmd_sg_entries;
	target->sg_tablesize	= indirect_sg_entries ? : cmd_sg_entries;
	target->allow_ext_sg	= allow_ext_sg;
	target->tl_retry_count	= 7;
	target->queue_size	= SRP_DEFAULT_QUEUE_SIZE;

	/*
	 * Avoid that the SCSI host can be removed by srp_remove_target()
	 * before this function returns.
	 */
	scsi_host_get(target->scsi_host);

	mutex_lock(&host->add_target_mutex);

	ret = srp_parse_options(buf, target);
	if (ret)
		goto err;

	ret = scsi_init_shared_tag_map(target_host, target_host->can_queue);
	if (ret)
		goto err;

	target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;

	if (!srp_conn_unique(target->srp_host, target)) {
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
			     be64_to_cpu(target->id_ext),
			     be64_to_cpu(target->ioc_guid),
			     be64_to_cpu(target->initiator_ext));
		ret = -EEXIST;
		goto err;
	}

	if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
	    target->cmd_sg_cnt < target->sg_tablesize) {
		pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
		target->sg_tablesize = target->cmd_sg_cnt;
	}

	target_host->sg_tablesize = target->sg_tablesize;
	target->indirect_size = target->sg_tablesize *
				sizeof (struct srp_direct_buf);
	target->max_iu_len = sizeof (struct srp_cmd) +
			     sizeof (struct srp_indirect_buf) +
			     target->cmd_sg_cnt * sizeof (struct srp_direct_buf);

	INIT_WORK(&target->tl_err_work, srp_tl_err_work);
	INIT_WORK(&target->remove_work, srp_remove_work);
	spin_lock_init(&target->lock);
	ret = ib_query_gid(ibdev, host->port, 0, &target->sgid);
	if (ret)
		goto err;

	ret = -ENOMEM;
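	/*
	 * Use the ch_count module parameter if set; otherwise default to
	 * min(4 * num_online_nodes(), num_comp_vectors), clamped to at
	 * most num_online_cpus() and at least num_online_nodes() channels.
	 */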
	target->ch_count = max_t(unsigned, num_online_nodes(),
				 min(ch_count ? :
				     min(4 * num_online_nodes(),
					 ibdev->num_comp_vectors),
				     num_online_cpus()));
	target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
			     GFP_KERNEL);
	if (!target->ch)
		goto err;

	node_idx = 0;
	for_each_online_node(node) {
		const int ch_start = (node_idx * target->ch_count /
				      num_online_nodes());
		const int ch_end = ((node_idx + 1) * target->ch_count /
				    num_online_nodes());
		const int cv_start = (node_idx * ibdev->num_comp_vectors /
				      num_online_nodes() + target->comp_vector)
				     % ibdev->num_comp_vectors;
		const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
				    num_online_nodes() + target->comp_vector)
				   % ibdev->num_comp_vectors;
		int cpu_idx = 0;

		for_each_online_cpu(cpu) {
			if (cpu_to_node(cpu) != node)
				continue;
			if (ch_start + cpu_idx >= ch_end)
				continue;
			ch = &target->ch[ch_start + cpu_idx];
			ch->target = target;
			ch->comp_vector = cv_start == cv_end ? cv_start :
				cv_start + cpu_idx % (cv_end - cv_start);
			spin_lock_init(&ch->lock);
			INIT_LIST_HEAD(&ch->free_tx);
			ret = srp_new_cm_id(ch);
			if (ret)
				goto err_disconnect;

			ret = srp_create_ch_ib(ch);
			if (ret)
				goto err_disconnect;

			ret = srp_alloc_req_data(ch);
			if (ret)
				goto err_disconnect;

			ret = srp_connect_ch(ch, multich);
			if (ret) {
				shost_printk(KERN_ERR, target->scsi_host,
					     PFX "Connection %d/%d failed\n",
					     ch_start + cpu_idx,
					     target->ch_count);
				if (node_idx == 0 && cpu_idx == 0) {
					goto err_disconnect;
				} else {
					srp_free_ch_ib(target, ch);
					srp_free_req_data(target, ch);
					target->ch_count = ch - target->ch;
					break;
				}
			}

			multich = true;
			cpu_idx++;
		}
		node_idx++;
	}

	target->scsi_host->nr_hw_queues = target->ch_count;

	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

	if (target->state != SRP_TARGET_REMOVED) {
		shost_printk(KERN_DEBUG, target->scsi_host, PFX
			     "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
			     be64_to_cpu(target->id_ext),
			     be64_to_cpu(target->ioc_guid),
			     be16_to_cpu(target->pkey),
			     be64_to_cpu(target->service_id),
			     target->sgid.raw, target->orig_dgid.raw);
	}

	ret = count;

out:
	mutex_unlock(&host->add_target_mutex);

	scsi_host_put(target->scsi_host);

	return ret;

err_disconnect:
	srp_disconnect_target(target);

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
		srp_free_req_data(target, ch);
	}

	kfree(target->ch);

err:
	scsi_host_put(target_host);
	goto out;
}

static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);

static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%s\n", host->srp_dev->dev->name);
}

static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%d\n", host->port);
}

static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);

static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	spin_lock_init(&host->target_lock);
	init_completion(&host->released);
	mutex_init(&host->add_target_mutex);
	host->srp_dev = device;
	host->port = port;

	host->dev.class = &srp_class;
	host->dev.parent = device->dev->dma_device;
	dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);

	if (device_register(&host->dev))
		goto free_host;
	if (device_create_file(&host->dev, &dev_attr_add_target))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_ibdev))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_port))
		goto err_class;

	return host;

err_class:
	device_unregister(&host->dev);

free_host:
	kfree(host);

	return NULL;
}

static void srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct ib_device_attr *dev_attr;
	struct srp_host *host;
	int mr_page_shift, s, e, p;
	u64 max_pages_per_mr;

	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
	if (!dev_attr)
		return;

	if (ib_query_device(device, dev_attr)) {
		pr_warn("Query device failed for %s\n", device->name);
		goto free_attr;
	}

	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
	if (!srp_dev)
		goto free_attr;

	srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
			    device->map_phys_fmr && device->unmap_fmr);
	srp_dev->has_fr = (dev_attr->device_cap_flags &
			   IB_DEVICE_MEM_MGT_EXTENSIONS);
	if (!srp_dev->has_fmr && !srp_dev->has_fr)
		dev_warn(&device->dev, "neither FMR nor FR is supported\n");

	srp_dev->use_fast_reg = (srp_dev->has_fr &&
				 (!srp_dev->has_fmr || prefer_fr));

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 4096 bytes. We're unlikely to build large sglists
	 * out of smaller entries.
	 */
	mr_page_shift		= max(12, ffs(dev_attr->page_size_cap) - 1);
	srp_dev->mr_page_size	= 1 << mr_page_shift;
	srp_dev->mr_page_mask	= ~((u64) srp_dev->mr_page_size - 1);
	max_pages_per_mr	= dev_attr->max_mr_size;
	do_div(max_pages_per_mr, srp_dev->mr_page_size);
	srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
					  max_pages_per_mr);
	if (srp_dev->use_fast_reg) {
		srp_dev->max_pages_per_mr =
			min_t(u32, srp_dev->max_pages_per_mr,
			      dev_attr->max_fast_reg_page_list_len);
	}
	srp_dev->mr_max_size	= srp_dev->mr_page_size *
				  srp_dev->max_pages_per_mr;
	pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
		 device->name, mr_page_shift, dev_attr->max_mr_size,
		 dev_attr->max_fast_reg_page_list_len,
		 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);

	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd  = ib_alloc_pd(device);
	if (IS_ERR(srp_dev->pd))
		goto free_dev;

	srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
				    IB_ACCESS_LOCAL_WRITE |
				    IB_ACCESS_REMOTE_READ |
				    IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(srp_dev->mr))
		goto err_pd;

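	/*
	 * IB switches expose a single management port numbered 0; CAs
	 * and routers number their physical ports starting at 1.
	 */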
	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);

	goto free_attr;

err_pd:
	ib_dealloc_pd(srp_dev->pd);

free_dev:
	kfree(srp_dev);

free_attr:
	kfree(dev_attr);
}

static void srp_remove_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	struct srp_target_port *target;

	srp_dev = ib_get_client_data(device, &srp_client);
	if (!srp_dev)
		return;

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		device_unregister(&host->dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Remove all target ports.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list)
			srp_queue_remove_work(target);
		spin_unlock(&host->target_lock);

		/*
		 * Wait for tl_err and target port removal tasks.
		 */
		flush_workqueue(system_long_wq);
		flush_workqueue(srp_remove_wq);

		kfree(host);
	}

	ib_dereg_mr(srp_dev->mr);
	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}

static struct srp_function_template ib_srp_transport_functions = {
	.has_rport_state	 = true,
	.reset_timer_if_blocked	 = true,
	.reconnect_delay	 = &srp_reconnect_delay,
	.fast_io_fail_tmo	 = &srp_fast_io_fail_tmo,
	.dev_loss_tmo		 = &srp_dev_loss_tmo,
	.reconnect		 = srp_rport_reconnect,
	.rport_delete		 = srp_rport_delete,
	.terminate_rport_io	 = srp_terminate_io,
};

static int __init srp_init_module(void)
{
	int ret;

	BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));

	if (srp_sg_tablesize) {
		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
		if (!cmd_sg_entries)
			cmd_sg_entries = srp_sg_tablesize;
	}

	if (!cmd_sg_entries)
		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;

	if (cmd_sg_entries > 255) {
		pr_warn("Clamping cmd_sg_entries to 255\n");
		cmd_sg_entries = 255;
	}

	if (!indirect_sg_entries)
		indirect_sg_entries = cmd_sg_entries;
	else if (indirect_sg_entries < cmd_sg_entries) {
		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
			cmd_sg_entries);
		indirect_sg_entries = cmd_sg_entries;
	}

	srp_remove_wq = create_workqueue("srp_remove");
	if (!srp_remove_wq) {
		ret = -ENOMEM;
		goto out;
	}

	ret = -ENOMEM;
	ib_srp_transport_template =
		srp_attach_transport(&ib_srp_transport_functions);
	if (!ib_srp_transport_template)
		goto destroy_wq;

	ret = class_register(&srp_class);
	if (ret) {
		pr_err("couldn't register class infiniband_srp\n");
		goto release_tr;
	}

	ib_sa_register_client(&srp_sa_client);

	ret = ib_register_client(&srp_client);
	if (ret) {
		pr_err("couldn't register IB client\n");
		goto unreg_sa;
	}

out:
	return ret;

unreg_sa:
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);

release_tr:
	srp_release_transport(ib_srp_transport_template);

destroy_wq:
	destroy_workqueue(srp_remove_wq);
	goto out;
}

static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);
	srp_release_transport(ib_srp_transport_template);
	destroy_workqueue(srp_remove_wq);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);