/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) PFX fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"1.0"
#define DRV_RELDATE	"July 1, 2013"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
                   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
                 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
                 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
                 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
                 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

static struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
                S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
                S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
                 "Number of seconds between the observation of a transport"
                 " layer error and failing all I/O. \"off\" means that this"
                 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
                S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
                 "Maximum number of seconds that the SRP transport should"
                 " insulate transport layer errors. After this time has been"
                 " exceeded the SCSI host is removed. Should be"
                 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
                 " if fast_io_fail_tmo has not been set. \"off\" means that"
                 " this functionality is disabled.");
static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;

static struct ib_client srp_client = {
        .name   = "srp",
        .add    = srp_add_one,
        .remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;
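/**
 * srp_tmo_get() - show an SRP timeout parameter in sysfs
 *
 * A timeout of -1 is reported as "off"; any other value is printed in
 * seconds.
 */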
static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
        int tmo = *(int *)kp->arg;

        if (tmo >= 0)
                return sprintf(buffer, "%d", tmo);
        else
                return sprintf(buffer, "off");
}

static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
        int tmo, res;

        if (strncmp(val, "off", 3) != 0) {
                res = kstrtoint(val, 0, &tmo);
                if (res)
                        goto out;
        } else {
                tmo = -1;
        }
        if (kp->arg == &srp_reconnect_delay)
                res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
                                    srp_dev_loss_tmo);
        else if (kp->arg == &srp_fast_io_fail_tmo)
                res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
        else
                res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
                                    tmo);
        if (res)
                goto out;
        *(int *)kp->arg = tmo;

out:
        return res;
}

static struct kernel_param_ops srp_tmo_ops = {
        .get = srp_tmo_get,
        .set = srp_tmo_set,
};

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
        return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
        return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
        static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
        static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

        return topspin_workarounds &&
                (!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
                 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}
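/**
 * srp_alloc_iu() - allocate an information unit and map it for DMA
 *
 * Allocates an srp_iu and its data buffer and maps the buffer for DMA in
 * the requested direction. Returns NULL if any step fails.
 */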
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
                                   gfp_t gfp_mask,
                                   enum dma_data_direction direction)
{
        struct srp_iu *iu;

        iu = kmalloc(sizeof *iu, gfp_mask);
        if (!iu)
                goto out;

        iu->buf = kzalloc(size, gfp_mask);
        if (!iu->buf)
                goto out_free_iu;

        iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
                                    direction);
        if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
                goto out_free_buf;

        iu->size      = size;
        iu->direction = direction;

        return iu;

out_free_buf:
        kfree(iu->buf);
out_free_iu:
        kfree(iu);
out:
        return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
        if (!iu)
                return;

        ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
                            iu->direction);
        kfree(iu->buf);
        kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
        pr_debug("QP event %d\n", event->event);
}
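/**
 * srp_init_qp() - move a newly created QP to the INIT state
 *
 * Looks up the P_Key index that corresponds to the target's partition and
 * sets the pkey index, RDMA access flags and port number on the QP.
 */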
static int srp_init_qp(struct srp_target_port *target,
                       struct ib_qp *qp)
{
        struct ib_qp_attr *attr;
        int ret;

        attr = kmalloc(sizeof *attr, GFP_KERNEL);
        if (!attr)
                return -ENOMEM;

        ret = ib_find_pkey(target->srp_host->srp_dev->dev,
                           target->srp_host->port,
                           be16_to_cpu(target->path.pkey),
                           &attr->pkey_index);
        if (ret)
                goto out;

        attr->qp_state        = IB_QPS_INIT;
        attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
                                 IB_ACCESS_REMOTE_WRITE);
        attr->port_num        = target->srp_host->port;

        ret = ib_modify_qp(qp, attr,
                           IB_QP_STATE          |
                           IB_QP_PKEY_INDEX     |
                           IB_QP_ACCESS_FLAGS   |
                           IB_QP_PORT);

out:
        kfree(attr);
        return ret;
}
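/**
 * srp_new_cm_id() - create a new CM ID for a target port
 *
 * The previous CM ID, if any, is destroyed only after the new one has been
 * created successfully, so on failure the old ID remains in place.
 */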
static int srp_new_cm_id(struct srp_target_port *target)
{
        struct ib_cm_id *new_cm_id;

        new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
                                    srp_cm_handler, target);
        if (IS_ERR(new_cm_id))
                return PTR_ERR(new_cm_id);

        if (target->cm_id)
                ib_destroy_cm_id(target->cm_id);
        target->cm_id = new_cm_id;

        return 0;
}
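/**
 * srp_create_target_ib() - (re)create the completion queues and QP
 *
 * Allocates new receive and send CQs and a new RC QP. Any pre-existing QP
 * and CQs are destroyed only after all new objects have been set up, so a
 * failure leaves the previous state intact.
 */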
static int srp_create_target_ib(struct srp_target_port *target)
{
        struct ib_qp_init_attr *init_attr;
        struct ib_cq *recv_cq, *send_cq;
        struct ib_qp *qp;
        int ret;

        init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
        if (!init_attr)
                return -ENOMEM;

        recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
                               srp_recv_completion, NULL, target,
                               target->queue_size, target->comp_vector);
        if (IS_ERR(recv_cq)) {
                ret = PTR_ERR(recv_cq);
                goto err;
        }

        send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
                               srp_send_completion, NULL, target,
                               target->queue_size, target->comp_vector);
        if (IS_ERR(send_cq)) {
                ret = PTR_ERR(send_cq);
                goto err_recv_cq;
        }

        ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);

        init_attr->event_handler    = srp_qp_event;
        init_attr->cap.max_send_wr  = target->queue_size;
        init_attr->cap.max_recv_wr  = target->queue_size;
        init_attr->cap.max_recv_sge = 1;
        init_attr->cap.max_send_sge = 1;
        init_attr->sq_sig_type      = IB_SIGNAL_ALL_WR;
        init_attr->qp_type          = IB_QPT_RC;
        init_attr->send_cq          = send_cq;
        init_attr->recv_cq          = recv_cq;

        qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
        if (IS_ERR(qp)) {
                ret = PTR_ERR(qp);
                goto err_send_cq;
        }

        ret = srp_init_qp(target, qp);
        if (ret)
                goto err_qp;

        if (target->qp)
                ib_destroy_qp(target->qp);
        if (target->recv_cq)
                ib_destroy_cq(target->recv_cq);
        if (target->send_cq)
                ib_destroy_cq(target->send_cq);

        target->qp      = qp;
        target->recv_cq = recv_cq;
        target->send_cq = send_cq;

        kfree(init_attr);
        return 0;

err_qp:
        ib_destroy_qp(qp);

err_send_cq:
        ib_destroy_cq(send_cq);

err_recv_cq:
        ib_destroy_cq(recv_cq);

err:
        kfree(init_attr);
        return ret;
}

/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the target->[rt]x_ring checks.
 */
static void srp_free_target_ib(struct srp_target_port *target)
{
        int i;

        ib_destroy_qp(target->qp);
        ib_destroy_cq(target->send_cq);
        ib_destroy_cq(target->recv_cq);

        target->qp = NULL;
        target->send_cq = target->recv_cq = NULL;

        if (target->rx_ring) {
                for (i = 0; i < target->queue_size; ++i)
                        srp_free_iu(target->srp_host, target->rx_ring[i]);
                kfree(target->rx_ring);
                target->rx_ring = NULL;
        }
        if (target->tx_ring) {
                for (i = 0; i < target->queue_size; ++i)
                        srp_free_iu(target->srp_host, target->tx_ring[i]);
                kfree(target->tx_ring);
                target->tx_ring = NULL;
        }
}

static void srp_path_rec_completion(int status,
                                    struct ib_sa_path_rec *pathrec,
                                    void *target_ptr)
{
        struct srp_target_port *target = target_ptr;

        target->status = status;
        if (status)
                shost_printk(KERN_ERR, target->scsi_host,
                             PFX "Got failed path rec status %d\n", status);
        else
                target->path = *pathrec;
        complete(&target->done);
}
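/**
 * srp_lookup_path() - look up the IB path record for the target
 *
 * Issues an SA path record query and waits, interruptibly, for
 * srp_path_rec_completion() to store the result in target->status.
 */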
static int srp_lookup_path(struct srp_target_port *target)
{
        int ret;

        target->path.numb_path = 1;

        init_completion(&target->done);

        target->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
                                                   target->srp_host->srp_dev->dev,
                                                   target->srp_host->port,
                                                   &target->path,
                                                   IB_SA_PATH_REC_SERVICE_ID |
                                                   IB_SA_PATH_REC_DGID       |
                                                   IB_SA_PATH_REC_SGID       |
                                                   IB_SA_PATH_REC_NUMB_PATH  |
                                                   IB_SA_PATH_REC_PKEY,
                                                   SRP_PATH_REC_TIMEOUT_MS,
                                                   GFP_KERNEL,
                                                   srp_path_rec_completion,
                                                   target, &target->path_query);
        if (target->path_query_id < 0)
                return target->path_query_id;

        ret = wait_for_completion_interruptible(&target->done);
        if (ret < 0)
                return ret;

        if (target->status < 0)
                shost_printk(KERN_WARNING, target->scsi_host,
                             PFX "Path record query failed\n");

        return target->status;
}
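/**
 * srp_send_req() - build and send an SRP_LOGIN_REQ through the IB CM
 *
 * Fills in both the CM REQ parameters and the SRP login request carried as
 * CM private data, including the initiator port ID layout that matches the
 * I/O class reported by the target.
 */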
static int srp_send_req(struct srp_target_port *target)
{
        struct {
                struct ib_cm_req_param param;
                struct srp_login_req   priv;
        } *req = NULL;
        int status;

        req = kzalloc(sizeof *req, GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        req->param.primary_path     = &target->path;
        req->param.alternate_path   = NULL;
        req->param.service_id       = target->service_id;
        req->param.qp_num           = target->qp->qp_num;
        req->param.qp_type          = target->qp->qp_type;
        req->param.private_data     = &req->priv;
        req->param.private_data_len = sizeof req->priv;
        req->param.flow_control     = 1;

        get_random_bytes(&req->param.starting_psn, 4);
        req->param.starting_psn    &= 0xffffff;

        /*
         * Pick some arbitrary defaults here; we could make these
         * module parameters if anyone cared about setting them.
         */
        req->param.responder_resources       = 4;
        req->param.remote_cm_response_timeout = 20;
        req->param.local_cm_response_timeout  = 20;
        req->param.retry_count                = target->tl_retry_count;
        req->param.rnr_retry_count            = 7;
        req->param.max_cm_retries             = 15;

        req->priv.opcode        = SRP_LOGIN_REQ;
        req->priv.tag           = 0;
        req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
        req->priv.req_buf_fmt   = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
                                              SRP_BUF_FORMAT_INDIRECT);
        /*
         * In the published SRP specification (draft rev. 16a), the
         * port identifier format is 8 bytes of ID extension followed
         * by 8 bytes of GUID.  Older drafts put the two halves in the
         * opposite order, so that the GUID comes first.
         *
         * Targets conforming to these obsolete drafts can be
         * recognized by the I/O Class they report.
         */
        if (target->io_class == SRP_REV10_IB_IO_CLASS) {
                memcpy(req->priv.initiator_port_id,
                       &target->path.sgid.global.interface_id, 8);
                memcpy(req->priv.initiator_port_id + 8,
                       &target->initiator_ext, 8);
                memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
                memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
        } else {
                memcpy(req->priv.initiator_port_id,
                       &target->initiator_ext, 8);
                memcpy(req->priv.initiator_port_id + 8,
                       &target->path.sgid.global.interface_id, 8);
                memcpy(req->priv.target_port_id,     &target->id_ext, 8);
                memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
        }

        /*
         * Topspin/Cisco SRP targets will reject our login unless we
         * zero out the first 8 bytes of our initiator port ID and set
         * the second 8 bytes to the local node GUID.
         */
        if (srp_target_is_topspin(target)) {
                shost_printk(KERN_DEBUG, target->scsi_host,
                             PFX "Topspin/Cisco initiator port ID workaround "
                             "activated for target GUID %016llx\n",
                             (unsigned long long) be64_to_cpu(target->ioc_guid));
                memset(req->priv.initiator_port_id, 0, 8);
                memcpy(req->priv.initiator_port_id + 8,
                       &target->srp_host->srp_dev->dev->node_guid, 8);
        }

        status = ib_send_cm_req(target->cm_id, &req->param);

        kfree(req);

        return status;
}
static bool srp_queue_remove_work(struct srp_target_port *target)
{
        bool changed = false;

        spin_lock_irq(&target->lock);
        if (target->state != SRP_TARGET_REMOVED) {
                target->state = SRP_TARGET_REMOVED;
                changed = true;
        }
        spin_unlock_irq(&target->lock);

        if (changed)
                queue_work(system_long_wq, &target->remove_work);

        return changed;
}

static bool srp_change_conn_state(struct srp_target_port *target,
                                  bool connected)
{
        bool changed = false;

        spin_lock_irq(&target->lock);
        if (target->connected != connected) {
                target->connected = connected;
                changed = true;
        }
        spin_unlock_irq(&target->lock);

        return changed;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
        if (srp_change_conn_state(target, false)) {
                /* XXX should send SRP_I_LOGOUT request */

                if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
                        shost_printk(KERN_DEBUG, target->scsi_host,
                                     PFX "Sending CM DREQ failed\n");
                }
        }
}
static void srp_free_req_data(struct srp_target_port *target)
{
        struct ib_device *ibdev = target->srp_host->srp_dev->dev;
        struct srp_request *req;
        int i;

        if (!target->req_ring)
                return;

        for (i = 0; i < target->req_ring_size; ++i) {
                req = &target->req_ring[i];
                kfree(req->fmr_list);
                kfree(req->map_page);
                if (req->indirect_dma_addr) {
                        ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
                                            target->indirect_size,
                                            DMA_TO_DEVICE);
                }
                kfree(req->indirect_desc);
        }

        kfree(target->req_ring);
        target->req_ring = NULL;
}

static int srp_alloc_req_data(struct srp_target_port *target)
{
        struct srp_device *srp_dev = target->srp_host->srp_dev;
        struct ib_device *ibdev = srp_dev->dev;
        struct srp_request *req;
        dma_addr_t dma_addr;
        int i, ret = -ENOMEM;

        INIT_LIST_HEAD(&target->free_reqs);

        target->req_ring = kzalloc(target->req_ring_size *
                                   sizeof(*target->req_ring), GFP_KERNEL);
        if (!target->req_ring)
                goto out;

        for (i = 0; i < target->req_ring_size; ++i) {
                req = &target->req_ring[i];
                req->fmr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
                                        GFP_KERNEL);
                req->map_page = kmalloc(SRP_FMR_SIZE * sizeof(void *),
                                        GFP_KERNEL);
                req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
                if (!req->fmr_list || !req->map_page || !req->indirect_desc)
                        goto out;

                dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
                                             target->indirect_size,
                                             DMA_TO_DEVICE);
                if (ib_dma_mapping_error(ibdev, dma_addr))
                        goto out;

                req->indirect_dma_addr = dma_addr;
                req->index = i;
                list_add_tail(&req->list, &target->free_reqs);
        }
        ret = 0;

out:
        return ret;
}
/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template and that did not exist
 * before invocation of this function will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
        struct device_attribute **attr;

        for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
                device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_target(struct srp_target_port *target)
{
        WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

        srp_del_scsi_host_attr(target->scsi_host);
        srp_rport_get(target->rport);
        srp_remove_host(target->scsi_host);
        scsi_remove_host(target->scsi_host);
        srp_stop_rport_timers(target->rport);
        srp_disconnect_target(target);
        ib_destroy_cm_id(target->cm_id);
        srp_free_target_ib(target);
        cancel_work_sync(&target->tl_err_work);
        srp_rport_put(target->rport);
        srp_free_req_data(target);

        spin_lock(&target->srp_host->target_lock);
        list_del(&target->list);
        spin_unlock(&target->srp_host->target_lock);

        scsi_host_put(target->scsi_host);
}

static void srp_remove_work(struct work_struct *work)
{
        struct srp_target_port *target =
                container_of(work, struct srp_target_port, remove_work);

        WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

        srp_remove_target(target);
}

static void srp_rport_delete(struct srp_rport *rport)
{
        struct srp_target_port *target = rport->lld_data;

        srp_queue_remove_work(target);
}
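/**
 * srp_connect_target() - perform an SRP login, following CM redirects
 *
 * Repeats the login after SRP_PORT_REDIRECT and SRP_DLID_REDIRECT rejects
 * and retries a stale connection with a fresh CM ID at most three times
 * before giving up with -ECONNRESET.
 */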
static int srp_connect_target(struct srp_target_port *target)
{
        int retries = 3;
        int ret;

        WARN_ON_ONCE(target->connected);

        target->qp_in_error = false;

        ret = srp_lookup_path(target);
        if (ret)
                return ret;

        while (1) {
                init_completion(&target->done);
                ret = srp_send_req(target);
                if (ret)
                        return ret;
                ret = wait_for_completion_interruptible(&target->done);
                if (ret < 0)
                        return ret;

                /*
                 * The CM event handling code will set status to
                 * SRP_PORT_REDIRECT if we get a port redirect REJ
                 * back, or SRP_DLID_REDIRECT if we get a lid/qp
                 * redirect REJ back.
                 */
                switch (target->status) {
                case 0:
                        srp_change_conn_state(target, true);
                        return 0;

                case SRP_PORT_REDIRECT:
                        ret = srp_lookup_path(target);
                        if (ret)
                                return ret;
                        break;

                case SRP_DLID_REDIRECT:
                        break;

                case SRP_STALE_CONN:
                        /* Our current CM id was stale, and is now in timewait.
                         * Try to reconnect with a new one.
                         */
                        if (!retries-- || srp_new_cm_id(target)) {
                                shost_printk(KERN_ERR, target->scsi_host, PFX
                                             "giving up on stale connection\n");
                                target->status = -ECONNRESET;
                                return target->status;
                        }

                        shost_printk(KERN_ERR, target->scsi_host, PFX
                                     "retrying stale connection\n");
                        break;

                default:
                        return target->status;
                }
        }
}
static void srp_unmap_data(struct scsi_cmnd *scmnd,
                           struct srp_target_port *target,
                           struct srp_request *req)
{
        struct ib_device *ibdev = target->srp_host->srp_dev->dev;
        struct ib_pool_fmr **pfmr;

        if (!scsi_sglist(scmnd) ||
            (scmnd->sc_data_direction != DMA_TO_DEVICE &&
             scmnd->sc_data_direction != DMA_FROM_DEVICE))
                return;

        pfmr = req->fmr_list;
        while (req->nfmr--)
                ib_fmr_pool_unmap(*pfmr++);

        ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
                        scmnd->sc_data_direction);
}
/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @target: SRP target port.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target,
                                       struct srp_request *req,
                                       struct scsi_device *sdev,
                                       struct scsi_cmnd *scmnd)
{
        unsigned long flags;

        spin_lock_irqsave(&target->lock, flags);
        if (req->scmnd &&
            (!sdev || req->scmnd->device == sdev) &&
            (!scmnd || req->scmnd == scmnd)) {
                scmnd = req->scmnd;
                req->scmnd = NULL;
        } else {
                scmnd = NULL;
        }
        spin_unlock_irqrestore(&target->lock, flags);

        return scmnd;
}

/**
 * srp_free_req() - Unmap data and add request to the free request list.
 * @target: SRP target port.
 * @req: Request to be freed.
 * @scmnd: SCSI command associated with @req.
 * @req_lim_delta: Amount to be added to @target->req_lim.
 */
static void srp_free_req(struct srp_target_port *target,
                         struct srp_request *req, struct scsi_cmnd *scmnd,
                         s32 req_lim_delta)
{
        unsigned long flags;

        srp_unmap_data(scmnd, target, req);

        spin_lock_irqsave(&target->lock, flags);
        target->req_lim += req_lim_delta;
        list_add_tail(&req->list, &target->free_reqs);
        spin_unlock_irqrestore(&target->lock, flags);
}

static void srp_finish_req(struct srp_target_port *target,
                           struct srp_request *req, struct scsi_device *sdev,
                           int result)
{
        struct scsi_cmnd *scmnd = srp_claim_req(target, req, sdev, NULL);

        if (scmnd) {
                srp_free_req(target, req, scmnd, 0);
                scmnd->result = result;
                scmnd->scsi_done(scmnd);
        }
}
static void srp_terminate_io(struct srp_rport *rport)
{
        struct srp_target_port *target = rport->lld_data;
        struct Scsi_Host *shost = target->scsi_host;
        struct scsi_device *sdev;
        int i;

        /*
         * Invoking srp_terminate_io() while srp_queuecommand() is running
         * is not safe. Hence the warning statement below.
         */
        shost_for_each_device(sdev, shost)
                WARN_ON_ONCE(sdev->request_queue->request_fn_active);

        for (i = 0; i < target->req_ring_size; ++i) {
                struct srp_request *req = &target->req_ring[i];
                srp_finish_req(target, req, NULL, DID_TRANSPORT_FAILFAST << 16);
        }
}

/*
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to realize that is not to call this function
 * directly but to call srp_reconnect_rport() instead since that last function
 * serializes calls of this function via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
 */
static int srp_rport_reconnect(struct srp_rport *rport)
{
        struct srp_target_port *target = rport->lld_data;
        int i, ret;

        srp_disconnect_target(target);
        /*
         * Now get a new local CM ID so that we avoid confusing the target in
         * case things are really fouled up. Doing so also ensures that all CM
         * callbacks will have finished before a new QP is allocated.
         */
        ret = srp_new_cm_id(target);
        /*
         * Whether or not creating a new CM ID succeeded, create a new
         * QP. This guarantees that all completion callback function
         * invocations have finished before request resetting starts.
         */
        if (ret == 0)
                ret = srp_create_target_ib(target);
        else
                srp_create_target_ib(target);

        for (i = 0; i < target->req_ring_size; ++i) {
                struct srp_request *req = &target->req_ring[i];
                srp_finish_req(target, req, NULL, DID_RESET << 16);
        }

        INIT_LIST_HEAD(&target->free_tx);
        for (i = 0; i < target->queue_size; ++i)
                list_add(&target->tx_ring[i]->list, &target->free_tx);

        if (ret == 0)
                ret = srp_connect_target(target);

        if (ret == 0)
                shost_printk(KERN_INFO, target->scsi_host,
                             PFX "reconnect succeeded\n");

        return ret;
}
static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
                         unsigned int dma_len, u32 rkey)
{
        struct srp_direct_buf *desc = state->desc;

        desc->va = cpu_to_be64(dma_addr);
        desc->key = cpu_to_be32(rkey);
        desc->len = cpu_to_be32(dma_len);

        state->total_len += dma_len;
        state->desc++;
        state->ndesc++;
}
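/**
 * srp_map_finish_fmr() - convert the accumulated pages into a descriptor
 *
 * A single page is emitted as a direct descriptor that uses the global
 * rkey; multiple pages are mapped through the FMR pool and emitted as one
 * descriptor that uses the FMR rkey.
 */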
static int srp_map_finish_fmr(struct srp_map_state *state,
                              struct srp_target_port *target)
{
        struct srp_device *dev = target->srp_host->srp_dev;
        struct ib_pool_fmr *fmr;
        u64 io_addr = 0;

        if (!state->npages)
                return 0;

        if (state->npages == 1) {
                srp_map_desc(state, state->base_dma_addr, state->fmr_len,
                             target->rkey);
                state->npages = state->fmr_len = 0;
                return 0;
        }

        fmr = ib_fmr_pool_map_phys(dev->fmr_pool, state->pages,
                                   state->npages, io_addr);
        if (IS_ERR(fmr))
                return PTR_ERR(fmr);

        *state->next_fmr++ = fmr;
        state->nfmr++;

        srp_map_desc(state, 0, state->fmr_len, fmr->fmr->rkey);
        state->npages = state->fmr_len = 0;
        return 0;
}

static void srp_map_update_start(struct srp_map_state *state,
                                 struct scatterlist *sg, int sg_index,
                                 dma_addr_t dma_addr)
{
        state->unmapped_sg = sg;
        state->unmapped_index = sg_index;
        state->unmapped_addr = dma_addr;
}
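/**
 * srp_map_sg_entry() - map one scatterlist entry of an SRP request
 *
 * In SRP_MAP_NO_FMR mode every entry becomes a direct descriptor. With FMR
 * allowed, the entry is accumulated page by page in @state and flushed via
 * srp_map_finish_fmr() whenever the page list fills up or the entry cannot
 * be merged at an FMR page boundary.
 */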
static int srp_map_sg_entry(struct srp_map_state *state,
                            struct srp_target_port *target,
                            struct scatterlist *sg, int sg_index,
                            int use_fmr)
{
        struct srp_device *dev = target->srp_host->srp_dev;
        struct ib_device *ibdev = dev->dev;
        dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
        unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
        unsigned int len;
        int ret;

        if (!dma_len)
                return 0;

        if (use_fmr == SRP_MAP_NO_FMR) {
                /* Once we're in direct map mode for a request, we don't
                 * go back to FMR mode, so no need to update anything
                 * other than the descriptor.
                 */
                srp_map_desc(state, dma_addr, dma_len, target->rkey);
                return 0;
        }

        /* If we start at an offset into the FMR page, don't merge into
         * the current FMR. Finish it out, and use the kernel's MR for this
         * sg entry. This is to avoid potential bugs on some SRP targets
         * that were never quite defined, but went away when the initiator
         * avoided using FMR on such page fragments.
         */
        if (dma_addr & ~dev->fmr_page_mask || dma_len > dev->fmr_max_size) {
                ret = srp_map_finish_fmr(state, target);
                if (ret)
                        return ret;

                srp_map_desc(state, dma_addr, dma_len, target->rkey);
                srp_map_update_start(state, NULL, 0, 0);
                return 0;
        }

        /* If this is the first sg to go into the FMR, save our position.
         * We need to know the first unmapped entry, its index, and the
         * first unmapped address within that entry to be able to restart
         * mapping after an error.
         */
        if (!state->unmapped_sg)
                srp_map_update_start(state, sg, sg_index, dma_addr);

        while (dma_len) {
                if (state->npages == SRP_FMR_SIZE) {
                        ret = srp_map_finish_fmr(state, target);
                        if (ret)
                                return ret;

                        srp_map_update_start(state, sg, sg_index, dma_addr);
                }

                len = min_t(unsigned int, dma_len, dev->fmr_page_size);

                if (!state->npages)
                        state->base_dma_addr = dma_addr;
                state->pages[state->npages++] = dma_addr;
                state->fmr_len += len;
                dma_addr += len;
                dma_len -= len;
        }

        /* If the last entry of the FMR wasn't a full page, then we need to
         * close it out and start a new one -- we can only merge at page
         * boundaries.
         */
        ret = 0;
        if (len != dev->fmr_page_size) {
                ret = srp_map_finish_fmr(state, target);
                if (!ret)
                        srp_map_update_start(state, NULL, 0, 0);
        }
        return ret;
}
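/**
 * srp_map_data() - map the data buffer of a SCSI command for an SRP_CMD
 *
 * Returns the length of the resulting SRP_CMD, including its data
 * descriptors, or a negative error code. A single mapped entry yields a
 * direct descriptor; otherwise an indirect descriptor table is built, and
 * the code falls back from FMR to direct mapping if FMR mapping fails.
 */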
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
                        struct srp_request *req)
{
        struct scatterlist *scat, *sg;
        struct srp_cmd *cmd = req->cmd->buf;
        int i, len, nents, count, use_fmr;
        struct srp_device *dev;
        struct ib_device *ibdev;
        struct srp_map_state state;
        struct srp_indirect_buf *indirect_hdr;
        u32 table_len;
        u8 fmt;

        if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
                return sizeof (struct srp_cmd);

        if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
            scmnd->sc_data_direction != DMA_TO_DEVICE) {
                shost_printk(KERN_WARNING, target->scsi_host,
                             PFX "Unhandled data direction %d\n",
                             scmnd->sc_data_direction);
                return -EINVAL;
        }

        nents = scsi_sg_count(scmnd);
        scat  = scsi_sglist(scmnd);

        dev = target->srp_host->srp_dev;
        ibdev = dev->dev;

        count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
        if (unlikely(count == 0))
                return -EIO;

        fmt = SRP_DATA_DESC_DIRECT;
        len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);

        if (count == 1) {
                /*
                 * The midlayer only generated a single gather/scatter
                 * entry, or DMA mapping coalesced everything to a
                 * single entry.  So a direct descriptor along with
                 * the DMA MR suffices.
                 */
                struct srp_direct_buf *buf = (void *) cmd->add_data;

                buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
                buf->key = cpu_to_be32(target->rkey);
                buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));

                req->nfmr = 0;
                goto map_complete;
        }

        /* We have more than one scatter/gather entry, so build our indirect
         * descriptor table, trying to merge as many entries with FMR as we
         * can.
         */
        indirect_hdr = (void *) cmd->add_data;

        ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
                                   target->indirect_size, DMA_TO_DEVICE);

        memset(&state, 0, sizeof(state));
        state.desc     = req->indirect_desc;
        state.pages    = req->map_page;
        state.next_fmr = req->fmr_list;

        use_fmr = dev->fmr_pool ? SRP_MAP_ALLOW_FMR : SRP_MAP_NO_FMR;

        for_each_sg(scat, sg, count, i) {
                if (srp_map_sg_entry(&state, target, sg, i, use_fmr)) {
                        /* FMR mapping failed, so backtrack to the first
                         * unmapped entry and continue on without using FMR.
                         */
                        dma_addr_t dma_addr;
                        unsigned int dma_len;

backtrack:
                        sg = state.unmapped_sg;
                        i = state.unmapped_index;

                        dma_addr = ib_sg_dma_address(ibdev, sg);
                        dma_len = ib_sg_dma_len(ibdev, sg);
                        dma_len -= (state.unmapped_addr - dma_addr);
                        dma_addr = state.unmapped_addr;
                        use_fmr = SRP_MAP_NO_FMR;
                        srp_map_desc(&state, dma_addr, dma_len, target->rkey);
                }
        }

        if (use_fmr == SRP_MAP_ALLOW_FMR && srp_map_finish_fmr(&state, target))
                goto backtrack;

        /* We've mapped the request, now pull as much of the indirect
         * descriptor table as we can into the command buffer. If this
         * target is not using an external indirect table, we are
         * guaranteed to fit into the command, as the SCSI layer won't
         * give us more S/G entries than we allow.
         */
        req->nfmr = state.nfmr;
        if (state.ndesc == 1) {
                /* FMR mapping was able to collapse this to one entry,
                 * so use a direct descriptor.
                 */
                struct srp_direct_buf *buf = (void *) cmd->add_data;

                *buf = req->indirect_desc[0];
                goto map_complete;
        }

        if (unlikely(target->cmd_sg_cnt < state.ndesc &&
                     !target->allow_ext_sg)) {
                shost_printk(KERN_ERR, target->scsi_host,
                             "Could not fit S/G list into SRP_CMD\n");
                return -EIO;
        }

        count = min(state.ndesc, target->cmd_sg_cnt);
        table_len = state.ndesc * sizeof (struct srp_direct_buf);

        fmt = SRP_DATA_DESC_INDIRECT;
        len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
        len += count * sizeof (struct srp_direct_buf);

        memcpy(indirect_hdr->desc_list, req->indirect_desc,
               count * sizeof (struct srp_direct_buf));

        indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
        indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
        indirect_hdr->table_desc.len = cpu_to_be32(table_len);
        indirect_hdr->len = cpu_to_be32(state.total_len);

        if (scmnd->sc_data_direction == DMA_TO_DEVICE)
                cmd->data_out_desc_cnt = count;
        else
                cmd->data_in_desc_cnt = count;

        ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
                                      DMA_TO_DEVICE);

map_complete:
        if (scmnd->sc_data_direction == DMA_TO_DEVICE)
                cmd->buf_fmt = fmt << 4;
        else
                cmd->buf_fmt = fmt;

        return len;
}
/*
 * Return an IU and possible credit to the free pool
 */
static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu,
                          enum srp_iu_type iu_type)
{
        unsigned long flags;

        spin_lock_irqsave(&target->lock, flags);
        list_add(&iu->list, &target->free_tx);
        if (iu_type != SRP_IU_RSP)
                ++target->req_lim;
        spin_unlock_irqrestore(&target->lock, flags);
}

/*
 * Must be called with target->lock held to protect req_lim and free_tx.
 * If IU is not sent, it must be returned using srp_put_tx_iu().
 *
 * Note:
 * An upper limit for the number of allocated information units for each
 * request type is:
 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
 *   more than Scsi_Host.can_queue requests.
 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
 *   one unanswered SRP request to an initiator.
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
                                      enum srp_iu_type iu_type)
{
        s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
        struct srp_iu *iu;

        srp_send_completion(target->send_cq, target);

        if (list_empty(&target->free_tx))
                return NULL;

        /* Initiator responses to target requests do not consume credits */
        if (iu_type != SRP_IU_RSP) {
                if (target->req_lim <= rsv) {
                        ++target->zero_req_lim;
                        return NULL;
                }

                --target->req_lim;
        }

        iu = list_first_entry(&target->free_tx, struct srp_iu, list);
        list_del(&iu->list);
        return iu;
}

static int srp_post_send(struct srp_target_port *target,
                         struct srp_iu *iu, int len)
{
        struct ib_sge list;
        struct ib_send_wr wr, *bad_wr;

        list.addr   = iu->dma;
        list.length = len;
        list.lkey   = target->lkey;

        wr.next       = NULL;
        wr.wr_id      = (uintptr_t) iu;
        wr.sg_list    = &list;
        wr.num_sge    = 1;
        wr.opcode     = IB_WR_SEND;
        wr.send_flags = IB_SEND_SIGNALED;

        return ib_post_send(target->qp, &wr, &bad_wr);
}

static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu)
{
        struct ib_recv_wr wr, *bad_wr;
        struct ib_sge list;

        list.addr   = iu->dma;
        list.length = iu->size;
        list.lkey   = target->lkey;

        wr.next    = NULL;
        wr.wr_id   = (uintptr_t) iu;
        wr.sg_list = &list;
        wr.num_sge = 1;

        return ib_post_recv(target->qp, &wr, &bad_wr);
}
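/**
 * srp_process_rsp() - handle a received SRP_RSP information unit
 *
 * A response tagged with SRP_TAG_TSK_MGMT completes the pending task
 * management request; any other response completes the SCSI command it
 * belongs to after copying sense data and residual counts.
 */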
static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
{
        struct srp_request *req;
        struct scsi_cmnd *scmnd;
        unsigned long flags;

        if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
                spin_lock_irqsave(&target->lock, flags);
                target->req_lim += be32_to_cpu(rsp->req_lim_delta);
                spin_unlock_irqrestore(&target->lock, flags);

                target->tsk_mgmt_status = -1;
                if (be32_to_cpu(rsp->resp_data_len) >= 4)
                        target->tsk_mgmt_status = rsp->data[3];
                complete(&target->tsk_mgmt_done);
        } else {
                req = &target->req_ring[rsp->tag];
                scmnd = srp_claim_req(target, req, NULL, NULL);
                if (!scmnd) {
                        shost_printk(KERN_ERR, target->scsi_host,
                                     "Null scmnd for RSP w/tag %016llx\n",
                                     (unsigned long long) rsp->tag);

                        spin_lock_irqsave(&target->lock, flags);
                        target->req_lim += be32_to_cpu(rsp->req_lim_delta);
                        spin_unlock_irqrestore(&target->lock, flags);

                        return;
                }
                scmnd->result = rsp->status;

                if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
                        memcpy(scmnd->sense_buffer, rsp->data +
                               be32_to_cpu(rsp->resp_data_len),
                               min_t(int, be32_to_cpu(rsp->sense_data_len),
                                     SCSI_SENSE_BUFFERSIZE));
                }

                if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
                        scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
                else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
                        scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));

                srp_free_req(target, req, scmnd,
                             be32_to_cpu(rsp->req_lim_delta));

                scmnd->host_scribble = NULL;
                scmnd->scsi_done(scmnd);
        }
}
David Dillowbb125882010-10-08 14:40:47 -04001343static int srp_response_common(struct srp_target_port *target, s32 req_delta,
1344 void *rsp, int len)
1345{
Bart Van Assche76c75b22010-11-26 14:37:47 -05001346 struct ib_device *dev = target->srp_host->srp_dev->dev;
David Dillowbb125882010-10-08 14:40:47 -04001347 unsigned long flags;
1348 struct srp_iu *iu;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001349 int err;
David Dillowbb125882010-10-08 14:40:47 -04001350
Bart Van Asschee9684672010-11-26 15:08:38 -05001351 spin_lock_irqsave(&target->lock, flags);
David Dillowbb125882010-10-08 14:40:47 -04001352 target->req_lim += req_delta;
David Dillowbb125882010-10-08 14:40:47 -04001353 iu = __srp_get_tx_iu(target, SRP_IU_RSP);
Bart Van Asschee9684672010-11-26 15:08:38 -05001354 spin_unlock_irqrestore(&target->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001355
David Dillowbb125882010-10-08 14:40:47 -04001356 if (!iu) {
1357 shost_printk(KERN_ERR, target->scsi_host, PFX
1358 "no IU available to send response\n");
Bart Van Assche76c75b22010-11-26 14:37:47 -05001359 return 1;
David Dillowbb125882010-10-08 14:40:47 -04001360 }
1361
1362 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1363 memcpy(iu->buf, rsp, len);
1364 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1365
Bart Van Assche76c75b22010-11-26 14:37:47 -05001366 err = srp_post_send(target, iu, len);
1367 if (err) {
David Dillowbb125882010-10-08 14:40:47 -04001368 shost_printk(KERN_ERR, target->scsi_host, PFX
1369 "unable to post response: %d\n", err);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001370 srp_put_tx_iu(target, iu, SRP_IU_RSP);
1371 }
David Dillowbb125882010-10-08 14:40:47 -04001372
David Dillowbb125882010-10-08 14:40:47 -04001373 return err;
1374}
1375
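/*
 * Per the SRP specification, the target sends SRP_CRED_REQ to adjust the
 * initiator's request limit, and the initiator must acknowledge it with an
 * SRP_CRED_RSP; srp_response_common() above applies the delta and posts the
 * reply.
 */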
1376static void srp_process_cred_req(struct srp_target_port *target,
1377 struct srp_cred_req *req)
1378{
1379 struct srp_cred_rsp rsp = {
1380 .opcode = SRP_CRED_RSP,
1381 .tag = req->tag,
1382 };
1383 s32 delta = be32_to_cpu(req->req_lim_delta);
1384
1385 if (srp_response_common(target, delta, &rsp, sizeof rsp))
1386 shost_printk(KERN_ERR, target->scsi_host, PFX
1387 "problems processing SRP_CRED_REQ\n");
1388}
1389
1390static void srp_process_aer_req(struct srp_target_port *target,
1391 struct srp_aer_req *req)
1392{
1393 struct srp_aer_rsp rsp = {
1394 .opcode = SRP_AER_RSP,
1395 .tag = req->tag,
1396 };
1397 s32 delta = be32_to_cpu(req->req_lim_delta);
1398
1399 shost_printk(KERN_ERR, target->scsi_host, PFX
1400 "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));
1401
1402 if (srp_response_common(target, delta, &rsp, sizeof rsp))
1403 shost_printk(KERN_ERR, target->scsi_host, PFX
1404 "problems processing SRP_AER_REQ\n");
1405}
1406
Roland Dreieraef9ec32005-11-02 14:07:13 -08001407static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
1408{
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001409 struct ib_device *dev = target->srp_host->srp_dev->dev;
Roland Dreier737b94e2011-05-23 11:30:04 -07001410 struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001411 int res;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001412 u8 opcode;
1413
Ralph Campbell85507bc2006-12-12 14:30:55 -08001414 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
1415 DMA_FROM_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001416
1417 opcode = *(u8 *) iu->buf;
1418
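	/* Debugging aid: change "if (0)" to "if (1)" to hex-dump every received IU. */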
1419 if (0) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001420 shost_printk(KERN_ERR, target->scsi_host,
1421 PFX "recv completion, opcode 0x%02x\n", opcode);
Bart Van Assche7a700812010-07-29 15:56:37 +00001422 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
1423 iu->buf, wc->byte_len, true);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001424 }
1425
1426 switch (opcode) {
1427 case SRP_RSP:
1428 srp_process_rsp(target, iu->buf);
1429 break;
1430
David Dillowbb125882010-10-08 14:40:47 -04001431 case SRP_CRED_REQ:
1432 srp_process_cred_req(target, iu->buf);
1433 break;
1434
1435 case SRP_AER_REQ:
1436 srp_process_aer_req(target, iu->buf);
1437 break;
1438
Roland Dreieraef9ec32005-11-02 14:07:13 -08001439 case SRP_T_LOGOUT:
1440 /* XXX Handle target logout */
David Dillow7aa54bd2008-01-07 18:23:41 -05001441 shost_printk(KERN_WARNING, target->scsi_host,
1442 PFX "Got target logout request\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001443 break;
1444
1445 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05001446 shost_printk(KERN_WARNING, target->scsi_host,
1447 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001448 break;
1449 }
1450
Ralph Campbell85507bc2006-12-12 14:30:55 -08001451 ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
1452 DMA_FROM_DEVICE);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001453
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001454 res = srp_post_recv(target, iu);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001455 if (res != 0)
1456 shost_printk(KERN_ERR, target->scsi_host,
1457 PFX "Recv failed with error code %d\n", res);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001458}
1459
Bart Van Asschec1120f82013-10-26 14:35:08 +02001460/**
1461 * srp_tl_err_work() - handle a transport layer error
Bart Van Asscheaf246632014-05-20 15:04:21 +02001462 * @work: Work structure embedded in an SRP target port.
Bart Van Asschec1120f82013-10-26 14:35:08 +02001463 *
1464 * Note: This function may get invoked before the rport has been created,
1465 * hence the target->rport test.
1466 */
1467static void srp_tl_err_work(struct work_struct *work)
1468{
1469 struct srp_target_port *target;
1470
1471 target = container_of(work, struct srp_target_port, tl_err_work);
1472 if (target->rport)
1473 srp_start_tl_fail_timers(target->rport);
1474}
1475
Bart Van Asschecd4e3852013-10-10 13:53:25 +02001476static void srp_handle_qp_err(enum ib_wc_status wc_status, bool send_err,
Bart Van Assche948d1e82011-09-03 09:25:42 +02001477 struct srp_target_port *target)
1478{
Bart Van Assche294c8752011-12-25 12:18:12 +00001479 if (target->connected && !target->qp_in_error) {
Bart Van Assche4f0af692012-11-26 11:16:40 +01001480 shost_printk(KERN_ERR, target->scsi_host,
1481 PFX "failed %s status %d\n",
Bart Van Asschecd4e3852013-10-10 13:53:25 +02001482 send_err ? "send" : "receive",
Bart Van Assche4f0af692012-11-26 11:16:40 +01001483 wc_status);
Bart Van Asschec1120f82013-10-26 14:35:08 +02001484 queue_work(system_long_wq, &target->tl_err_work);
Bart Van Assche4f0af692012-11-26 11:16:40 +01001485 }
Bart Van Assche948d1e82011-09-03 09:25:42 +02001486 target->qp_in_error = true;
1487}
1488
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001489static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001490{
1491 struct srp_target_port *target = target_ptr;
1492 struct ib_wc wc;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001493
1494 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
1495 while (ib_poll_cq(cq, 1, &wc) > 0) {
Bart Van Assche948d1e82011-09-03 09:25:42 +02001496 if (likely(wc.status == IB_WC_SUCCESS)) {
1497 srp_handle_recv(target, &wc);
1498 } else {
Bart Van Asschecd4e3852013-10-10 13:53:25 +02001499 srp_handle_qp_err(wc.status, false, target);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001500 }
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001501 }
1502}
1503
1504static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
1505{
1506 struct srp_target_port *target = target_ptr;
1507 struct ib_wc wc;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001508 struct srp_iu *iu;
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001509
1510 while (ib_poll_cq(cq, 1, &wc) > 0) {
Bart Van Assche948d1e82011-09-03 09:25:42 +02001511 if (likely(wc.status == IB_WC_SUCCESS)) {
1512 iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
1513 list_add(&iu->list, &target->free_tx);
1514 } else {
Bart Van Asschecd4e3852013-10-10 13:53:25 +02001515 srp_handle_qp_err(wc.status, true, target);
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001516 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001517 }
1518}
1519
Bart Van Assche76c75b22010-11-26 14:37:47 -05001520static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001521{
Bart Van Assche76c75b22010-11-26 14:37:47 -05001522 struct srp_target_port *target = host_to_target(shost);
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001523 struct srp_rport *rport = target->rport;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001524 struct srp_request *req;
1525 struct srp_iu *iu;
1526 struct srp_cmd *cmd;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001527 struct ib_device *dev;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001528 unsigned long flags;
Bart Van Asscheed9b2262013-10-26 14:34:27 +02001529 int len, result;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001530 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
1531
1532 /*
1533 * The SCSI EH thread is the only context from which srp_queuecommand()
1534 * can get invoked for blocked devices (SDEV_BLOCK /
1535 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
1536 * locking the rport mutex if invoked from inside the SCSI EH.
1537 */
1538 if (in_scsi_eh)
1539 mutex_lock(&rport->mutex);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001540
Bart Van Asscheed9b2262013-10-26 14:34:27 +02001541 result = srp_chkready(target->rport);
1542 if (unlikely(result)) {
1543 scmnd->result = result;
Bart Van Assche2ce19e72013-02-21 17:20:00 +00001544 scmnd->scsi_done(scmnd);
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001545 goto unlock_rport;
Bart Van Assche2ce19e72013-02-21 17:20:00 +00001546 }
1547
Bart Van Asschee9684672010-11-26 15:08:38 -05001548 spin_lock_irqsave(&target->lock, flags);
David Dillowbb125882010-10-08 14:40:47 -04001549 iu = __srp_get_tx_iu(target, SRP_IU_CMD);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001550 if (!iu)
Bart Van Assche695b8342011-01-13 19:02:25 +00001551 goto err_unlock;
1552
1553 req = list_first_entry(&target->free_reqs, struct srp_request, list);
1554 list_del(&req->list);
1555 spin_unlock_irqrestore(&target->lock, flags);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001556
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01001557 dev = target->srp_host->srp_dev->dev;
David Dillow49248642011-01-14 18:23:24 -05001558 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08001559 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001560
Roland Dreieraef9ec32005-11-02 14:07:13 -08001561 scmnd->result = 0;
David Dillowf8b6e312010-11-26 13:02:21 -05001562 scmnd->host_scribble = (void *) req;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001563
1564 cmd = iu->buf;
1565 memset(cmd, 0, sizeof *cmd);
1566
1567 cmd->opcode = SRP_CMD;
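	/*
	 * Shift the LUN into the first level of the eight-byte SCSI LUN
	 * structure defined by SAM-2.
	 */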
1568 cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
Roland Dreierd945e1d2006-05-09 10:50:28 -07001569 cmd->tag = req->index;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001570 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
1571
Roland Dreieraef9ec32005-11-02 14:07:13 -08001572 req->scmnd = scmnd;
1573 req->cmd = iu;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001574
1575 len = srp_map_data(scmnd, target, req);
1576 if (len < 0) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001577 shost_printk(KERN_ERR, target->scsi_host,
1578 PFX "Failed to map data\n");
Bart Van Assche76c75b22010-11-26 14:37:47 -05001579 goto err_iu;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001580 }
1581
David Dillow49248642011-01-14 18:23:24 -05001582 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08001583 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001584
Bart Van Assche76c75b22010-11-26 14:37:47 -05001585 if (srp_post_send(target, iu, len)) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001586 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001587 goto err_unmap;
1588 }
1589
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001590unlock_rport:
1591 if (in_scsi_eh)
1592 mutex_unlock(&rport->mutex);
1593
Roland Dreieraef9ec32005-11-02 14:07:13 -08001594 return 0;
1595
1596err_unmap:
1597 srp_unmap_data(scmnd, target, req);
1598
Bart Van Assche76c75b22010-11-26 14:37:47 -05001599err_iu:
1600 srp_put_tx_iu(target, iu, SRP_IU_CMD);
1601
Bart Van Assche024ca902014-05-20 15:03:49 +02001602 /*
 1603	 * Ensure that the loops that iterate over the request ring never
 1604	 * encounter a dangling SCSI command pointer.
1605 */
1606 req->scmnd = NULL;
1607
Bart Van Asschee9684672010-11-26 15:08:38 -05001608 spin_lock_irqsave(&target->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001609 list_add(&req->list, &target->free_reqs);
Bart Van Assche695b8342011-01-13 19:02:25 +00001610
1611err_unlock:
Bart Van Asschee9684672010-11-26 15:08:38 -05001612 spin_unlock_irqrestore(&target->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001613
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001614 if (in_scsi_eh)
1615 mutex_unlock(&rport->mutex);
1616
Roland Dreieraef9ec32005-11-02 14:07:13 -08001617 return SCSI_MLQUEUE_HOST_BUSY;
1618}
1619
Bart Van Assche4d73f952013-10-26 14:40:37 +02001620/*
1621 * Note: the resources allocated in this function are freed in
1622 * srp_free_target_ib().
1623 */
Roland Dreieraef9ec32005-11-02 14:07:13 -08001624static int srp_alloc_iu_bufs(struct srp_target_port *target)
1625{
1626 int i;
1627
Bart Van Assche4d73f952013-10-26 14:40:37 +02001628 target->rx_ring = kzalloc(target->queue_size * sizeof(*target->rx_ring),
1629 GFP_KERNEL);
1630 if (!target->rx_ring)
1631 goto err_no_ring;
1632 target->tx_ring = kzalloc(target->queue_size * sizeof(*target->tx_ring),
1633 GFP_KERNEL);
1634 if (!target->tx_ring)
1635 goto err_no_ring;
1636
1637 for (i = 0; i < target->queue_size; ++i) {
Roland Dreieraef9ec32005-11-02 14:07:13 -08001638 target->rx_ring[i] = srp_alloc_iu(target->srp_host,
1639 target->max_ti_iu_len,
1640 GFP_KERNEL, DMA_FROM_DEVICE);
1641 if (!target->rx_ring[i])
1642 goto err;
1643 }
1644
Bart Van Assche4d73f952013-10-26 14:40:37 +02001645 for (i = 0; i < target->queue_size; ++i) {
Roland Dreieraef9ec32005-11-02 14:07:13 -08001646 target->tx_ring[i] = srp_alloc_iu(target->srp_host,
David Dillow49248642011-01-14 18:23:24 -05001647 target->max_iu_len,
Roland Dreieraef9ec32005-11-02 14:07:13 -08001648 GFP_KERNEL, DMA_TO_DEVICE);
1649 if (!target->tx_ring[i])
1650 goto err;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001651
1652 list_add(&target->tx_ring[i]->list, &target->free_tx);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001653 }
1654
1655 return 0;
1656
1657err:
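	/*
	 * Both rings are walked unconditionally; this relies on srp_free_iu()
	 * (defined elsewhere in this file) tolerating the NULL entries left
	 * behind when allocation failed partway.
	 */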
Bart Van Assche4d73f952013-10-26 14:40:37 +02001658 for (i = 0; i < target->queue_size; ++i) {
Roland Dreieraef9ec32005-11-02 14:07:13 -08001659 srp_free_iu(target->srp_host, target->rx_ring[i]);
Bart Van Assche4d73f952013-10-26 14:40:37 +02001660 srp_free_iu(target->srp_host, target->tx_ring[i]);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001661 }
1662
1664err_no_ring:
1665 kfree(target->tx_ring);
1666 target->tx_ring = NULL;
1667 kfree(target->rx_ring);
1668 target->rx_ring = NULL;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001669
1670 return -ENOMEM;
1671}
1672
Bart Van Asschec9b03c12011-09-03 09:34:48 +02001673static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
1674{
1675 uint64_t T_tr_ns, max_compl_time_ms;
1676 uint32_t rq_tmo_jiffies;
1677
1678 /*
1679 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
1680 * table 91), both the QP timeout and the retry count have to be set
 1681	 * for RC QPs during the RTR to RTS transition.
1682 */
1683 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
1684 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
1685
1686 /*
1687 * Set target->rq_tmo_jiffies to one second more than the largest time
1688 * it can take before an error completion is generated. See also
1689 * C9-140..142 in the IBTA spec for more information about how to
1690 * convert the QP Local ACK Timeout value to nanoseconds.
1691 */
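	/*
	 * Worked example (illustrative values, not mandated by the spec):
	 * timeout = 19 and retry_cnt = 7 give T_tr_ns = 4096 * 2^19 ns,
	 * i.e. about 2.1 s per attempt, a worst-case completion time of
	 * roughly 7 * 4 * 2.1 s = 60 s and hence an rq_tmo_jiffies of
	 * about 61 seconds.
	 */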
1692 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
1693 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
1694 do_div(max_compl_time_ms, NSEC_PER_MSEC);
1695 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
1696
1697 return rq_tmo_jiffies;
1698}
1699
David Dillow961e0be2011-01-14 17:32:07 -05001700static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
1701 struct srp_login_rsp *lrsp,
1702 struct srp_target_port *target)
1703{
1704 struct ib_qp_attr *qp_attr = NULL;
1705 int attr_mask = 0;
1706 int ret;
1707 int i;
1708
1709 if (lrsp->opcode == SRP_LOGIN_RSP) {
1710 target->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
1711 target->req_lim = be32_to_cpu(lrsp->req_lim_delta);
1712
1713 /*
1714 * Reserve credits for task management so we don't
1715 * bounce requests back to the SCSI mid-layer.
1716 */
1717 target->scsi_host->can_queue
1718 = min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE,
1719 target->scsi_host->can_queue);
Bart Van Assche4d73f952013-10-26 14:40:37 +02001720 target->scsi_host->cmd_per_lun
1721 = min_t(int, target->scsi_host->can_queue,
1722 target->scsi_host->cmd_per_lun);
David Dillow961e0be2011-01-14 17:32:07 -05001723 } else {
1724 shost_printk(KERN_WARNING, target->scsi_host,
1725 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
1726 ret = -ECONNRESET;
1727 goto error;
1728 }
1729
Bart Van Assche4d73f952013-10-26 14:40:37 +02001730 if (!target->rx_ring) {
David Dillow961e0be2011-01-14 17:32:07 -05001731 ret = srp_alloc_iu_bufs(target);
1732 if (ret)
1733 goto error;
1734 }
1735
1736 ret = -ENOMEM;
1737 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
1738 if (!qp_attr)
1739 goto error;
1740
1741 qp_attr->qp_state = IB_QPS_RTR;
1742 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
1743 if (ret)
1744 goto error_free;
1745
1746 ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
1747 if (ret)
1748 goto error_free;
1749
Bart Van Assche4d73f952013-10-26 14:40:37 +02001750 for (i = 0; i < target->queue_size; i++) {
David Dillow961e0be2011-01-14 17:32:07 -05001751 struct srp_iu *iu = target->rx_ring[i];
1752 ret = srp_post_recv(target, iu);
1753 if (ret)
1754 goto error_free;
1755 }
1756
1757 qp_attr->qp_state = IB_QPS_RTS;
1758 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
1759 if (ret)
1760 goto error_free;
1761
Bart Van Asschec9b03c12011-09-03 09:34:48 +02001762 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
1763
David Dillow961e0be2011-01-14 17:32:07 -05001764 ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
1765 if (ret)
1766 goto error_free;
1767
1768 ret = ib_send_cm_rtu(cm_id, NULL, 0);
1769
1770error_free:
1771 kfree(qp_attr);
1772
1773error:
1774 target->status = ret;
1775}
1776
Roland Dreieraef9ec32005-11-02 14:07:13 -08001777static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
1778 struct ib_cm_event *event,
1779 struct srp_target_port *target)
1780{
David Dillow7aa54bd2008-01-07 18:23:41 -05001781 struct Scsi_Host *shost = target->scsi_host;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001782 struct ib_class_port_info *cpi;
1783 int opcode;
1784
1785 switch (event->param.rej_rcvd.reason) {
1786 case IB_CM_REJ_PORT_CM_REDIRECT:
1787 cpi = event->param.rej_rcvd.ari;
1788 target->path.dlid = cpi->redirect_lid;
1789 target->path.pkey = cpi->redirect_pkey;
1790 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
1791 memcpy(target->path.dgid.raw, cpi->redirect_gid, 16);
1792
1793 target->status = target->path.dlid ?
1794 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
1795 break;
1796
1797 case IB_CM_REJ_PORT_REDIRECT:
Roland Dreier5d7cbfd2007-08-03 10:45:18 -07001798 if (srp_target_is_topspin(target)) {
Roland Dreieraef9ec32005-11-02 14:07:13 -08001799 /*
1800 * Topspin/Cisco SRP gateways incorrectly send
1801 * reject reason code 25 when they mean 24
1802 * (port redirect).
1803 */
1804 memcpy(target->path.dgid.raw,
1805 event->param.rej_rcvd.ari, 16);
1806
David Dillow7aa54bd2008-01-07 18:23:41 -05001807 shost_printk(KERN_DEBUG, shost,
1808 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
1809 (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
1810 (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));
Roland Dreieraef9ec32005-11-02 14:07:13 -08001811
1812 target->status = SRP_PORT_REDIRECT;
1813 } else {
David Dillow7aa54bd2008-01-07 18:23:41 -05001814 shost_printk(KERN_WARNING, shost,
1815 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001816 target->status = -ECONNRESET;
1817 }
1818 break;
1819
1820 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
David Dillow7aa54bd2008-01-07 18:23:41 -05001821 shost_printk(KERN_WARNING, shost,
1822 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001823 target->status = -ECONNRESET;
1824 break;
1825
1826 case IB_CM_REJ_CONSUMER_DEFINED:
1827 opcode = *(u8 *) event->private_data;
1828 if (opcode == SRP_LOGIN_REJ) {
1829 struct srp_login_rej *rej = event->private_data;
1830 u32 reason = be32_to_cpu(rej->reason);
1831
1832 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
David Dillow7aa54bd2008-01-07 18:23:41 -05001833 shost_printk(KERN_WARNING, shost,
1834 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001835 else
Bart Van Asschee7ffde02014-03-14 13:52:21 +01001836 shost_printk(KERN_WARNING, shost, PFX
1837 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
1838 target->path.sgid.raw,
1839 target->orig_dgid, reason);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001840 } else
David Dillow7aa54bd2008-01-07 18:23:41 -05001841 shost_printk(KERN_WARNING, shost,
1842 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
1843 " opcode 0x%02x\n", opcode);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001844 target->status = -ECONNRESET;
1845 break;
1846
David Dillow9fe4bcf2008-01-08 17:08:52 -05001847 case IB_CM_REJ_STALE_CONN:
1848 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
1849 target->status = SRP_STALE_CONN;
1850 break;
1851
Roland Dreieraef9ec32005-11-02 14:07:13 -08001852 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05001853 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
1854 event->param.rej_rcvd.reason);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001855 target->status = -ECONNRESET;
1856 }
1857}
1858
1859static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
1860{
1861 struct srp_target_port *target = cm_id->context;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001862 int comp = 0;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001863
1864 switch (event->event) {
1865 case IB_CM_REQ_ERROR:
David Dillow7aa54bd2008-01-07 18:23:41 -05001866 shost_printk(KERN_DEBUG, target->scsi_host,
1867 PFX "Sending CM REQ failed\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001868 comp = 1;
1869 target->status = -ECONNRESET;
1870 break;
1871
1872 case IB_CM_REP_RECEIVED:
1873 comp = 1;
David Dillow961e0be2011-01-14 17:32:07 -05001874 srp_cm_rep_handler(cm_id, event->private_data, target);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001875 break;
1876
1877 case IB_CM_REJ_RECEIVED:
David Dillow7aa54bd2008-01-07 18:23:41 -05001878 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001879 comp = 1;
1880
1881 srp_cm_rej_handler(cm_id, event, target);
1882 break;
1883
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07001884 case IB_CM_DREQ_RECEIVED:
David Dillow7aa54bd2008-01-07 18:23:41 -05001885 shost_printk(KERN_WARNING, target->scsi_host,
1886 PFX "DREQ received - connection closed\n");
Bart Van Assche294c8752011-12-25 12:18:12 +00001887 srp_change_conn_state(target, false);
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07001888 if (ib_send_cm_drep(cm_id, NULL, 0))
David Dillow7aa54bd2008-01-07 18:23:41 -05001889 shost_printk(KERN_ERR, target->scsi_host,
1890 PFX "Sending CM DREP failed\n");
Bart Van Asschec1120f82013-10-26 14:35:08 +02001891 queue_work(system_long_wq, &target->tl_err_work);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001892 break;
1893
1894 case IB_CM_TIMEWAIT_EXIT:
David Dillow7aa54bd2008-01-07 18:23:41 -05001895 shost_printk(KERN_ERR, target->scsi_host,
1896 PFX "connection closed\n");
Bart Van Asscheac72d762014-03-14 13:53:40 +01001897 comp = 1;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001898
Roland Dreieraef9ec32005-11-02 14:07:13 -08001899 target->status = 0;
1900 break;
1901
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07001902 case IB_CM_MRA_RECEIVED:
1903 case IB_CM_DREQ_ERROR:
1904 case IB_CM_DREP_RECEIVED:
1905 break;
1906
Roland Dreieraef9ec32005-11-02 14:07:13 -08001907 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05001908 shost_printk(KERN_WARNING, target->scsi_host,
1909 PFX "Unhandled CM event %d\n", event->event);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001910 break;
1911 }
1912
1913 if (comp)
1914 complete(&target->done);
1915
Roland Dreieraef9ec32005-11-02 14:07:13 -08001916 return 0;
1917}
1918
Jack Wang71444b92013-11-07 11:37:37 +01001919/**
 1920 * srp_change_queue_type() - change the device queue tag type
1921 * @sdev: scsi device struct
1922 * @tag_type: requested tag type
1923 *
1924 * Returns queue tag type.
1925 */
1926static int
1927srp_change_queue_type(struct scsi_device *sdev, int tag_type)
1928{
1929 if (sdev->tagged_supported) {
1930 scsi_set_tag_type(sdev, tag_type);
1931 if (tag_type)
1932 scsi_activate_tcq(sdev, sdev->queue_depth);
1933 else
1934 scsi_deactivate_tcq(sdev, sdev->queue_depth);
1935 } else
1936 tag_type = 0;
1937
1938 return tag_type;
1939}
1940
1941/**
 1942 * srp_change_queue_depth() - set the device queue depth
1943 * @sdev: scsi device struct
1944 * @qdepth: requested queue depth
1945 * @reason: SCSI_QDEPTH_DEFAULT/SCSI_QDEPTH_QFULL/SCSI_QDEPTH_RAMP_UP
1946 * (see include/scsi/scsi_host.h for definition)
1947 *
1948 * Returns queue depth.
1949 */
1950static int
1951srp_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
1952{
1953 struct Scsi_Host *shost = sdev->host;
 1954	int max_depth;

1955 if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP) {
1956 max_depth = shost->can_queue;
1957 if (!sdev->tagged_supported)
1958 max_depth = 1;
1959 if (qdepth > max_depth)
1960 qdepth = max_depth;
1961 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
1962 } else if (reason == SCSI_QDEPTH_QFULL)
1963 scsi_track_queue_full(sdev, qdepth);
1964 else
1965 return -EOPNOTSUPP;
1966
1967 return sdev->queue_depth;
1968}
1969
Roland Dreierd945e1d2006-05-09 10:50:28 -07001970static int srp_send_tsk_mgmt(struct srp_target_port *target,
David Dillowf8b6e312010-11-26 13:02:21 -05001971 u64 req_tag, unsigned int lun, u8 func)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001972{
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001973 struct srp_rport *rport = target->rport;
David Dillow19081f32010-10-18 08:54:49 -04001974 struct ib_device *dev = target->srp_host->srp_dev->dev;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001975 struct srp_iu *iu;
1976 struct srp_tsk_mgmt *tsk_mgmt;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001977
Bart Van Assche3780d1f2013-02-21 17:18:00 +00001978 if (!target->connected || target->qp_in_error)
1979 return -1;
1980
David Dillowf8b6e312010-11-26 13:02:21 -05001981 init_completion(&target->tsk_mgmt_done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001982
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001983 /*
 1984	 * Lock the rport mutex to prevent srp_create_target_ib() from being
 1985	 * invoked while a task management function is being sent.
1986 */
1987 mutex_lock(&rport->mutex);
Bart Van Asschee9684672010-11-26 15:08:38 -05001988 spin_lock_irq(&target->lock);
David Dillowbb125882010-10-08 14:40:47 -04001989 iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
Bart Van Asschee9684672010-11-26 15:08:38 -05001990 spin_unlock_irq(&target->lock);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001991
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001992 if (!iu) {
1993 mutex_unlock(&rport->mutex);
1994
Bart Van Assche76c75b22010-11-26 14:37:47 -05001995 return -1;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001996 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001997
David Dillow19081f32010-10-18 08:54:49 -04001998 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
1999 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002000 tsk_mgmt = iu->buf;
2001 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2002
2003 tsk_mgmt->opcode = SRP_TSK_MGMT;
David Dillowf8b6e312010-11-26 13:02:21 -05002004 tsk_mgmt->lun = cpu_to_be64((u64) lun << 48);
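	/*
	 * Setting the SRP_TAG_TSK_MGMT bit in the tag lets srp_process_rsp()
	 * distinguish task management responses from SCSI command responses.
	 */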
2005 tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002006 tsk_mgmt->tsk_mgmt_func = func;
David Dillowf8b6e312010-11-26 13:02:21 -05002007 tsk_mgmt->task_tag = req_tag;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002008
David Dillow19081f32010-10-18 08:54:49 -04002009 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2010 DMA_TO_DEVICE);
Bart Van Assche76c75b22010-11-26 14:37:47 -05002011 if (srp_post_send(target, iu, sizeof *tsk_mgmt)) {
2012 srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT);
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002013 mutex_unlock(&rport->mutex);
2014
Bart Van Assche76c75b22010-11-26 14:37:47 -05002015 return -1;
2016 }
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002017 mutex_unlock(&rport->mutex);
Roland Dreierd945e1d2006-05-09 10:50:28 -07002018
David Dillowf8b6e312010-11-26 13:02:21 -05002019 if (!wait_for_completion_timeout(&target->tsk_mgmt_done,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002020 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
Roland Dreierd945e1d2006-05-09 10:50:28 -07002021 return -1;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002022
Roland Dreierd945e1d2006-05-09 10:50:28 -07002023 return 0;
Roland Dreierd945e1d2006-05-09 10:50:28 -07002024}
2025
Roland Dreieraef9ec32005-11-02 14:07:13 -08002026static int srp_abort(struct scsi_cmnd *scmnd)
2027{
Roland Dreierd945e1d2006-05-09 10:50:28 -07002028 struct srp_target_port *target = host_to_target(scmnd->device->host);
David Dillowf8b6e312010-11-26 13:02:21 -05002029 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
Bart Van Assche086f44f2013-06-12 15:23:04 +02002030 int ret;
Roland Dreierd945e1d2006-05-09 10:50:28 -07002031
David Dillow7aa54bd2008-01-07 18:23:41 -05002032 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002033
Bart Van Asscheb3fe6282014-03-14 13:54:11 +01002034 if (!req || !srp_claim_req(target, req, NULL, scmnd))
Bart Van Assche99b66972013-10-10 13:52:33 +02002035 return SUCCESS;
Bart Van Assche086f44f2013-06-12 15:23:04 +02002036 if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
Bart Van Assche80d5e8a2013-07-10 17:36:35 +02002037 SRP_TSK_ABORT_TASK) == 0)
Bart Van Assche086f44f2013-06-12 15:23:04 +02002038 ret = SUCCESS;
Bart Van Asscheed9b2262013-10-26 14:34:27 +02002039 else if (target->rport->state == SRP_RPORT_LOST)
Bart Van Assche99e1c132013-06-28 14:49:58 +02002040 ret = FAST_IO_FAIL;
Bart Van Assche086f44f2013-06-12 15:23:04 +02002041 else
2042 ret = FAILED;
Bart Van Assche22032992012-08-14 13:18:53 +00002043 srp_free_req(target, req, scmnd, 0);
2044 scmnd->result = DID_ABORT << 16;
Bart Van Assched8536672012-08-24 10:29:11 +00002045 scmnd->scsi_done(scmnd);
Roland Dreierd945e1d2006-05-09 10:50:28 -07002046
Bart Van Assche086f44f2013-06-12 15:23:04 +02002047 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002048}
2049
2050static int srp_reset_device(struct scsi_cmnd *scmnd)
2051{
Roland Dreierd945e1d2006-05-09 10:50:28 -07002052 struct srp_target_port *target = host_to_target(scmnd->device->host);
Bart Van Assche536ae142010-11-26 13:58:27 -05002053 int i;
Roland Dreierd945e1d2006-05-09 10:50:28 -07002054
David Dillow7aa54bd2008-01-07 18:23:41 -05002055 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002056
David Dillowf8b6e312010-11-26 13:02:21 -05002057 if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun,
2058 SRP_TSK_LUN_RESET))
Roland Dreierd945e1d2006-05-09 10:50:28 -07002059 return FAILED;
David Dillowf8b6e312010-11-26 13:02:21 -05002060 if (target->tsk_mgmt_status)
Roland Dreierd945e1d2006-05-09 10:50:28 -07002061 return FAILED;
2062
Bart Van Assche4d73f952013-10-26 14:40:37 +02002063 for (i = 0; i < target->req_ring_size; ++i) {
Bart Van Assche536ae142010-11-26 13:58:27 -05002064 struct srp_request *req = &target->req_ring[i];
Bart Van Asscheb3fe6282014-03-14 13:54:11 +01002065 srp_finish_req(target, req, scmnd->device, DID_RESET << 16);
Bart Van Assche536ae142010-11-26 13:58:27 -05002066 }
Roland Dreierd945e1d2006-05-09 10:50:28 -07002067
Roland Dreierd945e1d2006-05-09 10:50:28 -07002068 return SUCCESS;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002069}
2070
2071static int srp_reset_host(struct scsi_cmnd *scmnd)
2072{
2073 struct srp_target_port *target = host_to_target(scmnd->device->host);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002074
David Dillow7aa54bd2008-01-07 18:23:41 -05002075 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002076
Bart Van Asscheed9b2262013-10-26 14:34:27 +02002077 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002078}
2079
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002080static int srp_slave_configure(struct scsi_device *sdev)
2081{
2082 struct Scsi_Host *shost = sdev->host;
2083 struct srp_target_port *target = host_to_target(shost);
2084 struct request_queue *q = sdev->request_queue;
2085 unsigned long timeout;
2086
2087 if (sdev->type == TYPE_DISK) {
2088 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2089 blk_queue_rq_timeout(q, timeout);
2090 }
2091
2092 return 0;
2093}
2094
Tony Jonesee959b02008-02-22 00:13:36 +01002095static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2096 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002097{
Tony Jonesee959b02008-02-22 00:13:36 +01002098 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002099
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002100 return sprintf(buf, "0x%016llx\n",
2101 (unsigned long long) be64_to_cpu(target->id_ext));
2102}
2103
Tony Jonesee959b02008-02-22 00:13:36 +01002104static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2105 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002106{
Tony Jonesee959b02008-02-22 00:13:36 +01002107 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002108
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002109 return sprintf(buf, "0x%016llx\n",
2110 (unsigned long long) be64_to_cpu(target->ioc_guid));
2111}
2112
Tony Jonesee959b02008-02-22 00:13:36 +01002113static ssize_t show_service_id(struct device *dev,
2114 struct device_attribute *attr, char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002115{
Tony Jonesee959b02008-02-22 00:13:36 +01002116 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002117
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002118 return sprintf(buf, "0x%016llx\n",
2119 (unsigned long long) be64_to_cpu(target->service_id));
2120}
2121
Tony Jonesee959b02008-02-22 00:13:36 +01002122static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2123 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002124{
Tony Jonesee959b02008-02-22 00:13:36 +01002125 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002126
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002127 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
2128}
2129
Bart Van Assche848b3082013-10-26 14:38:12 +02002130static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2131 char *buf)
2132{
2133 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2134
2135 return sprintf(buf, "%pI6\n", target->path.sgid.raw);
2136}
2137
Tony Jonesee959b02008-02-22 00:13:36 +01002138static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2139 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002140{
Tony Jonesee959b02008-02-22 00:13:36 +01002141 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002142
Harvey Harrison5b095d9892008-10-29 12:52:50 -07002143 return sprintf(buf, "%pI6\n", target->path.dgid.raw);
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002144}
2145
Tony Jonesee959b02008-02-22 00:13:36 +01002146static ssize_t show_orig_dgid(struct device *dev,
2147 struct device_attribute *attr, char *buf)
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002148{
Tony Jonesee959b02008-02-22 00:13:36 +01002149 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002150
Harvey Harrison5b095d9892008-10-29 12:52:50 -07002151 return sprintf(buf, "%pI6\n", target->orig_dgid);
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002152}
2153
Bart Van Assche89de7482010-08-03 14:08:45 +00002154static ssize_t show_req_lim(struct device *dev,
2155 struct device_attribute *attr, char *buf)
2156{
2157 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2158
Bart Van Assche89de7482010-08-03 14:08:45 +00002159 return sprintf(buf, "%d\n", target->req_lim);
2160}
2161
Tony Jonesee959b02008-02-22 00:13:36 +01002162static ssize_t show_zero_req_lim(struct device *dev,
2163 struct device_attribute *attr, char *buf)
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002164{
Tony Jonesee959b02008-02-22 00:13:36 +01002165 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002166
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002167 return sprintf(buf, "%d\n", target->zero_req_lim);
2168}
2169
Tony Jonesee959b02008-02-22 00:13:36 +01002170static ssize_t show_local_ib_port(struct device *dev,
2171 struct device_attribute *attr, char *buf)
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002172{
Tony Jonesee959b02008-02-22 00:13:36 +01002173 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002174
2175 return sprintf(buf, "%d\n", target->srp_host->port);
2176}
2177
Tony Jonesee959b02008-02-22 00:13:36 +01002178static ssize_t show_local_ib_device(struct device *dev,
2179 struct device_attribute *attr, char *buf)
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002180{
Tony Jonesee959b02008-02-22 00:13:36 +01002181 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002182
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002183 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002184}
2185
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002186static ssize_t show_comp_vector(struct device *dev,
2187 struct device_attribute *attr, char *buf)
2188{
2189 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2190
2191 return sprintf(buf, "%d\n", target->comp_vector);
2192}
2193
Vu Pham7bb312e2013-10-26 14:31:27 +02002194static ssize_t show_tl_retry_count(struct device *dev,
2195 struct device_attribute *attr, char *buf)
2196{
2197 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2198
2199 return sprintf(buf, "%d\n", target->tl_retry_count);
2200}
2201
David Dillow49248642011-01-14 18:23:24 -05002202static ssize_t show_cmd_sg_entries(struct device *dev,
2203 struct device_attribute *attr, char *buf)
2204{
2205 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2206
2207 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2208}
2209
David Dillowc07d4242011-01-16 13:57:10 -05002210static ssize_t show_allow_ext_sg(struct device *dev,
2211 struct device_attribute *attr, char *buf)
2212{
2213 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2214
2215 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2216}
2217
Tony Jonesee959b02008-02-22 00:13:36 +01002218static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
2219static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
2220static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
2221static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
Bart Van Assche848b3082013-10-26 14:38:12 +02002222static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
Tony Jonesee959b02008-02-22 00:13:36 +01002223static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
2224static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
Bart Van Assche89de7482010-08-03 14:08:45 +00002225static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
Tony Jonesee959b02008-02-22 00:13:36 +01002226static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
2227static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
2228static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002229static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
Vu Pham7bb312e2013-10-26 14:31:27 +02002230static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
David Dillow49248642011-01-14 18:23:24 -05002231static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
David Dillowc07d4242011-01-16 13:57:10 -05002232static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002233
Tony Jonesee959b02008-02-22 00:13:36 +01002234static struct device_attribute *srp_host_attrs[] = {
2235 &dev_attr_id_ext,
2236 &dev_attr_ioc_guid,
2237 &dev_attr_service_id,
2238 &dev_attr_pkey,
Bart Van Assche848b3082013-10-26 14:38:12 +02002239 &dev_attr_sgid,
Tony Jonesee959b02008-02-22 00:13:36 +01002240 &dev_attr_dgid,
2241 &dev_attr_orig_dgid,
Bart Van Assche89de7482010-08-03 14:08:45 +00002242 &dev_attr_req_lim,
Tony Jonesee959b02008-02-22 00:13:36 +01002243 &dev_attr_zero_req_lim,
2244 &dev_attr_local_ib_port,
2245 &dev_attr_local_ib_device,
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002246 &dev_attr_comp_vector,
Vu Pham7bb312e2013-10-26 14:31:27 +02002247 &dev_attr_tl_retry_count,
David Dillow49248642011-01-14 18:23:24 -05002248 &dev_attr_cmd_sg_entries,
David Dillowc07d4242011-01-16 13:57:10 -05002249 &dev_attr_allow_ext_sg,
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002250 NULL
2251};
2252
Roland Dreieraef9ec32005-11-02 14:07:13 -08002253static struct scsi_host_template srp_template = {
2254 .module = THIS_MODULE,
Roland Dreierb7f008f2007-05-06 21:18:11 -07002255 .name = "InfiniBand SRP initiator",
2256 .proc_name = DRV_NAME,
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002257 .slave_configure = srp_slave_configure,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002258 .info = srp_target_info,
2259 .queuecommand = srp_queuecommand,
Jack Wang71444b92013-11-07 11:37:37 +01002260 .change_queue_depth = srp_change_queue_depth,
2261 .change_queue_type = srp_change_queue_type,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002262 .eh_abort_handler = srp_abort,
2263 .eh_device_reset_handler = srp_reset_device,
2264 .eh_host_reset_handler = srp_reset_host,
Bart Van Assche2742c1d2013-06-12 15:24:25 +02002265 .skip_settle_delay = true,
David Dillow49248642011-01-14 18:23:24 -05002266 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
Bart Van Assche4d73f952013-10-26 14:40:37 +02002267 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002268 .this_id = -1,
Bart Van Assche4d73f952013-10-26 14:40:37 +02002269 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002270 .use_clustering = ENABLE_CLUSTERING,
2271 .shost_attrs = srp_host_attrs
Roland Dreieraef9ec32005-11-02 14:07:13 -08002272};
2273
2274static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2275{
FUJITA Tomonori32368222007-06-27 16:33:12 +09002276 struct srp_rport_identifiers ids;
2277 struct srp_rport *rport;
2278
Roland Dreieraef9ec32005-11-02 14:07:13 -08002279 sprintf(target->target_name, "SRP.T10:%016llX",
2280 (unsigned long long) be64_to_cpu(target->id_ext));
2281
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002282 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
Roland Dreieraef9ec32005-11-02 14:07:13 -08002283 return -ENODEV;
2284
FUJITA Tomonori32368222007-06-27 16:33:12 +09002285 memcpy(ids.port_id, &target->id_ext, 8);
2286 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
FUJITA Tomonoriaebd5e42007-07-11 15:08:15 +09002287 ids.roles = SRP_RPORT_ROLE_TARGET;
FUJITA Tomonori32368222007-06-27 16:33:12 +09002288 rport = srp_rport_add(target->scsi_host, &ids);
2289 if (IS_ERR(rport)) {
2290 scsi_remove_host(target->scsi_host);
2291 return PTR_ERR(rport);
2292 }
2293
Bart Van Asschedc1bdbd2011-09-16 20:41:13 +02002294 rport->lld_data = target;
Bart Van Assche9dd69a62013-10-26 14:32:30 +02002295 target->rport = rport;
Bart Van Asschedc1bdbd2011-09-16 20:41:13 +02002296
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07002297 spin_lock(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002298 list_add_tail(&target->list, &host->target_list);
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07002299 spin_unlock(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002300
2301 target->state = SRP_TARGET_LIVE;
2302
Roland Dreieraef9ec32005-11-02 14:07:13 -08002303 scsi_scan_target(&target->scsi_host->shost_gendev,
Matthew Wilcox1962a4a2006-06-17 20:37:30 -07002304 0, target->scsi_id, SCAN_WILD_CARD, 0);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002305
2306 return 0;
2307}
2308
Tony Jonesee959b02008-02-22 00:13:36 +01002309static void srp_release_dev(struct device *dev)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002310{
2311 struct srp_host *host =
Tony Jonesee959b02008-02-22 00:13:36 +01002312 container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002313
2314 complete(&host->released);
2315}
2316
2317static struct class srp_class = {
2318 .name = "infiniband_srp",
Tony Jonesee959b02008-02-22 00:13:36 +01002319 .dev_release = srp_release_dev
Roland Dreieraef9ec32005-11-02 14:07:13 -08002320};
2321
Bart Van Assche96fc2482013-06-28 14:51:26 +02002322/**
2323 * srp_conn_unique() - check whether the connection to a target is unique
Bart Van Asscheaf246632014-05-20 15:04:21 +02002324 * @host: SRP host.
2325 * @target: SRP target port.
Bart Van Assche96fc2482013-06-28 14:51:26 +02002326 */
2327static bool srp_conn_unique(struct srp_host *host,
2328 struct srp_target_port *target)
2329{
2330 struct srp_target_port *t;
2331 bool ret = false;
2332
2333 if (target->state == SRP_TARGET_REMOVED)
2334 goto out;
2335
2336 ret = true;
2337
2338 spin_lock(&host->target_lock);
2339 list_for_each_entry(t, &host->target_list, list) {
2340 if (t != target &&
2341 target->id_ext == t->id_ext &&
2342 target->ioc_guid == t->ioc_guid &&
2343 target->initiator_ext == t->initiator_ext) {
2344 ret = false;
2345 break;
2346 }
2347 }
2348 spin_unlock(&host->target_lock);
2349
2350out:
2351 return ret;
2352}
2353
Roland Dreieraef9ec32005-11-02 14:07:13 -08002354/*
2355 * Target ports are added by writing
2356 *
2357 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2358 * pkey=<P_Key>,service_id=<service ID>
2359 *
2360 * to the add_target sysfs attribute.
2361 */
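/*
 * Example (illustrative identifiers; the "srp-<device>-<port>" part of the
 * sysfs path depends on the local HCA and port):
 *
 * echo id_ext=200400a0b8114531,ioc_guid=0002c90200402bd4,\
 * dgid=fe800000000000000002c90200402bd5,pkey=ffff,service_id=0002c90200402bd4 \
 * > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 */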
2362enum {
2363 SRP_OPT_ERR = 0,
2364 SRP_OPT_ID_EXT = 1 << 0,
2365 SRP_OPT_IOC_GUID = 1 << 1,
2366 SRP_OPT_DGID = 1 << 2,
2367 SRP_OPT_PKEY = 1 << 3,
2368 SRP_OPT_SERVICE_ID = 1 << 4,
2369 SRP_OPT_MAX_SECT = 1 << 5,
Vu Pham52fb2b502006-06-17 20:37:31 -07002370 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
Ramachandra K0c0450db2006-06-17 20:37:38 -07002371 SRP_OPT_IO_CLASS = 1 << 7,
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02002372 SRP_OPT_INITIATOR_EXT = 1 << 8,
David Dillow49248642011-01-14 18:23:24 -05002373 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
David Dillowc07d4242011-01-16 13:57:10 -05002374 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
2375 SRP_OPT_SG_TABLESIZE = 1 << 11,
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002376 SRP_OPT_COMP_VECTOR = 1 << 12,
Vu Pham7bb312e2013-10-26 14:31:27 +02002377 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
Bart Van Assche4d73f952013-10-26 14:40:37 +02002378 SRP_OPT_QUEUE_SIZE = 1 << 14,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002379 SRP_OPT_ALL = (SRP_OPT_ID_EXT |
2380 SRP_OPT_IOC_GUID |
2381 SRP_OPT_DGID |
2382 SRP_OPT_PKEY |
2383 SRP_OPT_SERVICE_ID),
2384};
2385
Steven Whitehousea447c092008-10-13 10:46:57 +01002386static const match_table_t srp_opt_tokens = {
Vu Pham52fb2b502006-06-17 20:37:31 -07002387 { SRP_OPT_ID_EXT, "id_ext=%s" },
2388 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
2389 { SRP_OPT_DGID, "dgid=%s" },
2390 { SRP_OPT_PKEY, "pkey=%x" },
2391 { SRP_OPT_SERVICE_ID, "service_id=%s" },
2392 { SRP_OPT_MAX_SECT, "max_sect=%d" },
2393 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
Ramachandra K0c0450db2006-06-17 20:37:38 -07002394 { SRP_OPT_IO_CLASS, "io_class=%x" },
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02002395 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
David Dillow49248642011-01-14 18:23:24 -05002396 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
David Dillowc07d4242011-01-16 13:57:10 -05002397 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
2398 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002399 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
Vu Pham7bb312e2013-10-26 14:31:27 +02002400 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
Bart Van Assche4d73f952013-10-26 14:40:37 +02002401 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
Vu Pham52fb2b502006-06-17 20:37:31 -07002402 { SRP_OPT_ERR, NULL }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002403};
2404
2405static int srp_parse_options(const char *buf, struct srp_target_port *target)
2406{
2407 char *options, *sep_opt;
2408 char *p;
2409 char dgid[3];
2410 substring_t args[MAX_OPT_ARGS];
2411 int opt_mask = 0;
2412 int token;
2413 int ret = -EINVAL;
2414 int i;
2415
2416 options = kstrdup(buf, GFP_KERNEL);
2417 if (!options)
2418 return -ENOMEM;
2419
2420 sep_opt = options;
2421 while ((p = strsep(&sep_opt, ",")) != NULL) {
2422 if (!*p)
2423 continue;
2424
2425 token = match_token(p, srp_opt_tokens, args);
2426 opt_mask |= token;
2427
2428 switch (token) {
2429 case SRP_OPT_ID_EXT:
2430 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02002431 if (!p) {
2432 ret = -ENOMEM;
2433 goto out;
2434 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002435 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2436 kfree(p);
2437 break;
2438
2439 case SRP_OPT_IOC_GUID:
2440 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02002441 if (!p) {
2442 ret = -ENOMEM;
2443 goto out;
2444 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002445 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
2446 kfree(p);
2447 break;
2448
2449 case SRP_OPT_DGID:
2450 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02002451 if (!p) {
2452 ret = -ENOMEM;
2453 goto out;
2454 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002455 if (strlen(p) != 32) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002456 pr_warn("bad dest GID parameter '%s'\n", p);
Roland Dreierce1823f2006-04-03 09:31:04 -07002457 kfree(p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002458 goto out;
2459 }
2460
2461 for (i = 0; i < 16; ++i) {
2462 strlcpy(dgid, p + i * 2, 3);
2463 target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16);
2464 }
Roland Dreierbf17c1c2006-03-20 10:08:25 -08002465 kfree(p);
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002466 memcpy(target->orig_dgid, target->path.dgid.raw, 16);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002467 break;
2468
2469 case SRP_OPT_PKEY:
2470 if (match_hex(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002471 pr_warn("bad P_Key parameter '%s'\n", p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002472 goto out;
2473 }
2474 target->path.pkey = cpu_to_be16(token);
2475 break;
2476
2477 case SRP_OPT_SERVICE_ID:
2478 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02002479 if (!p) {
2480 ret = -ENOMEM;
2481 goto out;
2482 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002483 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
Sean Hefty247e0202007-08-08 15:51:18 -07002484 target->path.service_id = target->service_id;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002485 kfree(p);
2486 break;
2487
2488 case SRP_OPT_MAX_SECT:
2489 if (match_int(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002490 pr_warn("bad max sect parameter '%s'\n", p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002491 goto out;
2492 }
2493 target->scsi_host->max_sectors = token;
2494 break;
2495
Bart Van Assche4d73f952013-10-26 14:40:37 +02002496 case SRP_OPT_QUEUE_SIZE:
2497 if (match_int(args, &token) || token < 1) {
2498 pr_warn("bad queue_size parameter '%s'\n", p);
2499 goto out;
2500 }
2501 target->scsi_host->can_queue = token;
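			/*
			 * Reserve extra queue slots beyond what the SCSI
			 * mid-layer may queue, for SRP_RSP replies to
			 * SRP_CRED_REQ/SRP_AER_REQ and for task management
			 * requests.
			 */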
2502 target->queue_size = token + SRP_RSP_SQ_SIZE +
2503 SRP_TSK_MGMT_SQ_SIZE;
2504 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
2505 target->scsi_host->cmd_per_lun = token;
2506 break;
2507
Vu Pham52fb2b502006-06-17 20:37:31 -07002508 case SRP_OPT_MAX_CMD_PER_LUN:
Bart Van Assche4d73f952013-10-26 14:40:37 +02002509 if (match_int(args, &token) || token < 1) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002510 pr_warn("bad max cmd_per_lun parameter '%s'\n",
2511 p);
Vu Pham52fb2b502006-06-17 20:37:31 -07002512 goto out;
2513 }
Bart Van Assche4d73f952013-10-26 14:40:37 +02002514 target->scsi_host->cmd_per_lun = token;
Vu Pham52fb2b502006-06-17 20:37:31 -07002515 break;
2516
Ramachandra K0c0450db2006-06-17 20:37:38 -07002517 case SRP_OPT_IO_CLASS:
2518 if (match_hex(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002519 pr_warn("bad IO class parameter '%s'\n", p);
Ramachandra K0c0450db2006-06-17 20:37:38 -07002520 goto out;
2521 }
2522 if (token != SRP_REV10_IB_IO_CLASS &&
2523 token != SRP_REV16A_IB_IO_CLASS) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002524 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
2525 token, SRP_REV10_IB_IO_CLASS,
2526 SRP_REV16A_IB_IO_CLASS);
Ramachandra K0c0450db2006-06-17 20:37:38 -07002527 goto out;
2528 }
2529 target->io_class = token;
2530 break;
2531
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02002532 case SRP_OPT_INITIATOR_EXT:
2533 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02002534 if (!p) {
2535 ret = -ENOMEM;
2536 goto out;
2537 }
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02002538 target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2539 kfree(p);
2540 break;
2541
David Dillow49248642011-01-14 18:23:24 -05002542 case SRP_OPT_CMD_SG_ENTRIES:
2543 if (match_int(args, &token) || token < 1 || token > 255) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002544 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
2545 p);
David Dillow49248642011-01-14 18:23:24 -05002546 goto out;
2547 }
2548 target->cmd_sg_cnt = token;
2549 break;
2550
David Dillowc07d4242011-01-16 13:57:10 -05002551 case SRP_OPT_ALLOW_EXT_SG:
2552 if (match_int(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002553 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
David Dillowc07d4242011-01-16 13:57:10 -05002554 goto out;
2555 }
2556 target->allow_ext_sg = !!token;
2557 break;
2558
2559 case SRP_OPT_SG_TABLESIZE:
2560 if (match_int(args, &token) || token < 1 ||
2561 token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002562 pr_warn("bad max sg_tablesize parameter '%s'\n",
2563 p);
David Dillowc07d4242011-01-16 13:57:10 -05002564 goto out;
2565 }
2566 target->sg_tablesize = token;
2567 break;
2568
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002569 case SRP_OPT_COMP_VECTOR:
2570 if (match_int(args, &token) || token < 0) {
2571 pr_warn("bad comp_vector parameter '%s'\n", p);
2572 goto out;
2573 }
2574 target->comp_vector = token;
2575 break;
2576
		case SRP_OPT_TL_RETRY_COUNT:
			if (match_int(args, &token) || token < 2 || token > 7) {
				pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
					p);
				goto out;
			}
			target->tl_retry_count = token;
			break;

		default:
			pr_warn("unknown parameter or missing value '%s' in target creation request\n",
				p);
			goto out;
		}
	}

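	/*
	 * Succeed only if every option flagged as mandatory in SRP_OPT_ALL
	 * was supplied; otherwise warn about each missing one.
	 */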
	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
		ret = 0;
	else
		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
			    !(srp_opt_tokens[i].token & opt_mask))
				pr_warn("target creation request is missing parameter '%s'\n",
					srp_opt_tokens[i].pattern);

	if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
	    && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
		pr_warn("cmd_per_lun = %d > queue_size = %d\n",
			target->scsi_host->cmd_per_lun,
			target->scsi_host->can_queue);

out:
	kfree(options);
	return ret;
}

static ssize_t srp_create_target(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);
	struct Scsi_Host *target_host;
	struct srp_target_port *target;
	struct ib_device *ibdev = host->srp_dev->dev;
	int ret;

	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
	if (!target_host)
		return -ENOMEM;

	target_host->transportt = ib_srp_transport_template;
	target_host->max_channel = 0;
	target_host->max_id = 1;
	target_host->max_lun = SRP_MAX_LUN;
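	/*
	 * sizeof applied through a null pointer cast: this is simply the
	 * size of the cdb[] array in struct srp_cmd (16 bytes), i.e. the
	 * longest CDB that fits in the fixed part of an SRP_CMD IU.
	 */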
	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;

	target = host_to_target(target_host);

	target->io_class = SRP_REV16A_IB_IO_CLASS;
	target->scsi_host = target_host;
	target->srp_host = host;
	target->lkey = host->srp_dev->mr->lkey;
	target->rkey = host->srp_dev->mr->rkey;
	target->cmd_sg_cnt = cmd_sg_entries;
	target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
	target->allow_ext_sg = allow_ext_sg;
	target->tl_retry_count = 7;
	target->queue_size = SRP_DEFAULT_QUEUE_SIZE;

	mutex_lock(&host->add_target_mutex);

	ret = srp_parse_options(buf, target);
	if (ret)
		goto err;

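	/*
	 * Reserve part of the queue for task-management requests, so the
	 * request ring used for regular SCSI commands never claims those
	 * slots.
	 */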
	target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;

	if (!srp_conn_unique(target->srp_host, target)) {
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
			     be64_to_cpu(target->id_ext),
			     be64_to_cpu(target->ioc_guid),
			     be64_to_cpu(target->initiator_ext));
		ret = -EEXIST;
		goto err;
	}

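	/*
	 * Without an FMR pool each s/g entry needs its own descriptor,
	 * and without external indirect descriptors all of them must fit
	 * inside the command IU itself, so a sg_tablesize larger than
	 * cmd_sg_cnt would be unusable.
	 */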
	if (!host->srp_dev->fmr_pool && !target->allow_ext_sg &&
	    target->cmd_sg_cnt < target->sg_tablesize) {
		pr_warn("No FMR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
		target->sg_tablesize = target->cmd_sg_cnt;
	}

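	/*
	 * indirect_size is the size of a fully populated indirect
	 * descriptor table; max_iu_len is the largest request IU that can
	 * be sent: the SRP_CMD header plus an indirect buffer descriptor
	 * with cmd_sg_cnt inline entries.
	 */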
	target_host->sg_tablesize = target->sg_tablesize;
	target->indirect_size = target->sg_tablesize *
				sizeof (struct srp_direct_buf);
	target->max_iu_len = sizeof (struct srp_cmd) +
			     sizeof (struct srp_indirect_buf) +
			     target->cmd_sg_cnt * sizeof (struct srp_direct_buf);

	INIT_WORK(&target->tl_err_work, srp_tl_err_work);
	INIT_WORK(&target->remove_work, srp_remove_work);
	spin_lock_init(&target->lock);
	INIT_LIST_HEAD(&target->free_tx);
	ret = srp_alloc_req_data(target);
	if (ret)
		goto err_free_mem;

	ret = ib_query_gid(ibdev, host->port, 0, &target->path.sgid);
	if (ret)
		goto err_free_mem;

	ret = srp_create_target_ib(target);
	if (ret)
		goto err_free_mem;

	ret = srp_new_cm_id(target);
	if (ret)
		goto err_free_ib;

	ret = srp_connect_target(target);
	if (ret) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Connection failed\n");
		goto err_cm_id;
	}

	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

	shost_printk(KERN_DEBUG, target->scsi_host, PFX
		     "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
		     be64_to_cpu(target->id_ext),
		     be64_to_cpu(target->ioc_guid),
		     be16_to_cpu(target->path.pkey),
		     be64_to_cpu(target->service_id),
		     target->path.sgid.raw, target->path.dgid.raw);

	ret = count;

out:
	mutex_unlock(&host->add_target_mutex);
	return ret;

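	/*
	 * Error unwind: undo the steps above in reverse order, each label
	 * falling through into the ones below it.
	 */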
err_disconnect:
	srp_disconnect_target(target);

err_cm_id:
	ib_destroy_cm_id(target->cm_id);

err_free_ib:
	srp_free_target_ib(target);

err_free_mem:
	srp_free_req_data(target);

err:
	scsi_host_put(target_host);
	goto out;
}

static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);

static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%s\n", host->srp_dev->dev->name);
}

static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%d\n", host->port);
}

static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);

static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	spin_lock_init(&host->target_lock);
	init_completion(&host->released);
	mutex_init(&host->add_target_mutex);
	host->srp_dev = device;
	host->port = port;

	host->dev.class = &srp_class;
	host->dev.parent = device->dev->dma_device;
	dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);

	if (device_register(&host->dev))
		goto free_host;
	if (device_create_file(&host->dev, &dev_attr_add_target))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_ibdev))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_port))
		goto err_class;

	return host;

err_class:
	device_unregister(&host->dev);

free_host:
	kfree(host);

	return NULL;
}

static void srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct ib_device_attr *dev_attr;
	struct ib_fmr_pool_param fmr_param;
	struct srp_host *host;
	int max_pages_per_fmr, fmr_page_shift, s, e, p;

	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
	if (!dev_attr)
		return;

	if (ib_query_device(device, dev_attr)) {
		pr_warn("Query device failed for %s\n", device->name);
		goto free_attr;
	}

	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
	if (!srp_dev)
		goto free_attr;

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 4096 bytes. We're unlikely to build large sglists
	 * out of smaller entries.
	 */
	fmr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1);
	srp_dev->fmr_page_size = 1 << fmr_page_shift;
	srp_dev->fmr_page_mask = ~((u64) srp_dev->fmr_page_size - 1);
	srp_dev->fmr_max_size = srp_dev->fmr_page_size * SRP_FMR_SIZE;

	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd = ib_alloc_pd(device);
	if (IS_ERR(srp_dev->pd))
		goto free_dev;

	srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
				    IB_ACCESS_LOCAL_WRITE |
				    IB_ACCESS_REMOTE_READ |
				    IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(srp_dev->mr))
		goto err_pd;

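	/*
	 * Pool creation may fail, e.g. if the HCA cannot support this
	 * many pages per FMR, so retry with progressively smaller FMRs
	 * (halving max_pages_per_fmr and the per-FMR mapping limit in
	 * step) down to SRP_FMR_MIN_SIZE pages.
	 */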
	for (max_pages_per_fmr = SRP_FMR_SIZE;
	     max_pages_per_fmr >= SRP_FMR_MIN_SIZE;
	     max_pages_per_fmr /= 2, srp_dev->fmr_max_size /= 2) {
		memset(&fmr_param, 0, sizeof fmr_param);
		fmr_param.pool_size = SRP_FMR_POOL_SIZE;
		fmr_param.dirty_watermark = SRP_FMR_DIRTY_SIZE;
		fmr_param.cache = 1;
		fmr_param.max_pages_per_fmr = max_pages_per_fmr;
		fmr_param.page_shift = fmr_page_shift;
		fmr_param.access = (IB_ACCESS_LOCAL_WRITE |
				    IB_ACCESS_REMOTE_WRITE |
				    IB_ACCESS_REMOTE_READ);

		srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param);
		if (!IS_ERR(srp_dev->fmr_pool))
			break;
	}

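	/*
	 * The FMR pool is an optimization, not a requirement: if no pool
	 * could be created at any size, carry on without one and fall
	 * back to the global DMA MR for memory registration.
	 */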
	if (IS_ERR(srp_dev->fmr_pool))
		srp_dev->fmr_pool = NULL;

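	/*
	 * A switch has a single management port numbered 0; HCAs number
	 * their physical ports starting at 1.
	 */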
	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);

	goto free_attr;

err_pd:
	ib_dealloc_pd(srp_dev->pd);

free_dev:
	kfree(srp_dev);

free_attr:
	kfree(dev_attr);
}

static void srp_remove_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	struct srp_target_port *target;

	srp_dev = ib_get_client_data(device, &srp_client);
	if (!srp_dev)
		return;

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		device_unregister(&host->dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Remove all target ports.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list)
			srp_queue_remove_work(target);
		spin_unlock(&host->target_lock);

		/*
		 * Wait for target port removal tasks.
		 */
		flush_workqueue(system_long_wq);

		kfree(host);
	}

	if (srp_dev->fmr_pool)
		ib_destroy_fmr_pool(srp_dev->fmr_pool);
	ib_dereg_mr(srp_dev->mr);
	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}

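/*
 * Hooks that wire this driver into the SRP transport class: rport state
 * handling (reconnect delay, fast_io_fail and dev_loss timeouts) plus
 * the reconnect, delete and I/O-termination callbacks implemented by
 * this driver.
 */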
static struct srp_function_template ib_srp_transport_functions = {
	.has_rport_state	= true,
	.reset_timer_if_blocked	= true,
	.reconnect_delay	= &srp_reconnect_delay,
	.fast_io_fail_tmo	= &srp_fast_io_fail_tmo,
	.dev_loss_tmo		= &srp_dev_loss_tmo,
	.reconnect		= srp_rport_reconnect,
	.rport_delete		= srp_rport_delete,
	.terminate_rport_io	= srp_terminate_io,
};

static int __init srp_init_module(void)
{
	int ret;

	BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));

	if (srp_sg_tablesize) {
		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
		if (!cmd_sg_entries)
			cmd_sg_entries = srp_sg_tablesize;
	}

	if (!cmd_sg_entries)
		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;

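	/*
	 * The data descriptor count fields of an SRP_CMD IU are one byte
	 * wide, so more than 255 gather/scatter entries cannot be
	 * expressed.
	 */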
	if (cmd_sg_entries > 255) {
		pr_warn("Clamping cmd_sg_entries to 255\n");
		cmd_sg_entries = 255;
	}

	if (!indirect_sg_entries)
		indirect_sg_entries = cmd_sg_entries;
	else if (indirect_sg_entries < cmd_sg_entries) {
		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
			cmd_sg_entries);
		indirect_sg_entries = cmd_sg_entries;
	}

	ib_srp_transport_template =
		srp_attach_transport(&ib_srp_transport_functions);
	if (!ib_srp_transport_template)
		return -ENOMEM;

	ret = class_register(&srp_class);
	if (ret) {
		pr_err("couldn't register class infiniband_srp\n");
		srp_release_transport(ib_srp_transport_template);
		return ret;
	}

	ib_sa_register_client(&srp_sa_client);

	ret = ib_register_client(&srp_client);
	if (ret) {
		pr_err("couldn't register IB client\n");
		srp_release_transport(ib_srp_transport_template);
		ib_sa_unregister_client(&srp_sa_client);
		class_unregister(&srp_class);
		return ret;
	}

	return 0;
}

static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);
	srp_release_transport(ib_srp_transport_template);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);