/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) PFX fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"1.0"
#define DRV_RELDATE	"July 1, 2013"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
		   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

static struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

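/*
 * srp_tmo_get() - show a timeout module parameter; a negative stored value
 * means the timeout is disabled and is reported as "off".
 */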
static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sprintf(buffer, "%d", tmo);
	else
		return sprintf(buffer, "off");
}

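/*
 * srp_tmo_set() - parse a timeout module parameter; "off" is stored as -1,
 * and the new value is checked against the other two timeouts via
 * srp_tmo_valid() before it is accepted.
 */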
static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	if (strncmp(val, "off", 3) != 0) {
		res = kstrtoint(val, 0, &tmo);
		if (res)
			goto out;
	} else {
		tmo = -1;
	}
	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}

static struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %d\n", event->event);
}

static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_pkey(target->srp_host->srp_dev->dev,
			   target->srp_host->port,
			   be16_to_cpu(target->path.pkey),
			   &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

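/*
 * srp_new_cm_id() - allocate a new IB CM ID for the target port and destroy
 * the previous one, if any.
 */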
static int srp_new_cm_id(struct srp_target_port *target)
{
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, target);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (target->cm_id)
		ib_destroy_cm_id(target->cm_id);
	target->cm_id = new_cm_id;

	return 0;
}

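/*
 * srp_create_target_ib() - create the receive CQ, send CQ and RC QP for a
 * target port; on success any previously allocated QP and CQs are destroyed
 * and replaced.
 */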
static int srp_create_target_ib(struct srp_target_port *target)
{
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
			       srp_recv_completion, NULL, target, SRP_RQ_SIZE,
			       target->comp_vector);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
			       srp_send_completion, NULL, target, SRP_SQ_SIZE,
			       target->comp_vector);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = SRP_SQ_SIZE;
	init_attr->cap.max_recv_wr     = SRP_RQ_SIZE;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = 1;
	init_attr->sq_sig_type         = IB_SIGNAL_ALL_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = send_cq;
	init_attr->recv_cq             = recv_cq;

	qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, qp);
	if (ret)
		goto err_qp;

	if (target->qp)
		ib_destroy_qp(target->qp);
	if (target->recv_cq)
		ib_destroy_cq(target->recv_cq);
	if (target->send_cq)
		ib_destroy_cq(target->send_cq);

	target->qp = qp;
	target->recv_cq = recv_cq;
	target->send_cq = send_cq;

	kfree(init_attr);
	return 0;

err_qp:
	ib_destroy_qp(qp);

err_send_cq:
	ib_destroy_cq(send_cq);

err_recv_cq:
	ib_destroy_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}

static void srp_free_target_ib(struct srp_target_port *target)
{
	int i;

	ib_destroy_qp(target->qp);
	ib_destroy_cq(target->send_cq);
	ib_destroy_cq(target->recv_cq);

	target->qp = NULL;
	target->send_cq = target->recv_cq = NULL;

	for (i = 0; i < SRP_RQ_SIZE; ++i)
		srp_free_iu(target->srp_host, target->rx_ring[i]);
	for (i = 0; i < SRP_SQ_SIZE; ++i)
		srp_free_iu(target->srp_host, target->tx_ring[i]);
}

static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	target->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		target->path = *pathrec;
	complete(&target->done);
}

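/*
 * srp_lookup_path() - issue an SA path record query for the target and wait
 * for srp_path_rec_completion() to store the result in target->path.
 */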
static int srp_lookup_path(struct srp_target_port *target)
{
	target->path.numb_path = 1;

	init_completion(&target->done);

	target->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
						   target->srp_host->srp_dev->dev,
						   target->srp_host->port,
						   &target->path,
						   IB_SA_PATH_REC_SERVICE_ID |
						   IB_SA_PATH_REC_DGID	     |
						   IB_SA_PATH_REC_SGID	     |
						   IB_SA_PATH_REC_NUMB_PATH  |
						   IB_SA_PATH_REC_PKEY,
						   SRP_PATH_REC_TIMEOUT_MS,
						   GFP_KERNEL,
						   srp_path_rec_completion,
						   target, &target->path_query);
	if (target->path_query_id < 0)
		return target->path_query_id;

	wait_for_completion(&target->done);

	if (target->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return target->status;
}

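/*
 * srp_send_req() - build an SRP_LOGIN_REQ and send it through the IB CM,
 * encoding the initiator and target port IDs according to the I/O class
 * reported by the target.
 */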
static int srp_send_req(struct srp_target_port *target)
{
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path = &target->path;
	req->param.alternate_path = NULL;
	req->param.service_id = target->service_id;
	req->param.qp_num = target->qp->qp_num;
	req->param.qp_type = target->qp->qp_type;
	req->param.private_data = &req->priv;
	req->param.private_data_len = sizeof req->priv;
	req->param.flow_control = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout = 20;
	req->param.retry_count = target->tl_retry_count;
	req->param.rnr_retry_count = 7;
	req->param.max_cm_retries = 15;

	req->priv.opcode = SRP_LOGIN_REQ;
	req->priv.tag = 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					    SRP_BUF_FORMAT_INDIRECT);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->path.sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->path.sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     (unsigned long long) be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(target->cm_id, &req->param);

	kfree(req);

	return status;
}

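/*
 * srp_queue_remove_work() - move the target to the SRP_TARGET_REMOVED state
 * and schedule srp_remove_work(); returns false if removal had already been
 * queued.
 */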
static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(system_long_wq, &target->remove_work);

	return changed;
}

static bool srp_change_conn_state(struct srp_target_port *target,
				  bool connected)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->connected != connected) {
		target->connected = connected;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	return changed;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	if (srp_change_conn_state(target, false)) {
		/* XXX should send SRP_I_LOGOUT request */

		if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "Sending CM DREQ failed\n");
		}
	}
}

static void srp_free_req_data(struct srp_target_port *target)
{
	struct ib_device *ibdev = target->srp_host->srp_dev->dev;
	struct srp_request *req;
	int i;

	for (i = 0, req = target->req_ring; i < SRP_CMD_SQ_SIZE; ++i, ++req) {
		kfree(req->fmr_list);
		kfree(req->map_page);
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}
}

/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template and that did not exist
 * before invocation of this function will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	struct device_attribute **attr;

	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
		device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_target(struct srp_target_port *target)
{
	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_rport_get(target->rport);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_disconnect_target(target);
	ib_destroy_cm_id(target->cm_id);
	srp_free_target_ib(target);
	cancel_work_sync(&target->tl_err_work);
	srp_rport_put(target->rport);
	srp_free_req_data(target);
	scsi_host_put(target->scsi_host);
}

static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_remove_target(target);

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);
}

static void srp_rport_delete(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
}

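/*
 * srp_connect_target() - look up a path and log in to the target, retrying
 * on port and DLID redirects and on stale connections.
 */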
static int srp_connect_target(struct srp_target_port *target)
{
	int retries = 3;
	int ret;

	WARN_ON_ONCE(target->connected);

	target->qp_in_error = false;

	ret = srp_lookup_path(target);
	if (ret)
		return ret;

	while (1) {
		init_completion(&target->done);
		ret = srp_send_req(target);
		if (ret)
			return ret;
		wait_for_completion(&target->done);

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (target->status) {
		case 0:
			srp_change_conn_state(target, true);
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(target);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			/* Our current CM id was stale, and is now in timewait.
			 * Try to reconnect with a new one.
			 */
			if (!retries-- || srp_new_cm_id(target)) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "giving up on stale connection\n");
				target->status = -ECONNRESET;
				return target->status;
			}

			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "retrying stale connection\n");
			break;

		default:
			return target->status;
		}
	}
}

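/*
 * srp_unmap_data() - undo the FMR and DMA mappings that srp_map_data() set
 * up for a SCSI command.
 */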
static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_target_port *target,
			   struct srp_request *req)
{
	struct ib_device *ibdev = target->srp_host->srp_dev->dev;
	struct ib_pool_fmr **pfmr;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	pfmr = req->fmr_list;
	while (req->nfmr--)
		ib_fmr_pool_unmap(*pfmr++);

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}

/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @target: SRP target port.
 * @req: SRP request.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target,
				       struct srp_request *req,
				       struct scsi_cmnd *scmnd)
{
	unsigned long flags;

	spin_lock_irqsave(&target->lock, flags);
	if (!scmnd) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	} else if (req->scmnd == scmnd) {
		req->scmnd = NULL;
	} else {
		scmnd = NULL;
	}
	spin_unlock_irqrestore(&target->lock, flags);

	return scmnd;
}

/**
 * srp_free_req() - Unmap data and add request to the free request list.
 */
static void srp_free_req(struct srp_target_port *target,
			 struct srp_request *req, struct scsi_cmnd *scmnd,
			 s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(scmnd, target, req);

	spin_lock_irqsave(&target->lock, flags);
	target->req_lim += req_lim_delta;
	list_add_tail(&req->list, &target->free_reqs);
	spin_unlock_irqrestore(&target->lock, flags);
}

static void srp_finish_req(struct srp_target_port *target,
			   struct srp_request *req, int result)
{
	struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL);

	if (scmnd) {
		srp_free_req(target, req, scmnd, 0);
		scmnd->result = result;
		scmnd->scsi_done(scmnd);
	}
}

static void srp_terminate_io(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	int i;

	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
		struct srp_request *req = &target->req_ring[i];
		srp_finish_req(target, req, DID_TRANSPORT_FAILFAST << 16);
	}
}

/*
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to realize that is not to call this function
 * directly but to call srp_reconnect_rport() instead since that last function
 * serializes calls of this function via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
 */
static int srp_rport_reconnect(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	int i, ret;

	srp_disconnect_target(target);
	/*
	 * Now get a new local CM ID so that we avoid confusing the target in
	 * case things are really fouled up. Doing so also ensures that all CM
	 * callbacks will have finished before a new QP is allocated.
	 */
	ret = srp_new_cm_id(target);
	/*
	 * Whether or not creating a new CM ID succeeded, create a new
	 * QP. This guarantees that all completion callback function
	 * invocations have finished before request resetting starts.
	 */
	if (ret == 0)
		ret = srp_create_target_ib(target);
	else
		srp_create_target_ib(target);

	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
		struct srp_request *req = &target->req_ring[i];
		srp_finish_req(target, req, DID_RESET << 16);
	}

	INIT_LIST_HEAD(&target->free_tx);
	for (i = 0; i < SRP_SQ_SIZE; ++i)
		list_add(&target->tx_ring[i]->list, &target->free_tx);

	if (ret == 0)
		ret = srp_connect_target(target);

	if (ret == 0)
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "reconnect succeeded\n");

	return ret;
}

static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
			 unsigned int dma_len, u32 rkey)
{
	struct srp_direct_buf *desc = state->desc;

	desc->va = cpu_to_be64(dma_addr);
	desc->key = cpu_to_be32(rkey);
	desc->len = cpu_to_be32(dma_len);

	state->total_len += dma_len;
	state->desc++;
	state->ndesc++;
}

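/*
 * srp_map_finish_fmr() - turn the pages accumulated in @state into a memory
 * descriptor, using the FMR pool when more than one page has been collected.
 */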
static int srp_map_finish_fmr(struct srp_map_state *state,
			      struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_pool_fmr *fmr;
	u64 io_addr = 0;

	if (!state->npages)
		return 0;

	if (state->npages == 1) {
		srp_map_desc(state, state->base_dma_addr, state->fmr_len,
			     target->rkey);
		state->npages = state->fmr_len = 0;
		return 0;
	}

	fmr = ib_fmr_pool_map_phys(dev->fmr_pool, state->pages,
				   state->npages, io_addr);
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

	*state->next_fmr++ = fmr;
	state->nfmr++;

	srp_map_desc(state, 0, state->fmr_len, fmr->fmr->rkey);
	state->npages = state->fmr_len = 0;
	return 0;
}

static void srp_map_update_start(struct srp_map_state *state,
				 struct scatterlist *sg, int sg_index,
				 dma_addr_t dma_addr)
{
	state->unmapped_sg = sg;
	state->unmapped_index = sg_index;
	state->unmapped_addr = dma_addr;
}

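/*
 * srp_map_sg_entry() - add a single scatterlist entry to the mapping state,
 * either as a direct descriptor or by merging it into the FMR being built.
 */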
static int srp_map_sg_entry(struct srp_map_state *state,
			    struct srp_target_port *target,
			    struct scatterlist *sg, int sg_index,
			    int use_fmr)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
	unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
	unsigned int len;
	int ret;

	if (!dma_len)
		return 0;

	if (use_fmr == SRP_MAP_NO_FMR) {
		/* Once we're in direct map mode for a request, we don't
		 * go back to FMR mode, so no need to update anything
		 * other than the descriptor.
		 */
		srp_map_desc(state, dma_addr, dma_len, target->rkey);
		return 0;
	}

	/* If we start at an offset into the FMR page, don't merge into
	 * the current FMR. Finish it out, and use the kernel's MR for this
	 * sg entry. This is to avoid potential bugs on some SRP targets
	 * that were never quite defined, but went away when the initiator
	 * avoided using FMR on such page fragments.
	 */
	if (dma_addr & ~dev->fmr_page_mask || dma_len > dev->fmr_max_size) {
		ret = srp_map_finish_fmr(state, target);
		if (ret)
			return ret;

		srp_map_desc(state, dma_addr, dma_len, target->rkey);
		srp_map_update_start(state, NULL, 0, 0);
		return 0;
	}

	/* If this is the first sg to go into the FMR, save our position.
	 * We need to know the first unmapped entry, its index, and the
	 * first unmapped address within that entry to be able to restart
	 * mapping after an error.
	 */
	if (!state->unmapped_sg)
		srp_map_update_start(state, sg, sg_index, dma_addr);

	while (dma_len) {
		if (state->npages == SRP_FMR_SIZE) {
			ret = srp_map_finish_fmr(state, target);
			if (ret)
				return ret;

			srp_map_update_start(state, sg, sg_index, dma_addr);
		}

		len = min_t(unsigned int, dma_len, dev->fmr_page_size);

		if (!state->npages)
			state->base_dma_addr = dma_addr;
		state->pages[state->npages++] = dma_addr;
		state->fmr_len += len;
		dma_addr += len;
		dma_len -= len;
	}

	/* If the last entry of the FMR wasn't a full page, then we need to
	 * close it out and start a new one -- we can only merge at page
	 * boundaries.
	 */
	ret = 0;
	if (len != dev->fmr_page_size) {
		ret = srp_map_finish_fmr(state, target);
		if (!ret)
			srp_map_update_start(state, NULL, 0, 0);
	}
	return ret;
}

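/*
 * srp_map_data() - map the data buffer of a SCSI command and build the
 * direct or indirect data descriptor inside the SRP_CMD information unit.
 * Returns the resulting SRP_CMD length or a negative error code.
 */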
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
			struct srp_request *req)
{
	struct scatterlist *scat, *sg;
	struct srp_cmd *cmd = req->cmd->buf;
	int i, len, nents, count, use_fmr;
	struct srp_device *dev;
	struct ib_device *ibdev;
	struct srp_map_state state;
	struct srp_indirect_buf *indirect_hdr;
	u32 table_len;
	u8 fmt;

	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
		return sizeof (struct srp_cmd);

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled data direction %d\n",
			     scmnd->sc_data_direction);
		return -EINVAL;
	}

	nents = scsi_sg_count(scmnd);
	scat  = scsi_sglist(scmnd);

	dev = target->srp_host->srp_dev;
	ibdev = dev->dev;

	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
	if (unlikely(count == 0))
		return -EIO;

	fmt = SRP_DATA_DESC_DIRECT;
	len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);

	if (count == 1) {
		/*
		 * The midlayer only generated a single gather/scatter
		 * entry, or DMA mapping coalesced everything to a
		 * single entry.  So a direct descriptor along with
		 * the DMA MR suffices.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
		buf->key = cpu_to_be32(target->rkey);
		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));

		req->nfmr = 0;
		goto map_complete;
	}

	/* We have more than one scatter/gather entry, so build our indirect
	 * descriptor table, trying to merge as many entries with FMR as we
	 * can.
	 */
	indirect_hdr = (void *) cmd->add_data;

	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
				   target->indirect_size, DMA_TO_DEVICE);

	memset(&state, 0, sizeof(state));
	state.desc	= req->indirect_desc;
	state.pages	= req->map_page;
	state.next_fmr	= req->fmr_list;

	use_fmr = dev->fmr_pool ? SRP_MAP_ALLOW_FMR : SRP_MAP_NO_FMR;

	for_each_sg(scat, sg, count, i) {
		if (srp_map_sg_entry(&state, target, sg, i, use_fmr)) {
			/* FMR mapping failed, so backtrack to the first
			 * unmapped entry and continue on without using FMR.
			 */
			dma_addr_t dma_addr;
			unsigned int dma_len;

backtrack:
			sg = state.unmapped_sg;
			i = state.unmapped_index;

			dma_addr = ib_sg_dma_address(ibdev, sg);
			dma_len = ib_sg_dma_len(ibdev, sg);
			dma_len -= (state.unmapped_addr - dma_addr);
			dma_addr = state.unmapped_addr;
			use_fmr = SRP_MAP_NO_FMR;
			srp_map_desc(&state, dma_addr, dma_len, target->rkey);
		}
	}

	if (use_fmr == SRP_MAP_ALLOW_FMR && srp_map_finish_fmr(&state, target))
		goto backtrack;

	/* We've mapped the request, now pull as much of the indirect
	 * descriptor table as we can into the command buffer. If this
	 * target is not using an external indirect table, we are
	 * guaranteed to fit into the command, as the SCSI layer won't
	 * give us more S/G entries than we allow.
	 */
	req->nfmr = state.nfmr;
	if (state.ndesc == 1) {
		/* FMR mapping was able to collapse this to one entry,
		 * so use a direct descriptor.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		*buf = req->indirect_desc[0];
		goto map_complete;
	}

	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
		     !target->allow_ext_sg)) {
		shost_printk(KERN_ERR, target->scsi_host,
			     "Could not fit S/G list into SRP_CMD\n");
		return -EIO;
	}

	count = min(state.ndesc, target->cmd_sg_cnt);
	table_len = state.ndesc * sizeof (struct srp_direct_buf);

	fmt = SRP_DATA_DESC_INDIRECT;
	len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
	len += count * sizeof (struct srp_direct_buf);

	memcpy(indirect_hdr->desc_list, req->indirect_desc,
	       count * sizeof (struct srp_direct_buf));

	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
	indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
	indirect_hdr->len = cpu_to_be32(state.total_len);

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->data_out_desc_cnt = count;
	else
		cmd->data_in_desc_cnt = count;

	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
				      DMA_TO_DEVICE);

map_complete:
	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;
}

/*
 * Return an IU and possible credit to the free pool
 */
static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu,
			  enum srp_iu_type iu_type)
{
	unsigned long flags;

	spin_lock_irqsave(&target->lock, flags);
	list_add(&iu->list, &target->free_tx);
	if (iu_type != SRP_IU_RSP)
		++target->req_lim;
	spin_unlock_irqrestore(&target->lock, flags);
}

/*
 * Must be called with target->lock held to protect req_lim and free_tx.
 * If IU is not sent, it must be returned using srp_put_tx_iu().
 *
 * Note:
 * An upper limit for the number of allocated information units for each
 * request type is:
 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
 *   more than Scsi_Host.can_queue requests.
 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
 *   one unanswered SRP request to an initiator.
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
				      enum srp_iu_type iu_type)
{
	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
	struct srp_iu *iu;

	srp_send_completion(target->send_cq, target);

	if (list_empty(&target->free_tx))
		return NULL;

	/* Initiator responses to target requests do not consume credits */
	if (iu_type != SRP_IU_RSP) {
		if (target->req_lim <= rsv) {
			++target->zero_req_lim;
			return NULL;
		}

		--target->req_lim;
	}

	iu = list_first_entry(&target->free_tx, struct srp_iu, list);
	list_del(&iu->list);
	return iu;
}

static int srp_post_send(struct srp_target_port *target,
			 struct srp_iu *iu, int len)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;

	list.addr   = iu->dma;
	list.length = len;
	list.lkey   = target->lkey;

	wr.next       = NULL;
	wr.wr_id      = (uintptr_t) iu;
	wr.sg_list    = &list;
	wr.num_sge    = 1;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	return ib_post_send(target->qp, &wr, &bad_wr);
}

static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu)
{
	struct ib_recv_wr wr, *bad_wr;
	struct ib_sge list;

	list.addr   = iu->dma;
	list.length = iu->size;
	list.lkey   = target->lkey;

	wr.next     = NULL;
	wr.wr_id    = (uintptr_t) iu;
	wr.sg_list  = &list;
	wr.num_sge  = 1;

	return ib_post_recv(target->qp, &wr, &bad_wr);
}

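/*
 * srp_process_rsp() - handle an SRP_RSP information unit by completing
 * either an outstanding task management request or the SCSI command
 * identified by the response tag.
 */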
static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
{
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		spin_lock_irqsave(&target->lock, flags);
		target->req_lim += be32_to_cpu(rsp->req_lim_delta);
		spin_unlock_irqrestore(&target->lock, flags);

		target->tsk_mgmt_status = -1;
		if (be32_to_cpu(rsp->resp_data_len) >= 4)
			target->tsk_mgmt_status = rsp->data[3];
		complete(&target->tsk_mgmt_done);
	} else {
		req = &target->req_ring[rsp->tag];
		scmnd = srp_claim_req(target, req, NULL);
		if (!scmnd) {
			shost_printk(KERN_ERR, target->scsi_host,
				     "Null scmnd for RSP w/tag %016llx\n",
				     (unsigned long long) rsp->tag);

			spin_lock_irqsave(&target->lock, flags);
			target->req_lim += be32_to_cpu(rsp->req_lim_delta);
			spin_unlock_irqrestore(&target->lock, flags);

			return;
		}
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));

		srp_free_req(target, req, scmnd,
			     be32_to_cpu(rsp->req_lim_delta));

		scmnd->host_scribble = NULL;
		scmnd->scsi_done(scmnd);
	}
}

static int srp_response_common(struct srp_target_port *target, s32 req_delta,
			       void *rsp, int len)
{
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	unsigned long flags;
	struct srp_iu *iu;
	int err;

	spin_lock_irqsave(&target->lock, flags);
	target->req_lim += req_delta;
	iu = __srp_get_tx_iu(target, SRP_IU_RSP);
	spin_unlock_irqrestore(&target->lock, flags);

	if (!iu) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "no IU available to send response\n");
		return 1;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
	memcpy(iu->buf, rsp, len);
	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);

	err = srp_post_send(target, iu, len);
	if (err) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "unable to post response: %d\n", err);
		srp_put_tx_iu(target, iu, SRP_IU_RSP);
	}

	return err;
}

static void srp_process_cred_req(struct srp_target_port *target,
				 struct srp_cred_req *req)
{
	struct srp_cred_rsp rsp = {
		.opcode = SRP_CRED_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	if (srp_response_common(target, delta, &rsp, sizeof rsp))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_CRED_REQ\n");
}

static void srp_process_aer_req(struct srp_target_port *target,
				struct srp_aer_req *req)
{
	struct srp_aer_rsp rsp = {
		.opcode = SRP_AER_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	shost_printk(KERN_ERR, target->scsi_host, PFX
		     "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));

	if (srp_response_common(target, delta, &rsp, sizeof rsp))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_AER_REQ\n");
}

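/*
 * srp_handle_recv() - dispatch a received information unit based on its SRP
 * opcode and repost the receive buffer afterwards.
 */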
Roland Dreieraef9ec32005-11-02 14:07:13 -08001322static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
1323{
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001324 struct ib_device *dev = target->srp_host->srp_dev->dev;
Roland Dreier737b94e2011-05-23 11:30:04 -07001325 struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001326 int res;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001327 u8 opcode;
1328
Ralph Campbell85507bc2006-12-12 14:30:55 -08001329 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
1330 DMA_FROM_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001331
1332 opcode = *(u8 *) iu->buf;
1333
1334 if (0) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001335 shost_printk(KERN_ERR, target->scsi_host,
1336 PFX "recv completion, opcode 0x%02x\n", opcode);
Bart Van Assche7a700812010-07-29 15:56:37 +00001337 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
1338 iu->buf, wc->byte_len, true);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001339 }
1340
1341 switch (opcode) {
1342 case SRP_RSP:
1343 srp_process_rsp(target, iu->buf);
1344 break;
1345
David Dillowbb125882010-10-08 14:40:47 -04001346 case SRP_CRED_REQ:
1347 srp_process_cred_req(target, iu->buf);
1348 break;
1349
1350 case SRP_AER_REQ:
1351 srp_process_aer_req(target, iu->buf);
1352 break;
1353
Roland Dreieraef9ec32005-11-02 14:07:13 -08001354 case SRP_T_LOGOUT:
1355 /* XXX Handle target logout */
David Dillow7aa54bd2008-01-07 18:23:41 -05001356 shost_printk(KERN_WARNING, target->scsi_host,
1357 PFX "Got target logout request\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001358 break;
1359
1360 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05001361 shost_printk(KERN_WARNING, target->scsi_host,
1362 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001363 break;
1364 }
1365
Ralph Campbell85507bc2006-12-12 14:30:55 -08001366 ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
1367 DMA_FROM_DEVICE);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001368
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001369 res = srp_post_recv(target, iu);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001370 if (res != 0)
1371 shost_printk(KERN_ERR, target->scsi_host,
1372 PFX "Recv failed with error code %d\n", res);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001373}
1374
Bart Van Asschec1120f82013-10-26 14:35:08 +02001375/**
1376 * srp_tl_err_work() - handle a transport layer error
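 * @work: Work structure embedded in struct srp_target_port (tl_err_work).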
1377 *
1378 * Note: This function may get invoked before the rport has been created,
1379 * hence the target->rport test.
1380 */
1381static void srp_tl_err_work(struct work_struct *work)
1382{
1383 struct srp_target_port *target;
1384
1385 target = container_of(work, struct srp_target_port, tl_err_work);
1386 if (target->rport)
1387 srp_start_tl_fail_timers(target->rport);
1388}
1389
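/*
 * Note: only the first error completion observed while the target is
 * connected is reported and triggers transport-layer error handling via
 * tl_err_work; every error marks the QP as being in the error state.
 */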
Bart Van Assche948d1e82011-09-03 09:25:42 +02001390static void srp_handle_qp_err(enum ib_wc_status wc_status,
1391 enum ib_wc_opcode wc_opcode,
1392 struct srp_target_port *target)
1393{
Bart Van Assche294c8752011-12-25 12:18:12 +00001394 if (target->connected && !target->qp_in_error) {
Bart Van Assche4f0af692012-11-26 11:16:40 +01001395 shost_printk(KERN_ERR, target->scsi_host,
1396 PFX "failed %s status %d\n",
1397 wc_opcode & IB_WC_RECV ? "receive" : "send",
1398 wc_status);
Bart Van Asschec1120f82013-10-26 14:35:08 +02001399 queue_work(system_long_wq, &target->tl_err_work);
Bart Van Assche4f0af692012-11-26 11:16:40 +01001400 }
Bart Van Assche948d1e82011-09-03 09:25:42 +02001401 target->qp_in_error = true;
1402}
1403
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001404static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001405{
1406 struct srp_target_port *target = target_ptr;
1407 struct ib_wc wc;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001408
1409 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
1410 while (ib_poll_cq(cq, 1, &wc) > 0) {
Bart Van Assche948d1e82011-09-03 09:25:42 +02001411 if (likely(wc.status == IB_WC_SUCCESS)) {
1412 srp_handle_recv(target, &wc);
1413 } else {
1414 srp_handle_qp_err(wc.status, wc.opcode, target);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001415 }
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001416 }
1417}
1418
1419static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
1420{
1421 struct srp_target_port *target = target_ptr;
1422 struct ib_wc wc;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001423 struct srp_iu *iu;
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001424
1425 while (ib_poll_cq(cq, 1, &wc) > 0) {
Bart Van Assche948d1e82011-09-03 09:25:42 +02001426 if (likely(wc.status == IB_WC_SUCCESS)) {
1427 iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
1428 list_add(&iu->list, &target->free_tx);
1429 } else {
1430 srp_handle_qp_err(wc.status, wc.opcode, target);
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001431 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001432 }
1433}
1434
Bart Van Assche76c75b22010-11-26 14:37:47 -05001435static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001436{
Bart Van Assche76c75b22010-11-26 14:37:47 -05001437 struct srp_target_port *target = host_to_target(shost);
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001438 struct srp_rport *rport = target->rport;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001439 struct srp_request *req;
1440 struct srp_iu *iu;
1441 struct srp_cmd *cmd;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001442 struct ib_device *dev;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001443 unsigned long flags;
Bart Van Asscheed9b2262013-10-26 14:34:27 +02001444 int len, result;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001445 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
1446
1447 /*
1448 * The SCSI EH thread is the only context from which srp_queuecommand()
1449 * can get invoked for blocked devices (SDEV_BLOCK /
1450 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
1451 * locking the rport mutex if invoked from inside the SCSI EH.
1452 */
1453 if (in_scsi_eh)
1454 mutex_lock(&rport->mutex);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001455
Bart Van Asscheed9b2262013-10-26 14:34:27 +02001456 result = srp_chkready(target->rport);
1457 if (unlikely(result)) {
1458 scmnd->result = result;
Bart Van Assche2ce19e72013-02-21 17:20:00 +00001459 scmnd->scsi_done(scmnd);
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001460 goto unlock_rport;
Bart Van Assche2ce19e72013-02-21 17:20:00 +00001461 }
1462
Bart Van Asschee9684672010-11-26 15:08:38 -05001463 spin_lock_irqsave(&target->lock, flags);
David Dillowbb125882010-10-08 14:40:47 -04001464 iu = __srp_get_tx_iu(target, SRP_IU_CMD);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001465 if (!iu)
Bart Van Assche695b8342011-01-13 19:02:25 +00001466 goto err_unlock;
1467
1468 req = list_first_entry(&target->free_reqs, struct srp_request, list);
1469 list_del(&req->list);
1470 spin_unlock_irqrestore(&target->lock, flags);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001471
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01001472 dev = target->srp_host->srp_dev->dev;
David Dillow49248642011-01-14 18:23:24 -05001473 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08001474 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001475
Roland Dreieraef9ec32005-11-02 14:07:13 -08001476 scmnd->result = 0;
David Dillowf8b6e312010-11-26 13:02:21 -05001477 scmnd->host_scribble = (void *) req;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001478
1479 cmd = iu->buf;
1480 memset(cmd, 0, sizeof *cmd);
1481
1482 cmd->opcode = SRP_CMD;
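 /*
  * Place the LUN in the first two bytes of the big-endian eight-byte SRP
  * LUN field, i.e. in the first level of the SCSI LUN structure.
  */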
1483 cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
Roland Dreierd945e1d2006-05-09 10:50:28 -07001484 cmd->tag = req->index;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001485 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
1486
Roland Dreieraef9ec32005-11-02 14:07:13 -08001487 req->scmnd = scmnd;
1488 req->cmd = iu;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001489
1490 len = srp_map_data(scmnd, target, req);
1491 if (len < 0) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001492 shost_printk(KERN_ERR, target->scsi_host,
1493 PFX "Failed to map data\n");
Bart Van Assche76c75b22010-11-26 14:37:47 -05001494 goto err_iu;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001495 }
1496
David Dillow49248642011-01-14 18:23:24 -05001497 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08001498 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001499
Bart Van Assche76c75b22010-11-26 14:37:47 -05001500 if (srp_post_send(target, iu, len)) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001501 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001502 goto err_unmap;
1503 }
1504
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001505unlock_rport:
1506 if (in_scsi_eh)
1507 mutex_unlock(&rport->mutex);
1508
Roland Dreieraef9ec32005-11-02 14:07:13 -08001509 return 0;
1510
1511err_unmap:
1512 srp_unmap_data(scmnd, target, req);
1513
Bart Van Assche76c75b22010-11-26 14:37:47 -05001514err_iu:
1515 srp_put_tx_iu(target, iu, SRP_IU_CMD);
1516
Bart Van Asschee9684672010-11-26 15:08:38 -05001517 spin_lock_irqsave(&target->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001518 list_add(&req->list, &target->free_reqs);
Bart Van Assche695b8342011-01-13 19:02:25 +00001519
1520err_unlock:
Bart Van Asschee9684672010-11-26 15:08:38 -05001521 spin_unlock_irqrestore(&target->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001522
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001523 if (in_scsi_eh)
1524 mutex_unlock(&rport->mutex);
1525
Roland Dreieraef9ec32005-11-02 14:07:13 -08001526 return SCSI_MLQUEUE_HOST_BUSY;
1527}
1528
1529static int srp_alloc_iu_bufs(struct srp_target_port *target)
1530{
1531 int i;
1532
1533 for (i = 0; i < SRP_RQ_SIZE; ++i) {
1534 target->rx_ring[i] = srp_alloc_iu(target->srp_host,
1535 target->max_ti_iu_len,
1536 GFP_KERNEL, DMA_FROM_DEVICE);
1537 if (!target->rx_ring[i])
1538 goto err;
1539 }
1540
Bart Van Asschedd5e6e32010-08-30 19:27:20 +00001541 for (i = 0; i < SRP_SQ_SIZE; ++i) {
Roland Dreieraef9ec32005-11-02 14:07:13 -08001542 target->tx_ring[i] = srp_alloc_iu(target->srp_host,
David Dillow49248642011-01-14 18:23:24 -05001543 target->max_iu_len,
Roland Dreieraef9ec32005-11-02 14:07:13 -08001544 GFP_KERNEL, DMA_TO_DEVICE);
1545 if (!target->tx_ring[i])
1546 goto err;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001547
1548 list_add(&target->tx_ring[i]->list, &target->free_tx);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001549 }
1550
1551 return 0;
1552
1553err:
1554 for (i = 0; i < SRP_RQ_SIZE; ++i) {
1555 srp_free_iu(target->srp_host, target->rx_ring[i]);
1556 target->rx_ring[i] = NULL;
1557 }
1558
Bart Van Asschedd5e6e32010-08-30 19:27:20 +00001559 for (i = 0; i < SRP_SQ_SIZE; ++i) {
Roland Dreieraef9ec32005-11-02 14:07:13 -08001560 srp_free_iu(target->srp_host, target->tx_ring[i]);
1561 target->tx_ring[i] = NULL;
1562 }
1563
1564 return -ENOMEM;
1565}
1566
Bart Van Asschec9b03c12011-09-03 09:34:48 +02001567static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
1568{
1569 uint64_t T_tr_ns, max_compl_time_ms;
1570 uint32_t rq_tmo_jiffies;
1571
1572 /*
1573 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
1574 * table 91), both the QP timeout and the retry count have to be set
1575 * for RC QPs during the RTR to RTS transition.
1576 */
1577 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
1578 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
1579
1580 /*
1581 * Set target->rq_tmo_jiffies to one second more than the largest time
1582 * it can take before an error completion is generated. See also
1583 * C9-140..142 in the IBTA spec for more information about how to
1584 * convert the QP Local ACK Timeout value to nanoseconds.
1585 */
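 /*
  * Worked example (hypothetical values): with a local ACK timeout of 14
  * and a retry count of 7, T_tr_ns = 4096 * 2^14 ns ~= 67 ms, so
  * max_compl_time_ms ~= 7 * 4 * 67 ms ~= 1.9 s and the resulting queue
  * timeout is roughly 2.9 seconds.
  */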
1586 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
1587 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
1588 do_div(max_compl_time_ms, NSEC_PER_MSEC);
1589 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
1590
1591 return rq_tmo_jiffies;
1592}
1593
David Dillow961e0be2011-01-14 17:32:07 -05001594static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
1595 struct srp_login_rsp *lrsp,
1596 struct srp_target_port *target)
1597{
1598 struct ib_qp_attr *qp_attr = NULL;
1599 int attr_mask = 0;
1600 int ret;
1601 int i;
1602
1603 if (lrsp->opcode == SRP_LOGIN_RSP) {
1604 target->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
1605 target->req_lim = be32_to_cpu(lrsp->req_lim_delta);
1606
1607 /*
1608 * Reserve credits for task management so we don't
1609 * bounce requests back to the SCSI mid-layer.
1610 */
1611 target->scsi_host->can_queue
1612 = min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE,
1613 target->scsi_host->can_queue);
1614 } else {
1615 shost_printk(KERN_WARNING, target->scsi_host,
1616 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
1617 ret = -ECONNRESET;
1618 goto error;
1619 }
1620
1621 if (!target->rx_ring[0]) {
1622 ret = srp_alloc_iu_bufs(target);
1623 if (ret)
1624 goto error;
1625 }
1626
1627 ret = -ENOMEM;
1628 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
1629 if (!qp_attr)
1630 goto error;
1631
1632 qp_attr->qp_state = IB_QPS_RTR;
1633 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
1634 if (ret)
1635 goto error_free;
1636
1637 ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
1638 if (ret)
1639 goto error_free;
1640
1641 for (i = 0; i < SRP_RQ_SIZE; i++) {
1642 struct srp_iu *iu = target->rx_ring[i];
1643 ret = srp_post_recv(target, iu);
1644 if (ret)
1645 goto error_free;
1646 }
1647
1648 qp_attr->qp_state = IB_QPS_RTS;
1649 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
1650 if (ret)
1651 goto error_free;
1652
Bart Van Asschec9b03c12011-09-03 09:34:48 +02001653 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
1654
David Dillow961e0be2011-01-14 17:32:07 -05001655 ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
1656 if (ret)
1657 goto error_free;
1658
1659 ret = ib_send_cm_rtu(cm_id, NULL, 0);
1660
1661error_free:
1662 kfree(qp_attr);
1663
1664error:
1665 target->status = ret;
1666}
1667
Roland Dreieraef9ec32005-11-02 14:07:13 -08001668static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
1669 struct ib_cm_event *event,
1670 struct srp_target_port *target)
1671{
David Dillow7aa54bd2008-01-07 18:23:41 -05001672 struct Scsi_Host *shost = target->scsi_host;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001673 struct ib_class_port_info *cpi;
1674 int opcode;
1675
1676 switch (event->param.rej_rcvd.reason) {
1677 case IB_CM_REJ_PORT_CM_REDIRECT:
1678 cpi = event->param.rej_rcvd.ari;
1679 target->path.dlid = cpi->redirect_lid;
1680 target->path.pkey = cpi->redirect_pkey;
1681 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
1682 memcpy(target->path.dgid.raw, cpi->redirect_gid, 16);
1683
1684 target->status = target->path.dlid ?
1685 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
1686 break;
1687
1688 case IB_CM_REJ_PORT_REDIRECT:
Roland Dreier5d7cbfd2007-08-03 10:45:18 -07001689 if (srp_target_is_topspin(target)) {
Roland Dreieraef9ec32005-11-02 14:07:13 -08001690 /*
1691 * Topspin/Cisco SRP gateways incorrectly send
1692 * reject reason code 25 when they mean 24
1693 * (port redirect).
1694 */
1695 memcpy(target->path.dgid.raw,
1696 event->param.rej_rcvd.ari, 16);
1697
David Dillow7aa54bd2008-01-07 18:23:41 -05001698 shost_printk(KERN_DEBUG, shost,
1699 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
1700 (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
1701 (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));
Roland Dreieraef9ec32005-11-02 14:07:13 -08001702
1703 target->status = SRP_PORT_REDIRECT;
1704 } else {
David Dillow7aa54bd2008-01-07 18:23:41 -05001705 shost_printk(KERN_WARNING, shost,
1706 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001707 target->status = -ECONNRESET;
1708 }
1709 break;
1710
1711 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
David Dillow7aa54bd2008-01-07 18:23:41 -05001712 shost_printk(KERN_WARNING, shost,
1713 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001714 target->status = -ECONNRESET;
1715 break;
1716
1717 case IB_CM_REJ_CONSUMER_DEFINED:
1718 opcode = *(u8 *) event->private_data;
1719 if (opcode == SRP_LOGIN_REJ) {
1720 struct srp_login_rej *rej = event->private_data;
1721 u32 reason = be32_to_cpu(rej->reason);
1722
1723 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
David Dillow7aa54bd2008-01-07 18:23:41 -05001724 shost_printk(KERN_WARNING, shost,
1725 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001726 else
David Dillow7aa54bd2008-01-07 18:23:41 -05001727 shost_printk(KERN_WARNING, shost,
1728 PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001729 } else
David Dillow7aa54bd2008-01-07 18:23:41 -05001730 shost_printk(KERN_WARNING, shost,
1731 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
1732 " opcode 0x%02x\n", opcode);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001733 target->status = -ECONNRESET;
1734 break;
1735
David Dillow9fe4bcf2008-01-08 17:08:52 -05001736 case IB_CM_REJ_STALE_CONN:
1737 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
1738 target->status = SRP_STALE_CONN;
1739 break;
1740
Roland Dreieraef9ec32005-11-02 14:07:13 -08001741 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05001742 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
1743 event->param.rej_rcvd.reason);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001744 target->status = -ECONNRESET;
1745 }
1746}
1747
1748static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
1749{
1750 struct srp_target_port *target = cm_id->context;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001751 int comp = 0;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001752
1753 switch (event->event) {
1754 case IB_CM_REQ_ERROR:
David Dillow7aa54bd2008-01-07 18:23:41 -05001755 shost_printk(KERN_DEBUG, target->scsi_host,
1756 PFX "Sending CM REQ failed\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001757 comp = 1;
1758 target->status = -ECONNRESET;
1759 break;
1760
1761 case IB_CM_REP_RECEIVED:
1762 comp = 1;
David Dillow961e0be2011-01-14 17:32:07 -05001763 srp_cm_rep_handler(cm_id, event->private_data, target);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001764 break;
1765
1766 case IB_CM_REJ_RECEIVED:
David Dillow7aa54bd2008-01-07 18:23:41 -05001767 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001768 comp = 1;
1769
1770 srp_cm_rej_handler(cm_id, event, target);
1771 break;
1772
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07001773 case IB_CM_DREQ_RECEIVED:
David Dillow7aa54bd2008-01-07 18:23:41 -05001774 shost_printk(KERN_WARNING, target->scsi_host,
1775 PFX "DREQ received - connection closed\n");
Bart Van Assche294c8752011-12-25 12:18:12 +00001776 srp_change_conn_state(target, false);
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07001777 if (ib_send_cm_drep(cm_id, NULL, 0))
David Dillow7aa54bd2008-01-07 18:23:41 -05001778 shost_printk(KERN_ERR, target->scsi_host,
1779 PFX "Sending CM DREP failed\n");
Bart Van Asschec1120f82013-10-26 14:35:08 +02001780 queue_work(system_long_wq, &target->tl_err_work);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001781 break;
1782
1783 case IB_CM_TIMEWAIT_EXIT:
David Dillow7aa54bd2008-01-07 18:23:41 -05001784 shost_printk(KERN_ERR, target->scsi_host,
1785 PFX "connection closed\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001786
Roland Dreieraef9ec32005-11-02 14:07:13 -08001787 target->status = 0;
1788 break;
1789
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07001790 case IB_CM_MRA_RECEIVED:
1791 case IB_CM_DREQ_ERROR:
1792 case IB_CM_DREP_RECEIVED:
1793 break;
1794
Roland Dreieraef9ec32005-11-02 14:07:13 -08001795 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05001796 shost_printk(KERN_WARNING, target->scsi_host,
1797 PFX "Unhandled CM event %d\n", event->event);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001798 break;
1799 }
1800
1801 if (comp)
1802 complete(&target->done);
1803
Roland Dreieraef9ec32005-11-02 14:07:13 -08001804 return 0;
1805}
1806
Roland Dreierd945e1d2006-05-09 10:50:28 -07001807static int srp_send_tsk_mgmt(struct srp_target_port *target,
David Dillowf8b6e312010-11-26 13:02:21 -05001808 u64 req_tag, unsigned int lun, u8 func)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001809{
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001810 struct srp_rport *rport = target->rport;
David Dillow19081f32010-10-18 08:54:49 -04001811 struct ib_device *dev = target->srp_host->srp_dev->dev;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001812 struct srp_iu *iu;
1813 struct srp_tsk_mgmt *tsk_mgmt;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001814
Bart Van Assche3780d1f2013-02-21 17:18:00 +00001815 if (!target->connected || target->qp_in_error)
1816 return -1;
1817
David Dillowf8b6e312010-11-26 13:02:21 -05001818 init_completion(&target->tsk_mgmt_done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001819
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001820 /*
1821 * Lock the rport mutex to prevent srp_create_target_ib() from being
1822 * invoked while a task management function is being sent.
1823 */
1824 mutex_lock(&rport->mutex);
Bart Van Asschee9684672010-11-26 15:08:38 -05001825 spin_lock_irq(&target->lock);
David Dillowbb125882010-10-08 14:40:47 -04001826 iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
Bart Van Asschee9684672010-11-26 15:08:38 -05001827 spin_unlock_irq(&target->lock);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001828
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001829 if (!iu) {
1830 mutex_unlock(&rport->mutex);
1831
Bart Van Assche76c75b22010-11-26 14:37:47 -05001832 return -1;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001833 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001834
David Dillow19081f32010-10-18 08:54:49 -04001835 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
1836 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001837 tsk_mgmt = iu->buf;
1838 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
1839
1840 tsk_mgmt->opcode = SRP_TSK_MGMT;
David Dillowf8b6e312010-11-26 13:02:21 -05001841 tsk_mgmt->lun = cpu_to_be64((u64) lun << 48);
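 /*
  * Setting the SRP_TAG_TSK_MGMT bit in the tag lets the response handler
  * tell task-management responses apart from SCSI command responses.
  */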
1842 tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001843 tsk_mgmt->tsk_mgmt_func = func;
David Dillowf8b6e312010-11-26 13:02:21 -05001844 tsk_mgmt->task_tag = req_tag;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001845
David Dillow19081f32010-10-18 08:54:49 -04001846 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
1847 DMA_TO_DEVICE);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001848 if (srp_post_send(target, iu, sizeof *tsk_mgmt)) {
1849 srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT);
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001850 mutex_unlock(&rport->mutex);
1851
Bart Van Assche76c75b22010-11-26 14:37:47 -05001852 return -1;
1853 }
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001854 mutex_unlock(&rport->mutex);
Roland Dreierd945e1d2006-05-09 10:50:28 -07001855
David Dillowf8b6e312010-11-26 13:02:21 -05001856 if (!wait_for_completion_timeout(&target->tsk_mgmt_done,
Roland Dreieraef9ec32005-11-02 14:07:13 -08001857 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
Roland Dreierd945e1d2006-05-09 10:50:28 -07001858 return -1;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001859
Roland Dreierd945e1d2006-05-09 10:50:28 -07001860 return 0;
Roland Dreierd945e1d2006-05-09 10:50:28 -07001861}
1862
Roland Dreieraef9ec32005-11-02 14:07:13 -08001863static int srp_abort(struct scsi_cmnd *scmnd)
1864{
Roland Dreierd945e1d2006-05-09 10:50:28 -07001865 struct srp_target_port *target = host_to_target(scmnd->device->host);
David Dillowf8b6e312010-11-26 13:02:21 -05001866 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
Bart Van Assche086f44f2013-06-12 15:23:04 +02001867 int ret;
Roland Dreierd945e1d2006-05-09 10:50:28 -07001868
David Dillow7aa54bd2008-01-07 18:23:41 -05001869 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001870
Bart Van Asschec7c4e7f2013-02-21 17:19:04 +00001871 if (!req || !srp_claim_req(target, req, scmnd))
Ishai Rabinovitz1033ff62007-01-16 17:26:22 +02001872 return FAILED;
Bart Van Assche086f44f2013-06-12 15:23:04 +02001873 if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
Bart Van Assche80d5e8a2013-07-10 17:36:35 +02001874 SRP_TSK_ABORT_TASK) == 0)
Bart Van Assche086f44f2013-06-12 15:23:04 +02001875 ret = SUCCESS;
Bart Van Asscheed9b2262013-10-26 14:34:27 +02001876 else if (target->rport->state == SRP_RPORT_LOST)
Bart Van Assche99e1c132013-06-28 14:49:58 +02001877 ret = FAST_IO_FAIL;
Bart Van Assche086f44f2013-06-12 15:23:04 +02001878 else
1879 ret = FAILED;
Bart Van Assche22032992012-08-14 13:18:53 +00001880 srp_free_req(target, req, scmnd, 0);
1881 scmnd->result = DID_ABORT << 16;
Bart Van Assched8536672012-08-24 10:29:11 +00001882 scmnd->scsi_done(scmnd);
Roland Dreierd945e1d2006-05-09 10:50:28 -07001883
Bart Van Assche086f44f2013-06-12 15:23:04 +02001884 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001885}
1886
1887static int srp_reset_device(struct scsi_cmnd *scmnd)
1888{
Roland Dreierd945e1d2006-05-09 10:50:28 -07001889 struct srp_target_port *target = host_to_target(scmnd->device->host);
Bart Van Assche536ae142010-11-26 13:58:27 -05001890 int i;
Roland Dreierd945e1d2006-05-09 10:50:28 -07001891
David Dillow7aa54bd2008-01-07 18:23:41 -05001892 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001893
David Dillowf8b6e312010-11-26 13:02:21 -05001894 if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun,
1895 SRP_TSK_LUN_RESET))
Roland Dreierd945e1d2006-05-09 10:50:28 -07001896 return FAILED;
David Dillowf8b6e312010-11-26 13:02:21 -05001897 if (target->tsk_mgmt_status)
Roland Dreierd945e1d2006-05-09 10:50:28 -07001898 return FAILED;
1899
Bart Van Assche536ae142010-11-26 13:58:27 -05001900 for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
1901 struct srp_request *req = &target->req_ring[i];
David Dillowf8b6e312010-11-26 13:02:21 -05001902 if (req->scmnd && req->scmnd->device == scmnd->device)
Bart Van Asscheed9b2262013-10-26 14:34:27 +02001903 srp_finish_req(target, req, DID_RESET << 16);
Bart Van Assche536ae142010-11-26 13:58:27 -05001904 }
Roland Dreierd945e1d2006-05-09 10:50:28 -07001905
Roland Dreierd945e1d2006-05-09 10:50:28 -07001906 return SUCCESS;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001907}
1908
1909static int srp_reset_host(struct scsi_cmnd *scmnd)
1910{
1911 struct srp_target_port *target = host_to_target(scmnd->device->host);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001912
David Dillow7aa54bd2008-01-07 18:23:41 -05001913 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001914
Bart Van Asscheed9b2262013-10-26 14:34:27 +02001915 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001916}
1917
Bart Van Asschec9b03c12011-09-03 09:34:48 +02001918static int srp_slave_configure(struct scsi_device *sdev)
1919{
1920 struct Scsi_Host *shost = sdev->host;
1921 struct srp_target_port *target = host_to_target(shost);
1922 struct request_queue *q = sdev->request_queue;
1923 unsigned long timeout;
1924
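 /*
  * For disk devices, make the block layer request timeout at least as
  * large as the worst-case time needed for an error completion to be
  * generated (see srp_compute_rq_tmo()), and never less than 30 seconds.
  */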
1925 if (sdev->type == TYPE_DISK) {
1926 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
1927 blk_queue_rq_timeout(q, timeout);
1928 }
1929
1930 return 0;
1931}
1932
Tony Jonesee959b02008-02-22 00:13:36 +01001933static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
1934 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08001935{
Tony Jonesee959b02008-02-22 00:13:36 +01001936 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08001937
Roland Dreier6ecb0c82006-03-20 10:08:23 -08001938 return sprintf(buf, "0x%016llx\n",
1939 (unsigned long long) be64_to_cpu(target->id_ext));
1940}
1941
Tony Jonesee959b02008-02-22 00:13:36 +01001942static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
1943 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08001944{
Tony Jonesee959b02008-02-22 00:13:36 +01001945 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08001946
Roland Dreier6ecb0c82006-03-20 10:08:23 -08001947 return sprintf(buf, "0x%016llx\n",
1948 (unsigned long long) be64_to_cpu(target->ioc_guid));
1949}
1950
Tony Jonesee959b02008-02-22 00:13:36 +01001951static ssize_t show_service_id(struct device *dev,
1952 struct device_attribute *attr, char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08001953{
Tony Jonesee959b02008-02-22 00:13:36 +01001954 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08001955
Roland Dreier6ecb0c82006-03-20 10:08:23 -08001956 return sprintf(buf, "0x%016llx\n",
1957 (unsigned long long) be64_to_cpu(target->service_id));
1958}
1959
Tony Jonesee959b02008-02-22 00:13:36 +01001960static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
1961 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08001962{
Tony Jonesee959b02008-02-22 00:13:36 +01001963 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08001964
Roland Dreier6ecb0c82006-03-20 10:08:23 -08001965 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
1966}
1967
Bart Van Assche848b3082013-10-26 14:38:12 +02001968static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
1969 char *buf)
1970{
1971 struct srp_target_port *target = host_to_target(class_to_shost(dev));
1972
1973 return sprintf(buf, "%pI6\n", target->path.sgid.raw);
1974}
1975
Tony Jonesee959b02008-02-22 00:13:36 +01001976static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
1977 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08001978{
Tony Jonesee959b02008-02-22 00:13:36 +01001979 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08001980
Harvey Harrison5b095d9892008-10-29 12:52:50 -07001981 return sprintf(buf, "%pI6\n", target->path.dgid.raw);
Roland Dreier6ecb0c82006-03-20 10:08:23 -08001982}
1983
Tony Jonesee959b02008-02-22 00:13:36 +01001984static ssize_t show_orig_dgid(struct device *dev,
1985 struct device_attribute *attr, char *buf)
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07001986{
Tony Jonesee959b02008-02-22 00:13:36 +01001987 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07001988
Harvey Harrison5b095d9892008-10-29 12:52:50 -07001989 return sprintf(buf, "%pI6\n", target->orig_dgid);
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07001990}
1991
Bart Van Assche89de7482010-08-03 14:08:45 +00001992static ssize_t show_req_lim(struct device *dev,
1993 struct device_attribute *attr, char *buf)
1994{
1995 struct srp_target_port *target = host_to_target(class_to_shost(dev));
1996
Bart Van Assche89de7482010-08-03 14:08:45 +00001997 return sprintf(buf, "%d\n", target->req_lim);
1998}
1999
Tony Jonesee959b02008-02-22 00:13:36 +01002000static ssize_t show_zero_req_lim(struct device *dev,
2001 struct device_attribute *attr, char *buf)
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002002{
Tony Jonesee959b02008-02-22 00:13:36 +01002003 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002004
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002005 return sprintf(buf, "%d\n", target->zero_req_lim);
2006}
2007
Tony Jonesee959b02008-02-22 00:13:36 +01002008static ssize_t show_local_ib_port(struct device *dev,
2009 struct device_attribute *attr, char *buf)
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002010{
Tony Jonesee959b02008-02-22 00:13:36 +01002011 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002012
2013 return sprintf(buf, "%d\n", target->srp_host->port);
2014}
2015
Tony Jonesee959b02008-02-22 00:13:36 +01002016static ssize_t show_local_ib_device(struct device *dev,
2017 struct device_attribute *attr, char *buf)
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002018{
Tony Jonesee959b02008-02-22 00:13:36 +01002019 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002020
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002021 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002022}
2023
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002024static ssize_t show_comp_vector(struct device *dev,
2025 struct device_attribute *attr, char *buf)
2026{
2027 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2028
2029 return sprintf(buf, "%d\n", target->comp_vector);
2030}
2031
Vu Pham7bb312e2013-10-26 14:31:27 +02002032static ssize_t show_tl_retry_count(struct device *dev,
2033 struct device_attribute *attr, char *buf)
2034{
2035 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2036
2037 return sprintf(buf, "%d\n", target->tl_retry_count);
2038}
2039
David Dillow49248642011-01-14 18:23:24 -05002040static ssize_t show_cmd_sg_entries(struct device *dev,
2041 struct device_attribute *attr, char *buf)
2042{
2043 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2044
2045 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2046}
2047
David Dillowc07d4242011-01-16 13:57:10 -05002048static ssize_t show_allow_ext_sg(struct device *dev,
2049 struct device_attribute *attr, char *buf)
2050{
2051 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2052
2053 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2054}
2055
Tony Jonesee959b02008-02-22 00:13:36 +01002056static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
2057static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
2058static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
2059static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
Bart Van Assche848b3082013-10-26 14:38:12 +02002060static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
Tony Jonesee959b02008-02-22 00:13:36 +01002061static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
2062static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
Bart Van Assche89de7482010-08-03 14:08:45 +00002063static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
Tony Jonesee959b02008-02-22 00:13:36 +01002064static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
2065static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
2066static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002067static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
Vu Pham7bb312e2013-10-26 14:31:27 +02002068static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
David Dillow49248642011-01-14 18:23:24 -05002069static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
David Dillowc07d4242011-01-16 13:57:10 -05002070static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002071
Tony Jonesee959b02008-02-22 00:13:36 +01002072static struct device_attribute *srp_host_attrs[] = {
2073 &dev_attr_id_ext,
2074 &dev_attr_ioc_guid,
2075 &dev_attr_service_id,
2076 &dev_attr_pkey,
Bart Van Assche848b3082013-10-26 14:38:12 +02002077 &dev_attr_sgid,
Tony Jonesee959b02008-02-22 00:13:36 +01002078 &dev_attr_dgid,
2079 &dev_attr_orig_dgid,
Bart Van Assche89de7482010-08-03 14:08:45 +00002080 &dev_attr_req_lim,
Tony Jonesee959b02008-02-22 00:13:36 +01002081 &dev_attr_zero_req_lim,
2082 &dev_attr_local_ib_port,
2083 &dev_attr_local_ib_device,
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002084 &dev_attr_comp_vector,
Vu Pham7bb312e2013-10-26 14:31:27 +02002085 &dev_attr_tl_retry_count,
David Dillow49248642011-01-14 18:23:24 -05002086 &dev_attr_cmd_sg_entries,
David Dillowc07d4242011-01-16 13:57:10 -05002087 &dev_attr_allow_ext_sg,
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002088 NULL
2089};
2090
Roland Dreieraef9ec32005-11-02 14:07:13 -08002091static struct scsi_host_template srp_template = {
2092 .module = THIS_MODULE,
Roland Dreierb7f008f2007-05-06 21:18:11 -07002093 .name = "InfiniBand SRP initiator",
2094 .proc_name = DRV_NAME,
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002095 .slave_configure = srp_slave_configure,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002096 .info = srp_target_info,
2097 .queuecommand = srp_queuecommand,
2098 .eh_abort_handler = srp_abort,
2099 .eh_device_reset_handler = srp_reset_device,
2100 .eh_host_reset_handler = srp_reset_host,
Bart Van Assche2742c1d2013-06-12 15:24:25 +02002101 .skip_settle_delay = true,
David Dillow49248642011-01-14 18:23:24 -05002102 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
Bart Van Asschedd5e6e32010-08-30 19:27:20 +00002103 .can_queue = SRP_CMD_SQ_SIZE,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002104 .this_id = -1,
Bart Van Asschedd5e6e32010-08-30 19:27:20 +00002105 .cmd_per_lun = SRP_CMD_SQ_SIZE,
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002106 .use_clustering = ENABLE_CLUSTERING,
2107 .shost_attrs = srp_host_attrs
Roland Dreieraef9ec32005-11-02 14:07:13 -08002108};
2109
2110static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2111{
FUJITA Tomonori32368222007-06-27 16:33:12 +09002112 struct srp_rport_identifiers ids;
2113 struct srp_rport *rport;
2114
Roland Dreieraef9ec32005-11-02 14:07:13 -08002115 sprintf(target->target_name, "SRP.T10:%016llX",
2116 (unsigned long long) be64_to_cpu(target->id_ext));
2117
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002118 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
Roland Dreieraef9ec32005-11-02 14:07:13 -08002119 return -ENODEV;
2120
FUJITA Tomonori32368222007-06-27 16:33:12 +09002121 memcpy(ids.port_id, &target->id_ext, 8);
2122 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
FUJITA Tomonoriaebd5e42007-07-11 15:08:15 +09002123 ids.roles = SRP_RPORT_ROLE_TARGET;
FUJITA Tomonori32368222007-06-27 16:33:12 +09002124 rport = srp_rport_add(target->scsi_host, &ids);
2125 if (IS_ERR(rport)) {
2126 scsi_remove_host(target->scsi_host);
2127 return PTR_ERR(rport);
2128 }
2129
Bart Van Asschedc1bdbd2011-09-16 20:41:13 +02002130 rport->lld_data = target;
Bart Van Assche9dd69a62013-10-26 14:32:30 +02002131 target->rport = rport;
Bart Van Asschedc1bdbd2011-09-16 20:41:13 +02002132
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07002133 spin_lock(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002134 list_add_tail(&target->list, &host->target_list);
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07002135 spin_unlock(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002136
2137 target->state = SRP_TARGET_LIVE;
2138
Roland Dreieraef9ec32005-11-02 14:07:13 -08002139 scsi_scan_target(&target->scsi_host->shost_gendev,
Matthew Wilcox1962a4a2006-06-17 20:37:30 -07002140 0, target->scsi_id, SCAN_WILD_CARD, 0);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002141
2142 return 0;
2143}
2144
Tony Jonesee959b02008-02-22 00:13:36 +01002145static void srp_release_dev(struct device *dev)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002146{
2147 struct srp_host *host =
Tony Jonesee959b02008-02-22 00:13:36 +01002148 container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002149
2150 complete(&host->released);
2151}
2152
2153static struct class srp_class = {
2154 .name = "infiniband_srp",
Tony Jonesee959b02008-02-22 00:13:36 +01002155 .dev_release = srp_release_dev
Roland Dreieraef9ec32005-11-02 14:07:13 -08002156};
2157
Bart Van Assche96fc2482013-06-28 14:51:26 +02002158/**
2159 * srp_conn_unique() - check whether the connection to a target is unique
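 * @host:   SRP host to scan for duplicate connections.
 * @target: SRP target port that is about to be added to @host.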
2160 */
2161static bool srp_conn_unique(struct srp_host *host,
2162 struct srp_target_port *target)
2163{
2164 struct srp_target_port *t;
2165 bool ret = false;
2166
2167 if (target->state == SRP_TARGET_REMOVED)
2168 goto out;
2169
2170 ret = true;
2171
2172 spin_lock(&host->target_lock);
2173 list_for_each_entry(t, &host->target_list, list) {
2174 if (t != target &&
2175 target->id_ext == t->id_ext &&
2176 target->ioc_guid == t->ioc_guid &&
2177 target->initiator_ext == t->initiator_ext) {
2178 ret = false;
2179 break;
2180 }
2181 }
2182 spin_unlock(&host->target_lock);
2183
2184out:
2185 return ret;
2186}
2187
Roland Dreieraef9ec32005-11-02 14:07:13 -08002188/*
2189 * Target ports are added by writing
2190 *
2191 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2192 * pkey=<P_Key>,service_id=<service ID>
2193 *
2194 * to the add_target sysfs attribute.
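 *
 * A minimal example with hypothetical identifiers, assuming an HCA named
 * "mlx4_0" on port 1 (the directory name follows the "srp-%s-%d" pattern
 * used by srp_add_port()):
 *
 *   echo "id_ext=0002c90300000001,ioc_guid=0002c90300000001,dgid=fe800000000000000002c90300000003,pkey=ffff,service_id=0002c90300000001" \
 *       > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target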
2195 */
2196enum {
2197 SRP_OPT_ERR = 0,
2198 SRP_OPT_ID_EXT = 1 << 0,
2199 SRP_OPT_IOC_GUID = 1 << 1,
2200 SRP_OPT_DGID = 1 << 2,
2201 SRP_OPT_PKEY = 1 << 3,
2202 SRP_OPT_SERVICE_ID = 1 << 4,
2203 SRP_OPT_MAX_SECT = 1 << 5,
Vu Pham52fb2b502006-06-17 20:37:31 -07002204 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
Ramachandra K0c0450db2006-06-17 20:37:38 -07002205 SRP_OPT_IO_CLASS = 1 << 7,
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02002206 SRP_OPT_INITIATOR_EXT = 1 << 8,
David Dillow49248642011-01-14 18:23:24 -05002207 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
David Dillowc07d4242011-01-16 13:57:10 -05002208 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
2209 SRP_OPT_SG_TABLESIZE = 1 << 11,
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002210 SRP_OPT_COMP_VECTOR = 1 << 12,
Vu Pham7bb312e2013-10-26 14:31:27 +02002211 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002212 SRP_OPT_ALL = (SRP_OPT_ID_EXT |
2213 SRP_OPT_IOC_GUID |
2214 SRP_OPT_DGID |
2215 SRP_OPT_PKEY |
2216 SRP_OPT_SERVICE_ID),
2217};
2218
Steven Whitehousea447c092008-10-13 10:46:57 +01002219static const match_table_t srp_opt_tokens = {
Vu Pham52fb2b502006-06-17 20:37:31 -07002220 { SRP_OPT_ID_EXT, "id_ext=%s" },
2221 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
2222 { SRP_OPT_DGID, "dgid=%s" },
2223 { SRP_OPT_PKEY, "pkey=%x" },
2224 { SRP_OPT_SERVICE_ID, "service_id=%s" },
2225 { SRP_OPT_MAX_SECT, "max_sect=%d" },
2226 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
Ramachandra K0c0450db2006-06-17 20:37:38 -07002227 { SRP_OPT_IO_CLASS, "io_class=%x" },
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02002228 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
David Dillow49248642011-01-14 18:23:24 -05002229 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
David Dillowc07d4242011-01-16 13:57:10 -05002230 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
2231 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002232 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
Vu Pham7bb312e2013-10-26 14:31:27 +02002233 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
Vu Pham52fb2b502006-06-17 20:37:31 -07002234 { SRP_OPT_ERR, NULL }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002235};
2236
2237static int srp_parse_options(const char *buf, struct srp_target_port *target)
2238{
2239 char *options, *sep_opt;
2240 char *p;
2241 char dgid[3];
2242 substring_t args[MAX_OPT_ARGS];
2243 int opt_mask = 0;
2244 int token;
2245 int ret = -EINVAL;
2246 int i;
2247
2248 options = kstrdup(buf, GFP_KERNEL);
2249 if (!options)
2250 return -ENOMEM;
2251
2252 sep_opt = options;
2253 while ((p = strsep(&sep_opt, ",")) != NULL) {
2254 if (!*p)
2255 continue;
2256
2257 token = match_token(p, srp_opt_tokens, args);
2258 opt_mask |= token;
2259
2260 switch (token) {
2261 case SRP_OPT_ID_EXT:
2262 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02002263 if (!p) {
2264 ret = -ENOMEM;
2265 goto out;
2266 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002267 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2268 kfree(p);
2269 break;
2270
2271 case SRP_OPT_IOC_GUID:
2272 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02002273 if (!p) {
2274 ret = -ENOMEM;
2275 goto out;
2276 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002277 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
2278 kfree(p);
2279 break;
2280
2281 case SRP_OPT_DGID:
2282 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02002283 if (!p) {
2284 ret = -ENOMEM;
2285 goto out;
2286 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002287 if (strlen(p) != 32) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002288 pr_warn("bad dest GID parameter '%s'\n", p);
Roland Dreierce1823f2006-04-03 09:31:04 -07002289 kfree(p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002290 goto out;
2291 }
2292
2293 for (i = 0; i < 16; ++i) {
2294 strlcpy(dgid, p + i * 2, 3);
2295 target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16);
2296 }
Roland Dreierbf17c1c2006-03-20 10:08:25 -08002297 kfree(p);
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002298 memcpy(target->orig_dgid, target->path.dgid.raw, 16);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002299 break;
2300
2301 case SRP_OPT_PKEY:
2302 if (match_hex(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002303 pr_warn("bad P_Key parameter '%s'\n", p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002304 goto out;
2305 }
2306 target->path.pkey = cpu_to_be16(token);
2307 break;
2308
2309 case SRP_OPT_SERVICE_ID:
2310 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02002311 if (!p) {
2312 ret = -ENOMEM;
2313 goto out;
2314 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002315 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
Sean Hefty247e0202007-08-08 15:51:18 -07002316 target->path.service_id = target->service_id;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002317 kfree(p);
2318 break;
2319
2320 case SRP_OPT_MAX_SECT:
2321 if (match_int(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002322 pr_warn("bad max sect parameter '%s'\n", p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002323 goto out;
2324 }
2325 target->scsi_host->max_sectors = token;
2326 break;
2327
Vu Pham52fb2b502006-06-17 20:37:31 -07002328 case SRP_OPT_MAX_CMD_PER_LUN:
2329 if (match_int(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002330 pr_warn("bad max cmd_per_lun parameter '%s'\n",
2331 p);
Vu Pham52fb2b502006-06-17 20:37:31 -07002332 goto out;
2333 }
Bart Van Asschedd5e6e32010-08-30 19:27:20 +00002334 target->scsi_host->cmd_per_lun = min(token, SRP_CMD_SQ_SIZE);
Vu Pham52fb2b502006-06-17 20:37:31 -07002335 break;
2336
Ramachandra K0c0450db2006-06-17 20:37:38 -07002337 case SRP_OPT_IO_CLASS:
2338 if (match_hex(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002339 pr_warn("bad IO class parameter '%s'\n", p);
Ramachandra K0c0450db2006-06-17 20:37:38 -07002340 goto out;
2341 }
2342 if (token != SRP_REV10_IB_IO_CLASS &&
2343 token != SRP_REV16A_IB_IO_CLASS) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002344 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
2345 token, SRP_REV10_IB_IO_CLASS,
2346 SRP_REV16A_IB_IO_CLASS);
Ramachandra K0c0450db2006-06-17 20:37:38 -07002347 goto out;
2348 }
2349 target->io_class = token;
2350 break;
2351
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02002352 case SRP_OPT_INITIATOR_EXT:
2353 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02002354 if (!p) {
2355 ret = -ENOMEM;
2356 goto out;
2357 }
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02002358 target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2359 kfree(p);
2360 break;
2361
David Dillow49248642011-01-14 18:23:24 -05002362 case SRP_OPT_CMD_SG_ENTRIES:
2363 if (match_int(args, &token) || token < 1 || token > 255) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002364 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
2365 p);
David Dillow49248642011-01-14 18:23:24 -05002366 goto out;
2367 }
2368 target->cmd_sg_cnt = token;
2369 break;
2370
David Dillowc07d4242011-01-16 13:57:10 -05002371 case SRP_OPT_ALLOW_EXT_SG:
2372 if (match_int(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002373 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
David Dillowc07d4242011-01-16 13:57:10 -05002374 goto out;
2375 }
2376 target->allow_ext_sg = !!token;
2377 break;
2378
2379 case SRP_OPT_SG_TABLESIZE:
2380 if (match_int(args, &token) || token < 1 ||
2381 token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002382 pr_warn("bad max sg_tablesize parameter '%s'\n",
2383 p);
David Dillowc07d4242011-01-16 13:57:10 -05002384 goto out;
2385 }
2386 target->sg_tablesize = token;
2387 break;
2388
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002389 case SRP_OPT_COMP_VECTOR:
2390 if (match_int(args, &token) || token < 0) {
2391 pr_warn("bad comp_vector parameter '%s'\n", p);
2392 goto out;
2393 }
2394 target->comp_vector = token;
2395 break;
2396
Vu Pham7bb312e2013-10-26 14:31:27 +02002397 case SRP_OPT_TL_RETRY_COUNT:
2398 if (match_int(args, &token) || token < 2 || token > 7) {
2399 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
2400 p);
2401 goto out;
2402 }
2403 target->tl_retry_count = token;
2404 break;
2405
Roland Dreieraef9ec32005-11-02 14:07:13 -08002406 default:
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002407 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
2408 p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002409 goto out;
2410 }
2411 }
2412
2413 if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
2414 ret = 0;
2415 else
2416 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
2417 if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
2418 !(srp_opt_tokens[i].token & opt_mask))
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002419 pr_warn("target creation request is missing parameter '%s'\n",
2420 srp_opt_tokens[i].pattern);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002421
2422out:
2423 kfree(options);
2424 return ret;
2425}
2426
Tony Jonesee959b02008-02-22 00:13:36 +01002427static ssize_t srp_create_target(struct device *dev,
2428 struct device_attribute *attr,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002429 const char *buf, size_t count)
2430{
2431 struct srp_host *host =
Tony Jonesee959b02008-02-22 00:13:36 +01002432 container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002433 struct Scsi_Host *target_host;
2434 struct srp_target_port *target;
David Dillowc07d4242011-01-16 13:57:10 -05002435 struct ib_device *ibdev = host->srp_dev->dev;
2436 dma_addr_t dma_addr;
David Dillow8f26c9f2011-01-14 19:45:50 -05002437 int i, ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002438
2439 target_host = scsi_host_alloc(&srp_template,
2440 sizeof (struct srp_target_port));
2441 if (!target_host)
2442 return -ENOMEM;
2443
David Dillow49248642011-01-14 18:23:24 -05002444 target_host->transportt = ib_srp_transport_template;
Bart Van Asschefd1b6c42011-07-13 09:19:16 -07002445 target_host->max_channel = 0;
2446 target_host->max_id = 1;
Arne Redlich3c8edf02006-11-15 12:43:00 +01002447 target_host->max_lun = SRP_MAX_LUN;
2448 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
Roland Dreier5f068992005-11-11 14:06:01 -08002449
Roland Dreieraef9ec32005-11-02 14:07:13 -08002450 target = host_to_target(target_host);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002451
David Dillow49248642011-01-14 18:23:24 -05002452 target->io_class = SRP_REV16A_IB_IO_CLASS;
2453 target->scsi_host = target_host;
2454 target->srp_host = host;
2455 target->lkey = host->srp_dev->mr->lkey;
2456 target->rkey = host->srp_dev->mr->rkey;
2457 target->cmd_sg_cnt = cmd_sg_entries;
David Dillowc07d4242011-01-16 13:57:10 -05002458 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
2459 target->allow_ext_sg = allow_ext_sg;
Vu Pham7bb312e2013-10-26 14:31:27 +02002460 target->tl_retry_count = 7;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002461
Roland Dreieraef9ec32005-11-02 14:07:13 -08002462 ret = srp_parse_options(buf, target);
2463 if (ret)
2464 goto err;
2465
Bart Van Assche96fc2482013-06-28 14:51:26 +02002466 if (!srp_conn_unique(target->srp_host, target)) {
2467 shost_printk(KERN_INFO, target->scsi_host,
2468 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
2469 be64_to_cpu(target->id_ext),
2470 be64_to_cpu(target->ioc_guid),
2471 be64_to_cpu(target->initiator_ext));
2472 ret = -EEXIST;
2473 goto err;
2474 }
2475
David Dillowc07d4242011-01-16 13:57:10 -05002476 if (!host->srp_dev->fmr_pool && !target->allow_ext_sg &&
2477 target->cmd_sg_cnt < target->sg_tablesize) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002478 pr_warn("No FMR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
David Dillowc07d4242011-01-16 13:57:10 -05002479 target->sg_tablesize = target->cmd_sg_cnt;
2480 }
2481
2482 target_host->sg_tablesize = target->sg_tablesize;
2483 target->indirect_size = target->sg_tablesize *
2484 sizeof (struct srp_direct_buf);
David Dillow49248642011-01-14 18:23:24 -05002485 target->max_iu_len = sizeof (struct srp_cmd) +
2486 sizeof (struct srp_indirect_buf) +
2487 target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
2488
Bart Van Asschec1120f82013-10-26 14:35:08 +02002489 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
Bart Van Asscheef6c49d2011-12-26 16:49:18 +00002490 INIT_WORK(&target->remove_work, srp_remove_work);
David Dillow8f26c9f2011-01-14 19:45:50 -05002491 spin_lock_init(&target->lock);
2492 INIT_LIST_HEAD(&target->free_tx);
2493 INIT_LIST_HEAD(&target->free_reqs);
2494 for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
2495 struct srp_request *req = &target->req_ring[i];
2496
2497 req->fmr_list = kmalloc(target->cmd_sg_cnt * sizeof (void *),
2498 GFP_KERNEL);
2499 req->map_page = kmalloc(SRP_FMR_SIZE * sizeof (void *),
2500 GFP_KERNEL);
David Dillowc07d4242011-01-16 13:57:10 -05002501 req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
2502 if (!req->fmr_list || !req->map_page || !req->indirect_desc)
David Dillow8f26c9f2011-01-14 19:45:50 -05002503 goto err_free_mem;
2504
David Dillowc07d4242011-01-16 13:57:10 -05002505 dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
2506 target->indirect_size,
2507 DMA_TO_DEVICE);
2508 if (ib_dma_mapping_error(ibdev, dma_addr))
2509 goto err_free_mem;
2510
2511 req->indirect_dma_addr = dma_addr;
David Dillow8f26c9f2011-01-14 19:45:50 -05002512 req->index = i;
2513 list_add_tail(&req->list, &target->free_reqs);
2514 }
2515
David Dillowc07d4242011-01-16 13:57:10 -05002516 ib_query_gid(ibdev, host->port, 0, &target->path.sgid);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002517
David Dillow7aa54bd2008-01-07 18:23:41 -05002518 shost_printk(KERN_DEBUG, target->scsi_host, PFX
2519 "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
Harvey Harrison5b095d9892008-10-29 12:52:50 -07002520 "service_id %016llx dgid %pI6\n",
Roland Dreieraef9ec32005-11-02 14:07:13 -08002521 (unsigned long long) be64_to_cpu(target->id_ext),
2522 (unsigned long long) be64_to_cpu(target->ioc_guid),
2523 be16_to_cpu(target->path.pkey),
2524 (unsigned long long) be64_to_cpu(target->service_id),
Harvey Harrison8867cd72008-10-28 22:36:33 -07002525 target->path.dgid.raw);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002526
2527 ret = srp_create_target_ib(target);
2528 if (ret)
David Dillow8f26c9f2011-01-14 19:45:50 -05002529 goto err_free_mem;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002530
David Dillow9fe4bcf2008-01-08 17:08:52 -05002531 ret = srp_new_cm_id(target);
2532 if (ret)
David Dillow8f26c9f2011-01-14 19:45:50 -05002533 goto err_free_ib;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002534
2535 ret = srp_connect_target(target);
2536 if (ret) {
David Dillow7aa54bd2008-01-07 18:23:41 -05002537 shost_printk(KERN_ERR, target->scsi_host,
2538 PFX "Connection failed\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002539 goto err_cm_id;
2540 }
2541
2542 ret = srp_add_target(host, target);
2543 if (ret)
2544 goto err_disconnect;
2545
2546 return count;
2547
2548err_disconnect:
2549 srp_disconnect_target(target);
2550
2551err_cm_id:
2552 ib_destroy_cm_id(target->cm_id);
2553
David Dillow8f26c9f2011-01-14 19:45:50 -05002554err_free_ib:
Roland Dreieraef9ec32005-11-02 14:07:13 -08002555 srp_free_target_ib(target);
2556
David Dillow8f26c9f2011-01-14 19:45:50 -05002557err_free_mem:
2558 srp_free_req_data(target);
2559
Roland Dreieraef9ec32005-11-02 14:07:13 -08002560err:
2561 scsi_host_put(target_host);
2562
2563 return ret;
2564}
2565
Tony Jonesee959b02008-02-22 00:13:36 +01002566static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002567
Tony Jonesee959b02008-02-22 00:13:36 +01002568static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
2569 char *buf)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002570{
Tony Jonesee959b02008-02-22 00:13:36 +01002571 struct srp_host *host = container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002572
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002573 return sprintf(buf, "%s\n", host->srp_dev->dev->name);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002574}
2575
Tony Jonesee959b02008-02-22 00:13:36 +01002576static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002577
static ssize_t show_port(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%d\n", host->port);
}

static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);

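/*
 * Allocate an srp_host for one port of an IB device and register it as a
 * device in the infiniband_srp class, together with its add_target, ibdev
 * and port attributes.  Returns NULL on any failure.
 */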
static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	spin_lock_init(&host->target_lock);
	init_completion(&host->released);
	host->srp_dev = device;
	host->port = port;

	host->dev.class = &srp_class;
	host->dev.parent = device->dev->dma_device;
	dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);

	if (device_register(&host->dev))
		goto free_host;
	if (device_create_file(&host->dev, &dev_attr_add_target))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_ibdev))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_port))
		goto err_class;

	return host;

err_class:
	device_unregister(&host->dev);

free_host:
	kfree(host);

	return NULL;
}

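/*
 * IB client "add" callback, called once per IB device.  Queries the device,
 * allocates a protection domain, a DMA memory region and (if possible) an
 * FMR pool, and then creates one srp_host per physical port.
 */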
static void srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct ib_device_attr *dev_attr;
	struct ib_fmr_pool_param fmr_param;
	struct srp_host *host;
	int max_pages_per_fmr, fmr_page_shift, s, e, p;

	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
	if (!dev_attr)
		return;

	if (ib_query_device(device, dev_attr)) {
		pr_warn("Query device failed for %s\n", device->name);
		goto free_attr;
	}

	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
	if (!srp_dev)
		goto free_attr;

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 4096 bytes. We're unlikely to build large sglists
	 * out of smaller entries.
	 */
	fmr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1);
	srp_dev->fmr_page_size = 1 << fmr_page_shift;
	srp_dev->fmr_page_mask = ~((u64) srp_dev->fmr_page_size - 1);
	srp_dev->fmr_max_size = srp_dev->fmr_page_size * SRP_FMR_SIZE;
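	/*
	 * Worked example (hypothetical capability mask, not from this file):
	 * a page_size_cap of 0x3ff000 has its lowest set bit at 4 KiB, so
	 * ffs() - 1 = 12 and fmr_page_shift = max(12, 12) = 12, giving a
	 * 4096-byte FMR page size and fmr_max_size = 4096 * SRP_FMR_SIZE.
	 */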

	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd = ib_alloc_pd(device);
	if (IS_ERR(srp_dev->pd))
		goto free_dev;

	srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
				    IB_ACCESS_LOCAL_WRITE |
				    IB_ACCESS_REMOTE_READ |
				    IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(srp_dev->mr))
		goto err_pd;

	for (max_pages_per_fmr = SRP_FMR_SIZE;
			max_pages_per_fmr >= SRP_FMR_MIN_SIZE;
			max_pages_per_fmr /= 2, srp_dev->fmr_max_size /= 2) {
		memset(&fmr_param, 0, sizeof fmr_param);
		fmr_param.pool_size = SRP_FMR_POOL_SIZE;
		fmr_param.dirty_watermark = SRP_FMR_DIRTY_SIZE;
		fmr_param.cache = 1;
		fmr_param.max_pages_per_fmr = max_pages_per_fmr;
		fmr_param.page_shift = fmr_page_shift;
		fmr_param.access = (IB_ACCESS_LOCAL_WRITE |
				    IB_ACCESS_REMOTE_WRITE |
				    IB_ACCESS_REMOTE_READ);

		srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param);
		if (!IS_ERR(srp_dev->fmr_pool))
			break;
	}

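	/*
	 * FMR pool creation is best effort: if it failed for every size tried
	 * above, fmr_pool is left NULL below and the driver carries on without
	 * FMR-based memory registration.
	 */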
	if (IS_ERR(srp_dev->fmr_pool))
		srp_dev->fmr_pool = NULL;

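	/*
	 * A switch exposes only its management port, numbered 0; HCAs expose
	 * ports 1..phys_port_cnt.
	 */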
	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);

	goto free_attr;

err_pd:
	ib_dealloc_pd(srp_dev->pd);

free_dev:
	kfree(srp_dev);

free_attr:
	kfree(dev_attr);
}

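/*
 * IB client "remove" callback: unregister every port's sysfs device, queue
 * removal work for all of that port's targets, wait for that work to finish
 * and then release the device-wide FMR pool, MR and PD.
 */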
static void srp_remove_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	struct srp_target_port *target;

	srp_dev = ib_get_client_data(device, &srp_client);
	if (!srp_dev)
		return;

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		device_unregister(&host->dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Remove all target ports.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list)
			srp_queue_remove_work(target);
		spin_unlock(&host->target_lock);

		/*
		 * Wait for target port removal tasks.
		 */
		flush_workqueue(system_long_wq);

		kfree(host);
	}

	if (srp_dev->fmr_pool)
		ib_destroy_fmr_pool(srp_dev->fmr_pool);
	ib_dereg_mr(srp_dev->mr);
	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}

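/*
 * Callbacks and tunables handed to the SRP transport class; they hook the
 * rport reconnect, fast_io_fail and dev_loss handling up to this driver.
 */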
static struct srp_function_template ib_srp_transport_functions = {
	.has_rport_state = true,
	.reset_timer_if_blocked = true,
	.reconnect_delay = &srp_reconnect_delay,
	.fast_io_fail_tmo = &srp_fast_io_fail_tmo,
	.dev_loss_tmo = &srp_dev_loss_tmo,
	.reconnect = srp_rport_reconnect,
	.rport_delete = srp_rport_delete,
	.terminate_rport_io = srp_terminate_io,
};

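/*
 * Module initialization: validate the scatter/gather module parameters,
 * attach to the SRP transport class and register the SA and IB clients.
 */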
static int __init srp_init_module(void)
{
	int ret;

	BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));

	if (srp_sg_tablesize) {
		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
		if (!cmd_sg_entries)
			cmd_sg_entries = srp_sg_tablesize;
	}

	if (!cmd_sg_entries)
		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;

	if (cmd_sg_entries > 255) {
		pr_warn("Clamping cmd_sg_entries to 255\n");
		cmd_sg_entries = 255;
	}

	if (!indirect_sg_entries)
		indirect_sg_entries = cmd_sg_entries;
	else if (indirect_sg_entries < cmd_sg_entries) {
		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
			cmd_sg_entries);
		indirect_sg_entries = cmd_sg_entries;
	}

	ib_srp_transport_template =
		srp_attach_transport(&ib_srp_transport_functions);
	if (!ib_srp_transport_template)
		return -ENOMEM;

	ret = class_register(&srp_class);
	if (ret) {
		pr_err("couldn't register class infiniband_srp\n");
		srp_release_transport(ib_srp_transport_template);
		return ret;
	}

	ib_sa_register_client(&srp_sa_client);

	ret = ib_register_client(&srp_client);
	if (ret) {
		pr_err("couldn't register IB client\n");
		srp_release_transport(ib_srp_transport_template);
		ib_sa_unregister_client(&srp_sa_client);
		class_unregister(&srp_class);
		return ret;
	}

	return 0;
}

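/* Module unload: tear everything down in the reverse order of srp_init_module(). */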
static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);
	srp_release_transport(ib_srp_transport_template);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);