blob: 9ce129ab3beb36a20b41d0fc5e425489d6d7af81 [file] [log] [blame]
Roland Dreieraef9ec32005-11-02 14:07:13 -08001/*
2 * Copyright (c) 2005 Cisco Systems. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
Roland Dreieraef9ec32005-11-02 14:07:13 -080031 */
32
Roland Dreieraef9ec32005-11-02 14:07:13 -080033#include <linux/module.h>
34#include <linux/init.h>
35#include <linux/slab.h>
36#include <linux/err.h>
37#include <linux/string.h>
38#include <linux/parser.h>
39#include <linux/random.h>
Tim Schmielaude259682006-01-08 01:02:05 -080040#include <linux/jiffies.h>
Roland Dreieraef9ec32005-11-02 14:07:13 -080041
42#include <asm/atomic.h>
43
44#include <scsi/scsi.h>
45#include <scsi/scsi_device.h>
46#include <scsi/scsi_dbg.h>
47#include <scsi/srp.h>
FUJITA Tomonori32368222007-06-27 16:33:12 +090048#include <scsi/scsi_transport_srp.h>
Roland Dreieraef9ec32005-11-02 14:07:13 -080049
Roland Dreieraef9ec32005-11-02 14:07:13 -080050#include "ib_srp.h"
51
52#define DRV_NAME "ib_srp"
53#define PFX DRV_NAME ": "
54#define DRV_VERSION "0.2"
55#define DRV_RELDATE "November 1, 2005"
56
57MODULE_AUTHOR("Roland Dreier");
58MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
59 "v" DRV_VERSION " (" DRV_RELDATE ")");
60MODULE_LICENSE("Dual BSD/GPL");
61
David Dillow49248642011-01-14 18:23:24 -050062static unsigned int srp_sg_tablesize;
63static unsigned int cmd_sg_entries;
Roland Dreieraef9ec32005-11-02 14:07:13 -080064static int topspin_workarounds = 1;
65
David Dillow49248642011-01-14 18:23:24 -050066module_param(srp_sg_tablesize, uint, 0444);
67MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");
68
69module_param(cmd_sg_entries, uint, 0444);
70MODULE_PARM_DESC(cmd_sg_entries,
71 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");
72
Roland Dreieraef9ec32005-11-02 14:07:13 -080073module_param(topspin_workarounds, int, 0444);
74MODULE_PARM_DESC(topspin_workarounds,
75 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
76
Roland Dreieraef9ec32005-11-02 14:07:13 -080077static void srp_add_one(struct ib_device *device);
78static void srp_remove_one(struct ib_device *device);
Bart Van Assche9c03dc92010-02-02 19:23:54 +000079static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
80static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
Roland Dreieraef9ec32005-11-02 14:07:13 -080081static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
82
FUJITA Tomonori32368222007-06-27 16:33:12 +090083static struct scsi_transport_template *ib_srp_transport_template;
84
Roland Dreieraef9ec32005-11-02 14:07:13 -080085static struct ib_client srp_client = {
86 .name = "srp",
87 .add = srp_add_one,
88 .remove = srp_remove_one
89};
90
Michael S. Tsirkinc1a0b232006-08-21 16:40:12 -070091static struct ib_sa_client srp_sa_client;
92
Roland Dreieraef9ec32005-11-02 14:07:13 -080093static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
94{
95 return (struct srp_target_port *) host->hostdata;
96}
97
98static const char *srp_target_info(struct Scsi_Host *host)
99{
100 return host_to_target(host)->target_name;
101}
102
Roland Dreier5d7cbfd2007-08-03 10:45:18 -0700103static int srp_target_is_topspin(struct srp_target_port *target)
104{
105 static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
Raghava Kondapalli3d1ff482007-08-03 10:45:18 -0700106 static const u8 cisco_oui[3] = { 0x00, 0x1b, 0x0d };
Roland Dreier5d7cbfd2007-08-03 10:45:18 -0700107
108 return topspin_workarounds &&
Raghava Kondapalli3d1ff482007-08-03 10:45:18 -0700109 (!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
110 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
Roland Dreier5d7cbfd2007-08-03 10:45:18 -0700111}
112
Roland Dreieraef9ec32005-11-02 14:07:13 -0800113static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
114 gfp_t gfp_mask,
115 enum dma_data_direction direction)
116{
117 struct srp_iu *iu;
118
119 iu = kmalloc(sizeof *iu, gfp_mask);
120 if (!iu)
121 goto out;
122
123 iu->buf = kzalloc(size, gfp_mask);
124 if (!iu->buf)
125 goto out_free_iu;
126
Greg Kroah-Hartman05321932008-03-06 00:13:36 +0100127 iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
128 direction);
129 if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
Roland Dreieraef9ec32005-11-02 14:07:13 -0800130 goto out_free_buf;
131
132 iu->size = size;
133 iu->direction = direction;
134
135 return iu;
136
137out_free_buf:
138 kfree(iu->buf);
139out_free_iu:
140 kfree(iu);
141out:
142 return NULL;
143}
144
145static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
146{
147 if (!iu)
148 return;
149
Greg Kroah-Hartman05321932008-03-06 00:13:36 +0100150 ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
151 iu->direction);
Roland Dreieraef9ec32005-11-02 14:07:13 -0800152 kfree(iu->buf);
153 kfree(iu);
154}
155
156static void srp_qp_event(struct ib_event *event, void *context)
157{
158 printk(KERN_ERR PFX "QP event %d\n", event->event);
159}
160
161static int srp_init_qp(struct srp_target_port *target,
162 struct ib_qp *qp)
163{
164 struct ib_qp_attr *attr;
165 int ret;
166
167 attr = kmalloc(sizeof *attr, GFP_KERNEL);
168 if (!attr)
169 return -ENOMEM;
170
Roland Dreier969a60f2008-07-14 23:48:43 -0700171 ret = ib_find_pkey(target->srp_host->srp_dev->dev,
172 target->srp_host->port,
173 be16_to_cpu(target->path.pkey),
174 &attr->pkey_index);
Roland Dreieraef9ec32005-11-02 14:07:13 -0800175 if (ret)
176 goto out;
177
178 attr->qp_state = IB_QPS_INIT;
179 attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
180 IB_ACCESS_REMOTE_WRITE);
181 attr->port_num = target->srp_host->port;
182
183 ret = ib_modify_qp(qp, attr,
184 IB_QP_STATE |
185 IB_QP_PKEY_INDEX |
186 IB_QP_ACCESS_FLAGS |
187 IB_QP_PORT);
188
189out:
190 kfree(attr);
191 return ret;
192}
193
David Dillow9fe4bcf2008-01-08 17:08:52 -0500194static int srp_new_cm_id(struct srp_target_port *target)
195{
196 struct ib_cm_id *new_cm_id;
197
Greg Kroah-Hartman05321932008-03-06 00:13:36 +0100198 new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
David Dillow9fe4bcf2008-01-08 17:08:52 -0500199 srp_cm_handler, target);
200 if (IS_ERR(new_cm_id))
201 return PTR_ERR(new_cm_id);
202
203 if (target->cm_id)
204 ib_destroy_cm_id(target->cm_id);
205 target->cm_id = new_cm_id;
206
207 return 0;
208}
209
Roland Dreieraef9ec32005-11-02 14:07:13 -0800210static int srp_create_target_ib(struct srp_target_port *target)
211{
212 struct ib_qp_init_attr *init_attr;
213 int ret;
214
215 init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
216 if (!init_attr)
217 return -ENOMEM;
218
Bart Van Assche9c03dc92010-02-02 19:23:54 +0000219 target->recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
220 srp_recv_completion, NULL, target, SRP_RQ_SIZE, 0);
221 if (IS_ERR(target->recv_cq)) {
222 ret = PTR_ERR(target->recv_cq);
Roland Dreierda9d2f02010-02-24 15:07:59 -0800223 goto err;
Roland Dreieraef9ec32005-11-02 14:07:13 -0800224 }
225
Bart Van Assche9c03dc92010-02-02 19:23:54 +0000226 target->send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
227 srp_send_completion, NULL, target, SRP_SQ_SIZE, 0);
228 if (IS_ERR(target->send_cq)) {
229 ret = PTR_ERR(target->send_cq);
Roland Dreierda9d2f02010-02-24 15:07:59 -0800230 goto err_recv_cq;
Bart Van Assche9c03dc92010-02-02 19:23:54 +0000231 }
232
233 ib_req_notify_cq(target->recv_cq, IB_CQ_NEXT_COMP);
Roland Dreieraef9ec32005-11-02 14:07:13 -0800234
235 init_attr->event_handler = srp_qp_event;
236 init_attr->cap.max_send_wr = SRP_SQ_SIZE;
237 init_attr->cap.max_recv_wr = SRP_RQ_SIZE;
238 init_attr->cap.max_recv_sge = 1;
239 init_attr->cap.max_send_sge = 1;
240 init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
241 init_attr->qp_type = IB_QPT_RC;
Bart Van Assche9c03dc92010-02-02 19:23:54 +0000242 init_attr->send_cq = target->send_cq;
243 init_attr->recv_cq = target->recv_cq;
Roland Dreieraef9ec32005-11-02 14:07:13 -0800244
Greg Kroah-Hartman05321932008-03-06 00:13:36 +0100245 target->qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
Roland Dreieraef9ec32005-11-02 14:07:13 -0800246 if (IS_ERR(target->qp)) {
247 ret = PTR_ERR(target->qp);
Roland Dreierda9d2f02010-02-24 15:07:59 -0800248 goto err_send_cq;
Roland Dreieraef9ec32005-11-02 14:07:13 -0800249 }
250
251 ret = srp_init_qp(target, target->qp);
Roland Dreierda9d2f02010-02-24 15:07:59 -0800252 if (ret)
253 goto err_qp;
Roland Dreieraef9ec32005-11-02 14:07:13 -0800254
Roland Dreierda9d2f02010-02-24 15:07:59 -0800255 kfree(init_attr);
256 return 0;
257
258err_qp:
259 ib_destroy_qp(target->qp);
260
261err_send_cq:
262 ib_destroy_cq(target->send_cq);
263
264err_recv_cq:
265 ib_destroy_cq(target->recv_cq);
266
267err:
Roland Dreieraef9ec32005-11-02 14:07:13 -0800268 kfree(init_attr);
269 return ret;
270}
271
272static void srp_free_target_ib(struct srp_target_port *target)
273{
274 int i;
275
276 ib_destroy_qp(target->qp);
Bart Van Assche9c03dc92010-02-02 19:23:54 +0000277 ib_destroy_cq(target->send_cq);
278 ib_destroy_cq(target->recv_cq);
Roland Dreieraef9ec32005-11-02 14:07:13 -0800279
280 for (i = 0; i < SRP_RQ_SIZE; ++i)
281 srp_free_iu(target->srp_host, target->rx_ring[i]);
Bart Van Asschedd5e6e32010-08-30 19:27:20 +0000282 for (i = 0; i < SRP_SQ_SIZE; ++i)
Roland Dreieraef9ec32005-11-02 14:07:13 -0800283 srp_free_iu(target->srp_host, target->tx_ring[i]);
284}
285
286static void srp_path_rec_completion(int status,
287 struct ib_sa_path_rec *pathrec,
288 void *target_ptr)
289{
290 struct srp_target_port *target = target_ptr;
291
292 target->status = status;
293 if (status)
David Dillow7aa54bd2008-01-07 18:23:41 -0500294 shost_printk(KERN_ERR, target->scsi_host,
295 PFX "Got failed path rec status %d\n", status);
Roland Dreieraef9ec32005-11-02 14:07:13 -0800296 else
297 target->path = *pathrec;
298 complete(&target->done);
299}
300
301static int srp_lookup_path(struct srp_target_port *target)
302{
303 target->path.numb_path = 1;
304
305 init_completion(&target->done);
306
Michael S. Tsirkinc1a0b232006-08-21 16:40:12 -0700307 target->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
Greg Kroah-Hartman05321932008-03-06 00:13:36 +0100308 target->srp_host->srp_dev->dev,
Roland Dreieraef9ec32005-11-02 14:07:13 -0800309 target->srp_host->port,
310 &target->path,
Sean Hefty247e0202007-08-08 15:51:18 -0700311 IB_SA_PATH_REC_SERVICE_ID |
Roland Dreieraef9ec32005-11-02 14:07:13 -0800312 IB_SA_PATH_REC_DGID |
313 IB_SA_PATH_REC_SGID |
314 IB_SA_PATH_REC_NUMB_PATH |
315 IB_SA_PATH_REC_PKEY,
316 SRP_PATH_REC_TIMEOUT_MS,
317 GFP_KERNEL,
318 srp_path_rec_completion,
319 target, &target->path_query);
320 if (target->path_query_id < 0)
321 return target->path_query_id;
322
323 wait_for_completion(&target->done);
324
325 if (target->status < 0)
David Dillow7aa54bd2008-01-07 18:23:41 -0500326 shost_printk(KERN_WARNING, target->scsi_host,
327 PFX "Path record query failed\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -0800328
329 return target->status;
330}
331
332static int srp_send_req(struct srp_target_port *target)
333{
334 struct {
335 struct ib_cm_req_param param;
336 struct srp_login_req priv;
337 } *req = NULL;
338 int status;
339
340 req = kzalloc(sizeof *req, GFP_KERNEL);
341 if (!req)
342 return -ENOMEM;
343
344 req->param.primary_path = &target->path;
345 req->param.alternate_path = NULL;
346 req->param.service_id = target->service_id;
347 req->param.qp_num = target->qp->qp_num;
348 req->param.qp_type = target->qp->qp_type;
349 req->param.private_data = &req->priv;
350 req->param.private_data_len = sizeof req->priv;
351 req->param.flow_control = 1;
352
353 get_random_bytes(&req->param.starting_psn, 4);
354 req->param.starting_psn &= 0xffffff;
355
356 /*
357 * Pick some arbitrary defaults here; we could make these
358 * module parameters if anyone cared about setting them.
359 */
360 req->param.responder_resources = 4;
361 req->param.remote_cm_response_timeout = 20;
362 req->param.local_cm_response_timeout = 20;
363 req->param.retry_count = 7;
364 req->param.rnr_retry_count = 7;
365 req->param.max_cm_retries = 15;
366
367 req->priv.opcode = SRP_LOGIN_REQ;
368 req->priv.tag = 0;
David Dillow49248642011-01-14 18:23:24 -0500369 req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
Roland Dreieraef9ec32005-11-02 14:07:13 -0800370 req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
371 SRP_BUF_FORMAT_INDIRECT);
Ramachandra K0c0450db2006-06-17 20:37:38 -0700372 /*
Roland Dreier3cd96562006-09-22 15:22:46 -0700373 * In the published SRP specification (draft rev. 16a), the
Ramachandra K0c0450db2006-06-17 20:37:38 -0700374 * port identifier format is 8 bytes of ID extension followed
375 * by 8 bytes of GUID. Older drafts put the two halves in the
376 * opposite order, so that the GUID comes first.
377 *
378 * Targets conforming to these obsolete drafts can be
379 * recognized by the I/O Class they report.
380 */
381 if (target->io_class == SRP_REV10_IB_IO_CLASS) {
382 memcpy(req->priv.initiator_port_id,
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +0200383 &target->path.sgid.global.interface_id, 8);
Ramachandra K0c0450db2006-06-17 20:37:38 -0700384 memcpy(req->priv.initiator_port_id + 8,
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +0200385 &target->initiator_ext, 8);
Ramachandra K0c0450db2006-06-17 20:37:38 -0700386 memcpy(req->priv.target_port_id, &target->ioc_guid, 8);
387 memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
388 } else {
389 memcpy(req->priv.initiator_port_id,
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +0200390 &target->initiator_ext, 8);
391 memcpy(req->priv.initiator_port_id + 8,
392 &target->path.sgid.global.interface_id, 8);
Ramachandra K0c0450db2006-06-17 20:37:38 -0700393 memcpy(req->priv.target_port_id, &target->id_ext, 8);
394 memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
395 }
396
Roland Dreieraef9ec32005-11-02 14:07:13 -0800397 /*
398 * Topspin/Cisco SRP targets will reject our login unless we
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +0200399 * zero out the first 8 bytes of our initiator port ID and set
400 * the second 8 bytes to the local node GUID.
Roland Dreieraef9ec32005-11-02 14:07:13 -0800401 */
Roland Dreier5d7cbfd2007-08-03 10:45:18 -0700402 if (srp_target_is_topspin(target)) {
David Dillow7aa54bd2008-01-07 18:23:41 -0500403 shost_printk(KERN_DEBUG, target->scsi_host,
404 PFX "Topspin/Cisco initiator port ID workaround "
405 "activated for target GUID %016llx\n",
406 (unsigned long long) be64_to_cpu(target->ioc_guid));
Roland Dreieraef9ec32005-11-02 14:07:13 -0800407 memset(req->priv.initiator_port_id, 0, 8);
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +0200408 memcpy(req->priv.initiator_port_id + 8,
Greg Kroah-Hartman05321932008-03-06 00:13:36 +0100409 &target->srp_host->srp_dev->dev->node_guid, 8);
Roland Dreieraef9ec32005-11-02 14:07:13 -0800410 }
Roland Dreieraef9ec32005-11-02 14:07:13 -0800411
412 status = ib_send_cm_req(target->cm_id, &req->param);
413
414 kfree(req);
415
416 return status;
417}
418
419static void srp_disconnect_target(struct srp_target_port *target)
420{
421 /* XXX should send SRP_I_LOGOUT request */
422
423 init_completion(&target->done);
Roland Dreiere6581052006-05-17 09:13:21 -0700424 if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
David Dillow7aa54bd2008-01-07 18:23:41 -0500425 shost_printk(KERN_DEBUG, target->scsi_host,
426 PFX "Sending CM DREQ failed\n");
Roland Dreiere6581052006-05-17 09:13:21 -0700427 return;
428 }
Roland Dreieraef9ec32005-11-02 14:07:13 -0800429 wait_for_completion(&target->done);
430}
431
Bart Van Assche9709f0e2010-11-26 13:13:06 -0500432static bool srp_change_state(struct srp_target_port *target,
433 enum srp_target_state old,
434 enum srp_target_state new)
435{
436 bool changed = false;
437
Bart Van Asschee9684672010-11-26 15:08:38 -0500438 spin_lock_irq(&target->lock);
Bart Van Assche9709f0e2010-11-26 13:13:06 -0500439 if (target->state == old) {
440 target->state = new;
441 changed = true;
442 }
Bart Van Asschee9684672010-11-26 15:08:38 -0500443 spin_unlock_irq(&target->lock);
Bart Van Assche9709f0e2010-11-26 13:13:06 -0500444 return changed;
445}
446
David Dillow8f26c9f2011-01-14 19:45:50 -0500447static void srp_free_req_data(struct srp_target_port *target)
448{
449 struct srp_request *req;
450 int i;
451
452 for (i = 0, req = target->req_ring; i < SRP_CMD_SQ_SIZE; ++i, ++req) {
453 kfree(req->fmr_list);
454 kfree(req->map_page);
455 }
456}
457
David Howellsc4028952006-11-22 14:57:56 +0000458static void srp_remove_work(struct work_struct *work)
Roland Dreieraef9ec32005-11-02 14:07:13 -0800459{
David Howellsc4028952006-11-22 14:57:56 +0000460 struct srp_target_port *target =
461 container_of(work, struct srp_target_port, work);
Roland Dreieraef9ec32005-11-02 14:07:13 -0800462
Bart Van Assche9709f0e2010-11-26 13:13:06 -0500463 if (!srp_change_state(target, SRP_TARGET_DEAD, SRP_TARGET_REMOVED))
Roland Dreieraef9ec32005-11-02 14:07:13 -0800464 return;
Roland Dreieraef9ec32005-11-02 14:07:13 -0800465
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -0700466 spin_lock(&target->srp_host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -0800467 list_del(&target->list);
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -0700468 spin_unlock(&target->srp_host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -0800469
FUJITA Tomonori32368222007-06-27 16:33:12 +0900470 srp_remove_host(target->scsi_host);
Roland Dreieraef9ec32005-11-02 14:07:13 -0800471 scsi_remove_host(target->scsi_host);
472 ib_destroy_cm_id(target->cm_id);
473 srp_free_target_ib(target);
David Dillow8f26c9f2011-01-14 19:45:50 -0500474 srp_free_req_data(target);
Roland Dreieraef9ec32005-11-02 14:07:13 -0800475 scsi_host_put(target->scsi_host);
Roland Dreieraef9ec32005-11-02 14:07:13 -0800476}
477
478static int srp_connect_target(struct srp_target_port *target)
479{
David Dillow9fe4bcf2008-01-08 17:08:52 -0500480 int retries = 3;
Roland Dreieraef9ec32005-11-02 14:07:13 -0800481 int ret;
482
483 ret = srp_lookup_path(target);
484 if (ret)
485 return ret;
486
487 while (1) {
488 init_completion(&target->done);
489 ret = srp_send_req(target);
490 if (ret)
491 return ret;
492 wait_for_completion(&target->done);
493
494 /*
495 * The CM event handling code will set status to
496 * SRP_PORT_REDIRECT if we get a port redirect REJ
497 * back, or SRP_DLID_REDIRECT if we get a lid/qp
498 * redirect REJ back.
499 */
500 switch (target->status) {
501 case 0:
502 return 0;
503
504 case SRP_PORT_REDIRECT:
505 ret = srp_lookup_path(target);
506 if (ret)
507 return ret;
508 break;
509
510 case SRP_DLID_REDIRECT:
511 break;
512
David Dillow9fe4bcf2008-01-08 17:08:52 -0500513 case SRP_STALE_CONN:
514 /* Our current CM id was stale, and is now in timewait.
515 * Try to reconnect with a new one.
516 */
517 if (!retries-- || srp_new_cm_id(target)) {
518 shost_printk(KERN_ERR, target->scsi_host, PFX
519 "giving up on stale connection\n");
520 target->status = -ECONNRESET;
521 return target->status;
522 }
523
524 shost_printk(KERN_ERR, target->scsi_host, PFX
525 "retrying stale connection\n");
526 break;
527
Roland Dreieraef9ec32005-11-02 14:07:13 -0800528 default:
529 return target->status;
530 }
531 }
532}
533
Roland Dreierd945e1d2006-05-09 10:50:28 -0700534static void srp_unmap_data(struct scsi_cmnd *scmnd,
535 struct srp_target_port *target,
536 struct srp_request *req)
537{
David Dillow8f26c9f2011-01-14 19:45:50 -0500538 struct ib_device *ibdev = target->srp_host->srp_dev->dev;
539 struct ib_pool_fmr **pfmr;
540
FUJITA Tomonoribb350d12007-05-26 02:28:25 +0900541 if (!scsi_sglist(scmnd) ||
Roland Dreierd945e1d2006-05-09 10:50:28 -0700542 (scmnd->sc_data_direction != DMA_TO_DEVICE &&
543 scmnd->sc_data_direction != DMA_FROM_DEVICE))
544 return;
545
David Dillow8f26c9f2011-01-14 19:45:50 -0500546 pfmr = req->fmr_list;
547 while (req->nfmr--)
548 ib_fmr_pool_unmap(*pfmr++);
Roland Dreierf5358a12006-06-17 20:37:29 -0700549
David Dillow8f26c9f2011-01-14 19:45:50 -0500550 ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
551 scmnd->sc_data_direction);
Roland Dreierd945e1d2006-05-09 10:50:28 -0700552}
553
Bart Van Assche94a91742010-11-26 14:50:09 -0500554static void srp_remove_req(struct srp_target_port *target,
555 struct srp_request *req, s32 req_lim_delta)
Ishai Rabinovitz526b4ca2006-06-17 20:37:38 -0700556{
Bart Van Assche94a91742010-11-26 14:50:09 -0500557 unsigned long flags;
558
Ishai Rabinovitz526b4ca2006-06-17 20:37:38 -0700559 srp_unmap_data(req->scmnd, target, req);
Bart Van Asschee9684672010-11-26 15:08:38 -0500560 spin_lock_irqsave(&target->lock, flags);
Bart Van Assche94a91742010-11-26 14:50:09 -0500561 target->req_lim += req_lim_delta;
David Dillowf8b6e312010-11-26 13:02:21 -0500562 req->scmnd = NULL;
Bart Van Assche536ae142010-11-26 13:58:27 -0500563 list_add_tail(&req->list, &target->free_reqs);
Bart Van Asschee9684672010-11-26 15:08:38 -0500564 spin_unlock_irqrestore(&target->lock, flags);
Ishai Rabinovitz526b4ca2006-06-17 20:37:38 -0700565}
566
567static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
568{
569 req->scmnd->result = DID_RESET << 16;
570 req->scmnd->scsi_done(req->scmnd);
Bart Van Assche94a91742010-11-26 14:50:09 -0500571 srp_remove_req(target, req, 0);
Ishai Rabinovitz526b4ca2006-06-17 20:37:38 -0700572}
573
Roland Dreieraef9ec32005-11-02 14:07:13 -0800574static int srp_reconnect_target(struct srp_target_port *target)
575{
Roland Dreieraef9ec32005-11-02 14:07:13 -0800576 struct ib_qp_attr qp_attr;
Roland Dreieraef9ec32005-11-02 14:07:13 -0800577 struct ib_wc wc;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -0500578 int i, ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -0800579
Bart Van Assche9709f0e2010-11-26 13:13:06 -0500580 if (!srp_change_state(target, SRP_TARGET_LIVE, SRP_TARGET_CONNECTING))
Roland Dreieraef9ec32005-11-02 14:07:13 -0800581 return -EAGAIN;
Roland Dreieraef9ec32005-11-02 14:07:13 -0800582
583 srp_disconnect_target(target);
584 /*
585 * Now get a new local CM ID so that we avoid confusing the
586 * target in case things are really fouled up.
587 */
David Dillow9fe4bcf2008-01-08 17:08:52 -0500588 ret = srp_new_cm_id(target);
589 if (ret)
Roland Dreieraef9ec32005-11-02 14:07:13 -0800590 goto err;
Roland Dreieraef9ec32005-11-02 14:07:13 -0800591
592 qp_attr.qp_state = IB_QPS_RESET;
593 ret = ib_modify_qp(target->qp, &qp_attr, IB_QP_STATE);
594 if (ret)
595 goto err;
596
597 ret = srp_init_qp(target, target->qp);
598 if (ret)
599 goto err;
600
Bart Van Assche9c03dc92010-02-02 19:23:54 +0000601 while (ib_poll_cq(target->recv_cq, 1, &wc) > 0)
602 ; /* nothing */
603 while (ib_poll_cq(target->send_cq, 1, &wc) > 0)
Roland Dreieraef9ec32005-11-02 14:07:13 -0800604 ; /* nothing */
605
Bart Van Assche536ae142010-11-26 13:58:27 -0500606 for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
607 struct srp_request *req = &target->req_ring[i];
608 if (req->scmnd)
609 srp_reset_req(target, req);
610 }
Roland Dreieraef9ec32005-11-02 14:07:13 -0800611
Bart Van Assche536ae142010-11-26 13:58:27 -0500612 INIT_LIST_HEAD(&target->free_tx);
Bart Van Asschedcb4cb82010-11-26 13:22:48 -0500613 for (i = 0; i < SRP_SQ_SIZE; ++i)
Bart Van Assche536ae142010-11-26 13:58:27 -0500614 list_add(&target->tx_ring[i]->list, &target->free_tx);
Roland Dreieraef9ec32005-11-02 14:07:13 -0800615
Ishai Rabinovitz1033ff62007-01-16 17:26:22 +0200616 target->qp_in_error = 0;
Roland Dreieraef9ec32005-11-02 14:07:13 -0800617 ret = srp_connect_target(target);
618 if (ret)
619 goto err;
620
Bart Van Assche9709f0e2010-11-26 13:13:06 -0500621 if (!srp_change_state(target, SRP_TARGET_CONNECTING, SRP_TARGET_LIVE))
Roland Dreieraef9ec32005-11-02 14:07:13 -0800622 ret = -EAGAIN;
Roland Dreieraef9ec32005-11-02 14:07:13 -0800623
624 return ret;
625
626err:
David Dillow7aa54bd2008-01-07 18:23:41 -0500627 shost_printk(KERN_ERR, target->scsi_host,
628 PFX "reconnect failed (%d), removing target port.\n", ret);
Roland Dreieraef9ec32005-11-02 14:07:13 -0800629
630 /*
631 * We couldn't reconnect, so kill our target port off.
Bart Van Assche9709f0e2010-11-26 13:13:06 -0500632 * However, we have to defer the real removal because we
633 * are in the context of the SCSI error handler now, which
634 * will deadlock if we call scsi_remove_host().
635 *
636 * Schedule our work inside the lock to avoid a race with
637 * the flush_scheduled_work() in srp_remove_one().
Roland Dreieraef9ec32005-11-02 14:07:13 -0800638 */
Bart Van Asschee9684672010-11-26 15:08:38 -0500639 spin_lock_irq(&target->lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -0800640 if (target->state == SRP_TARGET_CONNECTING) {
641 target->state = SRP_TARGET_DEAD;
David Howellsc4028952006-11-22 14:57:56 +0000642 INIT_WORK(&target->work, srp_remove_work);
Tejun Heof0626712010-10-19 15:24:36 +0000643 queue_work(ib_wq, &target->work);
Roland Dreieraef9ec32005-11-02 14:07:13 -0800644 }
Bart Van Asschee9684672010-11-26 15:08:38 -0500645 spin_unlock_irq(&target->lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -0800646
647 return ret;
648}
649
David Dillow8f26c9f2011-01-14 19:45:50 -0500650static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
651 unsigned int dma_len, u32 rkey)
Roland Dreierf5358a12006-06-17 20:37:29 -0700652{
David Dillow8f26c9f2011-01-14 19:45:50 -0500653 struct srp_direct_buf *desc = state->desc;
654
655 desc->va = cpu_to_be64(dma_addr);
656 desc->key = cpu_to_be32(rkey);
657 desc->len = cpu_to_be32(dma_len);
658
659 state->total_len += dma_len;
660 state->desc++;
661 state->ndesc++;
662}
663
664static int srp_map_finish_fmr(struct srp_map_state *state,
665 struct srp_target_port *target)
666{
667 struct srp_device *dev = target->srp_host->srp_dev;
668 struct ib_pool_fmr *fmr;
Roland Dreierf5358a12006-06-17 20:37:29 -0700669 u64 io_addr = 0;
David Dillow8f26c9f2011-01-14 19:45:50 -0500670
671 if (!state->npages)
672 return 0;
673
674 if (state->npages == 1) {
675 srp_map_desc(state, state->base_dma_addr, state->fmr_len,
676 target->rkey);
677 state->npages = state->fmr_len = 0;
678 return 0;
679 }
680
681 fmr = ib_fmr_pool_map_phys(dev->fmr_pool, state->pages,
682 state->npages, io_addr);
683 if (IS_ERR(fmr))
684 return PTR_ERR(fmr);
685
686 *state->next_fmr++ = fmr;
687 state->nfmr++;
688
689 srp_map_desc(state, 0, state->fmr_len, fmr->fmr->rkey);
690 state->npages = state->fmr_len = 0;
691 return 0;
692}
693
694static void srp_map_update_start(struct srp_map_state *state,
695 struct scatterlist *sg, int sg_index,
696 dma_addr_t dma_addr)
697{
698 state->unmapped_sg = sg;
699 state->unmapped_index = sg_index;
700 state->unmapped_addr = dma_addr;
701}
702
703static int srp_map_sg_entry(struct srp_map_state *state,
704 struct srp_target_port *target,
705 struct scatterlist *sg, int sg_index,
706 int use_fmr)
707{
Greg Kroah-Hartman05321932008-03-06 00:13:36 +0100708 struct srp_device *dev = target->srp_host->srp_dev;
Ralph Campbell85507bc2006-12-12 14:30:55 -0800709 struct ib_device *ibdev = dev->dev;
David Dillow8f26c9f2011-01-14 19:45:50 -0500710 dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
711 unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
712 unsigned int len;
713 int ret;
Roland Dreierf5358a12006-06-17 20:37:29 -0700714
David Dillow8f26c9f2011-01-14 19:45:50 -0500715 if (!dma_len)
716 return 0;
Roland Dreierf5358a12006-06-17 20:37:29 -0700717
David Dillow8f26c9f2011-01-14 19:45:50 -0500718 if (use_fmr == SRP_MAP_NO_FMR) {
719 /* Once we're in direct map mode for a request, we don't
720 * go back to FMR mode, so no need to update anything
721 * other than the descriptor.
722 */
723 srp_map_desc(state, dma_addr, dma_len, target->rkey);
724 return 0;
725 }
Ishai Rabinovitz559ce8f2006-08-03 10:35:43 -0700726
David Dillow8f26c9f2011-01-14 19:45:50 -0500727 /* If we start at an offset into the FMR page, don't merge into
728 * the current FMR. Finish it out, and use the kernel's MR for this
729 * sg entry. This is to avoid potential bugs on some SRP targets
730 * that were never quite defined, but went away when the initiator
731 * avoided using FMR on such page fragments.
732 */
733 if (dma_addr & ~dev->fmr_page_mask || dma_len > dev->fmr_max_size) {
734 ret = srp_map_finish_fmr(state, target);
735 if (ret)
736 return ret;
Ralph Campbell85507bc2006-12-12 14:30:55 -0800737
David Dillow8f26c9f2011-01-14 19:45:50 -0500738 srp_map_desc(state, dma_addr, dma_len, target->rkey);
739 srp_map_update_start(state, NULL, 0, 0);
740 return 0;
741 }
742
743 /* If this is the first sg to go into the FMR, save our position.
744 * We need to know the first unmapped entry, its index, and the
745 * first unmapped address within that entry to be able to restart
746 * mapping after an error.
747 */
748 if (!state->unmapped_sg)
749 srp_map_update_start(state, sg, sg_index, dma_addr);
750
751 while (dma_len) {
752 if (state->npages == SRP_FMR_SIZE) {
753 ret = srp_map_finish_fmr(state, target);
754 if (ret)
755 return ret;
756
757 srp_map_update_start(state, sg, sg_index, dma_addr);
Roland Dreierf5358a12006-06-17 20:37:29 -0700758 }
759
David Dillow8f26c9f2011-01-14 19:45:50 -0500760 len = min_t(unsigned int, dma_len, dev->fmr_page_size);
761
762 if (!state->npages)
763 state->base_dma_addr = dma_addr;
764 state->pages[state->npages++] = dma_addr;
765 state->fmr_len += len;
766 dma_addr += len;
767 dma_len -= len;
Roland Dreierf5358a12006-06-17 20:37:29 -0700768 }
769
David Dillow8f26c9f2011-01-14 19:45:50 -0500770 /* If the last entry of the FMR wasn't a full page, then we need to
771 * close it out and start a new one -- we can only merge at page
772 * boundries.
773 */
Roland Dreierf5358a12006-06-17 20:37:29 -0700774 ret = 0;
David Dillow8f26c9f2011-01-14 19:45:50 -0500775 if (len != dev->fmr_page_size) {
776 ret = srp_map_finish_fmr(state, target);
777 if (!ret)
778 srp_map_update_start(state, NULL, 0, 0);
779 }
Roland Dreierf5358a12006-06-17 20:37:29 -0700780 return ret;
781}
782
Roland Dreieraef9ec32005-11-02 14:07:13 -0800783static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
784 struct srp_request *req)
785{
David Dillow8f26c9f2011-01-14 19:45:50 -0500786 struct scatterlist *scat, *sg;
Roland Dreieraef9ec32005-11-02 14:07:13 -0800787 struct srp_cmd *cmd = req->cmd->buf;
David Dillow8f26c9f2011-01-14 19:45:50 -0500788 int i, len, nents, count, use_fmr;
Ralph Campbell85507bc2006-12-12 14:30:55 -0800789 struct srp_device *dev;
790 struct ib_device *ibdev;
David Dillow8f26c9f2011-01-14 19:45:50 -0500791 struct srp_map_state state;
792 struct srp_indirect_buf *indirect_hdr;
793 dma_addr_t indirect_addr;
794 u32 table_len;
795 u8 fmt;
Roland Dreieraef9ec32005-11-02 14:07:13 -0800796
FUJITA Tomonoribb350d12007-05-26 02:28:25 +0900797 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
Roland Dreieraef9ec32005-11-02 14:07:13 -0800798 return sizeof (struct srp_cmd);
799
800 if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
801 scmnd->sc_data_direction != DMA_TO_DEVICE) {
David Dillow7aa54bd2008-01-07 18:23:41 -0500802 shost_printk(KERN_WARNING, target->scsi_host,
803 PFX "Unhandled data direction %d\n",
804 scmnd->sc_data_direction);
Roland Dreieraef9ec32005-11-02 14:07:13 -0800805 return -EINVAL;
806 }
807
FUJITA Tomonoribb350d12007-05-26 02:28:25 +0900808 nents = scsi_sg_count(scmnd);
809 scat = scsi_sglist(scmnd);
Roland Dreiercf368712006-03-24 15:47:26 -0800810
Greg Kroah-Hartman05321932008-03-06 00:13:36 +0100811 dev = target->srp_host->srp_dev;
Ralph Campbell85507bc2006-12-12 14:30:55 -0800812 ibdev = dev->dev;
813
814 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
David Dillow8f26c9f2011-01-14 19:45:50 -0500815 if (unlikely(count == 0))
816 return -EIO;
Roland Dreierf5358a12006-06-17 20:37:29 -0700817
818 fmt = SRP_DATA_DESC_DIRECT;
819 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
Roland Dreiercf368712006-03-24 15:47:26 -0800820
821 if (count == 1) {
Roland Dreierf5358a12006-06-17 20:37:29 -0700822 /*
823 * The midlayer only generated a single gather/scatter
824 * entry, or DMA mapping coalesced everything to a
825 * single entry. So a direct descriptor along with
826 * the DMA MR suffices.
827 */
Roland Dreieraef9ec32005-11-02 14:07:13 -0800828 struct srp_direct_buf *buf = (void *) cmd->add_data;
Roland Dreieraef9ec32005-11-02 14:07:13 -0800829
Ralph Campbell85507bc2006-12-12 14:30:55 -0800830 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
David Dillow9af76272010-11-26 15:34:46 -0500831 buf->key = cpu_to_be32(target->rkey);
Ralph Campbell85507bc2006-12-12 14:30:55 -0800832 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
Roland Dreiercf368712006-03-24 15:47:26 -0800833
David Dillow8f26c9f2011-01-14 19:45:50 -0500834 req->nfmr = 0;
835 goto map_complete;
Roland Dreieraef9ec32005-11-02 14:07:13 -0800836 }
837
David Dillow8f26c9f2011-01-14 19:45:50 -0500838 /* We have more than one scatter/gather entry, so build our indirect
839 * descriptor table, trying to merge as many entries with FMR as we
840 * can.
841 */
842 indirect_hdr = (void *) cmd->add_data;
843
844 memset(&state, 0, sizeof(state));
845 state.desc = indirect_hdr->desc_list;
846 state.pages = req->map_page;
847 state.next_fmr = req->fmr_list;
848
849 use_fmr = dev->fmr_pool ? SRP_MAP_ALLOW_FMR : SRP_MAP_NO_FMR;
850
851 for_each_sg(scat, sg, count, i) {
852 if (srp_map_sg_entry(&state, target, sg, i, use_fmr)) {
853 /* FMR mapping failed, so backtrack to the first
854 * unmapped entry and continue on without using FMR.
855 */
856 dma_addr_t dma_addr;
857 unsigned int dma_len;
858
859backtrack:
860 sg = state.unmapped_sg;
861 i = state.unmapped_index;
862
863 dma_addr = ib_sg_dma_address(ibdev, sg);
864 dma_len = ib_sg_dma_len(ibdev, sg);
865 dma_len -= (state.unmapped_addr - dma_addr);
866 dma_addr = state.unmapped_addr;
867 use_fmr = SRP_MAP_NO_FMR;
868 srp_map_desc(&state, dma_addr, dma_len, target->rkey);
869 }
870 }
871
872 if (use_fmr == SRP_MAP_ALLOW_FMR && srp_map_finish_fmr(&state, target))
873 goto backtrack;
874
875 /* We've mapped the request, fill in the command buffer.
876 */
877 req->nfmr = state.nfmr;
878 if (state.ndesc == 1) {
879 /* FMR mapping was able to collapse this to one entry,
880 * so use a direct descriptor.
881 */
882 struct srp_direct_buf *buf = (void *) cmd->add_data;
883
884 *buf = indirect_hdr->desc_list[0];
885 goto map_complete;
886 }
887
888 table_len = state.ndesc * sizeof (struct srp_direct_buf);
889
890 fmt = SRP_DATA_DESC_INDIRECT;
891 len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
892 len += table_len;
893
894 indirect_addr = req->cmd->dma + sizeof *cmd + sizeof *indirect_hdr;
895
896 indirect_hdr->table_desc.va = cpu_to_be64(indirect_addr);
897 indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
898 indirect_hdr->table_desc.len = cpu_to_be32(table_len);
899 indirect_hdr->len = cpu_to_be32(state.total_len);
900
901 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
902 cmd->data_out_desc_cnt = state.ndesc;
903 else
904 cmd->data_in_desc_cnt = state.ndesc;
905
906map_complete:
Roland Dreieraef9ec32005-11-02 14:07:13 -0800907 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
908 cmd->buf_fmt = fmt << 4;
909 else
910 cmd->buf_fmt = fmt;
911
Roland Dreieraef9ec32005-11-02 14:07:13 -0800912 return len;
913}
914
David Dillow05a1d752010-10-08 14:48:14 -0400915/*
Bart Van Assche76c75b22010-11-26 14:37:47 -0500916 * Return an IU and possible credit to the free pool
917 */
918static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu,
919 enum srp_iu_type iu_type)
920{
921 unsigned long flags;
922
Bart Van Asschee9684672010-11-26 15:08:38 -0500923 spin_lock_irqsave(&target->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -0500924 list_add(&iu->list, &target->free_tx);
925 if (iu_type != SRP_IU_RSP)
926 ++target->req_lim;
Bart Van Asschee9684672010-11-26 15:08:38 -0500927 spin_unlock_irqrestore(&target->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -0500928}
929
930/*
Bart Van Asschee9684672010-11-26 15:08:38 -0500931 * Must be called with target->lock held to protect req_lim and free_tx.
932 * If IU is not sent, it must be returned using srp_put_tx_iu().
David Dillow05a1d752010-10-08 14:48:14 -0400933 *
934 * Note:
935 * An upper limit for the number of allocated information units for each
936 * request type is:
937 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
938 * more than Scsi_Host.can_queue requests.
939 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
940 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
941 * one unanswered SRP request to an initiator.
942 */
943static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
944 enum srp_iu_type iu_type)
945{
946 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
947 struct srp_iu *iu;
948
949 srp_send_completion(target->send_cq, target);
950
Bart Van Asschedcb4cb82010-11-26 13:22:48 -0500951 if (list_empty(&target->free_tx))
David Dillow05a1d752010-10-08 14:48:14 -0400952 return NULL;
953
954 /* Initiator responses to target requests do not consume credits */
Bart Van Assche76c75b22010-11-26 14:37:47 -0500955 if (iu_type != SRP_IU_RSP) {
956 if (target->req_lim <= rsv) {
957 ++target->zero_req_lim;
958 return NULL;
959 }
960
961 --target->req_lim;
David Dillow05a1d752010-10-08 14:48:14 -0400962 }
963
Bart Van Asschedcb4cb82010-11-26 13:22:48 -0500964 iu = list_first_entry(&target->free_tx, struct srp_iu, list);
Bart Van Assche76c75b22010-11-26 14:37:47 -0500965 list_del(&iu->list);
David Dillow05a1d752010-10-08 14:48:14 -0400966 return iu;
967}
968
Bart Van Assche76c75b22010-11-26 14:37:47 -0500969static int srp_post_send(struct srp_target_port *target,
970 struct srp_iu *iu, int len)
David Dillow05a1d752010-10-08 14:48:14 -0400971{
972 struct ib_sge list;
973 struct ib_send_wr wr, *bad_wr;
David Dillow05a1d752010-10-08 14:48:14 -0400974
975 list.addr = iu->dma;
976 list.length = len;
David Dillow9af76272010-11-26 15:34:46 -0500977 list.lkey = target->lkey;
David Dillow05a1d752010-10-08 14:48:14 -0400978
979 wr.next = NULL;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -0500980 wr.wr_id = (uintptr_t) iu;
David Dillow05a1d752010-10-08 14:48:14 -0400981 wr.sg_list = &list;
982 wr.num_sge = 1;
983 wr.opcode = IB_WR_SEND;
984 wr.send_flags = IB_SEND_SIGNALED;
985
Bart Van Assche76c75b22010-11-26 14:37:47 -0500986 return ib_post_send(target->qp, &wr, &bad_wr);
David Dillow05a1d752010-10-08 14:48:14 -0400987}
988
Bart Van Asschedcb4cb82010-11-26 13:22:48 -0500989static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu)
Bart Van Asschec996bb42010-07-30 10:59:05 +0000990{
Bart Van Asschec996bb42010-07-30 10:59:05 +0000991 struct ib_recv_wr wr, *bad_wr;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -0500992 struct ib_sge list;
Bart Van Asschec996bb42010-07-30 10:59:05 +0000993
994 list.addr = iu->dma;
995 list.length = iu->size;
David Dillow9af76272010-11-26 15:34:46 -0500996 list.lkey = target->lkey;
Bart Van Asschec996bb42010-07-30 10:59:05 +0000997
998 wr.next = NULL;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -0500999 wr.wr_id = (uintptr_t) iu;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001000 wr.sg_list = &list;
1001 wr.num_sge = 1;
1002
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001003 return ib_post_recv(target->qp, &wr, &bad_wr);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001004}
1005
Roland Dreieraef9ec32005-11-02 14:07:13 -08001006static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
1007{
1008 struct srp_request *req;
1009 struct scsi_cmnd *scmnd;
1010 unsigned long flags;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001011
Roland Dreieraef9ec32005-11-02 14:07:13 -08001012 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
Bart Van Asschee9684672010-11-26 15:08:38 -05001013 spin_lock_irqsave(&target->lock, flags);
Bart Van Assche94a91742010-11-26 14:50:09 -05001014 target->req_lim += be32_to_cpu(rsp->req_lim_delta);
Bart Van Asschee9684672010-11-26 15:08:38 -05001015 spin_unlock_irqrestore(&target->lock, flags);
Bart Van Assche94a91742010-11-26 14:50:09 -05001016
David Dillowf8b6e312010-11-26 13:02:21 -05001017 target->tsk_mgmt_status = -1;
1018 if (be32_to_cpu(rsp->resp_data_len) >= 4)
1019 target->tsk_mgmt_status = rsp->data[3];
1020 complete(&target->tsk_mgmt_done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001021 } else {
David Dillowf8b6e312010-11-26 13:02:21 -05001022 req = &target->req_ring[rsp->tag];
Roland Dreierd945e1d2006-05-09 10:50:28 -07001023 scmnd = req->scmnd;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001024 if (!scmnd)
David Dillow7aa54bd2008-01-07 18:23:41 -05001025 shost_printk(KERN_ERR, target->scsi_host,
1026 "Null scmnd for RSP w/tag %016llx\n",
1027 (unsigned long long) rsp->tag);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001028 scmnd->result = rsp->status;
1029
1030 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1031 memcpy(scmnd->sense_buffer, rsp->data +
1032 be32_to_cpu(rsp->resp_data_len),
1033 min_t(int, be32_to_cpu(rsp->sense_data_len),
1034 SCSI_SENSE_BUFFERSIZE));
1035 }
1036
1037 if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001038 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
Roland Dreieraef9ec32005-11-02 14:07:13 -08001039 else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001040 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
Roland Dreieraef9ec32005-11-02 14:07:13 -08001041
Bart Van Assche94a91742010-11-26 14:50:09 -05001042 srp_remove_req(target, req, be32_to_cpu(rsp->req_lim_delta));
David Dillowf8b6e312010-11-26 13:02:21 -05001043 scmnd->host_scribble = NULL;
1044 scmnd->scsi_done(scmnd);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001045 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001046}
1047
David Dillowbb125882010-10-08 14:40:47 -04001048static int srp_response_common(struct srp_target_port *target, s32 req_delta,
1049 void *rsp, int len)
1050{
Bart Van Assche76c75b22010-11-26 14:37:47 -05001051 struct ib_device *dev = target->srp_host->srp_dev->dev;
David Dillowbb125882010-10-08 14:40:47 -04001052 unsigned long flags;
1053 struct srp_iu *iu;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001054 int err;
David Dillowbb125882010-10-08 14:40:47 -04001055
Bart Van Asschee9684672010-11-26 15:08:38 -05001056 spin_lock_irqsave(&target->lock, flags);
David Dillowbb125882010-10-08 14:40:47 -04001057 target->req_lim += req_delta;
David Dillowbb125882010-10-08 14:40:47 -04001058 iu = __srp_get_tx_iu(target, SRP_IU_RSP);
Bart Van Asschee9684672010-11-26 15:08:38 -05001059 spin_unlock_irqrestore(&target->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001060
David Dillowbb125882010-10-08 14:40:47 -04001061 if (!iu) {
1062 shost_printk(KERN_ERR, target->scsi_host, PFX
1063 "no IU available to send response\n");
Bart Van Assche76c75b22010-11-26 14:37:47 -05001064 return 1;
David Dillowbb125882010-10-08 14:40:47 -04001065 }
1066
1067 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1068 memcpy(iu->buf, rsp, len);
1069 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1070
Bart Van Assche76c75b22010-11-26 14:37:47 -05001071 err = srp_post_send(target, iu, len);
1072 if (err) {
David Dillowbb125882010-10-08 14:40:47 -04001073 shost_printk(KERN_ERR, target->scsi_host, PFX
1074 "unable to post response: %d\n", err);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001075 srp_put_tx_iu(target, iu, SRP_IU_RSP);
1076 }
David Dillowbb125882010-10-08 14:40:47 -04001077
David Dillowbb125882010-10-08 14:40:47 -04001078 return err;
1079}
1080
1081static void srp_process_cred_req(struct srp_target_port *target,
1082 struct srp_cred_req *req)
1083{
1084 struct srp_cred_rsp rsp = {
1085 .opcode = SRP_CRED_RSP,
1086 .tag = req->tag,
1087 };
1088 s32 delta = be32_to_cpu(req->req_lim_delta);
1089
1090 if (srp_response_common(target, delta, &rsp, sizeof rsp))
1091 shost_printk(KERN_ERR, target->scsi_host, PFX
1092 "problems processing SRP_CRED_REQ\n");
1093}
1094
1095static void srp_process_aer_req(struct srp_target_port *target,
1096 struct srp_aer_req *req)
1097{
1098 struct srp_aer_rsp rsp = {
1099 .opcode = SRP_AER_RSP,
1100 .tag = req->tag,
1101 };
1102 s32 delta = be32_to_cpu(req->req_lim_delta);
1103
1104 shost_printk(KERN_ERR, target->scsi_host, PFX
1105 "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));
1106
1107 if (srp_response_common(target, delta, &rsp, sizeof rsp))
1108 shost_printk(KERN_ERR, target->scsi_host, PFX
1109 "problems processing SRP_AER_REQ\n");
1110}
1111
Roland Dreieraef9ec32005-11-02 14:07:13 -08001112static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
1113{
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001114 struct ib_device *dev = target->srp_host->srp_dev->dev;
1115 struct srp_iu *iu = (struct srp_iu *) wc->wr_id;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001116 int res;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001117 u8 opcode;
1118
Ralph Campbell85507bc2006-12-12 14:30:55 -08001119 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
1120 DMA_FROM_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001121
1122 opcode = *(u8 *) iu->buf;
1123
1124 if (0) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001125 shost_printk(KERN_ERR, target->scsi_host,
1126 PFX "recv completion, opcode 0x%02x\n", opcode);
Bart Van Assche7a700812010-07-29 15:56:37 +00001127 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
1128 iu->buf, wc->byte_len, true);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001129 }
1130
1131 switch (opcode) {
1132 case SRP_RSP:
1133 srp_process_rsp(target, iu->buf);
1134 break;
1135
David Dillowbb125882010-10-08 14:40:47 -04001136 case SRP_CRED_REQ:
1137 srp_process_cred_req(target, iu->buf);
1138 break;
1139
1140 case SRP_AER_REQ:
1141 srp_process_aer_req(target, iu->buf);
1142 break;
1143
Roland Dreieraef9ec32005-11-02 14:07:13 -08001144 case SRP_T_LOGOUT:
1145 /* XXX Handle target logout */
David Dillow7aa54bd2008-01-07 18:23:41 -05001146 shost_printk(KERN_WARNING, target->scsi_host,
1147 PFX "Got target logout request\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001148 break;
1149
1150 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05001151 shost_printk(KERN_WARNING, target->scsi_host,
1152 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001153 break;
1154 }
1155
Ralph Campbell85507bc2006-12-12 14:30:55 -08001156 ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
1157 DMA_FROM_DEVICE);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001158
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001159 res = srp_post_recv(target, iu);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001160 if (res != 0)
1161 shost_printk(KERN_ERR, target->scsi_host,
1162 PFX "Recv failed with error code %d\n", res);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001163}
1164
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001165static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001166{
1167 struct srp_target_port *target = target_ptr;
1168 struct ib_wc wc;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001169
1170 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
1171 while (ib_poll_cq(cq, 1, &wc) > 0) {
1172 if (wc.status) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001173 shost_printk(KERN_ERR, target->scsi_host,
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001174 PFX "failed receive status %d\n",
David Dillow7aa54bd2008-01-07 18:23:41 -05001175 wc.status);
Ishai Rabinovitz1033ff62007-01-16 17:26:22 +02001176 target->qp_in_error = 1;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001177 break;
1178 }
1179
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001180 srp_handle_recv(target, &wc);
1181 }
1182}
1183
1184static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
1185{
1186 struct srp_target_port *target = target_ptr;
1187 struct ib_wc wc;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001188 struct srp_iu *iu;
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001189
1190 while (ib_poll_cq(cq, 1, &wc) > 0) {
1191 if (wc.status) {
1192 shost_printk(KERN_ERR, target->scsi_host,
1193 PFX "failed send status %d\n",
1194 wc.status);
1195 target->qp_in_error = 1;
1196 break;
1197 }
1198
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001199 iu = (struct srp_iu *) wc.wr_id;
1200 list_add(&iu->list, &target->free_tx);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001201 }
1202}
1203
Bart Van Assche76c75b22010-11-26 14:37:47 -05001204static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001205{
Bart Van Assche76c75b22010-11-26 14:37:47 -05001206 struct srp_target_port *target = host_to_target(shost);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001207 struct srp_request *req;
1208 struct srp_iu *iu;
1209 struct srp_cmd *cmd;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001210 struct ib_device *dev;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001211 unsigned long flags;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001212 int len;
1213
1214 if (target->state == SRP_TARGET_CONNECTING)
1215 goto err;
1216
1217 if (target->state == SRP_TARGET_DEAD ||
1218 target->state == SRP_TARGET_REMOVED) {
1219 scmnd->result = DID_BAD_TARGET << 16;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001220 scmnd->scsi_done(scmnd);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001221 return 0;
1222 }
1223
Bart Van Asschee9684672010-11-26 15:08:38 -05001224 spin_lock_irqsave(&target->lock, flags);
David Dillowbb125882010-10-08 14:40:47 -04001225 iu = __srp_get_tx_iu(target, SRP_IU_CMD);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001226 if (!iu)
Bart Van Assche695b8342011-01-13 19:02:25 +00001227 goto err_unlock;
1228
1229 req = list_first_entry(&target->free_reqs, struct srp_request, list);
1230 list_del(&req->list);
1231 spin_unlock_irqrestore(&target->lock, flags);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001232
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01001233 dev = target->srp_host->srp_dev->dev;
David Dillow49248642011-01-14 18:23:24 -05001234 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08001235 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001236
Roland Dreieraef9ec32005-11-02 14:07:13 -08001237 scmnd->result = 0;
David Dillowf8b6e312010-11-26 13:02:21 -05001238 scmnd->host_scribble = (void *) req;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001239
1240 cmd = iu->buf;
1241 memset(cmd, 0, sizeof *cmd);
1242
1243 cmd->opcode = SRP_CMD;
1244 cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
Roland Dreierd945e1d2006-05-09 10:50:28 -07001245 cmd->tag = req->index;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001246 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
1247
Roland Dreieraef9ec32005-11-02 14:07:13 -08001248 req->scmnd = scmnd;
1249 req->cmd = iu;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001250
1251 len = srp_map_data(scmnd, target, req);
1252 if (len < 0) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001253 shost_printk(KERN_ERR, target->scsi_host,
1254 PFX "Failed to map data\n");
Bart Van Assche76c75b22010-11-26 14:37:47 -05001255 goto err_iu;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001256 }
1257
David Dillow49248642011-01-14 18:23:24 -05001258 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08001259 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001260
Bart Van Assche76c75b22010-11-26 14:37:47 -05001261 if (srp_post_send(target, iu, len)) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001262 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001263 goto err_unmap;
1264 }
1265
Roland Dreieraef9ec32005-11-02 14:07:13 -08001266 return 0;
1267
1268err_unmap:
1269 srp_unmap_data(scmnd, target, req);
1270
Bart Van Assche76c75b22010-11-26 14:37:47 -05001271err_iu:
1272 srp_put_tx_iu(target, iu, SRP_IU_CMD);
1273
Bart Van Asschee9684672010-11-26 15:08:38 -05001274 spin_lock_irqsave(&target->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001275 list_add(&req->list, &target->free_reqs);
Bart Van Assche695b8342011-01-13 19:02:25 +00001276
1277err_unlock:
Bart Van Asschee9684672010-11-26 15:08:38 -05001278 spin_unlock_irqrestore(&target->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001279
Roland Dreieraef9ec32005-11-02 14:07:13 -08001280err:
1281 return SCSI_MLQUEUE_HOST_BUSY;
1282}
1283
1284static int srp_alloc_iu_bufs(struct srp_target_port *target)
1285{
1286 int i;
1287
1288 for (i = 0; i < SRP_RQ_SIZE; ++i) {
1289 target->rx_ring[i] = srp_alloc_iu(target->srp_host,
1290 target->max_ti_iu_len,
1291 GFP_KERNEL, DMA_FROM_DEVICE);
1292 if (!target->rx_ring[i])
1293 goto err;
1294 }
1295
Bart Van Asschedd5e6e32010-08-30 19:27:20 +00001296 for (i = 0; i < SRP_SQ_SIZE; ++i) {
Roland Dreieraef9ec32005-11-02 14:07:13 -08001297 target->tx_ring[i] = srp_alloc_iu(target->srp_host,
David Dillow49248642011-01-14 18:23:24 -05001298 target->max_iu_len,
Roland Dreieraef9ec32005-11-02 14:07:13 -08001299 GFP_KERNEL, DMA_TO_DEVICE);
1300 if (!target->tx_ring[i])
1301 goto err;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001302
1303 list_add(&target->tx_ring[i]->list, &target->free_tx);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001304 }
1305
1306 return 0;
1307
1308err:
1309 for (i = 0; i < SRP_RQ_SIZE; ++i) {
1310 srp_free_iu(target->srp_host, target->rx_ring[i]);
1311 target->rx_ring[i] = NULL;
1312 }
1313
Bart Van Asschedd5e6e32010-08-30 19:27:20 +00001314 for (i = 0; i < SRP_SQ_SIZE; ++i) {
Roland Dreieraef9ec32005-11-02 14:07:13 -08001315 srp_free_iu(target->srp_host, target->tx_ring[i]);
1316 target->tx_ring[i] = NULL;
1317 }
1318
1319 return -ENOMEM;
1320}
1321
David Dillow961e0be2011-01-14 17:32:07 -05001322static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
1323 struct srp_login_rsp *lrsp,
1324 struct srp_target_port *target)
1325{
1326 struct ib_qp_attr *qp_attr = NULL;
1327 int attr_mask = 0;
1328 int ret;
1329 int i;
1330
1331 if (lrsp->opcode == SRP_LOGIN_RSP) {
1332 target->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
1333 target->req_lim = be32_to_cpu(lrsp->req_lim_delta);
1334
1335 /*
1336 * Reserve credits for task management so we don't
1337 * bounce requests back to the SCSI mid-layer.
1338 */
1339 target->scsi_host->can_queue
1340 = min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE,
1341 target->scsi_host->can_queue);
1342 } else {
1343 shost_printk(KERN_WARNING, target->scsi_host,
1344 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
1345 ret = -ECONNRESET;
1346 goto error;
1347 }
1348
1349 if (!target->rx_ring[0]) {
1350 ret = srp_alloc_iu_bufs(target);
1351 if (ret)
1352 goto error;
1353 }
1354
1355 ret = -ENOMEM;
1356 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
1357 if (!qp_attr)
1358 goto error;
1359
1360 qp_attr->qp_state = IB_QPS_RTR;
1361 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
1362 if (ret)
1363 goto error_free;
1364
1365 ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
1366 if (ret)
1367 goto error_free;
1368
1369 for (i = 0; i < SRP_RQ_SIZE; i++) {
1370 struct srp_iu *iu = target->rx_ring[i];
1371 ret = srp_post_recv(target, iu);
1372 if (ret)
1373 goto error_free;
1374 }
1375
1376 qp_attr->qp_state = IB_QPS_RTS;
1377 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
1378 if (ret)
1379 goto error_free;
1380
1381 ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
1382 if (ret)
1383 goto error_free;
1384
1385 ret = ib_send_cm_rtu(cm_id, NULL, 0);
1386
1387error_free:
1388 kfree(qp_attr);
1389
1390error:
1391 target->status = ret;
1392}
1393
Roland Dreieraef9ec32005-11-02 14:07:13 -08001394static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
1395 struct ib_cm_event *event,
1396 struct srp_target_port *target)
1397{
David Dillow7aa54bd2008-01-07 18:23:41 -05001398 struct Scsi_Host *shost = target->scsi_host;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001399 struct ib_class_port_info *cpi;
1400 int opcode;
1401
1402 switch (event->param.rej_rcvd.reason) {
1403 case IB_CM_REJ_PORT_CM_REDIRECT:
1404 cpi = event->param.rej_rcvd.ari;
1405 target->path.dlid = cpi->redirect_lid;
1406 target->path.pkey = cpi->redirect_pkey;
1407 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
1408 memcpy(target->path.dgid.raw, cpi->redirect_gid, 16);
1409
1410 target->status = target->path.dlid ?
1411 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
1412 break;
1413
1414 case IB_CM_REJ_PORT_REDIRECT:
Roland Dreier5d7cbfd2007-08-03 10:45:18 -07001415 if (srp_target_is_topspin(target)) {
Roland Dreieraef9ec32005-11-02 14:07:13 -08001416 /*
1417 * Topspin/Cisco SRP gateways incorrectly send
1418 * reject reason code 25 when they mean 24
1419 * (port redirect).
1420 */
1421 memcpy(target->path.dgid.raw,
1422 event->param.rej_rcvd.ari, 16);
1423
David Dillow7aa54bd2008-01-07 18:23:41 -05001424 shost_printk(KERN_DEBUG, shost,
1425 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
1426 (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
1427 (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));
Roland Dreieraef9ec32005-11-02 14:07:13 -08001428
1429 target->status = SRP_PORT_REDIRECT;
1430 } else {
David Dillow7aa54bd2008-01-07 18:23:41 -05001431 shost_printk(KERN_WARNING, shost,
1432 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001433 target->status = -ECONNRESET;
1434 }
1435 break;
1436
1437 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
David Dillow7aa54bd2008-01-07 18:23:41 -05001438 shost_printk(KERN_WARNING, shost,
1439 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001440 target->status = -ECONNRESET;
1441 break;
1442
1443 case IB_CM_REJ_CONSUMER_DEFINED:
1444 opcode = *(u8 *) event->private_data;
1445 if (opcode == SRP_LOGIN_REJ) {
1446 struct srp_login_rej *rej = event->private_data;
1447 u32 reason = be32_to_cpu(rej->reason);
1448
1449 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
David Dillow7aa54bd2008-01-07 18:23:41 -05001450 shost_printk(KERN_WARNING, shost,
1451 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001452 else
David Dillow7aa54bd2008-01-07 18:23:41 -05001453 shost_printk(KERN_WARNING, shost,
1454 PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001455 } else
David Dillow7aa54bd2008-01-07 18:23:41 -05001456 shost_printk(KERN_WARNING, shost,
1457 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
1458 " opcode 0x%02x\n", opcode);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001459 target->status = -ECONNRESET;
1460 break;
1461
David Dillow9fe4bcf2008-01-08 17:08:52 -05001462 case IB_CM_REJ_STALE_CONN:
1463 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
1464 target->status = SRP_STALE_CONN;
1465 break;
1466
Roland Dreieraef9ec32005-11-02 14:07:13 -08001467 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05001468 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
1469 event->param.rej_rcvd.reason);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001470 target->status = -ECONNRESET;
1471 }
1472}
1473
/*
 * Connection-manager event callback for the target's CM ID.  Records the
 * outcome of the event in target->status and, for events that conclude a
 * connection attempt (comp set), completes target->done.  Always returns 0.
 */
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct srp_target_port *target = cm_id->context;
	int comp = 0;

	switch (event->event) {
	case IB_CM_REQ_ERROR:
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Sending CM REQ failed\n");
		comp = 1;
		target->status = -ECONNRESET;
		break;

	case IB_CM_REP_RECEIVED:
		/* Login accepted: bring the QP up and send the RTU. */
		comp = 1;
		srp_cm_rep_handler(cm_id, event->private_data, target);
		break;

	case IB_CM_REJ_RECEIVED:
		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
		comp = 1;

		srp_cm_rej_handler(cm_id, event, target);
		break;

	case IB_CM_DREQ_RECEIVED:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "DREQ received - connection closed\n");
		if (ib_send_cm_drep(cm_id, NULL, 0))
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "Sending CM DREP failed\n");
		break;

	case IB_CM_TIMEWAIT_EXIT:
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "connection closed\n");

		comp = 1;
		target->status = 0;
		break;

	case IB_CM_MRA_RECEIVED:
	case IB_CM_DREQ_ERROR:
	case IB_CM_DREP_RECEIVED:
		/* Nothing to do for these events. */
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	if (comp)
		complete(&target->done);

	return 0;
}
1531
Roland Dreierd945e1d2006-05-09 10:50:28 -07001532static int srp_send_tsk_mgmt(struct srp_target_port *target,
David Dillowf8b6e312010-11-26 13:02:21 -05001533 u64 req_tag, unsigned int lun, u8 func)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001534{
David Dillow19081f32010-10-18 08:54:49 -04001535 struct ib_device *dev = target->srp_host->srp_dev->dev;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001536 struct srp_iu *iu;
1537 struct srp_tsk_mgmt *tsk_mgmt;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001538
Roland Dreier1285b3a2006-03-03 15:47:25 -08001539 if (target->state == SRP_TARGET_DEAD ||
David Dillowf8b6e312010-11-26 13:02:21 -05001540 target->state == SRP_TARGET_REMOVED)
Bart Van Assche76c75b22010-11-26 14:37:47 -05001541 return -1;
Roland Dreier1285b3a2006-03-03 15:47:25 -08001542
David Dillowf8b6e312010-11-26 13:02:21 -05001543 init_completion(&target->tsk_mgmt_done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001544
Bart Van Asschee9684672010-11-26 15:08:38 -05001545 spin_lock_irq(&target->lock);
David Dillowbb125882010-10-08 14:40:47 -04001546 iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
Bart Van Asschee9684672010-11-26 15:08:38 -05001547 spin_unlock_irq(&target->lock);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001548
Roland Dreieraef9ec32005-11-02 14:07:13 -08001549 if (!iu)
Bart Van Assche76c75b22010-11-26 14:37:47 -05001550 return -1;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001551
David Dillow19081f32010-10-18 08:54:49 -04001552 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
1553 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001554 tsk_mgmt = iu->buf;
1555 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
1556
1557 tsk_mgmt->opcode = SRP_TSK_MGMT;
David Dillowf8b6e312010-11-26 13:02:21 -05001558 tsk_mgmt->lun = cpu_to_be64((u64) lun << 48);
1559 tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001560 tsk_mgmt->tsk_mgmt_func = func;
David Dillowf8b6e312010-11-26 13:02:21 -05001561 tsk_mgmt->task_tag = req_tag;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001562
David Dillow19081f32010-10-18 08:54:49 -04001563 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
1564 DMA_TO_DEVICE);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001565 if (srp_post_send(target, iu, sizeof *tsk_mgmt)) {
1566 srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT);
1567 return -1;
1568 }
Roland Dreierd945e1d2006-05-09 10:50:28 -07001569
David Dillowf8b6e312010-11-26 13:02:21 -05001570 if (!wait_for_completion_timeout(&target->tsk_mgmt_done,
Roland Dreieraef9ec32005-11-02 14:07:13 -08001571 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
Roland Dreierd945e1d2006-05-09 10:50:28 -07001572 return -1;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001573
Roland Dreierd945e1d2006-05-09 10:50:28 -07001574 return 0;
Roland Dreierd945e1d2006-05-09 10:50:28 -07001575}
1576
Roland Dreieraef9ec32005-11-02 14:07:13 -08001577static int srp_abort(struct scsi_cmnd *scmnd)
1578{
Roland Dreierd945e1d2006-05-09 10:50:28 -07001579 struct srp_target_port *target = host_to_target(scmnd->device->host);
David Dillowf8b6e312010-11-26 13:02:21 -05001580 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
Roland Dreierd945e1d2006-05-09 10:50:28 -07001581 int ret = SUCCESS;
1582
David Dillow7aa54bd2008-01-07 18:23:41 -05001583 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001584
David Dillowf8b6e312010-11-26 13:02:21 -05001585 if (!req || target->qp_in_error)
Ishai Rabinovitz1033ff62007-01-16 17:26:22 +02001586 return FAILED;
David Dillowf8b6e312010-11-26 13:02:21 -05001587 if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
1588 SRP_TSK_ABORT_TASK))
Roland Dreierd945e1d2006-05-09 10:50:28 -07001589 return FAILED;
1590
David Dillowf8b6e312010-11-26 13:02:21 -05001591 if (req->scmnd) {
1592 if (!target->tsk_mgmt_status) {
Bart Van Assche94a91742010-11-26 14:50:09 -05001593 srp_remove_req(target, req, 0);
David Dillowf8b6e312010-11-26 13:02:21 -05001594 scmnd->result = DID_ABORT << 16;
1595 } else
1596 ret = FAILED;
1597 }
Roland Dreierd945e1d2006-05-09 10:50:28 -07001598
Roland Dreierd945e1d2006-05-09 10:50:28 -07001599 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001600}
1601
1602static int srp_reset_device(struct scsi_cmnd *scmnd)
1603{
Roland Dreierd945e1d2006-05-09 10:50:28 -07001604 struct srp_target_port *target = host_to_target(scmnd->device->host);
Bart Van Assche536ae142010-11-26 13:58:27 -05001605 int i;
Roland Dreierd945e1d2006-05-09 10:50:28 -07001606
David Dillow7aa54bd2008-01-07 18:23:41 -05001607 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001608
Ishai Rabinovitz1033ff62007-01-16 17:26:22 +02001609 if (target->qp_in_error)
1610 return FAILED;
David Dillowf8b6e312010-11-26 13:02:21 -05001611 if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun,
1612 SRP_TSK_LUN_RESET))
Roland Dreierd945e1d2006-05-09 10:50:28 -07001613 return FAILED;
David Dillowf8b6e312010-11-26 13:02:21 -05001614 if (target->tsk_mgmt_status)
Roland Dreierd945e1d2006-05-09 10:50:28 -07001615 return FAILED;
1616
Bart Van Assche536ae142010-11-26 13:58:27 -05001617 for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
1618 struct srp_request *req = &target->req_ring[i];
David Dillowf8b6e312010-11-26 13:02:21 -05001619 if (req->scmnd && req->scmnd->device == scmnd->device)
Ishai Rabinovitz526b4ca2006-06-17 20:37:38 -07001620 srp_reset_req(target, req);
Bart Van Assche536ae142010-11-26 13:58:27 -05001621 }
Roland Dreierd945e1d2006-05-09 10:50:28 -07001622
Roland Dreierd945e1d2006-05-09 10:50:28 -07001623 return SUCCESS;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001624}
1625
1626static int srp_reset_host(struct scsi_cmnd *scmnd)
1627{
1628 struct srp_target_port *target = host_to_target(scmnd->device->host);
1629 int ret = FAILED;
1630
David Dillow7aa54bd2008-01-07 18:23:41 -05001631 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001632
1633 if (!srp_reconnect_target(target))
1634 ret = SUCCESS;
1635
1636 return ret;
1637}
1638
Tony Jonesee959b02008-02-22 00:13:36 +01001639static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
1640 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08001641{
Tony Jonesee959b02008-02-22 00:13:36 +01001642 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08001643
1644 if (target->state == SRP_TARGET_DEAD ||
1645 target->state == SRP_TARGET_REMOVED)
1646 return -ENODEV;
1647
1648 return sprintf(buf, "0x%016llx\n",
1649 (unsigned long long) be64_to_cpu(target->id_ext));
1650}
1651
Tony Jonesee959b02008-02-22 00:13:36 +01001652static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
1653 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08001654{
Tony Jonesee959b02008-02-22 00:13:36 +01001655 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08001656
1657 if (target->state == SRP_TARGET_DEAD ||
1658 target->state == SRP_TARGET_REMOVED)
1659 return -ENODEV;
1660
1661 return sprintf(buf, "0x%016llx\n",
1662 (unsigned long long) be64_to_cpu(target->ioc_guid));
1663}
1664
Tony Jonesee959b02008-02-22 00:13:36 +01001665static ssize_t show_service_id(struct device *dev,
1666 struct device_attribute *attr, char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08001667{
Tony Jonesee959b02008-02-22 00:13:36 +01001668 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08001669
1670 if (target->state == SRP_TARGET_DEAD ||
1671 target->state == SRP_TARGET_REMOVED)
1672 return -ENODEV;
1673
1674 return sprintf(buf, "0x%016llx\n",
1675 (unsigned long long) be64_to_cpu(target->service_id));
1676}
1677
Tony Jonesee959b02008-02-22 00:13:36 +01001678static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
1679 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08001680{
Tony Jonesee959b02008-02-22 00:13:36 +01001681 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08001682
1683 if (target->state == SRP_TARGET_DEAD ||
1684 target->state == SRP_TARGET_REMOVED)
1685 return -ENODEV;
1686
1687 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
1688}
1689
Tony Jonesee959b02008-02-22 00:13:36 +01001690static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
1691 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08001692{
Tony Jonesee959b02008-02-22 00:13:36 +01001693 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08001694
1695 if (target->state == SRP_TARGET_DEAD ||
1696 target->state == SRP_TARGET_REMOVED)
1697 return -ENODEV;
1698
Harvey Harrison5b095d9892008-10-29 12:52:50 -07001699 return sprintf(buf, "%pI6\n", target->path.dgid.raw);
Roland Dreier6ecb0c82006-03-20 10:08:23 -08001700}
1701
Tony Jonesee959b02008-02-22 00:13:36 +01001702static ssize_t show_orig_dgid(struct device *dev,
1703 struct device_attribute *attr, char *buf)
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07001704{
Tony Jonesee959b02008-02-22 00:13:36 +01001705 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07001706
1707 if (target->state == SRP_TARGET_DEAD ||
1708 target->state == SRP_TARGET_REMOVED)
1709 return -ENODEV;
1710
Harvey Harrison5b095d9892008-10-29 12:52:50 -07001711 return sprintf(buf, "%pI6\n", target->orig_dgid);
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07001712}
1713
Bart Van Assche89de7482010-08-03 14:08:45 +00001714static ssize_t show_req_lim(struct device *dev,
1715 struct device_attribute *attr, char *buf)
1716{
1717 struct srp_target_port *target = host_to_target(class_to_shost(dev));
1718
1719 if (target->state == SRP_TARGET_DEAD ||
1720 target->state == SRP_TARGET_REMOVED)
1721 return -ENODEV;
1722
1723 return sprintf(buf, "%d\n", target->req_lim);
1724}
1725
Tony Jonesee959b02008-02-22 00:13:36 +01001726static ssize_t show_zero_req_lim(struct device *dev,
1727 struct device_attribute *attr, char *buf)
Roland Dreier6bfa24f2006-06-17 20:37:33 -07001728{
Tony Jonesee959b02008-02-22 00:13:36 +01001729 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6bfa24f2006-06-17 20:37:33 -07001730
1731 if (target->state == SRP_TARGET_DEAD ||
1732 target->state == SRP_TARGET_REMOVED)
1733 return -ENODEV;
1734
1735 return sprintf(buf, "%d\n", target->zero_req_lim);
1736}
1737
Tony Jonesee959b02008-02-22 00:13:36 +01001738static ssize_t show_local_ib_port(struct device *dev,
1739 struct device_attribute *attr, char *buf)
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03001740{
Tony Jonesee959b02008-02-22 00:13:36 +01001741 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03001742
1743 return sprintf(buf, "%d\n", target->srp_host->port);
1744}
1745
Tony Jonesee959b02008-02-22 00:13:36 +01001746static ssize_t show_local_ib_device(struct device *dev,
1747 struct device_attribute *attr, char *buf)
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03001748{
Tony Jonesee959b02008-02-22 00:13:36 +01001749 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03001750
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01001751 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03001752}
1753
David Dillow49248642011-01-14 18:23:24 -05001754static ssize_t show_cmd_sg_entries(struct device *dev,
1755 struct device_attribute *attr, char *buf)
1756{
1757 struct srp_target_port *target = host_to_target(class_to_shost(dev));
1758
1759 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
1760}
1761
/* Read-only sysfs attributes exposed for every SRP target's SCSI host. */
static DEVICE_ATTR(id_ext,	    S_IRUGO, show_id_ext,	   NULL);
static DEVICE_ATTR(ioc_guid,	    S_IRUGO, show_ioc_guid,	   NULL);
static DEVICE_ATTR(service_id,	    S_IRUGO, show_service_id,	   NULL);
static DEVICE_ATTR(pkey,	    S_IRUGO, show_pkey,		   NULL);
static DEVICE_ATTR(dgid,	    S_IRUGO, show_dgid,		   NULL);
static DEVICE_ATTR(orig_dgid,	    S_IRUGO, show_orig_dgid,	   NULL);
static DEVICE_ATTR(req_lim,         S_IRUGO, show_req_lim,         NULL);
static DEVICE_ATTR(zero_req_lim,    S_IRUGO, show_zero_req_lim,	   NULL);
static DEVICE_ATTR(local_ib_port,   S_IRUGO, show_local_ib_port,   NULL);
static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
static DEVICE_ATTR(cmd_sg_entries,  S_IRUGO, show_cmd_sg_entries,  NULL);
Roland Dreier6ecb0c82006-03-20 10:08:23 -08001773
/* NULL-terminated attribute list wired into srp_template.shost_attrs. */
static struct device_attribute *srp_host_attrs[] = {
	&dev_attr_id_ext,
	&dev_attr_ioc_guid,
	&dev_attr_service_id,
	&dev_attr_pkey,
	&dev_attr_dgid,
	&dev_attr_orig_dgid,
	&dev_attr_req_lim,
	&dev_attr_zero_req_lim,
	&dev_attr_local_ib_port,
	&dev_attr_local_ib_device,
	&dev_attr_cmd_sg_entries,
	NULL
};
1788
/* SCSI host template shared by all SRP target ports created by this driver. */
static struct scsi_host_template srp_template = {
	.module				= THIS_MODULE,
	.name				= "InfiniBand SRP initiator",
	.proc_name			= DRV_NAME,
	.info				= srp_target_info,
	.queuecommand			= srp_queuecommand,
	.eh_abort_handler		= srp_abort,
	.eh_device_reset_handler	= srp_reset_device,
	.eh_host_reset_handler		= srp_reset_host,
	.sg_tablesize			= SRP_DEF_SG_TABLESIZE,
	/* Queue depths bounded by the SRP send-queue size. */
	.can_queue			= SRP_CMD_SQ_SIZE,
	.this_id			= -1,
	.cmd_per_lun			= SRP_CMD_SQ_SIZE,
	.use_clustering			= ENABLE_CLUSTERING,
	.shost_attrs			= srp_host_attrs
};
1805
/*
 * Register a freshly connected target with the SCSI midlayer and the
 * SRP transport class, link it into the host's target list, mark it
 * live and kick off a LUN scan.  Returns 0 or a negative errno.
 */
static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
	struct srp_rport_identifiers ids;
	struct srp_rport *rport;

	sprintf(target->target_name, "SRP.T10:%016llX",
		 (unsigned long long) be64_to_cpu(target->id_ext));

	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
		return -ENODEV;

	/* The rport identifier is the 16-byte (id_ext, ioc_guid) pair. */
	memcpy(ids.port_id, &target->id_ext, 8);
	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
	ids.roles = SRP_RPORT_ROLE_TARGET;
	rport = srp_rport_add(target->scsi_host, &ids);
	if (IS_ERR(rport)) {
		/* Undo the scsi_add_host() above before bailing out. */
		scsi_remove_host(target->scsi_host);
		return PTR_ERR(rport);
	}

	spin_lock(&host->target_lock);
	list_add_tail(&target->list, &host->target_list);
	spin_unlock(&host->target_lock);

	target->state = SRP_TARGET_LIVE;

	/* Scan all LUNs behind this target's SCSI id. */
	scsi_scan_target(&target->scsi_host->shost_gendev,
			 0, target->scsi_id, SCAN_WILD_CARD, 0);

	return 0;
}
1837
Tony Jonesee959b02008-02-22 00:13:36 +01001838static void srp_release_dev(struct device *dev)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001839{
1840 struct srp_host *host =
Tony Jonesee959b02008-02-22 00:13:36 +01001841 container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001842
1843 complete(&host->released);
1844}
1845
/* Class for the per-port "srp-<dev>-<port>" devices under /sys/class. */
static struct class srp_class = {
	.name    = "infiniband_srp",
	.dev_release = srp_release_dev
};
1850
/*
 * Target ports are added by writing
 *
 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
 *     pkey=<P_Key>,service_id=<service ID>
 *
 * to the add_target sysfs attribute.  Further optional parameters are
 * listed in srp_opt_tokens below.
 */
enum {
	SRP_OPT_ERR		= 0,
	SRP_OPT_ID_EXT		= 1 << 0,
	SRP_OPT_IOC_GUID	= 1 << 1,
	SRP_OPT_DGID		= 1 << 2,
	SRP_OPT_PKEY		= 1 << 3,
	SRP_OPT_SERVICE_ID	= 1 << 4,
	SRP_OPT_MAX_SECT	= 1 << 5,
	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
	SRP_OPT_IO_CLASS	= 1 << 7,
	SRP_OPT_INITIATOR_EXT	= 1 << 8,
	SRP_OPT_CMD_SG_ENTRIES	= 1 << 9,
	/* The mandatory subset; parsing fails unless all of these appear. */
	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
				   SRP_OPT_IOC_GUID	|
				   SRP_OPT_DGID		|
				   SRP_OPT_PKEY		|
				   SRP_OPT_SERVICE_ID),
};

/* Token patterns understood by srp_parse_options(). */
static const match_table_t srp_opt_tokens = {
	{ SRP_OPT_ID_EXT,		"id_ext=%s" 		},
	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s" 		},
	{ SRP_OPT_DGID,			"dgid=%s" 		},
	{ SRP_OPT_PKEY,			"pkey=%x" 		},
	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
	{ SRP_OPT_MAX_SECT,		"max_sect=%d" 		},
	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d" 	},
	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u"	},
	{ SRP_OPT_ERR,			NULL 			}
};
1891
/*
 * Parse the comma-separated option string written to add_target into
 * @target.  Returns 0 on success, -ENOMEM on allocation failure, or
 * -EINVAL if an option is malformed or a mandatory one is missing.
 */
static int srp_parse_options(const char *buf, struct srp_target_port *target)
{
	char *options, *sep_opt;
	char *p;
	char dgid[3];
	substring_t args[MAX_OPT_ARGS];
	int opt_mask = 0;
	int token;
	int ret = -EINVAL;
	int i;

	/* Work on a writable copy since strsep() modifies its argument. */
	options = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	sep_opt = options;
	while ((p = strsep(&sep_opt, ",")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, srp_opt_tokens, args);
		opt_mask |= token;

		switch (token) {
		case SRP_OPT_ID_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_IOC_GUID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_DGID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(p) != 32) {
				printk(KERN_WARNING PFX "bad dest GID parameter '%s'\n", p);
				kfree(p);
				goto out;
			}

			/* Convert 32 hex digits, two at a time, to 16 raw bytes. */
			for (i = 0; i < 16; ++i) {
				strlcpy(dgid, p + i * 2, 3);
				target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16);
			}
			kfree(p);
			/* Remember the GID as given, for the orig_dgid attribute. */
			memcpy(target->orig_dgid, target->path.dgid.raw, 16);
			break;

		case SRP_OPT_PKEY:
			if (match_hex(args, &token)) {
				printk(KERN_WARNING PFX "bad P_Key parameter '%s'\n", p);
				goto out;
			}
			target->path.pkey = cpu_to_be16(token);
			break;

		case SRP_OPT_SERVICE_ID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
			target->path.service_id = target->service_id;
			kfree(p);
			break;

		case SRP_OPT_MAX_SECT:
			if (match_int(args, &token)) {
				printk(KERN_WARNING PFX "bad max sect parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->max_sectors = token;
			break;

		case SRP_OPT_MAX_CMD_PER_LUN:
			if (match_int(args, &token)) {
				printk(KERN_WARNING PFX "bad max cmd_per_lun parameter '%s'\n", p);
				goto out;
			}
			/* Never exceed the SRP send-queue depth. */
			target->scsi_host->cmd_per_lun = min(token, SRP_CMD_SQ_SIZE);
			break;

		case SRP_OPT_IO_CLASS:
			if (match_hex(args, &token)) {
				printk(KERN_WARNING PFX "bad  IO class parameter '%s' \n", p);
				goto out;
			}
			/* Only the rev 10 and rev 16a IB I/O classes are valid. */
			if (token != SRP_REV10_IB_IO_CLASS &&
			    token != SRP_REV16A_IB_IO_CLASS) {
				printk(KERN_WARNING PFX "unknown IO class parameter value"
				       " %x specified (use %x or %x).\n",
				       token, SRP_REV10_IB_IO_CLASS, SRP_REV16A_IB_IO_CLASS);
				goto out;
			}
			target->io_class = token;
			break;

		case SRP_OPT_INITIATOR_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_CMD_SG_ENTRIES:
			if (match_int(args, &token) || token < 1 || token > 255) {
				printk(KERN_WARNING PFX "bad max cmd_sg_entries parameter '%s'\n", p);
				goto out;
			}
			target->cmd_sg_cnt = token;
			break;

		default:
			printk(KERN_WARNING PFX "unknown parameter or missing value "
			       "'%s' in target creation request\n", p);
			goto out;
		}
	}

	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
		ret = 0;
	else
		/* Report every mandatory option that is missing. */
		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
			    !(srp_opt_tokens[i].token & opt_mask))
				printk(KERN_WARNING PFX "target creation request is "
				       "missing parameter '%s'\n",
				       srp_opt_tokens[i].pattern);

out:
	kfree(options);
	return ret;
}
2045
Tony Jonesee959b02008-02-22 00:13:36 +01002046static ssize_t srp_create_target(struct device *dev,
2047 struct device_attribute *attr,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002048 const char *buf, size_t count)
2049{
2050 struct srp_host *host =
Tony Jonesee959b02008-02-22 00:13:36 +01002051 container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002052 struct Scsi_Host *target_host;
2053 struct srp_target_port *target;
David Dillow8f26c9f2011-01-14 19:45:50 -05002054 int i, ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002055
2056 target_host = scsi_host_alloc(&srp_template,
2057 sizeof (struct srp_target_port));
2058 if (!target_host)
2059 return -ENOMEM;
2060
David Dillow49248642011-01-14 18:23:24 -05002061 target_host->transportt = ib_srp_transport_template;
Arne Redlich3c8edf02006-11-15 12:43:00 +01002062 target_host->max_lun = SRP_MAX_LUN;
2063 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
Roland Dreier5f068992005-11-11 14:06:01 -08002064
Roland Dreieraef9ec32005-11-02 14:07:13 -08002065 target = host_to_target(target_host);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002066
David Dillow49248642011-01-14 18:23:24 -05002067 target->io_class = SRP_REV16A_IB_IO_CLASS;
2068 target->scsi_host = target_host;
2069 target->srp_host = host;
2070 target->lkey = host->srp_dev->mr->lkey;
2071 target->rkey = host->srp_dev->mr->rkey;
2072 target->cmd_sg_cnt = cmd_sg_entries;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002073
Roland Dreieraef9ec32005-11-02 14:07:13 -08002074 ret = srp_parse_options(buf, target);
2075 if (ret)
2076 goto err;
2077
David Dillow49248642011-01-14 18:23:24 -05002078 target_host->sg_tablesize = target->cmd_sg_cnt;
2079 target->max_iu_len = sizeof (struct srp_cmd) +
2080 sizeof (struct srp_indirect_buf) +
2081 target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
2082
David Dillow8f26c9f2011-01-14 19:45:50 -05002083 spin_lock_init(&target->lock);
2084 INIT_LIST_HEAD(&target->free_tx);
2085 INIT_LIST_HEAD(&target->free_reqs);
2086 for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
2087 struct srp_request *req = &target->req_ring[i];
2088
2089 req->fmr_list = kmalloc(target->cmd_sg_cnt * sizeof (void *),
2090 GFP_KERNEL);
2091 req->map_page = kmalloc(SRP_FMR_SIZE * sizeof (void *),
2092 GFP_KERNEL);
2093 if (!req->fmr_list || !req->map_page)
2094 goto err_free_mem;
2095
2096 req->index = i;
2097 list_add_tail(&req->list, &target->free_reqs);
2098 }
2099
Roland Dreier969a60f2008-07-14 23:48:43 -07002100 ib_query_gid(host->srp_dev->dev, host->port, 0, &target->path.sgid);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002101
David Dillow7aa54bd2008-01-07 18:23:41 -05002102 shost_printk(KERN_DEBUG, target->scsi_host, PFX
2103 "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
Harvey Harrison5b095d9892008-10-29 12:52:50 -07002104 "service_id %016llx dgid %pI6\n",
Roland Dreieraef9ec32005-11-02 14:07:13 -08002105 (unsigned long long) be64_to_cpu(target->id_ext),
2106 (unsigned long long) be64_to_cpu(target->ioc_guid),
2107 be16_to_cpu(target->path.pkey),
2108 (unsigned long long) be64_to_cpu(target->service_id),
Harvey Harrison8867cd72008-10-28 22:36:33 -07002109 target->path.dgid.raw);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002110
2111 ret = srp_create_target_ib(target);
2112 if (ret)
David Dillow8f26c9f2011-01-14 19:45:50 -05002113 goto err_free_mem;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002114
David Dillow9fe4bcf2008-01-08 17:08:52 -05002115 ret = srp_new_cm_id(target);
2116 if (ret)
David Dillow8f26c9f2011-01-14 19:45:50 -05002117 goto err_free_ib;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002118
Ishai Rabinovitz1033ff62007-01-16 17:26:22 +02002119 target->qp_in_error = 0;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002120 ret = srp_connect_target(target);
2121 if (ret) {
David Dillow7aa54bd2008-01-07 18:23:41 -05002122 shost_printk(KERN_ERR, target->scsi_host,
2123 PFX "Connection failed\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002124 goto err_cm_id;
2125 }
2126
2127 ret = srp_add_target(host, target);
2128 if (ret)
2129 goto err_disconnect;
2130
2131 return count;
2132
2133err_disconnect:
2134 srp_disconnect_target(target);
2135
2136err_cm_id:
2137 ib_destroy_cm_id(target->cm_id);
2138
David Dillow8f26c9f2011-01-14 19:45:50 -05002139err_free_ib:
Roland Dreieraef9ec32005-11-02 14:07:13 -08002140 srp_free_target_ib(target);
2141
David Dillow8f26c9f2011-01-14 19:45:50 -05002142err_free_mem:
2143 srp_free_req_data(target);
2144
Roland Dreieraef9ec32005-11-02 14:07:13 -08002145err:
2146 scsi_host_put(target_host);
2147
2148 return ret;
2149}
2150
Tony Jonesee959b02008-02-22 00:13:36 +01002151static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002152
Tony Jonesee959b02008-02-22 00:13:36 +01002153static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
2154 char *buf)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002155{
Tony Jonesee959b02008-02-22 00:13:36 +01002156 struct srp_host *host = container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002157
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002158 return sprintf(buf, "%s\n", host->srp_dev->dev->name);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002159}
2160
Tony Jonesee959b02008-02-22 00:13:36 +01002161static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002162
Tony Jonesee959b02008-02-22 00:13:36 +01002163static ssize_t show_port(struct device *dev, struct device_attribute *attr,
2164 char *buf)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002165{
Tony Jonesee959b02008-02-22 00:13:36 +01002166 struct srp_host *host = container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002167
2168 return sprintf(buf, "%d\n", host->port);
2169}
2170
Tony Jonesee959b02008-02-22 00:13:36 +01002171static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002172
/*
 * Allocate, initialize and register one srp_host for HCA port @port of
 * @device, including its sysfs attributes.  Returns the new host, or
 * NULL on any failure.
 */
static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	spin_lock_init(&host->target_lock);
	init_completion(&host->released);
	host->srp_dev = device;
	host->port = port;

	host->dev.class = &srp_class;
	host->dev.parent = device->dev->dma_device;
	dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);

	if (device_register(&host->dev))
		goto free_host;
	/*
	 * NOTE(review): once device_register() has been called (even if it
	 * failed), the device model normally requires put_device() rather
	 * than kfree() for cleanup -- confirm against the driver core docs.
	 */
	if (device_create_file(&host->dev, &dev_attr_add_target))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_ibdev))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_port))
		goto err_class;

	return host;

err_class:
	device_unregister(&host->dev);

free_host:
	kfree(host);

	return NULL;
}
2210
/*
 * IB client callback invoked when a new HCA is registered.  Sets up the
 * per-device SRP state (PD, DMA MR, optional FMR pool) and creates one
 * srp_host per physical port.
 */
static void srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct ib_device_attr *dev_attr;
	struct ib_fmr_pool_param fmr_param;
	struct srp_host *host;
	int fmr_page_shift, s, e, p;

	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
	if (!dev_attr)
		return;

	if (ib_query_device(device, dev_attr)) {
		printk(KERN_WARNING PFX "Query device failed for %s\n",
		       device->name);
		goto free_attr;
	}

	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
	if (!srp_dev)
		goto free_attr;

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 4096 bytes. We're unlikely to build large sglists
	 * out of smaller entries.
	 */
	fmr_page_shift 		= max(12, ffs(dev_attr->page_size_cap) - 1);
	srp_dev->fmr_page_size	= 1 << fmr_page_shift;
	srp_dev->fmr_page_mask	= ~((u64) srp_dev->fmr_page_size - 1);
	srp_dev->fmr_max_size	= srp_dev->fmr_page_size * SRP_FMR_SIZE;

	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd  = ib_alloc_pd(device);
	if (IS_ERR(srp_dev->pd))
		goto free_dev;

	srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
				    IB_ACCESS_LOCAL_WRITE |
				    IB_ACCESS_REMOTE_READ |
				    IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(srp_dev->mr))
		goto err_pd;

	memset(&fmr_param, 0, sizeof fmr_param);
	fmr_param.pool_size	    = SRP_FMR_POOL_SIZE;
	fmr_param.dirty_watermark   = SRP_FMR_DIRTY_SIZE;
	fmr_param.cache		    = 1;
	fmr_param.max_pages_per_fmr = SRP_FMR_SIZE;
	fmr_param.page_shift	    = fmr_page_shift;
	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_WRITE |
				       IB_ACCESS_REMOTE_READ);

	/* The FMR pool is an optimization; run without one if unsupported. */
	srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param);
	if (IS_ERR(srp_dev->fmr_pool))
		srp_dev->fmr_pool = NULL;

	/* Switches expose a single port 0; HCAs number ports from 1. */
	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);

	goto free_attr;

err_pd:
	ib_dealloc_pd(srp_dev->pd);

free_dev:
	kfree(srp_dev);

free_attr:
	kfree(dev_attr);
}
2298
2299static void srp_remove_one(struct ib_device *device)
2300{
Roland Dreierf5358a12006-06-17 20:37:29 -07002301 struct srp_device *srp_dev;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002302 struct srp_host *host, *tmp_host;
2303 LIST_HEAD(target_list);
2304 struct srp_target_port *target, *tmp_target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002305
Roland Dreierf5358a12006-06-17 20:37:29 -07002306 srp_dev = ib_get_client_data(device, &srp_client);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002307
Roland Dreierf5358a12006-06-17 20:37:29 -07002308 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
Tony Jonesee959b02008-02-22 00:13:36 +01002309 device_unregister(&host->dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002310 /*
2311 * Wait for the sysfs entry to go away, so that no new
2312 * target ports can be created.
2313 */
2314 wait_for_completion(&host->released);
2315
2316 /*
2317 * Mark all target ports as removed, so we stop queueing
2318 * commands and don't try to reconnect.
2319 */
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07002320 spin_lock(&host->target_lock);
Matthew Wilcox549c5fc22006-06-17 20:37:30 -07002321 list_for_each_entry(target, &host->target_list, list) {
Bart Van Asschee9684672010-11-26 15:08:38 -05002322 spin_lock_irq(&target->lock);
Ishai Rabinovitz0c5b3952006-06-17 20:37:31 -07002323 target->state = SRP_TARGET_REMOVED;
Bart Van Asschee9684672010-11-26 15:08:38 -05002324 spin_unlock_irq(&target->lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002325 }
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07002326 spin_unlock(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002327
2328 /*
2329 * Wait for any reconnection tasks that may have
2330 * started before we marked our target ports as
2331 * removed, and any target port removal tasks.
2332 */
Tejun Heof0626712010-10-19 15:24:36 +00002333 flush_workqueue(ib_wq);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002334
2335 list_for_each_entry_safe(target, tmp_target,
2336 &host->target_list, list) {
David Dillowb0e47c82008-01-03 10:25:27 -08002337 srp_remove_host(target->scsi_host);
Dave Dillowad696982008-01-03 22:35:41 -05002338 scsi_remove_host(target->scsi_host);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002339 srp_disconnect_target(target);
2340 ib_destroy_cm_id(target->cm_id);
2341 srp_free_target_ib(target);
David Dillow8f26c9f2011-01-14 19:45:50 -05002342 srp_free_req_data(target);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002343 scsi_host_put(target->scsi_host);
2344 }
2345
Roland Dreieraef9ec32005-11-02 14:07:13 -08002346 kfree(host);
2347 }
2348
Roland Dreierf5358a12006-06-17 20:37:29 -07002349 if (srp_dev->fmr_pool)
2350 ib_destroy_fmr_pool(srp_dev->fmr_pool);
2351 ib_dereg_mr(srp_dev->mr);
2352 ib_dealloc_pd(srp_dev->pd);
2353
2354 kfree(srp_dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002355}
2356
/*
 * Callbacks for the SRP transport class. No optional operations are
 * implemented here; registering the (empty) template is still required
 * so that srp_attach_transport() can create the transport class used by
 * srp_remove_host()/scsi host registration.
 */
static struct srp_function_template ib_srp_transport_functions = {
};
2359
Roland Dreieraef9ec32005-11-02 14:07:13 -08002360static int __init srp_init_module(void)
2361{
2362 int ret;
2363
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05002364 BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
Bart Van Asschedd5e6e32010-08-30 19:27:20 +00002365
David Dillow49248642011-01-14 18:23:24 -05002366 if (srp_sg_tablesize) {
2367 printk(KERN_WARNING PFX "srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
2368 if (!cmd_sg_entries)
2369 cmd_sg_entries = srp_sg_tablesize;
2370 }
2371
2372 if (!cmd_sg_entries)
2373 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
2374
2375 if (cmd_sg_entries > 255) {
2376 printk(KERN_WARNING PFX "Clamping cmd_sg_entries to 255\n");
2377 cmd_sg_entries = 255;
David Dillow1e89a192008-04-16 21:01:12 -07002378 }
2379
FUJITA Tomonori32368222007-06-27 16:33:12 +09002380 ib_srp_transport_template =
2381 srp_attach_transport(&ib_srp_transport_functions);
2382 if (!ib_srp_transport_template)
2383 return -ENOMEM;
2384
Roland Dreieraef9ec32005-11-02 14:07:13 -08002385 ret = class_register(&srp_class);
2386 if (ret) {
2387 printk(KERN_ERR PFX "couldn't register class infiniband_srp\n");
FUJITA Tomonori32368222007-06-27 16:33:12 +09002388 srp_release_transport(ib_srp_transport_template);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002389 return ret;
2390 }
2391
Michael S. Tsirkinc1a0b232006-08-21 16:40:12 -07002392 ib_sa_register_client(&srp_sa_client);
2393
Roland Dreieraef9ec32005-11-02 14:07:13 -08002394 ret = ib_register_client(&srp_client);
2395 if (ret) {
2396 printk(KERN_ERR PFX "couldn't register IB client\n");
FUJITA Tomonori32368222007-06-27 16:33:12 +09002397 srp_release_transport(ib_srp_transport_template);
Michael S. Tsirkinc1a0b232006-08-21 16:40:12 -07002398 ib_sa_unregister_client(&srp_sa_client);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002399 class_unregister(&srp_class);
2400 return ret;
2401 }
2402
2403 return 0;
2404}
2405
2406static void __exit srp_cleanup_module(void)
2407{
2408 ib_unregister_client(&srp_client);
Michael S. Tsirkinc1a0b232006-08-21 16:40:12 -07002409 ib_sa_unregister_client(&srp_sa_client);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002410 class_unregister(&srp_class);
FUJITA Tomonori32368222007-06-27 16:33:12 +09002411 srp_release_transport(ib_srp_transport_template);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002412}
2413
2414module_init(srp_init_module);
2415module_exit(srp_cleanup_module);