/*
 * Copyright (c) 2006 - 2009 Mellanox Technology Inc.  All rights reserved.
 * Copyright (C) 2008 - 2011 Bart Van Assche <bvanassche@acm.org>.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/kthread.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include "ib_srpt.h"

/* Name of this kernel module. */
#define DRV_NAME		"ib_srpt"
#define DRV_VERSION		"2.0.0"
#define DRV_RELDATE		"2011-02-14"

#define SRPT_ID_STRING	"Linux SRP target"

#undef pr_fmt
#define pr_fmt(fmt) DRV_NAME " " fmt

MODULE_AUTHOR("Vu Pham and Bart Van Assche");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol target "
		   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");

/*
 * Global Variables
 */

static u64 srpt_service_guid;
static DEFINE_SPINLOCK(srpt_dev_lock);	/* Protects srpt_dev_list. */
static LIST_HEAD(srpt_dev_list);	/* List of srpt_device structures. */

static unsigned srp_max_req_size = DEFAULT_MAX_REQ_SIZE;
module_param(srp_max_req_size, int, 0444);
MODULE_PARM_DESC(srp_max_req_size,
		 "Maximum size of SRP request messages in bytes.");

static int srpt_srq_size = DEFAULT_SRPT_SRQ_SIZE;
module_param(srpt_srq_size, int, 0444);
MODULE_PARM_DESC(srpt_srq_size,
		 "Shared receive queue (SRQ) size.");

static int srpt_get_u64_x(char *buffer, struct kernel_param *kp)
{
	return sprintf(buffer, "0x%016llx", *(u64 *)kp->arg);
}
module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid,
		  0444);
MODULE_PARM_DESC(srpt_service_guid,
		 "Using this value for ioc_guid, id_ext, and cm_listen_id"
		 " instead of using the node_guid of the first HCA.");

static struct ib_client srpt_client;
static void srpt_release_channel(struct srpt_rdma_ch *ch);
static int srpt_queue_status(struct se_cmd *cmd);
static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc);

/*
 * The only allowed channel state changes are those that change the channel
 * state into a state with a higher numerical value. Hence the new > prev test.
 */
static bool srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new)
{
	unsigned long flags;
	enum rdma_ch_state prev;
	bool changed = false;

	spin_lock_irqsave(&ch->spinlock, flags);
	prev = ch->state;
	if (new > prev) {
		ch->state = new;
		changed = true;
	}
	spin_unlock_irqrestore(&ch->spinlock, flags);

	return changed;
}
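/*
 * Example: assuming enum rdma_ch_state is declared in increasing lifecycle
 * order in ib_srpt.h (which the new > prev test relies on), moving a live
 * channel to CH_RELEASING succeeds and returns true, while an attempt to
 * move a releasing channel back to CH_LIVE is rejected and returns false.
 */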

/**
 * srpt_event_handler() - Asynchronous IB event callback function.
 *
 * Callback function called by the InfiniBand core when an asynchronous IB
 * event occurs. This callback may occur in interrupt context. See also
 * section 11.5.2, Set Asynchronous Event Handler, in the InfiniBand
 * Architecture Specification.
 */
static void srpt_event_handler(struct ib_event_handler *handler,
			       struct ib_event *event)
{
	struct srpt_device *sdev;
	struct srpt_port *sport;

	sdev = ib_get_client_data(event->device, &srpt_client);
	if (!sdev || sdev->device != event->device)
		return;

	pr_debug("ASYNC event= %d on device= %s\n", event->event,
		 sdev->device->name);

	switch (event->event) {
	case IB_EVENT_PORT_ERR:
		if (event->element.port_num <= sdev->device->phys_port_cnt) {
			sport = &sdev->port[event->element.port_num - 1];
			sport->lid = 0;
			sport->sm_lid = 0;
		}
		break;
	case IB_EVENT_PORT_ACTIVE:
	case IB_EVENT_LID_CHANGE:
	case IB_EVENT_PKEY_CHANGE:
	case IB_EVENT_SM_CHANGE:
	case IB_EVENT_CLIENT_REREGISTER:
	case IB_EVENT_GID_CHANGE:
		/* Refresh port data asynchronously. */
		if (event->element.port_num <= sdev->device->phys_port_cnt) {
			sport = &sdev->port[event->element.port_num - 1];
			if (!sport->lid && !sport->sm_lid)
				schedule_work(&sport->work);
		}
		break;
	default:
		pr_err("received unrecognized IB event %d\n",
		       event->event);
		break;
	}
}

/**
 * srpt_srq_event() - SRQ event callback function.
 */
static void srpt_srq_event(struct ib_event *event, void *ctx)
{
	pr_info("SRQ event %d\n", event->event);
}

/**
 * srpt_qp_event() - QP event callback function.
 */
static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
{
	pr_debug("QP event %d on cm_id=%p sess_name=%s state=%d\n",
		 event->event, ch->cm_id, ch->sess_name, ch->state);

	switch (event->event) {
	case IB_EVENT_COMM_EST:
		ib_cm_notify(ch->cm_id, event->event);
		break;
	case IB_EVENT_QP_LAST_WQE_REACHED:
		if (srpt_set_ch_state(ch, CH_RELEASING))
			srpt_release_channel(ch);
		else
			pr_debug("%s: state %d - ignored LAST_WQE.\n",
				 ch->sess_name, ch->state);
		break;
	default:
		pr_err("received unrecognized IB QP event %d\n", event->event);
		break;
	}
}

/**
 * srpt_set_ioc() - Helper function for initializing an IOUnitInfo structure.
 *
 * @slot: one-based slot number.
 * @value: four-bit value.
 *
 * Copies the lowest four bits of value into element slot of the array of
 * four-bit elements called c_list (controller list). The index slot is
 * one-based.
 */
static void srpt_set_ioc(u8 *c_list, u32 slot, u8 value)
{
	u16 id;
	u8 tmp;

	id = (slot - 1) / 2;
	if (slot & 0x1) {
		tmp = c_list[id] & 0xf;
		c_list[id] = (value << 4) | tmp;
	} else {
		tmp = c_list[id] & 0xf0;
		c_list[id] = (value & 0xf) | tmp;
	}
}
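/*
 * Example of the packing performed by srpt_set_ioc(): two consecutive slots
 * share one byte of c_list, odd slots in the high nibble and even slots in
 * the low nibble. After
 *
 *	srpt_set_ioc(c_list, 1, 0x1);
 *	srpt_set_ioc(c_list, 2, 0x2);
 *
 * on a zero-initialized array, c_list[0] equals 0x12.
 */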

/**
 * srpt_get_class_port_info() - Copy ClassPortInfo to a management datagram.
 *
 * See also section 16.3.3.1 ClassPortInfo in the InfiniBand Architecture
 * Specification.
 */
static void srpt_get_class_port_info(struct ib_dm_mad *mad)
{
	struct ib_class_port_info *cif;

	cif = (struct ib_class_port_info *)mad->data;
	memset(cif, 0, sizeof(*cif));
	cif->base_version = 1;
	cif->class_version = 1;
	cif->resp_time_value = 20;

	mad->mad_hdr.status = 0;
}

/**
 * srpt_get_iou() - Write IOUnitInfo to a management datagram.
 *
 * See also section 16.3.3.3 IOUnitInfo in the InfiniBand Architecture
 * Specification. See also section B.7, table B.6 in the SRP r16a document.
 */
static void srpt_get_iou(struct ib_dm_mad *mad)
{
	struct ib_dm_iou_info *ioui;
	u8 slot;
	int i;

	ioui = (struct ib_dm_iou_info *)mad->data;
	ioui->change_id = cpu_to_be16(1);
	ioui->max_controllers = 16;

	/* set present for slot 1 and empty for the rest */
	srpt_set_ioc(ioui->controller_list, 1, 1);
	for (i = 1, slot = 2; i < 16; i++, slot++)
		srpt_set_ioc(ioui->controller_list, slot, 0);

	mad->mad_hdr.status = 0;
}

/**
 * srpt_get_ioc() - Write IOControllerProfile to a management datagram.
 *
 * See also section 16.3.3.4 IOControllerProfile in the InfiniBand
 * Architecture Specification. See also section B.7, table B.7 in the SRP
 * r16a document.
 */
static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
			 struct ib_dm_mad *mad)
{
	struct srpt_device *sdev = sport->sdev;
	struct ib_dm_ioc_profile *iocp;

	iocp = (struct ib_dm_ioc_profile *)mad->data;

	if (!slot || slot > 16) {
		mad->mad_hdr.status
			= cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
		return;
	}

	if (slot > 2) {
		mad->mad_hdr.status
			= cpu_to_be16(DM_MAD_STATUS_NO_IOC);
		return;
	}

	memset(iocp, 0, sizeof(*iocp));
	strcpy(iocp->id_string, SRPT_ID_STRING);
	iocp->guid = cpu_to_be64(srpt_service_guid);
	iocp->vendor_id = cpu_to_be32(sdev->device->attrs.vendor_id);
	iocp->device_id = cpu_to_be32(sdev->device->attrs.vendor_part_id);
	iocp->device_version = cpu_to_be16(sdev->device->attrs.hw_ver);
	iocp->subsys_vendor_id = cpu_to_be32(sdev->device->attrs.vendor_id);
	iocp->subsys_device_id = 0x0;
	iocp->io_class = cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
	iocp->io_subclass = cpu_to_be16(SRP_IO_SUBCLASS);
	iocp->protocol = cpu_to_be16(SRP_PROTOCOL);
	iocp->protocol_version = cpu_to_be16(SRP_PROTOCOL_VERSION);
	iocp->send_queue_depth = cpu_to_be16(sdev->srq_size);
	iocp->rdma_read_depth = 4;
	iocp->send_size = cpu_to_be32(srp_max_req_size);
	iocp->rdma_size = cpu_to_be32(min(sport->port_attrib.srp_max_rdma_size,
					  1U << 24));
	iocp->num_svc_entries = 1;
	iocp->op_cap_mask = SRP_SEND_TO_IOC | SRP_SEND_FROM_IOC |
		SRP_RDMA_READ_FROM_IOC | SRP_RDMA_WRITE_FROM_IOC;

	mad->mad_hdr.status = 0;
}

/**
 * srpt_get_svc_entries() - Write ServiceEntries to a management datagram.
 *
 * See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
 * Specification. See also section B.7, table B.8 in the SRP r16a document.
 */
static void srpt_get_svc_entries(u64 ioc_guid,
				 u16 slot, u8 hi, u8 lo, struct ib_dm_mad *mad)
{
	struct ib_dm_svc_entries *svc_entries;

	WARN_ON(!ioc_guid);

	if (!slot || slot > 16) {
		mad->mad_hdr.status
			= cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
		return;
	}

	if (slot > 2 || lo > hi || hi > 1) {
		mad->mad_hdr.status
			= cpu_to_be16(DM_MAD_STATUS_NO_IOC);
		return;
	}

	svc_entries = (struct ib_dm_svc_entries *)mad->data;
	memset(svc_entries, 0, sizeof(*svc_entries));
	svc_entries->service_entries[0].id = cpu_to_be64(ioc_guid);
	snprintf(svc_entries->service_entries[0].name,
		 sizeof(svc_entries->service_entries[0].name),
		 "%s%016llx",
		 SRP_SERVICE_NAME_PREFIX,
		 ioc_guid);

	mad->mad_hdr.status = 0;
}

/**
 * srpt_mgmt_method_get() - Process a received management datagram.
 * @sp:      source port through which the MAD has been received.
 * @rq_mad:  received MAD.
 * @rsp_mad: response MAD.
 */
static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
				 struct ib_dm_mad *rsp_mad)
{
	u16 attr_id;
	u32 slot;
	u8 hi, lo;

	attr_id = be16_to_cpu(rq_mad->mad_hdr.attr_id);
	switch (attr_id) {
	case DM_ATTR_CLASS_PORT_INFO:
		srpt_get_class_port_info(rsp_mad);
		break;
	case DM_ATTR_IOU_INFO:
		srpt_get_iou(rsp_mad);
		break;
	case DM_ATTR_IOC_PROFILE:
		slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
		srpt_get_ioc(sp, slot, rsp_mad);
		break;
	case DM_ATTR_SVC_ENTRIES:
		slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
		hi = (u8) ((slot >> 8) & 0xff);
		lo = (u8) (slot & 0xff);
		slot = (u16) ((slot >> 16) & 0xffff);
		srpt_get_svc_entries(srpt_service_guid,
				     slot, hi, lo, rsp_mad);
		break;
	default:
		rsp_mad->mad_hdr.status =
		    cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
		break;
	}
}

/**
 * srpt_mad_send_handler() - Post MAD-send callback function.
 */
static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
				  struct ib_mad_send_wc *mad_wc)
{
	ib_destroy_ah(mad_wc->send_buf->ah);
	ib_free_send_mad(mad_wc->send_buf);
}

/**
 * srpt_mad_recv_handler() - MAD reception callback function.
 */
static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
				  struct ib_mad_send_buf *send_buf,
				  struct ib_mad_recv_wc *mad_wc)
{
	struct srpt_port *sport = (struct srpt_port *)mad_agent->context;
	struct ib_ah *ah;
	struct ib_mad_send_buf *rsp;
	struct ib_dm_mad *dm_mad;

	if (!mad_wc || !mad_wc->recv_buf.mad)
		return;

	ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc,
				  mad_wc->recv_buf.grh, mad_agent->port_num);
	if (IS_ERR(ah))
		goto err;

	BUILD_BUG_ON(offsetof(struct ib_dm_mad, data) != IB_MGMT_DEVICE_HDR);

	rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp,
				 mad_wc->wc->pkey_index, 0,
				 IB_MGMT_DEVICE_HDR, IB_MGMT_DEVICE_DATA,
				 GFP_KERNEL,
				 IB_MGMT_BASE_VERSION);
	if (IS_ERR(rsp))
		goto err_rsp;

	rsp->ah = ah;

	dm_mad = rsp->mad;
	memcpy(dm_mad, mad_wc->recv_buf.mad, sizeof(*dm_mad));
	dm_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
	dm_mad->mad_hdr.status = 0;

	switch (mad_wc->recv_buf.mad->mad_hdr.method) {
	case IB_MGMT_METHOD_GET:
		srpt_mgmt_method_get(sport, mad_wc->recv_buf.mad, dm_mad);
		break;
	case IB_MGMT_METHOD_SET:
		dm_mad->mad_hdr.status =
		    cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
		break;
	default:
		dm_mad->mad_hdr.status =
		    cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
		break;
	}

	if (!ib_post_send_mad(rsp, NULL)) {
		ib_free_recv_mad(mad_wc);
		/* will destroy_ah & free_send_mad in send completion */
		return;
	}

	ib_free_send_mad(rsp);

err_rsp:
	ib_destroy_ah(ah);
err:
	ib_free_recv_mad(mad_wc);
}

/**
 * srpt_refresh_port() - Configure a HCA port.
 *
 * Enable InfiniBand management datagram processing, update the cached sm_lid,
 * lid and gid values, and register a callback function for processing MADs
 * on the specified port.
 *
 * Note: It is safe to call this function more than once for the same port.
 */
static int srpt_refresh_port(struct srpt_port *sport)
{
	struct ib_mad_reg_req reg_req;
	struct ib_port_modify port_modify;
	struct ib_port_attr port_attr;
	int ret;

	memset(&port_modify, 0, sizeof(port_modify));
	port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
	port_modify.clr_port_cap_mask = 0;

	ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
	if (ret)
		goto err_mod_port;

	ret = ib_query_port(sport->sdev->device, sport->port, &port_attr);
	if (ret)
		goto err_query_port;

	sport->sm_lid = port_attr.sm_lid;
	sport->lid = port_attr.lid;

	ret = ib_query_gid(sport->sdev->device, sport->port, 0, &sport->gid,
			   NULL);
	if (ret)
		goto err_query_port;

	if (!sport->mad_agent) {
		memset(&reg_req, 0, sizeof(reg_req));
		reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
		reg_req.mgmt_class_version = IB_MGMT_BASE_VERSION;
		set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
		set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);

		sport->mad_agent = ib_register_mad_agent(sport->sdev->device,
							 sport->port,
							 IB_QPT_GSI,
							 &reg_req, 0,
							 srpt_mad_send_handler,
							 srpt_mad_recv_handler,
							 sport, 0);
		if (IS_ERR(sport->mad_agent)) {
			ret = PTR_ERR(sport->mad_agent);
			sport->mad_agent = NULL;
			goto err_query_port;
		}
	}

	return 0;

err_query_port:

	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
	ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);

err_mod_port:

	return ret;
}

/**
 * srpt_unregister_mad_agent() - Unregister MAD callback functions.
 *
 * Note: It is safe to call this function more than once for the same device.
 */
static void srpt_unregister_mad_agent(struct srpt_device *sdev)
{
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP,
	};
	struct srpt_port *sport;
	int i;

	for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
		sport = &sdev->port[i - 1];
		WARN_ON(sport->port != i);
		if (ib_modify_port(sdev->device, i, 0, &port_modify) < 0)
			pr_err("disabling MAD processing failed.\n");
		if (sport->mad_agent) {
			ib_unregister_mad_agent(sport->mad_agent);
			sport->mad_agent = NULL;
		}
	}
}

/**
 * srpt_alloc_ioctx() - Allocate an SRPT I/O context structure.
 */
static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev,
					   int ioctx_size, int dma_size,
					   enum dma_data_direction dir)
{
	struct srpt_ioctx *ioctx;

	ioctx = kmalloc(ioctx_size, GFP_KERNEL);
	if (!ioctx)
		goto err;

	ioctx->buf = kmalloc(dma_size, GFP_KERNEL);
	if (!ioctx->buf)
		goto err_free_ioctx;

	ioctx->dma = ib_dma_map_single(sdev->device, ioctx->buf, dma_size, dir);
	if (ib_dma_mapping_error(sdev->device, ioctx->dma))
		goto err_free_buf;

	return ioctx;

err_free_buf:
	kfree(ioctx->buf);
err_free_ioctx:
	kfree(ioctx);
err:
	return NULL;
}

/**
 * srpt_free_ioctx() - Free an SRPT I/O context structure.
 */
static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
			    int dma_size, enum dma_data_direction dir)
{
	if (!ioctx)
		return;

	ib_dma_unmap_single(sdev->device, ioctx->dma, dma_size, dir);
	kfree(ioctx->buf);
	kfree(ioctx);
}

/**
 * srpt_alloc_ioctx_ring() - Allocate a ring of SRPT I/O context structures.
 * @sdev:       Device to allocate the I/O context ring for.
 * @ring_size:  Number of elements in the I/O context ring.
 * @ioctx_size: I/O context size.
 * @dma_size:   DMA buffer size.
 * @dir:        DMA data direction.
 */
static struct srpt_ioctx **srpt_alloc_ioctx_ring(struct srpt_device *sdev,
				int ring_size, int ioctx_size,
				int dma_size, enum dma_data_direction dir)
{
	struct srpt_ioctx **ring;
	int i;

	WARN_ON(ioctx_size != sizeof(struct srpt_recv_ioctx)
		&& ioctx_size != sizeof(struct srpt_send_ioctx));

	ring = kmalloc(ring_size * sizeof(ring[0]), GFP_KERNEL);
	if (!ring)
		goto out;
	for (i = 0; i < ring_size; ++i) {
		ring[i] = srpt_alloc_ioctx(sdev, ioctx_size, dma_size, dir);
		if (!ring[i])
			goto err;
		ring[i]->index = i;
	}
	goto out;

err:
	while (--i >= 0)
		srpt_free_ioctx(sdev, ring[i], dma_size, dir);
	kfree(ring);
	ring = NULL;
out:
	return ring;
}

/**
 * srpt_free_ioctx_ring() - Free the ring of SRPT I/O context structures.
 */
static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
				 struct srpt_device *sdev, int ring_size,
				 int dma_size, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < ring_size; ++i)
		srpt_free_ioctx(sdev, ioctx_ring[i], dma_size, dir);
	kfree(ioctx_ring);
}

/**
 * srpt_get_cmd_state() - Get the state of a SCSI command.
 */
static enum srpt_command_state srpt_get_cmd_state(struct srpt_send_ioctx *ioctx)
{
	enum srpt_command_state state;
	unsigned long flags;

	BUG_ON(!ioctx);

	spin_lock_irqsave(&ioctx->spinlock, flags);
	state = ioctx->state;
	spin_unlock_irqrestore(&ioctx->spinlock, flags);
	return state;
}

/**
 * srpt_set_cmd_state() - Set the state of a SCSI command.
 *
 * Does not modify the state of aborted commands. Returns the previous command
 * state.
 */
static enum srpt_command_state srpt_set_cmd_state(struct srpt_send_ioctx *ioctx,
						  enum srpt_command_state new)
{
	enum srpt_command_state previous;
	unsigned long flags;

	BUG_ON(!ioctx);

	spin_lock_irqsave(&ioctx->spinlock, flags);
	previous = ioctx->state;
	if (previous != SRPT_STATE_DONE)
		ioctx->state = new;
	spin_unlock_irqrestore(&ioctx->spinlock, flags);

	return previous;
}

/**
 * srpt_test_and_set_cmd_state() - Test and set the state of a command.
 *
 * Returns true if and only if the previous command state was equal to 'old'.
 */
static bool srpt_test_and_set_cmd_state(struct srpt_send_ioctx *ioctx,
					enum srpt_command_state old,
					enum srpt_command_state new)
{
	enum srpt_command_state previous;
	unsigned long flags;

	WARN_ON(!ioctx);
	WARN_ON(old == SRPT_STATE_DONE);
	WARN_ON(new == SRPT_STATE_NEW);

	spin_lock_irqsave(&ioctx->spinlock, flags);
	previous = ioctx->state;
	if (previous == old)
		ioctx->state = new;
	spin_unlock_irqrestore(&ioctx->spinlock, flags);
	return previous == old;
}
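/*
 * Example: the RDMA-read completion handler further down relies on this
 * helper via srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA,
 * SRPT_STATE_DATA_IN); the transition, and hence target_execute_cmd(), only
 * happens if the command is still in SRPT_STATE_NEED_DATA, i.e. if it has
 * not been aborted in the meantime.
 */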

/**
 * srpt_post_recv() - Post an IB receive request.
 */
static int srpt_post_recv(struct srpt_device *sdev,
			  struct srpt_recv_ioctx *ioctx)
{
	struct ib_sge list;
	struct ib_recv_wr wr, *bad_wr;

	BUG_ON(!sdev);
	list.addr = ioctx->ioctx.dma;
	list.length = srp_max_req_size;
	list.lkey = sdev->pd->local_dma_lkey;

	ioctx->ioctx.cqe.done = srpt_recv_done;
	wr.wr_cqe = &ioctx->ioctx.cqe;
	wr.next = NULL;
	wr.sg_list = &list;
	wr.num_sge = 1;

	return ib_post_srq_recv(sdev->srq, &wr, &bad_wr);
}

/**
 * srpt_post_send() - Post an IB send request.
 *
 * Returns zero upon success and a non-zero value upon failure.
 */
static int srpt_post_send(struct srpt_rdma_ch *ch,
			  struct srpt_send_ioctx *ioctx, int len)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;
	struct srpt_device *sdev = ch->sport->sdev;
	int ret;

	atomic_inc(&ch->req_lim);

	ret = -ENOMEM;
	if (unlikely(atomic_dec_return(&ch->sq_wr_avail) < 0)) {
		pr_warn("IB send queue full (needed 1)\n");
		goto out;
	}

	ib_dma_sync_single_for_device(sdev->device, ioctx->ioctx.dma, len,
				      DMA_TO_DEVICE);

	list.addr = ioctx->ioctx.dma;
	list.length = len;
	list.lkey = sdev->pd->local_dma_lkey;

	ioctx->ioctx.cqe.done = srpt_send_done;
	wr.next = NULL;
	wr.wr_cqe = &ioctx->ioctx.cqe;
	wr.sg_list = &list;
	wr.num_sge = 1;
	wr.opcode = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(ch->qp, &wr, &bad_wr);

out:
	if (ret < 0) {
		atomic_inc(&ch->sq_wr_avail);
		atomic_dec(&ch->req_lim);
	}
	return ret;
}

/**
 * srpt_get_desc_tbl() - Parse the data descriptors of an SRP_CMD request.
 * @ioctx: Pointer to the I/O context associated with the request.
 * @srp_cmd: Pointer to the SRP_CMD request data.
 * @dir: Pointer to the variable to which the transfer direction will be
 *   written.
 * @data_len: Pointer to the variable to which the total data length of all
 *   descriptors in the SRP_CMD request will be written.
 *
 * This function initializes ioctx->n_rbuf and ioctx->rbufs.
 *
 * Returns -EINVAL when the SRP_CMD request contains inconsistent descriptors;
 * -ENOMEM when memory allocation fails and zero upon success.
 */
static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
			     struct srp_cmd *srp_cmd,
			     enum dma_data_direction *dir, u64 *data_len)
{
	struct srp_indirect_buf *idb;
	struct srp_direct_buf *db;
	unsigned add_cdb_offset;
	int ret;

	/*
	 * The pointer computations below will only be compiled correctly
	 * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check
	 * whether srp_cmd::add_data has been declared as a byte pointer.
	 */
	BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0)
		     && !__same_type(srp_cmd->add_data[0], (u8)0));

	BUG_ON(!dir);
	BUG_ON(!data_len);

	ret = 0;
	*data_len = 0;

	/*
	 * The lower four bits of the buffer format field contain the DATA-IN
	 * buffer descriptor format, and the highest four bits contain the
	 * DATA-OUT buffer descriptor format.
	 */
	*dir = DMA_NONE;
	if (srp_cmd->buf_fmt & 0xf)
		/* DATA-IN: transfer data from target to initiator (read). */
		*dir = DMA_FROM_DEVICE;
	else if (srp_cmd->buf_fmt >> 4)
		/* DATA-OUT: transfer data from initiator to target (write). */
		*dir = DMA_TO_DEVICE;

	/*
	 * According to the SRP spec, the lower two bits of the 'ADDITIONAL
	 * CDB LENGTH' field are reserved and the size in bytes of this field
	 * is four times the value specified in bits 3..7. Hence the "& ~3".
	 */
	add_cdb_offset = srp_cmd->add_cdb_len & ~3;
	if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
	    ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
		ioctx->n_rbuf = 1;
		ioctx->rbufs = &ioctx->single_rbuf;

		db = (struct srp_direct_buf *)(srp_cmd->add_data
					       + add_cdb_offset);
		memcpy(ioctx->rbufs, db, sizeof(*db));
		*data_len = be32_to_cpu(db->len);
	} else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
		   ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
		idb = (struct srp_indirect_buf *)(srp_cmd->add_data
						  + add_cdb_offset);

		ioctx->n_rbuf = be32_to_cpu(idb->table_desc.len) / sizeof(*db);

		if (ioctx->n_rbuf >
		    (srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) {
			pr_err("received unsupported SRP_CMD request"
			       " type (%u out + %u in != %u / %zu)\n",
			       srp_cmd->data_out_desc_cnt,
			       srp_cmd->data_in_desc_cnt,
			       be32_to_cpu(idb->table_desc.len),
			       sizeof(*db));
			ioctx->n_rbuf = 0;
			ret = -EINVAL;
			goto out;
		}

		if (ioctx->n_rbuf == 1)
			ioctx->rbufs = &ioctx->single_rbuf;
		else {
			ioctx->rbufs =
				kmalloc(ioctx->n_rbuf * sizeof(*db), GFP_ATOMIC);
			if (!ioctx->rbufs) {
				ioctx->n_rbuf = 0;
				ret = -ENOMEM;
				goto out;
			}
		}

		db = idb->desc_list;
		memcpy(ioctx->rbufs, db, ioctx->n_rbuf * sizeof(*db));
		*data_len = be32_to_cpu(idb->len);
	}
out:
	return ret;
}
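/*
 * Example of the buf_fmt encoding parsed above: a write (DATA-OUT) command
 * that uses an indirect descriptor table carries SRP_DATA_DESC_INDIRECT in
 * the upper nibble of buf_fmt, while a read (DATA-IN) command with a single
 * direct descriptor carries SRP_DATA_DESC_DIRECT in the lower nibble.
 */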

/**
 * srpt_init_ch_qp() - Initialize queue pair attributes.
 *
 * Initializes the attributes of queue pair 'qp' by allowing local write,
 * remote read and remote write. Also transitions 'qp' to state IB_QPS_INIT.
 */
static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	attr->qp_state = IB_QPS_INIT;
	attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
	    IB_ACCESS_REMOTE_WRITE;
	attr->port_num = ch->sport->port;
	attr->pkey_index = 0;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PORT |
			   IB_QP_PKEY_INDEX);

	kfree(attr);
	return ret;
}

/**
 * srpt_ch_qp_rtr() - Change the state of a channel to 'ready to receive' (RTR).
 * @ch: channel of the queue pair.
 * @qp: queue pair to change the state of.
 *
 * Returns zero upon success and a negative value upon failure.
 *
 * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
 * If this structure ever becomes larger, it might be necessary to allocate
 * it dynamically instead of on the stack.
 */
static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int attr_mask;
	int ret;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
	if (ret)
		goto out;

	qp_attr.max_dest_rd_atomic = 4;

	ret = ib_modify_qp(qp, &qp_attr, attr_mask);

out:
	return ret;
}

/**
 * srpt_ch_qp_rts() - Change the state of a channel to 'ready to send' (RTS).
 * @ch: channel of the queue pair.
 * @qp: queue pair to change the state of.
 *
 * Returns zero upon success and a negative value upon failure.
 *
 * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
 * If this structure ever becomes larger, it might be necessary to allocate
 * it dynamically instead of on the stack.
 */
static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int attr_mask;
	int ret;

	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
	if (ret)
		goto out;

	qp_attr.max_rd_atomic = 4;

	ret = ib_modify_qp(qp, &qp_attr, attr_mask);

out:
	return ret;
}

/**
 * srpt_ch_qp_err() - Set the channel queue pair state to 'error'.
 */
static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
{
	struct ib_qp_attr qp_attr;

	qp_attr.qp_state = IB_QPS_ERR;
	return ib_modify_qp(ch->qp, &qp_attr, IB_QP_STATE);
}

/**
 * srpt_unmap_sg_to_ib_sge() - Unmap an IB SGE list.
 */
static void srpt_unmap_sg_to_ib_sge(struct srpt_rdma_ch *ch,
				    struct srpt_send_ioctx *ioctx)
{
	struct scatterlist *sg;
	enum dma_data_direction dir;

	BUG_ON(!ch);
	BUG_ON(!ioctx);
	BUG_ON(ioctx->n_rdma && !ioctx->rdma_wrs);

	while (ioctx->n_rdma)
		kfree(ioctx->rdma_wrs[--ioctx->n_rdma].wr.sg_list);

	kfree(ioctx->rdma_wrs);
	ioctx->rdma_wrs = NULL;

	if (ioctx->mapped_sg_count) {
		sg = ioctx->sg;
		WARN_ON(!sg);
		dir = ioctx->cmd.data_direction;
		BUG_ON(dir == DMA_NONE);
		ib_dma_unmap_sg(ch->sport->sdev->device, sg, ioctx->sg_cnt,
				target_reverse_dma_direction(&ioctx->cmd));
		ioctx->mapped_sg_count = 0;
	}
}

/**
 * srpt_map_sg_to_ib_sge() - Map an SG list to an IB SGE list.
 */
static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
				 struct srpt_send_ioctx *ioctx)
{
	struct ib_device *dev = ch->sport->sdev->device;
	struct se_cmd *cmd;
	struct scatterlist *sg, *sg_orig;
	int sg_cnt;
	enum dma_data_direction dir;
	struct ib_rdma_wr *riu;
	struct srp_direct_buf *db;
	dma_addr_t dma_addr;
	struct ib_sge *sge;
	u64 raddr;
	u32 rsize;
	u32 tsize;
	u32 dma_len;
	int count, nrdma;
	int i, j, k;

	BUG_ON(!ch);
	BUG_ON(!ioctx);
	cmd = &ioctx->cmd;
	dir = cmd->data_direction;
	BUG_ON(dir == DMA_NONE);

	ioctx->sg = sg = sg_orig = cmd->t_data_sg;
	ioctx->sg_cnt = sg_cnt = cmd->t_data_nents;

	count = ib_dma_map_sg(ch->sport->sdev->device, sg, sg_cnt,
			      target_reverse_dma_direction(cmd));
	if (unlikely(!count))
		return -EAGAIN;

	ioctx->mapped_sg_count = count;

	if (ioctx->rdma_wrs && ioctx->n_rdma_wrs)
		nrdma = ioctx->n_rdma_wrs;
	else {
		nrdma = (count + SRPT_DEF_SG_PER_WQE - 1) / SRPT_DEF_SG_PER_WQE
			+ ioctx->n_rbuf;

		ioctx->rdma_wrs = kcalloc(nrdma, sizeof(*ioctx->rdma_wrs),
				GFP_KERNEL);
		if (!ioctx->rdma_wrs)
			goto free_mem;

		ioctx->n_rdma_wrs = nrdma;
	}

	db = ioctx->rbufs;
	tsize = cmd->data_length;
	dma_len = ib_sg_dma_len(dev, &sg[0]);
	riu = ioctx->rdma_wrs;

	/*
	 * For each remote descriptor, calculate the number of ib_sge entries.
	 * If at most SRPT_DEF_SG_PER_WQE ib_sge entries are needed per RDMA
	 * operation, a single RDMA WR suffices for that descriptor;
	 * otherwise an extra RDMA WR is allocated to carry the additional
	 * ib_sge entries.
	 */
	for (i = 0, j = 0;
	     j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
		rsize = be32_to_cpu(db->len);
		raddr = be64_to_cpu(db->va);
		riu->remote_addr = raddr;
		riu->rkey = be32_to_cpu(db->key);
		riu->wr.num_sge = 0;

		/* calculate how many sge required for this remote_buf */
		while (rsize > 0 && tsize > 0) {

			if (rsize >= dma_len) {
				tsize -= dma_len;
				rsize -= dma_len;
				raddr += dma_len;

				if (tsize > 0) {
					++j;
					if (j < count) {
						sg = sg_next(sg);
						dma_len = ib_sg_dma_len(
								dev, sg);
					}
				}
			} else {
				tsize -= rsize;
				dma_len -= rsize;
				rsize = 0;
			}

			++riu->wr.num_sge;

			if (rsize > 0 &&
			    riu->wr.num_sge == SRPT_DEF_SG_PER_WQE) {
				++ioctx->n_rdma;
				riu->wr.sg_list = kmalloc_array(riu->wr.num_sge,
						sizeof(*riu->wr.sg_list),
						GFP_KERNEL);
				if (!riu->wr.sg_list)
					goto free_mem;

				++riu;
				riu->wr.num_sge = 0;
				riu->remote_addr = raddr;
				riu->rkey = be32_to_cpu(db->key);
			}
		}

		++ioctx->n_rdma;
		riu->wr.sg_list = kmalloc_array(riu->wr.num_sge,
					sizeof(*riu->wr.sg_list),
					GFP_KERNEL);
		if (!riu->wr.sg_list)
			goto free_mem;
	}

	db = ioctx->rbufs;
	tsize = cmd->data_length;
	riu = ioctx->rdma_wrs;
	sg = sg_orig;
	dma_len = ib_sg_dma_len(dev, &sg[0]);
	dma_addr = ib_sg_dma_address(dev, &sg[0]);

	/* this second loop maps the sg addresses to rdma_iu->ib_sge */
	for (i = 0, j = 0;
	     j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
		rsize = be32_to_cpu(db->len);
		sge = riu->wr.sg_list;
		k = 0;

		while (rsize > 0 && tsize > 0) {
			sge->addr = dma_addr;
			sge->lkey = ch->sport->sdev->pd->local_dma_lkey;

			if (rsize >= dma_len) {
				sge->length =
					(tsize < dma_len) ? tsize : dma_len;
				tsize -= dma_len;
				rsize -= dma_len;

				if (tsize > 0) {
					++j;
					if (j < count) {
						sg = sg_next(sg);
						dma_len = ib_sg_dma_len(
								dev, sg);
						dma_addr = ib_sg_dma_address(
								dev, sg);
					}
				}
			} else {
				sge->length = (tsize < rsize) ? tsize : rsize;
				tsize -= rsize;
				dma_len -= rsize;
				dma_addr += rsize;
				rsize = 0;
			}

			++k;
			if (k == riu->wr.num_sge && rsize > 0 && tsize > 0) {
				++riu;
				sge = riu->wr.sg_list;
				k = 0;
			} else if (rsize > 0 && tsize > 0)
				++sge;
		}
	}

	return 0;

free_mem:
	srpt_unmap_sg_to_ib_sge(ch, ioctx);

	return -ENOMEM;
}

/**
 * srpt_get_send_ioctx() - Obtain an I/O context for sending to the initiator.
 */
static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
{
	struct srpt_send_ioctx *ioctx;
	unsigned long flags;

	BUG_ON(!ch);

	ioctx = NULL;
	spin_lock_irqsave(&ch->spinlock, flags);
	if (!list_empty(&ch->free_list)) {
		ioctx = list_first_entry(&ch->free_list,
					 struct srpt_send_ioctx, free_list);
		list_del(&ioctx->free_list);
	}
	spin_unlock_irqrestore(&ch->spinlock, flags);

	if (!ioctx)
		return ioctx;

	BUG_ON(ioctx->ch != ch);
	spin_lock_init(&ioctx->spinlock);
	ioctx->state = SRPT_STATE_NEW;
	ioctx->n_rbuf = 0;
	ioctx->rbufs = NULL;
	ioctx->n_rdma = 0;
	ioctx->n_rdma_wrs = 0;
	ioctx->rdma_wrs = NULL;
	ioctx->mapped_sg_count = 0;
	init_completion(&ioctx->tx_done);
	ioctx->queue_status_only = false;
	/*
	 * transport_init_se_cmd() does not initialize all fields, so do it
	 * here.
	 */
	memset(&ioctx->cmd, 0, sizeof(ioctx->cmd));
	memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data));

	return ioctx;
}

/**
 * srpt_abort_cmd() - Abort a SCSI command.
 * @ioctx:   I/O context associated with the SCSI command.
 * @context: Preferred execution context.
 */
static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
{
	enum srpt_command_state state;
	unsigned long flags;

	BUG_ON(!ioctx);

	/*
	 * If the command is in a state where the target core is waiting for
	 * the ib_srpt driver, change the state to the next state. Changing
	 * the state of the command from SRPT_STATE_NEED_DATA to
	 * SRPT_STATE_DATA_IN ensures that srpt_xmit_response() will call this
	 * function a second time.
	 */

	spin_lock_irqsave(&ioctx->spinlock, flags);
	state = ioctx->state;
	switch (state) {
	case SRPT_STATE_NEED_DATA:
		ioctx->state = SRPT_STATE_DATA_IN;
		break;
	case SRPT_STATE_DATA_IN:
	case SRPT_STATE_CMD_RSP_SENT:
	case SRPT_STATE_MGMT_RSP_SENT:
		ioctx->state = SRPT_STATE_DONE;
		break;
	default:
		break;
	}
	spin_unlock_irqrestore(&ioctx->spinlock, flags);

	if (state == SRPT_STATE_DONE) {
		struct srpt_rdma_ch *ch = ioctx->ch;

		BUG_ON(ch->sess == NULL);

		target_put_sess_cmd(&ioctx->cmd);
		goto out;
	}

	pr_debug("Aborting cmd with state %d and tag %lld\n", state,
		 ioctx->cmd.tag);

	switch (state) {
	case SRPT_STATE_NEW:
	case SRPT_STATE_DATA_IN:
	case SRPT_STATE_MGMT:
		/*
		 * Do nothing - defer abort processing until
		 * srpt_queue_response() is invoked.
		 */
		WARN_ON(!transport_check_aborted_status(&ioctx->cmd, false));
		break;
	case SRPT_STATE_NEED_DATA:
		/* DMA_TO_DEVICE (write) - RDMA read error. */

		/* XXX(hch): this is a horrible layering violation.. */
		spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
		ioctx->cmd.transport_state &= ~CMD_T_ACTIVE;
		spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
		break;
	case SRPT_STATE_CMD_RSP_SENT:
		/*
		 * SRP_RSP sending failed or the SRP_RSP send completion has
		 * not been received in time.
		 */
		srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
		target_put_sess_cmd(&ioctx->cmd);
		break;
	case SRPT_STATE_MGMT_RSP_SENT:
		srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
		target_put_sess_cmd(&ioctx->cmd);
		break;
	default:
		WARN(1, "Unexpected command state (%d)", state);
		break;
	}

out:
	return state;
}

/**
 * XXX: what is now target_execute_cmd used to be asynchronous, and unmapping
 * the data that has been transferred via IB RDMA had to be postponed until the
 * check_stop_free() callback. None of this is necessary anymore and needs to
 * be cleaned up.
 */
static void srpt_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srpt_rdma_ch *ch = cq->cq_context;
	struct srpt_send_ioctx *ioctx =
		container_of(wc->wr_cqe, struct srpt_send_ioctx, rdma_cqe);

	WARN_ON(ioctx->n_rdma <= 0);
	atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		pr_info("RDMA_READ for ioctx 0x%p failed with status %d\n",
			ioctx, wc->status);
		srpt_abort_cmd(ioctx);
		return;
	}

	if (srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA,
					SRPT_STATE_DATA_IN))
		target_execute_cmd(&ioctx->cmd);
	else
		pr_err("%s[%d]: wrong state = %d\n", __func__,
		       __LINE__, srpt_get_cmd_state(ioctx));
}

static void srpt_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srpt_send_ioctx *ioctx =
		container_of(wc->wr_cqe, struct srpt_send_ioctx, rdma_cqe);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		pr_info("RDMA_WRITE for ioctx 0x%p failed with status %d\n",
			ioctx, wc->status);
		srpt_abort_cmd(ioctx);
	}
}

/**
 * srpt_build_cmd_rsp() - Build an SRP_RSP response.
 * @ch: RDMA channel through which the request has been received.
 * @ioctx: I/O context associated with the SRP_CMD request. The response will
 *   be built in the buffer ioctx->buf points at and hence this function will
 *   overwrite the request data.
 * @tag: tag of the request for which this response is being generated.
 * @status: value for the STATUS field of the SRP_RSP information unit.
 *
 * Returns the size in bytes of the SRP_RSP response.
 *
 * An SRP_RSP response contains a SCSI status or service response. See also
 * section 6.9 in the SRP r16a document for the format of an SRP_RSP
 * response. See also SPC-2 for more information about sense data.
 */
static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
			      struct srpt_send_ioctx *ioctx, u64 tag,
			      int status)
{
	struct srp_rsp *srp_rsp;
	const u8 *sense_data;
	int sense_data_len, max_sense_len;

	/*
	 * The lowest bit of all SAM-3 status codes is zero (see also
	 * paragraph 5.3 in SAM-3).
	 */
	WARN_ON(status & 1);

	srp_rsp = ioctx->ioctx.buf;
	BUG_ON(!srp_rsp);

	sense_data = ioctx->sense_data;
	sense_data_len = ioctx->cmd.scsi_sense_length;
	WARN_ON(sense_data_len > sizeof(ioctx->sense_data));

	memset(srp_rsp, 0, sizeof(*srp_rsp));
	srp_rsp->opcode = SRP_RSP;
	srp_rsp->req_lim_delta =
		cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
	srp_rsp->tag = tag;
	srp_rsp->status = status;

	if (sense_data_len) {
		BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp));
		max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp);
		if (sense_data_len > max_sense_len) {
			pr_warn("truncated sense data from %d to %d"
				" bytes\n", sense_data_len, max_sense_len);
			sense_data_len = max_sense_len;
		}

		srp_rsp->flags |= SRP_RSP_FLAG_SNSVALID;
		srp_rsp->sense_data_len = cpu_to_be32(sense_data_len);
		memcpy(srp_rsp + 1, sense_data, sense_data_len);
	}

	return sizeof(*srp_rsp) + sense_data_len;
}
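/*
 * Note on the layout built above: when sense data is present it is copied to
 * "srp_rsp + 1", i.e. directly behind the fixed-size struct srp_rsp header,
 * which is why the function returns sizeof(*srp_rsp) + sense_data_len.
 */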

/**
 * srpt_build_tskmgmt_rsp() - Build a task management response.
 * @ch:       RDMA channel through which the request has been received.
 * @ioctx:    I/O context in which the SRP_RSP response will be built.
 * @rsp_code: RSP_CODE that will be stored in the response.
 * @tag:      Tag of the request for which this response is being generated.
 *
 * Returns the size in bytes of the SRP_RSP response.
 *
 * An SRP_RSP response contains a SCSI status or service response. See also
 * section 6.9 in the SRP r16a document for the format of an SRP_RSP
 * response.
 */
static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
				  struct srpt_send_ioctx *ioctx,
				  u8 rsp_code, u64 tag)
{
	struct srp_rsp *srp_rsp;
	int resp_data_len;
	int resp_len;

	resp_data_len = 4;
	resp_len = sizeof(*srp_rsp) + resp_data_len;

	srp_rsp = ioctx->ioctx.buf;
	BUG_ON(!srp_rsp);
	memset(srp_rsp, 0, sizeof(*srp_rsp));

	srp_rsp->opcode = SRP_RSP;
	srp_rsp->req_lim_delta =
		cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
	srp_rsp->tag = tag;

	srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
	srp_rsp->resp_data_len = cpu_to_be32(resp_data_len);
	srp_rsp->data[3] = rsp_code;

	return resp_len;
}

static int srpt_check_stop_free(struct se_cmd *cmd)
{
	struct srpt_send_ioctx *ioctx = container_of(cmd,
				struct srpt_send_ioctx, cmd);

	return target_put_sess_cmd(&ioctx->cmd);
}

/**
 * srpt_handle_cmd() - Process SRP_CMD.
 */
static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
			   struct srpt_recv_ioctx *recv_ioctx,
			   struct srpt_send_ioctx *send_ioctx)
{
	struct se_cmd *cmd;
	struct srp_cmd *srp_cmd;
	u64 data_len;
	enum dma_data_direction dir;
	sense_reason_t ret;
	int rc;

	BUG_ON(!send_ioctx);

	srp_cmd = recv_ioctx->ioctx.buf;
	cmd = &send_ioctx->cmd;
	cmd->tag = srp_cmd->tag;

	switch (srp_cmd->task_attr) {
	case SRP_CMD_SIMPLE_Q:
		cmd->sam_task_attr = TCM_SIMPLE_TAG;
		break;
	case SRP_CMD_ORDERED_Q:
	default:
		cmd->sam_task_attr = TCM_ORDERED_TAG;
		break;
	case SRP_CMD_HEAD_OF_Q:
		cmd->sam_task_attr = TCM_HEAD_TAG;
		break;
	case SRP_CMD_ACA:
		cmd->sam_task_attr = TCM_ACA_TAG;
		break;
	}

	if (srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len)) {
		pr_err("0x%llx: parsing SRP descriptor table failed.\n",
		       srp_cmd->tag);
		ret = TCM_INVALID_CDB_FIELD;
		goto send_sense;
	}

	rc = target_submit_cmd(cmd, ch->sess, srp_cmd->cdb,
			       &send_ioctx->sense_data[0],
			       scsilun_to_int(&srp_cmd->lun), data_len,
			       TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF);
	if (rc != 0) {
		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		goto send_sense;
	}
	return 0;

send_sense:
	transport_send_check_condition_and_sense(cmd, ret, 0);
	return -1;
}
1549
Bart Van Asschea42d9852011-10-14 01:30:46 +00001550static int srp_tmr_to_tcm(int fn)
1551{
1552 switch (fn) {
1553 case SRP_TSK_ABORT_TASK:
1554 return TMR_ABORT_TASK;
1555 case SRP_TSK_ABORT_TASK_SET:
1556 return TMR_ABORT_TASK_SET;
1557 case SRP_TSK_CLEAR_TASK_SET:
1558 return TMR_CLEAR_TASK_SET;
1559 case SRP_TSK_LUN_RESET:
1560 return TMR_LUN_RESET;
1561 case SRP_TSK_CLEAR_ACA:
1562 return TMR_CLEAR_ACA;
1563 default:
1564 return -1;
1565 }
1566}
1567
1568/**
1569 * srpt_handle_tsk_mgmt() - Process an SRP_TSK_MGMT information unit.
1570 *
	1571 * Hands the task management request over to the target core; requests that cannot be submitted are rejected (TMR_FUNCTION_REJECTED).
1572 *
1573 * For more information about SRP_TSK_MGMT information units, see also section
1574 * 6.7 in the SRP r16a document.
1575 */
1576static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
1577 struct srpt_recv_ioctx *recv_ioctx,
1578 struct srpt_send_ioctx *send_ioctx)
1579{
1580 struct srp_tsk_mgmt *srp_tsk;
1581 struct se_cmd *cmd;
Nicholas Bellinger3e4f5742012-11-28 01:38:04 -08001582 struct se_session *sess = ch->sess;
Bart Van Asschea42d9852011-10-14 01:30:46 +00001583 int tcm_tmr;
Nicholas Bellinger3e4f5742012-11-28 01:38:04 -08001584 int rc;
Bart Van Asschea42d9852011-10-14 01:30:46 +00001585
1586 BUG_ON(!send_ioctx);
1587
1588 srp_tsk = recv_ioctx->ioctx.buf;
1589 cmd = &send_ioctx->cmd;
1590
1591 pr_debug("recv tsk_mgmt fn %d for task_tag %lld and cmd tag %lld"
1592 " cm_id %p sess %p\n", srp_tsk->tsk_mgmt_func,
1593 srp_tsk->task_tag, srp_tsk->tag, ch->cm_id, ch->sess);
1594
1595 srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
Bart Van Assche649ee052015-04-14 13:26:44 +02001596 send_ioctx->cmd.tag = srp_tsk->tag;
Bart Van Asschea42d9852011-10-14 01:30:46 +00001597 tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
Bart Van Asschee1dd4132016-02-11 11:05:19 -08001598 rc = target_submit_tmr(&send_ioctx->cmd, sess, NULL,
1599 scsilun_to_int(&srp_tsk->lun), srp_tsk, tcm_tmr,
1600 GFP_KERNEL, srp_tsk->task_tag,
1601 TARGET_SCF_ACK_KREF);
Nicholas Bellinger3e4f5742012-11-28 01:38:04 -08001602 if (rc != 0) {
Bart Van Asschea42d9852011-10-14 01:30:46 +00001603 send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
Christoph Hellwigde103c92012-11-06 12:24:09 -08001604 goto fail;
Bart Van Asschea42d9852011-10-14 01:30:46 +00001605 }
Christoph Hellwigde103c92012-11-06 12:24:09 -08001606 return;
1607fail:
Christoph Hellwigde103c92012-11-06 12:24:09 -08001608 transport_send_check_condition_and_sense(cmd, 0, 0); // XXX:
Bart Van Asschea42d9852011-10-14 01:30:46 +00001609}
1610
1611/**
1612 * srpt_handle_new_iu() - Process a newly received information unit.
1613 * @ch: RDMA channel through which the information unit has been received.
1614 * @ioctx: SRPT I/O context associated with the information unit.
1615 */
1616static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
1617 struct srpt_recv_ioctx *recv_ioctx,
1618 struct srpt_send_ioctx *send_ioctx)
1619{
1620 struct srp_cmd *srp_cmd;
Bart Van Asschea42d9852011-10-14 01:30:46 +00001621
1622 BUG_ON(!ch);
1623 BUG_ON(!recv_ioctx);
1624
1625 ib_dma_sync_single_for_cpu(ch->sport->sdev->device,
1626 recv_ioctx->ioctx.dma, srp_max_req_size,
1627 DMA_FROM_DEVICE);
1628
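	/*
	 * Defer processing of information units received while the channel is
	 * still connecting; drop anything received once the channel is no
	 * longer live.
	 */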
Bart Van Assche33912d72016-02-11 11:04:43 -08001629 if (unlikely(ch->state == CH_CONNECTING)) {
Bart Van Asschea42d9852011-10-14 01:30:46 +00001630 list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
1631 goto out;
1632 }
1633
Bart Van Assche33912d72016-02-11 11:04:43 -08001634 if (unlikely(ch->state != CH_LIVE))
Bart Van Asschea42d9852011-10-14 01:30:46 +00001635 goto out;
1636
1637 srp_cmd = recv_ioctx->ioctx.buf;
1638 if (srp_cmd->opcode == SRP_CMD || srp_cmd->opcode == SRP_TSK_MGMT) {
1639 if (!send_ioctx)
1640 send_ioctx = srpt_get_send_ioctx(ch);
1641 if (unlikely(!send_ioctx)) {
1642 list_add_tail(&recv_ioctx->wait_list,
1643 &ch->cmd_wait_list);
1644 goto out;
1645 }
1646 }
1647
Bart Van Asschea42d9852011-10-14 01:30:46 +00001648 switch (srp_cmd->opcode) {
1649 case SRP_CMD:
1650 srpt_handle_cmd(ch, recv_ioctx, send_ioctx);
1651 break;
1652 case SRP_TSK_MGMT:
1653 srpt_handle_tsk_mgmt(ch, recv_ioctx, send_ioctx);
1654 break;
1655 case SRP_I_LOGOUT:
Doug Ledford9f5d32a2014-10-20 18:25:15 -04001656 pr_err("Not yet implemented: SRP_I_LOGOUT\n");
Bart Van Asschea42d9852011-10-14 01:30:46 +00001657 break;
1658 case SRP_CRED_RSP:
1659 pr_debug("received SRP_CRED_RSP\n");
1660 break;
1661 case SRP_AER_RSP:
1662 pr_debug("received SRP_AER_RSP\n");
1663 break;
1664 case SRP_RSP:
Doug Ledford9f5d32a2014-10-20 18:25:15 -04001665 pr_err("Received SRP_RSP\n");
Bart Van Asschea42d9852011-10-14 01:30:46 +00001666 break;
1667 default:
Doug Ledford9f5d32a2014-10-20 18:25:15 -04001668 pr_err("received IU with unknown opcode 0x%x\n",
Bart Van Asschea42d9852011-10-14 01:30:46 +00001669 srp_cmd->opcode);
1670 break;
1671 }
1672
1673 srpt_post_recv(ch->sport->sdev, recv_ioctx);
1674out:
1675 return;
1676}
1677
Christoph Hellwig59fae4d2015-09-29 13:00:44 +02001678static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc)
Bart Van Asschea42d9852011-10-14 01:30:46 +00001679{
Christoph Hellwig59fae4d2015-09-29 13:00:44 +02001680 struct srpt_rdma_ch *ch = cq->cq_context;
1681 struct srpt_recv_ioctx *ioctx =
1682 container_of(wc->wr_cqe, struct srpt_recv_ioctx, ioctx.cqe);
Bart Van Asschea42d9852011-10-14 01:30:46 +00001683
Bart Van Asschea42d9852011-10-14 01:30:46 +00001684 if (wc->status == IB_WC_SUCCESS) {
1685 int req_lim;
1686
1687 req_lim = atomic_dec_return(&ch->req_lim);
1688 if (unlikely(req_lim < 0))
Doug Ledford9f5d32a2014-10-20 18:25:15 -04001689 pr_err("req_lim = %d < 0\n", req_lim);
Bart Van Asschea42d9852011-10-14 01:30:46 +00001690 srpt_handle_new_iu(ch, ioctx, NULL);
1691 } else {
Christoph Hellwig59fae4d2015-09-29 13:00:44 +02001692 pr_info("receiving failed for ioctx %p with status %d\n",
1693 ioctx, wc->status);
Bart Van Asschea42d9852011-10-14 01:30:46 +00001694 }
1695}
1696
1697/**
Bart Van Asschea42d9852011-10-14 01:30:46 +00001698 * Note: Although this has not yet been observed during tests, at least in
1699 * theory it is possible that the srpt_get_send_ioctx() call invoked by
1700 * srpt_handle_new_iu() fails. This is possible because the req_lim_delta
1701 * value in each response is set to one, and it is possible that this response
1702 * makes the initiator send a new request before the send completion for that
1703 * response has been processed. This could e.g. happen if the call to
	1704 * srpt_put_send_ioctx() is delayed because of a higher priority interrupt or
1705 * if IB retransmission causes generation of the send completion to be
1706 * delayed. Incoming information units for which srpt_get_send_ioctx() fails
1707 * are queued on cmd_wait_list. The code below processes these delayed
1708 * requests one at a time.
1709 */
Christoph Hellwig59fae4d2015-09-29 13:00:44 +02001710static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc)
Bart Van Asschea42d9852011-10-14 01:30:46 +00001711{
Christoph Hellwig59fae4d2015-09-29 13:00:44 +02001712 struct srpt_rdma_ch *ch = cq->cq_context;
1713 struct srpt_send_ioctx *ioctx =
1714 container_of(wc->wr_cqe, struct srpt_send_ioctx, ioctx.cqe);
1715 enum srpt_command_state state;
Bart Van Asschea42d9852011-10-14 01:30:46 +00001716
Christoph Hellwig59fae4d2015-09-29 13:00:44 +02001717 state = srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
1718
1719 WARN_ON(state != SRPT_STATE_CMD_RSP_SENT &&
1720 state != SRPT_STATE_MGMT_RSP_SENT);
1721
1722 atomic_inc(&ch->sq_wr_avail);
1723
1724 if (wc->status != IB_WC_SUCCESS) {
1725 pr_info("sending response for ioctx 0x%p failed"
1726 " with status %d\n", ioctx, wc->status);
1727
1728 atomic_dec(&ch->req_lim);
1729 srpt_abort_cmd(ioctx);
1730 goto out;
Bart Van Asschea42d9852011-10-14 01:30:46 +00001731 }
1732
Christoph Hellwig59fae4d2015-09-29 13:00:44 +02001733 if (state != SRPT_STATE_DONE) {
1734 srpt_unmap_sg_to_ib_sge(ch, ioctx);
1735 transport_generic_free_cmd(&ioctx->cmd, 0);
1736 } else {
1737 pr_err("IB completion has been received too late for"
1738 " wr_id = %u.\n", ioctx->ioctx.index);
1739 }
1740
1741out:
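	/*
	 * Process information units that were queued on cmd_wait_list because
	 * the channel was still connecting or no send ioctx was available.
	 */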
1742 while (!list_empty(&ch->cmd_wait_list) &&
Bart Van Assche33912d72016-02-11 11:04:43 -08001743 ch->state == CH_LIVE &&
Christoph Hellwig59fae4d2015-09-29 13:00:44 +02001744 (ioctx = srpt_get_send_ioctx(ch)) != NULL) {
Bart Van Asschea42d9852011-10-14 01:30:46 +00001745 struct srpt_recv_ioctx *recv_ioctx;
1746
1747 recv_ioctx = list_first_entry(&ch->cmd_wait_list,
1748 struct srpt_recv_ioctx,
1749 wait_list);
1750 list_del(&recv_ioctx->wait_list);
Christoph Hellwig59fae4d2015-09-29 13:00:44 +02001751 srpt_handle_new_iu(ch, recv_ioctx, ioctx);
Bart Van Asschea42d9852011-10-14 01:30:46 +00001752 }
1753}
1754
Bart Van Asschea42d9852011-10-14 01:30:46 +00001755/**
	1756 * srpt_create_ch_ib() - Create an RDMA channel's completion queue and queue pair.
1757 */
1758static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
1759{
1760 struct ib_qp_init_attr *qp_init;
1761 struct srpt_port *sport = ch->sport;
1762 struct srpt_device *sdev = sport->sdev;
1763 u32 srp_sq_size = sport->port_attrib.srp_sq_size;
1764 int ret;
1765
1766 WARN_ON(ch->rq_size < 1);
1767
1768 ret = -ENOMEM;
Bart Van Assche9d2aa2b2016-02-11 11:03:31 -08001769 qp_init = kzalloc(sizeof(*qp_init), GFP_KERNEL);
Bart Van Asschea42d9852011-10-14 01:30:46 +00001770 if (!qp_init)
1771 goto out;
1772
Bart Van Asscheab477c12014-10-19 18:05:33 +03001773retry:
Christoph Hellwig59fae4d2015-09-29 13:00:44 +02001774 ch->cq = ib_alloc_cq(sdev->device, ch, ch->rq_size + srp_sq_size,
1775 0 /* XXX: spread CQs */, IB_POLL_WORKQUEUE);
Bart Van Asschea42d9852011-10-14 01:30:46 +00001776 if (IS_ERR(ch->cq)) {
1777 ret = PTR_ERR(ch->cq);
Doug Ledford9f5d32a2014-10-20 18:25:15 -04001778 pr_err("failed to create CQ cqe= %d ret= %d\n",
Bart Van Asschea42d9852011-10-14 01:30:46 +00001779 ch->rq_size + srp_sq_size, ret);
1780 goto out;
1781 }
1782
1783 qp_init->qp_context = (void *)ch;
1784 qp_init->event_handler
1785 = (void(*)(struct ib_event *, void*))srpt_qp_event;
1786 qp_init->send_cq = ch->cq;
1787 qp_init->recv_cq = ch->cq;
1788 qp_init->srq = sdev->srq;
1789 qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
1790 qp_init->qp_type = IB_QPT_RC;
1791 qp_init->cap.max_send_wr = srp_sq_size;
1792 qp_init->cap.max_send_sge = SRPT_DEF_SG_PER_WQE;
1793
1794 ch->qp = ib_create_qp(sdev->pd, qp_init);
1795 if (IS_ERR(ch->qp)) {
1796 ret = PTR_ERR(ch->qp);
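		/* Retry with a smaller send queue if the HCA cannot allocate a QP this large. */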
Bart Van Asscheab477c12014-10-19 18:05:33 +03001797 if (ret == -ENOMEM) {
1798 srp_sq_size /= 2;
1799 if (srp_sq_size >= MIN_SRPT_SQ_SIZE) {
1800 ib_destroy_cq(ch->cq);
1801 goto retry;
1802 }
1803 }
Doug Ledford9f5d32a2014-10-20 18:25:15 -04001804 pr_err("failed to create_qp ret= %d\n", ret);
Bart Van Asschea42d9852011-10-14 01:30:46 +00001805 goto err_destroy_cq;
1806 }
1807
1808 atomic_set(&ch->sq_wr_avail, qp_init->cap.max_send_wr);
1809
1810 pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
1811 __func__, ch->cq->cqe, qp_init->cap.max_send_sge,
1812 qp_init->cap.max_send_wr, ch->cm_id);
1813
1814 ret = srpt_init_ch_qp(ch, ch->qp);
1815 if (ret)
1816 goto err_destroy_qp;
1817
Bart Van Asschea42d9852011-10-14 01:30:46 +00001818out:
1819 kfree(qp_init);
1820 return ret;
1821
1822err_destroy_qp:
1823 ib_destroy_qp(ch->qp);
1824err_destroy_cq:
Christoph Hellwig59fae4d2015-09-29 13:00:44 +02001825 ib_free_cq(ch->cq);
Bart Van Asschea42d9852011-10-14 01:30:46 +00001826 goto out;
1827}
1828
1829static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch)
1830{
Bart Van Asschea42d9852011-10-14 01:30:46 +00001831 ib_destroy_qp(ch->qp);
Christoph Hellwig59fae4d2015-09-29 13:00:44 +02001832 ib_free_cq(ch->cq);
Bart Van Asschea42d9852011-10-14 01:30:46 +00001833}
1834
1835/**
1836 * __srpt_close_ch() - Close an RDMA channel by setting the QP error state.
1837 *
1838 * Reset the QP and make sure all resources associated with the channel will
1839 * be deallocated at an appropriate time.
1840 *
1841 * Note: The caller must hold ch->sport->sdev->spinlock.
1842 */
1843static void __srpt_close_ch(struct srpt_rdma_ch *ch)
1844{
Bart Van Asschea42d9852011-10-14 01:30:46 +00001845 enum rdma_ch_state prev_state;
1846 unsigned long flags;
1847
Bart Van Asschea42d9852011-10-14 01:30:46 +00001848 spin_lock_irqsave(&ch->spinlock, flags);
1849 prev_state = ch->state;
1850 switch (prev_state) {
1851 case CH_CONNECTING:
1852 case CH_LIVE:
1853 ch->state = CH_DISCONNECTING;
1854 break;
1855 default:
1856 break;
1857 }
1858 spin_unlock_irqrestore(&ch->spinlock, flags);
1859
1860 switch (prev_state) {
1861 case CH_CONNECTING:
1862 ib_send_cm_rej(ch->cm_id, IB_CM_REJ_NO_RESOURCES, NULL, 0,
1863 NULL, 0);
1864 /* fall through */
1865 case CH_LIVE:
1866 if (ib_send_cm_dreq(ch->cm_id, NULL, 0) < 0)
Doug Ledford9f5d32a2014-10-20 18:25:15 -04001867 pr_err("sending CM DREQ failed.\n");
Bart Van Asschea42d9852011-10-14 01:30:46 +00001868 break;
1869 case CH_DISCONNECTING:
1870 break;
1871 case CH_DRAINING:
1872 case CH_RELEASING:
1873 break;
1874 }
1875}
1876
1877/**
1878 * srpt_close_ch() - Close an RDMA channel.
1879 */
1880static void srpt_close_ch(struct srpt_rdma_ch *ch)
1881{
1882 struct srpt_device *sdev;
1883
1884 sdev = ch->sport->sdev;
1885 spin_lock_irq(&sdev->spinlock);
1886 __srpt_close_ch(ch);
1887 spin_unlock_irq(&sdev->spinlock);
1888}
1889
1890/**
Nicholas Bellinger1d19f782013-05-15 01:30:01 -07001891 * srpt_shutdown_session() - Whether or not a session may be shut down.
1892 */
1893static int srpt_shutdown_session(struct se_session *se_sess)
1894{
1895 struct srpt_rdma_ch *ch = se_sess->fabric_sess_ptr;
1896 unsigned long flags;
1897
1898 spin_lock_irqsave(&ch->spinlock, flags);
1899 if (ch->in_shutdown) {
1900 spin_unlock_irqrestore(&ch->spinlock, flags);
1901 return true;
1902 }
1903
1904 ch->in_shutdown = true;
1905 target_sess_cmd_list_set_waiting(se_sess);
1906 spin_unlock_irqrestore(&ch->spinlock, flags);
1907
1908 return true;
1909}
1910
1911/**
Bart Van Asschea42d9852011-10-14 01:30:46 +00001912 * srpt_drain_channel() - Drain a channel by resetting the IB queue pair.
1913 * @cm_id: Pointer to the CM ID of the channel to be drained.
1914 *
1915 * Note: Must be called from inside srpt_cm_handler to avoid a race between
1916 * accessing sdev->spinlock and the call to kfree(sdev) in srpt_remove_one()
1917 * (the caller of srpt_cm_handler holds the cm_id spinlock; srpt_remove_one()
1918 * waits until all target sessions for the associated IB device have been
	1919 * unregistered and target session unregistration involves a call to
1920 * ib_destroy_cm_id(), which locks the cm_id spinlock and hence waits until
1921 * this function has finished).
1922 */
1923static void srpt_drain_channel(struct ib_cm_id *cm_id)
1924{
1925 struct srpt_device *sdev;
1926 struct srpt_rdma_ch *ch;
1927 int ret;
1928 bool do_reset = false;
1929
1930 WARN_ON_ONCE(irqs_disabled());
1931
1932 sdev = cm_id->context;
1933 BUG_ON(!sdev);
1934 spin_lock_irq(&sdev->spinlock);
1935 list_for_each_entry(ch, &sdev->rch_list, list) {
1936 if (ch->cm_id == cm_id) {
Bart Van Asschef130c222016-02-11 11:05:38 -08001937 do_reset = srpt_set_ch_state(ch, CH_DRAINING);
Bart Van Asschea42d9852011-10-14 01:30:46 +00001938 break;
1939 }
1940 }
1941 spin_unlock_irq(&sdev->spinlock);
1942
1943 if (do_reset) {
Nicholas Bellinger1d19f782013-05-15 01:30:01 -07001944 if (ch->sess)
1945 srpt_shutdown_session(ch->sess);
1946
Bart Van Asschea42d9852011-10-14 01:30:46 +00001947 ret = srpt_ch_qp_err(ch);
1948 if (ret < 0)
Doug Ledford9f5d32a2014-10-20 18:25:15 -04001949 pr_err("Setting queue pair in error state"
Bart Van Asschea42d9852011-10-14 01:30:46 +00001950 " failed: %d\n", ret);
1951 }
1952}
1953
1954/**
1955 * srpt_find_channel() - Look up an RDMA channel.
1956 * @cm_id: Pointer to the CM ID of the channel to be looked up.
1957 *
1958 * Return NULL if no matching RDMA channel has been found.
1959 */
1960static struct srpt_rdma_ch *srpt_find_channel(struct srpt_device *sdev,
1961 struct ib_cm_id *cm_id)
1962{
1963 struct srpt_rdma_ch *ch;
1964 bool found;
1965
1966 WARN_ON_ONCE(irqs_disabled());
1967 BUG_ON(!sdev);
1968
1969 found = false;
1970 spin_lock_irq(&sdev->spinlock);
1971 list_for_each_entry(ch, &sdev->rch_list, list) {
1972 if (ch->cm_id == cm_id) {
1973 found = true;
1974 break;
1975 }
1976 }
1977 spin_unlock_irq(&sdev->spinlock);
1978
1979 return found ? ch : NULL;
1980}
1981
1982/**
1983 * srpt_release_channel() - Release channel resources.
1984 *
1985 * Schedules the actual release because:
	1986 * - Calling ib_destroy_cm_id() from inside an IB CM callback would
1987 * trigger a deadlock.
1988 * - It is not safe to call TCM transport_* functions from interrupt context.
1989 */
1990static void srpt_release_channel(struct srpt_rdma_ch *ch)
1991{
1992 schedule_work(&ch->release_work);
1993}
1994
1995static void srpt_release_channel_work(struct work_struct *w)
1996{
1997 struct srpt_rdma_ch *ch;
1998 struct srpt_device *sdev;
Nicholas Bellinger9474b042012-11-27 23:55:57 -08001999 struct se_session *se_sess;
Bart Van Asschea42d9852011-10-14 01:30:46 +00002000
2001 ch = container_of(w, struct srpt_rdma_ch, release_work);
2002 pr_debug("ch = %p; ch->sess = %p; release_done = %p\n", ch, ch->sess,
2003 ch->release_done);
2004
2005 sdev = ch->sport->sdev;
2006 BUG_ON(!sdev);
2007
Nicholas Bellinger9474b042012-11-27 23:55:57 -08002008 se_sess = ch->sess;
2009 BUG_ON(!se_sess);
2010
Joern Engelbe646c2d2013-05-15 00:44:07 -07002011 target_wait_for_sess_cmds(se_sess);
Nicholas Bellinger9474b042012-11-27 23:55:57 -08002012
2013 transport_deregister_session_configfs(se_sess);
2014 transport_deregister_session(se_sess);
Bart Van Asschea42d9852011-10-14 01:30:46 +00002015 ch->sess = NULL;
2016
Nicholas Bellinger0b41d6c2013-09-18 12:48:27 -07002017 ib_destroy_cm_id(ch->cm_id);
2018
Bart Van Asschea42d9852011-10-14 01:30:46 +00002019 srpt_destroy_ch_ib(ch);
2020
2021 srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
2022 ch->sport->sdev, ch->rq_size,
2023 ch->rsp_size, DMA_TO_DEVICE);
2024
2025 spin_lock_irq(&sdev->spinlock);
2026 list_del(&ch->list);
2027 spin_unlock_irq(&sdev->spinlock);
2028
Bart Van Asschea42d9852011-10-14 01:30:46 +00002029 if (ch->release_done)
2030 complete(ch->release_done);
2031
2032 wake_up(&sdev->ch_releaseQ);
2033
2034 kfree(ch);
2035}
2036
Bart Van Asschea42d9852011-10-14 01:30:46 +00002037/**
2038 * srpt_cm_req_recv() - Process the event IB_CM_REQ_RECEIVED.
2039 *
2040 * Ownership of the cm_id is transferred to the target session if this
	2041 * function returns zero. Otherwise the caller remains the owner of the cm_id.
2042 */
2043static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
2044 struct ib_cm_req_event_param *param,
2045 void *private_data)
2046{
2047 struct srpt_device *sdev = cm_id->context;
2048 struct srpt_port *sport = &sdev->port[param->port - 1];
2049 struct srp_login_req *req;
2050 struct srp_login_rsp *rsp;
2051 struct srp_login_rej *rej;
2052 struct ib_cm_rep_param *rep_param;
2053 struct srpt_rdma_ch *ch, *tmp_ch;
Nicholas Bellingerf246c942016-01-07 22:19:21 -08002054 struct se_node_acl *se_acl;
Bart Van Asschea42d9852011-10-14 01:30:46 +00002055 u32 it_iu_len;
Nicholas Bellingerf246c942016-01-07 22:19:21 -08002056 int i, ret = 0;
2057 unsigned char *p;
Bart Van Asschea42d9852011-10-14 01:30:46 +00002058
2059 WARN_ON_ONCE(irqs_disabled());
2060
2061 if (WARN_ON(!sdev || !private_data))
2062 return -EINVAL;
2063
2064 req = (struct srp_login_req *)private_data;
2065
2066 it_iu_len = be32_to_cpu(req->req_it_iu_len);
2067
Doug Ledford9f5d32a2014-10-20 18:25:15 -04002068 pr_info("Received SRP_LOGIN_REQ with i_port_id 0x%llx:0x%llx,"
2069 " t_port_id 0x%llx:0x%llx and it_iu_len %d on port %d"
2070 " (guid=0x%llx:0x%llx)\n",
2071 be64_to_cpu(*(__be64 *)&req->initiator_port_id[0]),
2072 be64_to_cpu(*(__be64 *)&req->initiator_port_id[8]),
2073 be64_to_cpu(*(__be64 *)&req->target_port_id[0]),
2074 be64_to_cpu(*(__be64 *)&req->target_port_id[8]),
2075 it_iu_len,
2076 param->port,
2077 be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[0]),
2078 be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[8]));
Bart Van Asschea42d9852011-10-14 01:30:46 +00002079
Bart Van Assche9d2aa2b2016-02-11 11:03:31 -08002080 rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
2081 rej = kzalloc(sizeof(*rej), GFP_KERNEL);
2082 rep_param = kzalloc(sizeof(*rep_param), GFP_KERNEL);
Bart Van Asschea42d9852011-10-14 01:30:46 +00002083
2084 if (!rsp || !rej || !rep_param) {
2085 ret = -ENOMEM;
2086 goto out;
2087 }
2088
2089 if (it_iu_len > srp_max_req_size || it_iu_len < 64) {
Vaishali Thakkarb356c1c2015-06-24 10:12:13 +05302090 rej->reason = cpu_to_be32(
2091 SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
Bart Van Asschea42d9852011-10-14 01:30:46 +00002092 ret = -EINVAL;
Doug Ledford9f5d32a2014-10-20 18:25:15 -04002093 pr_err("rejected SRP_LOGIN_REQ because its"
Bart Van Asschea42d9852011-10-14 01:30:46 +00002094 " length (%d bytes) is out of range (%d .. %d)\n",
2095 it_iu_len, 64, srp_max_req_size);
2096 goto reject;
2097 }
2098
2099 if (!sport->enabled) {
Vaishali Thakkarb356c1c2015-06-24 10:12:13 +05302100 rej->reason = cpu_to_be32(
2101 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
Bart Van Asschea42d9852011-10-14 01:30:46 +00002102 ret = -EINVAL;
Doug Ledford9f5d32a2014-10-20 18:25:15 -04002103 pr_err("rejected SRP_LOGIN_REQ because the target port"
Bart Van Asschea42d9852011-10-14 01:30:46 +00002104 " has not yet been enabled\n");
2105 goto reject;
2106 }
2107
2108 if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
2109 rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_NO_CHAN;
2110
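		/*
		 * A login with SRP_MULTICHAN_SINGLE terminates any existing
		 * channel between the same initiator and target ports before
		 * the new channel is accepted.
		 */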
2111 spin_lock_irq(&sdev->spinlock);
2112
2113 list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list) {
2114 if (!memcmp(ch->i_port_id, req->initiator_port_id, 16)
2115 && !memcmp(ch->t_port_id, req->target_port_id, 16)
2116 && param->port == ch->sport->port
2117 && param->listen_id == ch->sport->sdev->cm_id
2118 && ch->cm_id) {
Bart Van Assche33912d72016-02-11 11:04:43 -08002119 if (ch->state != CH_CONNECTING
2120 && ch->state != CH_LIVE)
Bart Van Asschea42d9852011-10-14 01:30:46 +00002121 continue;
2122
2123 /* found an existing channel */
2124 pr_debug("Found existing channel %s"
2125 " cm_id= %p state= %d\n",
Bart Van Assche33912d72016-02-11 11:04:43 -08002126 ch->sess_name, ch->cm_id, ch->state);
Bart Van Asschea42d9852011-10-14 01:30:46 +00002127
2128 __srpt_close_ch(ch);
2129
2130 rsp->rsp_flags =
2131 SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
2132 }
2133 }
2134
2135 spin_unlock_irq(&sdev->spinlock);
2136
2137 } else
2138 rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
2139
2140 if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid)
2141 || *(__be64 *)(req->target_port_id + 8) !=
2142 cpu_to_be64(srpt_service_guid)) {
Vaishali Thakkarb356c1c2015-06-24 10:12:13 +05302143 rej->reason = cpu_to_be32(
2144 SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
Bart Van Asschea42d9852011-10-14 01:30:46 +00002145 ret = -ENOMEM;
Doug Ledford9f5d32a2014-10-20 18:25:15 -04002146 pr_err("rejected SRP_LOGIN_REQ because it"
Bart Van Asschea42d9852011-10-14 01:30:46 +00002147 " has an invalid target port identifier.\n");
2148 goto reject;
2149 }
2150
Bart Van Assche9d2aa2b2016-02-11 11:03:31 -08002151 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
Bart Van Asschea42d9852011-10-14 01:30:46 +00002152 if (!ch) {
Vaishali Thakkarb356c1c2015-06-24 10:12:13 +05302153 rej->reason = cpu_to_be32(
2154 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
Doug Ledford9f5d32a2014-10-20 18:25:15 -04002155 pr_err("rejected SRP_LOGIN_REQ because no memory.\n");
Bart Van Asschea42d9852011-10-14 01:30:46 +00002156 ret = -ENOMEM;
2157 goto reject;
2158 }
2159
2160 INIT_WORK(&ch->release_work, srpt_release_channel_work);
2161 memcpy(ch->i_port_id, req->initiator_port_id, 16);
2162 memcpy(ch->t_port_id, req->target_port_id, 16);
2163 ch->sport = &sdev->port[param->port - 1];
2164 ch->cm_id = cm_id;
2165 /*
2166 * Avoid QUEUE_FULL conditions by limiting the number of buffers used
2167 * for the SRP protocol to the command queue size.
2168 */
2169 ch->rq_size = SRPT_RQ_SIZE;
2170 spin_lock_init(&ch->spinlock);
2171 ch->state = CH_CONNECTING;
2172 INIT_LIST_HEAD(&ch->cmd_wait_list);
2173 ch->rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
2174
2175 ch->ioctx_ring = (struct srpt_send_ioctx **)
2176 srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
2177 sizeof(*ch->ioctx_ring[0]),
2178 ch->rsp_size, DMA_TO_DEVICE);
2179 if (!ch->ioctx_ring)
2180 goto free_ch;
2181
2182 INIT_LIST_HEAD(&ch->free_list);
2183 for (i = 0; i < ch->rq_size; i++) {
2184 ch->ioctx_ring[i]->ch = ch;
2185 list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list);
2186 }
2187
2188 ret = srpt_create_ch_ib(ch);
2189 if (ret) {
Vaishali Thakkarb356c1c2015-06-24 10:12:13 +05302190 rej->reason = cpu_to_be32(
2191 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
Doug Ledford9f5d32a2014-10-20 18:25:15 -04002192 pr_err("rejected SRP_LOGIN_REQ because creating"
Bart Van Asschea42d9852011-10-14 01:30:46 +00002193 " a new RDMA channel failed.\n");
2194 goto free_ring;
2195 }
2196
2197 ret = srpt_ch_qp_rtr(ch, ch->qp);
2198 if (ret) {
Vaishali Thakkarb356c1c2015-06-24 10:12:13 +05302199 rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
Doug Ledford9f5d32a2014-10-20 18:25:15 -04002200 pr_err("rejected SRP_LOGIN_REQ because enabling"
Bart Van Asschea42d9852011-10-14 01:30:46 +00002201 " RTR failed (error code = %d)\n", ret);
2202 goto destroy_ib;
2203 }
Nicholas Bellingerf246c942016-01-07 22:19:21 -08002204
Bart Van Asschea42d9852011-10-14 01:30:46 +00002205 /*
Nicholas Bellingerf246c942016-01-07 22:19:21 -08002206	 * Use the initiator port identifier as the session name. When checking
	2207	 * against se_node_acl->initiatorname[] it may appear with or without a
	2208	 * leading '0x'.
Bart Van Asschea42d9852011-10-14 01:30:46 +00002209 */
2210 snprintf(ch->sess_name, sizeof(ch->sess_name), "0x%016llx%016llx",
2211 be64_to_cpu(*(__be64 *)ch->i_port_id),
2212 be64_to_cpu(*(__be64 *)(ch->i_port_id + 8)));
2213
2214 pr_debug("registering session %s\n", ch->sess_name);
Nicholas Bellingerf246c942016-01-07 22:19:21 -08002215 p = &ch->sess_name[0];
Bart Van Asschea42d9852011-10-14 01:30:46 +00002216
Nicholas Bellingere70beee2014-04-02 12:52:38 -07002217 ch->sess = transport_init_session(TARGET_PROT_NORMAL);
Dan Carpenter3af33632011-11-04 21:27:32 +03002218 if (IS_ERR(ch->sess)) {
Vaishali Thakkarb356c1c2015-06-24 10:12:13 +05302219 rej->reason = cpu_to_be32(
Nicholas Bellingerf246c942016-01-07 22:19:21 -08002220 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
Bart Van Asschea42d9852011-10-14 01:30:46 +00002221 pr_debug("Failed to create session\n");
Nicholas Bellingerf246c942016-01-07 22:19:21 -08002222 goto destroy_ib;
Bart Van Asschea42d9852011-10-14 01:30:46 +00002223 }
Nicholas Bellingerf246c942016-01-07 22:19:21 -08002224
2225try_again:
2226 se_acl = core_tpg_get_initiator_node_acl(&sport->port_tpg_1, p);
2227 if (!se_acl) {
2228 pr_info("Rejected login because no ACL has been"
2229 " configured yet for initiator %s.\n", ch->sess_name);
2230 /*
	2231		 * XXX: Hack to retry the ACL lookup for ch->i_port_id without the leading '0x'.
2232 */
2233 if (p == &ch->sess_name[0]) {
2234 p += 2;
2235 goto try_again;
2236 }
2237 rej->reason = cpu_to_be32(
2238 SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
2239 transport_free_session(ch->sess);
2240 goto destroy_ib;
2241 }
2242 ch->sess->se_node_acl = se_acl;
2243
2244 transport_register_session(&sport->port_tpg_1, se_acl, ch->sess, ch);
Bart Van Asschea42d9852011-10-14 01:30:46 +00002245
2246 pr_debug("Establish connection sess=%p name=%s cm_id=%p\n", ch->sess,
2247 ch->sess_name, ch->cm_id);
2248
2249 /* create srp_login_response */
2250 rsp->opcode = SRP_LOGIN_RSP;
2251 rsp->tag = req->tag;
2252 rsp->max_it_iu_len = req->req_it_iu_len;
2253 rsp->max_ti_iu_len = req->req_it_iu_len;
2254 ch->max_ti_iu_len = it_iu_len;
Vaishali Thakkarb356c1c2015-06-24 10:12:13 +05302255 rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT
2256 | SRP_BUF_FORMAT_INDIRECT);
Bart Van Asschea42d9852011-10-14 01:30:46 +00002257 rsp->req_lim_delta = cpu_to_be32(ch->rq_size);
2258 atomic_set(&ch->req_lim, ch->rq_size);
2259 atomic_set(&ch->req_lim_delta, 0);
2260
2261 /* create cm reply */
2262 rep_param->qp_num = ch->qp->qp_num;
2263 rep_param->private_data = (void *)rsp;
Bart Van Assche9d2aa2b2016-02-11 11:03:31 -08002264 rep_param->private_data_len = sizeof(*rsp);
Bart Van Asschea42d9852011-10-14 01:30:46 +00002265 rep_param->rnr_retry_count = 7;
2266 rep_param->flow_control = 1;
2267 rep_param->failover_accepted = 0;
2268 rep_param->srq = 1;
2269 rep_param->responder_resources = 4;
2270 rep_param->initiator_depth = 4;
2271
2272 ret = ib_send_cm_rep(cm_id, rep_param);
2273 if (ret) {
Doug Ledford9f5d32a2014-10-20 18:25:15 -04002274 pr_err("sending SRP_LOGIN_REQ response failed"
Bart Van Asschea42d9852011-10-14 01:30:46 +00002275 " (error code = %d)\n", ret);
2276 goto release_channel;
2277 }
2278
2279 spin_lock_irq(&sdev->spinlock);
2280 list_add_tail(&ch->list, &sdev->rch_list);
2281 spin_unlock_irq(&sdev->spinlock);
2282
2283 goto out;
2284
2285release_channel:
2286 srpt_set_ch_state(ch, CH_RELEASING);
2287 transport_deregister_session_configfs(ch->sess);
Bart Van Asschea42d9852011-10-14 01:30:46 +00002288 transport_deregister_session(ch->sess);
2289 ch->sess = NULL;
2290
2291destroy_ib:
2292 srpt_destroy_ch_ib(ch);
2293
2294free_ring:
2295 srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
2296 ch->sport->sdev, ch->rq_size,
2297 ch->rsp_size, DMA_TO_DEVICE);
2298free_ch:
2299 kfree(ch);
2300
2301reject:
2302 rej->opcode = SRP_LOGIN_REJ;
2303 rej->tag = req->tag;
Vaishali Thakkarb356c1c2015-06-24 10:12:13 +05302304 rej->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT
2305 | SRP_BUF_FORMAT_INDIRECT);
Bart Van Asschea42d9852011-10-14 01:30:46 +00002306
2307 ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
Bart Van Assche9d2aa2b2016-02-11 11:03:31 -08002308 (void *)rej, sizeof(*rej));
Bart Van Asschea42d9852011-10-14 01:30:46 +00002309
2310out:
2311 kfree(rep_param);
2312 kfree(rsp);
2313 kfree(rej);
2314
2315 return ret;
2316}
2317
2318static void srpt_cm_rej_recv(struct ib_cm_id *cm_id)
2319{
Doug Ledford9f5d32a2014-10-20 18:25:15 -04002320 pr_info("Received IB REJ for cm_id %p.\n", cm_id);
Bart Van Asschea42d9852011-10-14 01:30:46 +00002321 srpt_drain_channel(cm_id);
2322}
2323
2324/**
2325 * srpt_cm_rtu_recv() - Process an IB_CM_RTU_RECEIVED or USER_ESTABLISHED event.
2326 *
2327 * An IB_CM_RTU_RECEIVED message indicates that the connection is established
2328 * and that the recipient may begin transmitting (RTU = ready to use).
2329 */
2330static void srpt_cm_rtu_recv(struct ib_cm_id *cm_id)
2331{
2332 struct srpt_rdma_ch *ch;
2333 int ret;
2334
2335 ch = srpt_find_channel(cm_id->context, cm_id);
2336 BUG_ON(!ch);
2337
Bart Van Asschef130c222016-02-11 11:05:38 -08002338 if (srpt_set_ch_state(ch, CH_LIVE)) {
Bart Van Asschea42d9852011-10-14 01:30:46 +00002339 struct srpt_recv_ioctx *ioctx, *ioctx_tmp;
2340
2341 ret = srpt_ch_qp_rts(ch, ch->qp);
2342
2343 list_for_each_entry_safe(ioctx, ioctx_tmp, &ch->cmd_wait_list,
2344 wait_list) {
2345 list_del(&ioctx->wait_list);
2346 srpt_handle_new_iu(ch, ioctx, NULL);
2347 }
2348 if (ret)
2349 srpt_close_ch(ch);
2350 }
2351}
2352
2353static void srpt_cm_timewait_exit(struct ib_cm_id *cm_id)
2354{
Doug Ledford9f5d32a2014-10-20 18:25:15 -04002355 pr_info("Received IB TimeWait exit for cm_id %p.\n", cm_id);
Bart Van Asschea42d9852011-10-14 01:30:46 +00002356 srpt_drain_channel(cm_id);
2357}
2358
2359static void srpt_cm_rep_error(struct ib_cm_id *cm_id)
2360{
Doug Ledford9f5d32a2014-10-20 18:25:15 -04002361 pr_info("Received IB REP error for cm_id %p.\n", cm_id);
Bart Van Asschea42d9852011-10-14 01:30:46 +00002362 srpt_drain_channel(cm_id);
2363}
2364
2365/**
2366 * srpt_cm_dreq_recv() - Process reception of a DREQ message.
2367 */
2368static void srpt_cm_dreq_recv(struct ib_cm_id *cm_id)
2369{
2370 struct srpt_rdma_ch *ch;
2371 unsigned long flags;
2372 bool send_drep = false;
2373
2374 ch = srpt_find_channel(cm_id->context, cm_id);
2375 BUG_ON(!ch);
2376
Bart Van Assche33912d72016-02-11 11:04:43 -08002377 pr_debug("cm_id= %p ch->state= %d\n", cm_id, ch->state);
Bart Van Asschea42d9852011-10-14 01:30:46 +00002378
2379 spin_lock_irqsave(&ch->spinlock, flags);
2380 switch (ch->state) {
2381 case CH_CONNECTING:
2382 case CH_LIVE:
2383 send_drep = true;
2384 ch->state = CH_DISCONNECTING;
2385 break;
2386 case CH_DISCONNECTING:
2387 case CH_DRAINING:
2388 case CH_RELEASING:
2389 WARN(true, "unexpected channel state %d\n", ch->state);
2390 break;
2391 }
2392 spin_unlock_irqrestore(&ch->spinlock, flags);
2393
2394 if (send_drep) {
2395 if (ib_send_cm_drep(ch->cm_id, NULL, 0) < 0)
Doug Ledford9f5d32a2014-10-20 18:25:15 -04002396 pr_err("Sending IB DREP failed.\n");
2397 pr_info("Received DREQ and sent DREP for session %s.\n",
2398 ch->sess_name);
Bart Van Asschea42d9852011-10-14 01:30:46 +00002399 }
2400}
2401
2402/**
2403 * srpt_cm_drep_recv() - Process reception of a DREP message.
2404 */
2405static void srpt_cm_drep_recv(struct ib_cm_id *cm_id)
2406{
Doug Ledford9f5d32a2014-10-20 18:25:15 -04002407 pr_info("Received InfiniBand DREP message for cm_id %p.\n", cm_id);
Bart Van Asschea42d9852011-10-14 01:30:46 +00002408 srpt_drain_channel(cm_id);
2409}
2410
2411/**
2412 * srpt_cm_handler() - IB connection manager callback function.
2413 *
	2414 * A non-zero return value will cause the caller to destroy the CM ID.
2415 *
	2416 * Note: srpt_cm_handler() must only return a non-zero value when srpt_cm_req_recv()
	2417 * failed to transfer ownership of the cm_id to a channel. Returning
2418 * a non-zero value in any other case will trigger a race with the
2419 * ib_destroy_cm_id() call in srpt_release_channel().
2420 */
2421static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2422{
2423 int ret;
2424
2425 ret = 0;
2426 switch (event->event) {
2427 case IB_CM_REQ_RECEIVED:
2428 ret = srpt_cm_req_recv(cm_id, &event->param.req_rcvd,
2429 event->private_data);
2430 break;
2431 case IB_CM_REJ_RECEIVED:
2432 srpt_cm_rej_recv(cm_id);
2433 break;
2434 case IB_CM_RTU_RECEIVED:
2435 case IB_CM_USER_ESTABLISHED:
2436 srpt_cm_rtu_recv(cm_id);
2437 break;
2438 case IB_CM_DREQ_RECEIVED:
2439 srpt_cm_dreq_recv(cm_id);
2440 break;
2441 case IB_CM_DREP_RECEIVED:
2442 srpt_cm_drep_recv(cm_id);
2443 break;
2444 case IB_CM_TIMEWAIT_EXIT:
2445 srpt_cm_timewait_exit(cm_id);
2446 break;
2447 case IB_CM_REP_ERROR:
2448 srpt_cm_rep_error(cm_id);
2449 break;
2450 case IB_CM_DREQ_ERROR:
Doug Ledford9f5d32a2014-10-20 18:25:15 -04002451 pr_info("Received IB DREQ ERROR event.\n");
Bart Van Asschea42d9852011-10-14 01:30:46 +00002452 break;
2453 case IB_CM_MRA_RECEIVED:
Doug Ledford9f5d32a2014-10-20 18:25:15 -04002454 pr_info("Received IB MRA event\n");
Bart Van Asschea42d9852011-10-14 01:30:46 +00002455 break;
2456 default:
Doug Ledford9f5d32a2014-10-20 18:25:15 -04002457 pr_err("received unrecognized IB CM event %d\n", event->event);
Bart Van Asschea42d9852011-10-14 01:30:46 +00002458 break;
2459 }
2460
2461 return ret;
2462}
2463
2464/**
2465 * srpt_perform_rdmas() - Perform IB RDMA.
2466 *
2467 * Returns zero upon success or a negative number upon failure.
2468 */
2469static int srpt_perform_rdmas(struct srpt_rdma_ch *ch,
2470 struct srpt_send_ioctx *ioctx)
2471{
Bart Van Asschea42d9852011-10-14 01:30:46 +00002472 struct ib_send_wr *bad_wr;
Christoph Hellwig59fae4d2015-09-29 13:00:44 +02002473 int sq_wr_avail, ret, i;
Bart Van Asschea42d9852011-10-14 01:30:46 +00002474 enum dma_data_direction dir;
2475 const int n_rdma = ioctx->n_rdma;
2476
2477 dir = ioctx->cmd.data_direction;
2478 if (dir == DMA_TO_DEVICE) {
2479 /* write */
2480 ret = -ENOMEM;
2481 sq_wr_avail = atomic_sub_return(n_rdma, &ch->sq_wr_avail);
2482 if (sq_wr_avail < 0) {
Doug Ledford9f5d32a2014-10-20 18:25:15 -04002483 pr_warn("IB send queue full (needed %d)\n",
2484 n_rdma);
Bart Van Asschea42d9852011-10-14 01:30:46 +00002485 goto out;
2486 }
2487 }
2488
Christoph Hellwig59fae4d2015-09-29 13:00:44 +02002489 for (i = 0; i < n_rdma; i++) {
2490 struct ib_send_wr *wr = &ioctx->rdma_wrs[i].wr;
Bart Van Asschea42d9852011-10-14 01:30:46 +00002491
Christoph Hellwig59fae4d2015-09-29 13:00:44 +02002492 wr->opcode = (dir == DMA_FROM_DEVICE) ?
2493 IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
2494
2495 if (i == n_rdma - 1) {
2496 /* only get completion event for the last rdma read */
2497 if (dir == DMA_TO_DEVICE) {
2498 wr->send_flags = IB_SEND_SIGNALED;
2499 ioctx->rdma_cqe.done = srpt_rdma_read_done;
2500 } else {
2501 ioctx->rdma_cqe.done = srpt_rdma_write_done;
2502 }
2503 wr->wr_cqe = &ioctx->rdma_cqe;
2504 wr->next = NULL;
Bart Van Asschea42d9852011-10-14 01:30:46 +00002505 } else {
Christoph Hellwig59fae4d2015-09-29 13:00:44 +02002506 wr->wr_cqe = NULL;
2507 wr->next = &ioctx->rdma_wrs[i + 1].wr;
Bart Van Asschea42d9852011-10-14 01:30:46 +00002508 }
Bart Van Asschea42d9852011-10-14 01:30:46 +00002509 }
2510
Christoph Hellwig59fae4d2015-09-29 13:00:44 +02002511 ret = ib_post_send(ch->qp, &ioctx->rdma_wrs->wr, &bad_wr);
Bart Van Asschea42d9852011-10-14 01:30:46 +00002512 if (ret)
Doug Ledford9f5d32a2014-10-20 18:25:15 -04002513 pr_err("%s[%d]: ib_post_send() returned %d for %d/%d\n",
Bart Van Asschea42d9852011-10-14 01:30:46 +00002514 __func__, __LINE__, ret, i, n_rdma);
Bart Van Asschea42d9852011-10-14 01:30:46 +00002515out:
2516 if (unlikely(dir == DMA_TO_DEVICE && ret < 0))
2517 atomic_add(n_rdma, &ch->sq_wr_avail);
2518 return ret;
2519}
2520
2521/**
2522 * srpt_xfer_data() - Start data transfer from initiator to target.
2523 */
2524static int srpt_xfer_data(struct srpt_rdma_ch *ch,
2525 struct srpt_send_ioctx *ioctx)
2526{
2527 int ret;
2528
2529 ret = srpt_map_sg_to_ib_sge(ch, ioctx);
2530 if (ret) {
Doug Ledford9f5d32a2014-10-20 18:25:15 -04002531 pr_err("%s[%d] ret=%d\n", __func__, __LINE__, ret);
Bart Van Asschea42d9852011-10-14 01:30:46 +00002532 goto out;
2533 }
2534
2535 ret = srpt_perform_rdmas(ch, ioctx);
2536 if (ret) {
2537 if (ret == -EAGAIN || ret == -ENOMEM)
Doug Ledford9f5d32a2014-10-20 18:25:15 -04002538 pr_info("%s[%d] queue full -- ret=%d\n",
2539 __func__, __LINE__, ret);
Bart Van Asschea42d9852011-10-14 01:30:46 +00002540 else
Doug Ledford9f5d32a2014-10-20 18:25:15 -04002541 pr_err("%s[%d] fatal error -- ret=%d\n",
Bart Van Asschea42d9852011-10-14 01:30:46 +00002542 __func__, __LINE__, ret);
2543 goto out_unmap;
2544 }
2545
2546out:
2547 return ret;
2548out_unmap:
2549 srpt_unmap_sg_to_ib_sge(ch, ioctx);
2550 goto out;
2551}
2552
2553static int srpt_write_pending_status(struct se_cmd *se_cmd)
2554{
2555 struct srpt_send_ioctx *ioctx;
2556
2557 ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
2558 return srpt_get_cmd_state(ioctx) == SRPT_STATE_NEED_DATA;
2559}
2560
2561/*
2562 * srpt_write_pending() - Start data transfer from initiator to target (write).
2563 */
2564static int srpt_write_pending(struct se_cmd *se_cmd)
2565{
2566 struct srpt_rdma_ch *ch;
2567 struct srpt_send_ioctx *ioctx;
2568 enum srpt_command_state new_state;
Bart Van Asschea42d9852011-10-14 01:30:46 +00002569 int ret;
2570
2571 ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
2572
2573 new_state = srpt_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA);
2574 WARN_ON(new_state == SRPT_STATE_DONE);
2575
2576 ch = ioctx->ch;
2577 BUG_ON(!ch);
2578
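	/* Transferring data from the initiator is only possible on a live channel. */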
Bart Van Assche33912d72016-02-11 11:04:43 -08002579 switch (ch->state) {
Bart Van Asschea42d9852011-10-14 01:30:46 +00002580 case CH_CONNECTING:
Bart Van Assche33912d72016-02-11 11:04:43 -08002581 WARN(true, "unexpected channel state %d\n", ch->state);
Bart Van Asschea42d9852011-10-14 01:30:46 +00002582 ret = -EINVAL;
2583 goto out;
2584 case CH_LIVE:
2585 break;
2586 case CH_DISCONNECTING:
2587 case CH_DRAINING:
2588 case CH_RELEASING:
2589 pr_debug("cmd with tag %lld: channel disconnecting\n",
Bart Van Assche649ee052015-04-14 13:26:44 +02002590 ioctx->cmd.tag);
Bart Van Asschea42d9852011-10-14 01:30:46 +00002591 srpt_set_cmd_state(ioctx, SRPT_STATE_DATA_IN);
2592 ret = -EINVAL;
2593 goto out;
2594 }
2595 ret = srpt_xfer_data(ch, ioctx);
2596
2597out:
2598 return ret;
2599}
2600
2601static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status)
2602{
2603 switch (tcm_mgmt_status) {
2604 case TMR_FUNCTION_COMPLETE:
2605 return SRP_TSK_MGMT_SUCCESS;
2606 case TMR_FUNCTION_REJECTED:
2607 return SRP_TSK_MGMT_FUNC_NOT_SUPP;
2608 }
2609 return SRP_TSK_MGMT_FAILED;
2610}
2611
2612/**
2613 * srpt_queue_response() - Transmits the response to a SCSI command.
2614 *
2615 * Callback function called by the TCM core. Must not block since it can be
	2616 * invoked in the context of the IB completion handler.
2617 */
Joern Engelb79fafa2013-07-03 11:22:17 -04002618static void srpt_queue_response(struct se_cmd *cmd)
Bart Van Asschea42d9852011-10-14 01:30:46 +00002619{
2620 struct srpt_rdma_ch *ch;
2621 struct srpt_send_ioctx *ioctx;
2622 enum srpt_command_state state;
2623 unsigned long flags;
2624 int ret;
2625 enum dma_data_direction dir;
2626 int resp_len;
2627 u8 srp_tm_status;
2628
Bart Van Asschea42d9852011-10-14 01:30:46 +00002629 ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
2630 ch = ioctx->ch;
2631 BUG_ON(!ch);
2632
2633 spin_lock_irqsave(&ioctx->spinlock, flags);
2634 state = ioctx->state;
2635 switch (state) {
2636 case SRPT_STATE_NEW:
2637 case SRPT_STATE_DATA_IN:
2638 ioctx->state = SRPT_STATE_CMD_RSP_SENT;
2639 break;
2640 case SRPT_STATE_MGMT:
2641 ioctx->state = SRPT_STATE_MGMT_RSP_SENT;
2642 break;
2643 default:
2644 WARN(true, "ch %p; cmd %d: unexpected command state %d\n",
2645 ch, ioctx->ioctx.index, ioctx->state);
2646 break;
2647 }
2648 spin_unlock_irqrestore(&ioctx->spinlock, flags);
2649
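	/*
	 * Do not send a response for a command that has been aborted or for
	 * which a response has already been sent.
	 */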
2650 if (unlikely(transport_check_aborted_status(&ioctx->cmd, false)
2651 || WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT))) {
2652 atomic_inc(&ch->req_lim_delta);
2653 srpt_abort_cmd(ioctx);
Joern Engelb79fafa2013-07-03 11:22:17 -04002654 return;
Bart Van Asschea42d9852011-10-14 01:30:46 +00002655 }
2656
2657 dir = ioctx->cmd.data_direction;
2658
2659 /* For read commands, transfer the data to the initiator. */
2660 if (dir == DMA_FROM_DEVICE && ioctx->cmd.data_length &&
2661 !ioctx->queue_status_only) {
2662 ret = srpt_xfer_data(ch, ioctx);
2663 if (ret) {
Doug Ledford9f5d32a2014-10-20 18:25:15 -04002664 pr_err("xfer_data failed for tag %llu\n",
Bart Van Assche649ee052015-04-14 13:26:44 +02002665 ioctx->cmd.tag);
Joern Engelb79fafa2013-07-03 11:22:17 -04002666 return;
Bart Van Asschea42d9852011-10-14 01:30:46 +00002667 }
2668 }
2669
2670 if (state != SRPT_STATE_MGMT)
Bart Van Assche649ee052015-04-14 13:26:44 +02002671 resp_len = srpt_build_cmd_rsp(ch, ioctx, ioctx->cmd.tag,
Bart Van Asschea42d9852011-10-14 01:30:46 +00002672 cmd->scsi_status);
2673 else {
2674 srp_tm_status
2675 = tcm_to_srp_tsk_mgmt_status(cmd->se_tmr_req->response);
2676 resp_len = srpt_build_tskmgmt_rsp(ch, ioctx, srp_tm_status,
Bart Van Assche649ee052015-04-14 13:26:44 +02002677 ioctx->cmd.tag);
Bart Van Asschea42d9852011-10-14 01:30:46 +00002678 }
2679 ret = srpt_post_send(ch, ioctx, resp_len);
2680 if (ret) {
Doug Ledford9f5d32a2014-10-20 18:25:15 -04002681 pr_err("sending cmd response failed for tag %llu\n",
Bart Van Assche649ee052015-04-14 13:26:44 +02002682 ioctx->cmd.tag);
Bart Van Asschea42d9852011-10-14 01:30:46 +00002683 srpt_unmap_sg_to_ib_sge(ch, ioctx);
2684 srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
Bart Van Asscheafc16602015-04-27 13:52:36 +02002685 target_put_sess_cmd(&ioctx->cmd);
Bart Van Asschea42d9852011-10-14 01:30:46 +00002686 }
Joern Engelb79fafa2013-07-03 11:22:17 -04002687}
Bart Van Asschea42d9852011-10-14 01:30:46 +00002688
Joern Engelb79fafa2013-07-03 11:22:17 -04002689static int srpt_queue_data_in(struct se_cmd *cmd)
2690{
2691 srpt_queue_response(cmd);
2692 return 0;
2693}
2694
2695static void srpt_queue_tm_rsp(struct se_cmd *cmd)
2696{
2697 srpt_queue_response(cmd);
Bart Van Asschea42d9852011-10-14 01:30:46 +00002698}
2699
Nicholas Bellinger131e6ab2014-03-22 14:55:56 -07002700static void srpt_aborted_task(struct se_cmd *cmd)
2701{
2702 struct srpt_send_ioctx *ioctx = container_of(cmd,
2703 struct srpt_send_ioctx, cmd);
2704
2705 srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
2706}
2707
Bart Van Asschea42d9852011-10-14 01:30:46 +00002708static int srpt_queue_status(struct se_cmd *cmd)
2709{
2710 struct srpt_send_ioctx *ioctx;
2711
2712 ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
2713 BUG_ON(ioctx->sense_data != cmd->sense_buffer);
2714 if (cmd->se_cmd_flags &
2715 (SCF_TRANSPORT_TASK_SENSE | SCF_EMULATED_TASK_SENSE))
2716 WARN_ON(cmd->scsi_status != SAM_STAT_CHECK_CONDITION);
2717 ioctx->queue_status_only = true;
Joern Engelb79fafa2013-07-03 11:22:17 -04002718 srpt_queue_response(cmd);
2719 return 0;
Bart Van Asschea42d9852011-10-14 01:30:46 +00002720}
2721
2722static void srpt_refresh_port_work(struct work_struct *work)
2723{
2724 struct srpt_port *sport = container_of(work, struct srpt_port, work);
2725
2726 srpt_refresh_port(sport);
2727}
2728
2729static int srpt_ch_list_empty(struct srpt_device *sdev)
2730{
2731 int res;
2732
2733 spin_lock_irq(&sdev->spinlock);
2734 res = list_empty(&sdev->rch_list);
2735 spin_unlock_irq(&sdev->spinlock);
2736
2737 return res;
2738}
2739
2740/**
2741 * srpt_release_sdev() - Free the channel resources associated with a target.
2742 */
2743static int srpt_release_sdev(struct srpt_device *sdev)
2744{
2745 struct srpt_rdma_ch *ch, *tmp_ch;
2746 int res;
2747
2748 WARN_ON_ONCE(irqs_disabled());
2749
2750 BUG_ON(!sdev);
2751
2752 spin_lock_irq(&sdev->spinlock);
2753 list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list)
2754 __srpt_close_ch(ch);
2755 spin_unlock_irq(&sdev->spinlock);
2756
2757 res = wait_event_interruptible(sdev->ch_releaseQ,
2758 srpt_ch_list_empty(sdev));
2759 if (res)
Doug Ledford9f5d32a2014-10-20 18:25:15 -04002760 pr_err("%s: interrupted.\n", __func__);
Bart Van Asschea42d9852011-10-14 01:30:46 +00002761
2762 return 0;
2763}
2764
2765static struct srpt_port *__srpt_lookup_port(const char *name)
2766{
2767 struct ib_device *dev;
2768 struct srpt_device *sdev;
2769 struct srpt_port *sport;
2770 int i;
2771
2772 list_for_each_entry(sdev, &srpt_dev_list, list) {
2773 dev = sdev->device;
2774 if (!dev)
2775 continue;
2776
2777 for (i = 0; i < dev->phys_port_cnt; i++) {
2778 sport = &sdev->port[i];
2779
2780 if (!strcmp(sport->port_guid, name))
2781 return sport;
2782 }
2783 }
2784
2785 return NULL;
2786}
2787
2788static struct srpt_port *srpt_lookup_port(const char *name)
2789{
2790 struct srpt_port *sport;
2791
2792 spin_lock(&srpt_dev_lock);
2793 sport = __srpt_lookup_port(name);
2794 spin_unlock(&srpt_dev_lock);
2795
2796 return sport;
2797}
2798
2799/**
	2800 * srpt_add_one() - InfiniBand device addition callback function.
2801 */
2802static void srpt_add_one(struct ib_device *device)
2803{
2804 struct srpt_device *sdev;
2805 struct srpt_port *sport;
2806 struct ib_srq_init_attr srq_attr;
2807 int i;
2808
2809 pr_debug("device = %p, device->dma_ops = %p\n", device,
2810 device->dma_ops);
2811
Bart Van Assche9d2aa2b2016-02-11 11:03:31 -08002812 sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
Bart Van Asschea42d9852011-10-14 01:30:46 +00002813 if (!sdev)
2814 goto err;
2815
2816 sdev->device = device;
2817 INIT_LIST_HEAD(&sdev->rch_list);
2818 init_waitqueue_head(&sdev->ch_releaseQ);
2819 spin_lock_init(&sdev->spinlock);
2820
Bart Van Asschea42d9852011-10-14 01:30:46 +00002821 sdev->pd = ib_alloc_pd(device);
2822 if (IS_ERR(sdev->pd))
2823 goto free_dev;
2824
Or Gerlitz4a061b22015-12-18 10:59:46 +02002825 sdev->srq_size = min(srpt_srq_size, sdev->device->attrs.max_srq_wr);
Bart Van Asschea42d9852011-10-14 01:30:46 +00002826
2827 srq_attr.event_handler = srpt_srq_event;
2828 srq_attr.srq_context = (void *)sdev;
2829 srq_attr.attr.max_wr = sdev->srq_size;
2830 srq_attr.attr.max_sge = 1;
2831 srq_attr.attr.srq_limit = 0;
Roland Dreier6f360332012-04-12 07:51:08 -07002832 srq_attr.srq_type = IB_SRQT_BASIC;
Bart Van Asschea42d9852011-10-14 01:30:46 +00002833
2834 sdev->srq = ib_create_srq(sdev->pd, &srq_attr);
2835 if (IS_ERR(sdev->srq))
Jason Gunthorpe5a783952015-07-30 17:22:24 -06002836 goto err_pd;
Bart Van Asschea42d9852011-10-14 01:30:46 +00002837
2838 pr_debug("%s: create SRQ #wr= %d max_allow=%d dev= %s\n",
Or Gerlitz4a061b22015-12-18 10:59:46 +02002839 __func__, sdev->srq_size, sdev->device->attrs.max_srq_wr,
Bart Van Asschea42d9852011-10-14 01:30:46 +00002840 device->name);
2841
2842 if (!srpt_service_guid)
2843 srpt_service_guid = be64_to_cpu(device->node_guid);
2844
2845 sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev);
2846 if (IS_ERR(sdev->cm_id))
2847 goto err_srq;
2848
2849 /* print out target login information */
2850 pr_debug("Target login info: id_ext=%016llx,ioc_guid=%016llx,"
2851 "pkey=ffff,service_id=%016llx\n", srpt_service_guid,
2852 srpt_service_guid, srpt_service_guid);
2853
2854 /*
	2855	 * We do not have a consistent service_id (i.e. also the id_ext of the
	2856	 * target_id) to identify this target. We currently use the GUID of the
	2857	 * first HCA in the system as the service_id; therefore, the target_id
	2858	 * will change if this HCA goes bad and is replaced by a different HCA.
2859 */
Haggai Eran73fec7f2015-07-30 17:50:26 +03002860 if (ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid), 0))
Bart Van Asschea42d9852011-10-14 01:30:46 +00002861 goto err_cm;
2862
2863 INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
2864 srpt_event_handler);
2865 if (ib_register_event_handler(&sdev->event_handler))
2866 goto err_cm;
2867
2868 sdev->ioctx_ring = (struct srpt_recv_ioctx **)
2869 srpt_alloc_ioctx_ring(sdev, sdev->srq_size,
2870 sizeof(*sdev->ioctx_ring[0]),
2871 srp_max_req_size, DMA_FROM_DEVICE);
2872 if (!sdev->ioctx_ring)
2873 goto err_event;
2874
2875 for (i = 0; i < sdev->srq_size; ++i)
2876 srpt_post_recv(sdev, sdev->ioctx_ring[i]);
2877
Roland Dreierf2250662012-02-02 12:55:58 -08002878 WARN_ON(sdev->device->phys_port_cnt > ARRAY_SIZE(sdev->port));
Bart Van Asschea42d9852011-10-14 01:30:46 +00002879
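	/* Initialize a srpt_port structure for each port; IB port numbers are one-based. */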
2880 for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
2881 sport = &sdev->port[i - 1];
2882 sport->sdev = sdev;
2883 sport->port = i;
2884 sport->port_attrib.srp_max_rdma_size = DEFAULT_MAX_RDMA_SIZE;
2885 sport->port_attrib.srp_max_rsp_size = DEFAULT_MAX_RSP_SIZE;
2886 sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE;
2887 INIT_WORK(&sport->work, srpt_refresh_port_work);
Bart Van Asschea42d9852011-10-14 01:30:46 +00002888
2889 if (srpt_refresh_port(sport)) {
Doug Ledford9f5d32a2014-10-20 18:25:15 -04002890 pr_err("MAD registration failed for %s-%d.\n",
Bart Van Asschef68cba4e92016-02-11 11:04:20 -08002891 sdev->device->name, i);
Bart Van Asschea42d9852011-10-14 01:30:46 +00002892 goto err_ring;
2893 }
2894 snprintf(sport->port_guid, sizeof(sport->port_guid),
2895 "0x%016llx%016llx",
2896 be64_to_cpu(sport->gid.global.subnet_prefix),
2897 be64_to_cpu(sport->gid.global.interface_id));
2898 }
2899
2900 spin_lock(&srpt_dev_lock);
2901 list_add_tail(&sdev->list, &srpt_dev_list);
2902 spin_unlock(&srpt_dev_lock);
2903
2904out:
2905 ib_set_client_data(device, &srpt_client, sdev);
2906 pr_debug("added %s.\n", device->name);
2907 return;
2908
2909err_ring:
2910 srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
2911 sdev->srq_size, srp_max_req_size,
2912 DMA_FROM_DEVICE);
2913err_event:
2914 ib_unregister_event_handler(&sdev->event_handler);
2915err_cm:
2916 ib_destroy_cm_id(sdev->cm_id);
2917err_srq:
2918 ib_destroy_srq(sdev->srq);
Bart Van Asschea42d9852011-10-14 01:30:46 +00002919err_pd:
2920 ib_dealloc_pd(sdev->pd);
2921free_dev:
2922 kfree(sdev);
2923err:
2924 sdev = NULL;
Doug Ledford9f5d32a2014-10-20 18:25:15 -04002925 pr_info("%s(%s) failed.\n", __func__, device->name);
Bart Van Asschea42d9852011-10-14 01:30:46 +00002926 goto out;
2927}
2928
2929/**
2930 * srpt_remove_one() - InfiniBand device removal callback function.
2931 */
Haggai Eran7c1eb452015-07-30 17:50:14 +03002932static void srpt_remove_one(struct ib_device *device, void *client_data)
Bart Van Asschea42d9852011-10-14 01:30:46 +00002933{
Haggai Eran7c1eb452015-07-30 17:50:14 +03002934 struct srpt_device *sdev = client_data;
Bart Van Asschea42d9852011-10-14 01:30:46 +00002935 int i;
2936
Bart Van Asschea42d9852011-10-14 01:30:46 +00002937 if (!sdev) {
Doug Ledford9f5d32a2014-10-20 18:25:15 -04002938 pr_info("%s(%s): nothing to do.\n", __func__, device->name);
Bart Van Asschea42d9852011-10-14 01:30:46 +00002939 return;
2940 }
2941
2942 srpt_unregister_mad_agent(sdev);
2943
2944 ib_unregister_event_handler(&sdev->event_handler);
2945
2946 /* Cancel any work queued by the just unregistered IB event handler. */
2947 for (i = 0; i < sdev->device->phys_port_cnt; i++)
2948 cancel_work_sync(&sdev->port[i].work);
2949
2950 ib_destroy_cm_id(sdev->cm_id);
2951
2952 /*
2953 * Unregistering a target must happen after destroying sdev->cm_id
2954 * such that no new SRP_LOGIN_REQ information units can arrive while
2955 * destroying the target.
2956 */
2957 spin_lock(&srpt_dev_lock);
2958 list_del(&sdev->list);
2959 spin_unlock(&srpt_dev_lock);
2960 srpt_release_sdev(sdev);
2961
2962 ib_destroy_srq(sdev->srq);
Bart Van Asschea42d9852011-10-14 01:30:46 +00002963 ib_dealloc_pd(sdev->pd);
2964
2965 srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
2966 sdev->srq_size, srp_max_req_size, DMA_FROM_DEVICE);
2967 sdev->ioctx_ring = NULL;
2968 kfree(sdev);
2969}
2970
2971static struct ib_client srpt_client = {
2972 .name = DRV_NAME,
2973 .add = srpt_add_one,
2974 .remove = srpt_remove_one
2975};
2976
2977static int srpt_check_true(struct se_portal_group *se_tpg)
2978{
2979 return 1;
2980}
2981
2982static int srpt_check_false(struct se_portal_group *se_tpg)
2983{
2984 return 0;
2985}
2986
2987static char *srpt_get_fabric_name(void)
2988{
2989 return "srpt";
2990}
2991
Bart Van Asschea42d9852011-10-14 01:30:46 +00002992static char *srpt_get_fabric_wwn(struct se_portal_group *tpg)
2993{
2994 struct srpt_port *sport = container_of(tpg, struct srpt_port, port_tpg_1);
2995
2996 return sport->port_guid;
2997}
2998
2999static u16 srpt_get_tag(struct se_portal_group *tpg)
3000{
3001 return 1;
3002}
3003
Bart Van Asschea42d9852011-10-14 01:30:46 +00003004static u32 srpt_tpg_get_inst_index(struct se_portal_group *se_tpg)
3005{
3006 return 1;
3007}
3008
3009static void srpt_release_cmd(struct se_cmd *se_cmd)
3010{
Nicholas Bellinger9474b042012-11-27 23:55:57 -08003011 struct srpt_send_ioctx *ioctx = container_of(se_cmd,
3012 struct srpt_send_ioctx, cmd);
3013 struct srpt_rdma_ch *ch = ioctx->ch;
3014 unsigned long flags;
3015
3016 WARN_ON(ioctx->state != SRPT_STATE_DONE);
3017 WARN_ON(ioctx->mapped_sg_count != 0);
3018
3019 if (ioctx->n_rbuf > 1) {
3020 kfree(ioctx->rbufs);
3021 ioctx->rbufs = NULL;
3022 ioctx->n_rbuf = 0;
3023 }
3024
3025 spin_lock_irqsave(&ch->spinlock, flags);
3026 list_add(&ioctx->free_list, &ch->free_list);
3027 spin_unlock_irqrestore(&ch->spinlock, flags);
Bart Van Asschea42d9852011-10-14 01:30:46 +00003028}
3029
3030/**
Bart Van Asschea42d9852011-10-14 01:30:46 +00003031 * srpt_close_session() - Forcibly close a session.
3032 *
3033 * Callback function invoked by the TCM core to clean up sessions associated
3034 * with a node ACL when the user invokes
3035 * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
3036 */
3037static void srpt_close_session(struct se_session *se_sess)
3038{
3039 DECLARE_COMPLETION_ONSTACK(release_done);
3040 struct srpt_rdma_ch *ch;
3041 struct srpt_device *sdev;
Nicholas Mc Guireecc3f3e2015-01-16 12:20:17 +01003042 unsigned long res;
Bart Van Asschea42d9852011-10-14 01:30:46 +00003043
3044 ch = se_sess->fabric_sess_ptr;
3045 WARN_ON(ch->sess != se_sess);
3046
Bart Van Assche33912d72016-02-11 11:04:43 -08003047 pr_debug("ch %p state %d\n", ch, ch->state);
Bart Van Asschea42d9852011-10-14 01:30:46 +00003048
3049 sdev = ch->sport->sdev;
3050 spin_lock_irq(&sdev->spinlock);
3051 BUG_ON(ch->release_done);
3052 ch->release_done = &release_done;
3053 __srpt_close_ch(ch);
3054 spin_unlock_irq(&sdev->spinlock);
3055
3056 res = wait_for_completion_timeout(&release_done, 60 * HZ);
Nicholas Mc Guireecc3f3e2015-01-16 12:20:17 +01003057 WARN_ON(res == 0);
Bart Van Asschea42d9852011-10-14 01:30:46 +00003058}
3059
3060/**
Bart Van Asschea42d9852011-10-14 01:30:46 +00003061 * srpt_sess_get_index() - Return the value of scsiAttIntrPortIndex (SCSI-MIB).
3062 *
3063 * A quote from RFC 4455 (SCSI-MIB) about this MIB object:
3064 * This object represents an arbitrary integer used to uniquely identify a
3065 * particular attached remote initiator port to a particular SCSI target port
3066 * within a particular SCSI target device within a particular SCSI instance.
3067 */
3068static u32 srpt_sess_get_index(struct se_session *se_sess)
3069{
3070 return 0;
3071}
3072
3073static void srpt_set_default_node_attrs(struct se_node_acl *nacl)
3074{
3075}
3076
Bart Van Asschea42d9852011-10-14 01:30:46 +00003077/* Note: only used from inside debug printk's by the TCM core. */
3078static int srpt_get_tcm_cmd_state(struct se_cmd *se_cmd)
3079{
3080 struct srpt_send_ioctx *ioctx;
3081
3082 ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
3083 return srpt_get_cmd_state(ioctx);
3084}
3085
Bart Van Asschea42d9852011-10-14 01:30:46 +00003086/**
3087 * srpt_parse_i_port_id() - Parse an initiator port ID.
3088 * @name: ASCII representation of a 128-bit initiator port ID.
3089 * @i_port_id: Binary 128-bit port ID.
3090 */
3091static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
3092{
3093 const char *p;
3094 unsigned len, count, leading_zero_bytes;
3095	int ret;
3096
3097 p = name;
Rasmus Villemoesb60459f2014-10-13 15:54:46 -07003098 if (strncasecmp(p, "0x", 2) == 0)
Bart Van Asschea42d9852011-10-14 01:30:46 +00003099 p += 2;
3100 ret = -EINVAL;
3101 len = strlen(p);
3102 if (len % 2)
3103 goto out;
3104 count = min(len / 2, 16U);
3105 leading_zero_bytes = 16 - count;
3106 memset(i_port_id, 0, leading_zero_bytes);
3107	ret = hex2bin(i_port_id + leading_zero_bytes, p, count);
3108	if (ret < 0)
3109		pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", ret);
3111out:
3112 return ret;
3113}
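
/*
 * Illustrative sketch (not part of the original source) of how the parser
 * above behaves; the initiator port ID string is hypothetical.
 *
 *	u8 i_port_id[16];
 *
 *	srpt_parse_i_port_id(i_port_id, "0x00000000000000000002c9030005f34b");
 *
 * The optional "0x" prefix is skipped, strings shorter than 32 hex digits are
 * padded with leading zero bytes, and the digits are converted with
 * hex2bin(). The function returns 0 on success and a negative error code for
 * an odd-length or non-hex string.
 */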
3114
3115/*
3116 * configfs callback function invoked for
3117 * mkdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
3118 */
Christoph Hellwigc7d6a802015-04-13 19:51:14 +02003119static int srpt_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
Bart Van Asschea42d9852011-10-14 01:30:46 +00003120{
Bart Van Asschea42d9852011-10-14 01:30:46 +00003121 u8 i_port_id[16];
3122
3123 if (srpt_parse_i_port_id(i_port_id, name) < 0) {
Doug Ledford9f5d32a2014-10-20 18:25:15 -04003124 pr_err("invalid initiator port ID %s\n", name);
Christoph Hellwigc7d6a802015-04-13 19:51:14 +02003125 return -EINVAL;
Bart Van Asschea42d9852011-10-14 01:30:46 +00003126 }
Christoph Hellwigc7d6a802015-04-13 19:51:14 +02003127 return 0;
Bart Van Asschea42d9852011-10-14 01:30:46 +00003128}
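
/*
 * Example (illustrative, not part of the original source) of the configfs
 * operation that reaches srpt_init_nodeacl(); the initiator port ID is
 * hypothetical.
 *
 *   mkdir /sys/kernel/config/target/srpt/$port/$tpg/acls/0x00000000000000000002c9030005f34b
 *
 * A name that srpt_parse_i_port_id() rejects makes the mkdir fail with
 * EINVAL.
 */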
3129
Christoph Hellwig2eafd722015-10-03 15:32:55 +02003130static ssize_t srpt_tpg_attrib_srp_max_rdma_size_show(struct config_item *item,
3131 char *page)
Bart Van Asschea42d9852011-10-14 01:30:46 +00003132{
Christoph Hellwig2eafd722015-10-03 15:32:55 +02003133 struct se_portal_group *se_tpg = attrib_to_tpg(item);
Bart Van Asschea42d9852011-10-14 01:30:46 +00003134 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3135
3136 return sprintf(page, "%u\n", sport->port_attrib.srp_max_rdma_size);
3137}
3138
Christoph Hellwig2eafd722015-10-03 15:32:55 +02003139static ssize_t srpt_tpg_attrib_srp_max_rdma_size_store(struct config_item *item,
3140 const char *page, size_t count)
Bart Van Asschea42d9852011-10-14 01:30:46 +00003141{
Christoph Hellwig2eafd722015-10-03 15:32:55 +02003142 struct se_portal_group *se_tpg = attrib_to_tpg(item);
Bart Van Asschea42d9852011-10-14 01:30:46 +00003143 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3144 unsigned long val;
3145 int ret;
3146
Jingoo Han9d8abf42014-02-05 11:22:05 +09003147 ret = kstrtoul(page, 0, &val);
Bart Van Asschea42d9852011-10-14 01:30:46 +00003148 if (ret < 0) {
Jingoo Han9d8abf42014-02-05 11:22:05 +09003149 pr_err("kstrtoul() failed with ret: %d\n", ret);
Bart Van Asschea42d9852011-10-14 01:30:46 +00003150 return -EINVAL;
3151 }
3152 if (val > MAX_SRPT_RDMA_SIZE) {
3153 pr_err("val: %lu exceeds MAX_SRPT_RDMA_SIZE: %d\n", val,
3154 MAX_SRPT_RDMA_SIZE);
3155 return -EINVAL;
3156 }
3157 if (val < DEFAULT_MAX_RDMA_SIZE) {
3158 pr_err("val: %lu smaller than DEFAULT_MAX_RDMA_SIZE: %d\n",
3159 val, DEFAULT_MAX_RDMA_SIZE);
3160 return -EINVAL;
3161 }
3162 sport->port_attrib.srp_max_rdma_size = val;
3163
3164 return count;
3165}
3166
Christoph Hellwig2eafd722015-10-03 15:32:55 +02003167static ssize_t srpt_tpg_attrib_srp_max_rsp_size_show(struct config_item *item,
3168 char *page)
Bart Van Asschea42d9852011-10-14 01:30:46 +00003169{
Christoph Hellwig2eafd722015-10-03 15:32:55 +02003170 struct se_portal_group *se_tpg = attrib_to_tpg(item);
Bart Van Asschea42d9852011-10-14 01:30:46 +00003171 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3172
3173 return sprintf(page, "%u\n", sport->port_attrib.srp_max_rsp_size);
3174}
3175
Christoph Hellwig2eafd722015-10-03 15:32:55 +02003176static ssize_t srpt_tpg_attrib_srp_max_rsp_size_store(struct config_item *item,
3177 const char *page, size_t count)
Bart Van Asschea42d9852011-10-14 01:30:46 +00003178{
Christoph Hellwig2eafd722015-10-03 15:32:55 +02003179 struct se_portal_group *se_tpg = attrib_to_tpg(item);
Bart Van Asschea42d9852011-10-14 01:30:46 +00003180 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3181 unsigned long val;
3182 int ret;
3183
Jingoo Han9d8abf42014-02-05 11:22:05 +09003184 ret = kstrtoul(page, 0, &val);
Bart Van Asschea42d9852011-10-14 01:30:46 +00003185 if (ret < 0) {
Jingoo Han9d8abf42014-02-05 11:22:05 +09003186 pr_err("kstrtoul() failed with ret: %d\n", ret);
Bart Van Asschea42d9852011-10-14 01:30:46 +00003187 return -EINVAL;
3188 }
3189 if (val > MAX_SRPT_RSP_SIZE) {
3190 pr_err("val: %lu exceeds MAX_SRPT_RSP_SIZE: %d\n", val,
3191 MAX_SRPT_RSP_SIZE);
3192 return -EINVAL;
3193 }
3194 if (val < MIN_MAX_RSP_SIZE) {
3195 pr_err("val: %lu smaller than MIN_MAX_RSP_SIZE: %d\n", val,
3196 MIN_MAX_RSP_SIZE);
3197 return -EINVAL;
3198 }
3199 sport->port_attrib.srp_max_rsp_size = val;
3200
3201 return count;
3202}
3203
Christoph Hellwig2eafd722015-10-03 15:32:55 +02003204static ssize_t srpt_tpg_attrib_srp_sq_size_show(struct config_item *item,
3205 char *page)
Bart Van Asschea42d9852011-10-14 01:30:46 +00003206{
Christoph Hellwig2eafd722015-10-03 15:32:55 +02003207 struct se_portal_group *se_tpg = attrib_to_tpg(item);
Bart Van Asschea42d9852011-10-14 01:30:46 +00003208 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3209
3210 return sprintf(page, "%u\n", sport->port_attrib.srp_sq_size);
3211}
3212
Christoph Hellwig2eafd722015-10-03 15:32:55 +02003213static ssize_t srpt_tpg_attrib_srp_sq_size_store(struct config_item *item,
3214 const char *page, size_t count)
Bart Van Asschea42d9852011-10-14 01:30:46 +00003215{
Christoph Hellwig2eafd722015-10-03 15:32:55 +02003216 struct se_portal_group *se_tpg = attrib_to_tpg(item);
Bart Van Asschea42d9852011-10-14 01:30:46 +00003217 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3218 unsigned long val;
3219 int ret;
3220
Jingoo Han9d8abf42014-02-05 11:22:05 +09003221 ret = kstrtoul(page, 0, &val);
Bart Van Asschea42d9852011-10-14 01:30:46 +00003222 if (ret < 0) {
Jingoo Han9d8abf42014-02-05 11:22:05 +09003223 pr_err("kstrtoul() failed with ret: %d\n", ret);
Bart Van Asschea42d9852011-10-14 01:30:46 +00003224 return -EINVAL;
3225 }
3226 if (val > MAX_SRPT_SRQ_SIZE) {
3227 pr_err("val: %lu exceeds MAX_SRPT_SRQ_SIZE: %d\n", val,
3228 MAX_SRPT_SRQ_SIZE);
3229 return -EINVAL;
3230 }
3231 if (val < MIN_SRPT_SRQ_SIZE) {
3232 pr_err("val: %lu smaller than MIN_SRPT_SRQ_SIZE: %d\n", val,
3233 MIN_SRPT_SRQ_SIZE);
3234 return -EINVAL;
3235 }
3236 sport->port_attrib.srp_sq_size = val;
3237
3238 return count;
3239}
3240
Christoph Hellwig2eafd722015-10-03 15:32:55 +02003241CONFIGFS_ATTR(srpt_tpg_attrib_, srp_max_rdma_size);
3242CONFIGFS_ATTR(srpt_tpg_attrib_, srp_max_rsp_size);
3243CONFIGFS_ATTR(srpt_tpg_attrib_, srp_sq_size);
Bart Van Asschea42d9852011-10-14 01:30:46 +00003244
3245static struct configfs_attribute *srpt_tpg_attrib_attrs[] = {
Christoph Hellwig2eafd722015-10-03 15:32:55 +02003246 &srpt_tpg_attrib_attr_srp_max_rdma_size,
3247 &srpt_tpg_attrib_attr_srp_max_rsp_size,
3248 &srpt_tpg_attrib_attr_srp_sq_size,
Bart Van Asschea42d9852011-10-14 01:30:46 +00003249 NULL,
3250};
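
/*
 * Illustrative example (not part of the original source) of tuning the TPG
 * attributes defined above from user space; the values shown are arbitrary.
 * Values outside the ranges enforced by the _store functions above are
 * rejected with EINVAL.
 *
 *   cd /sys/kernel/config/target/srpt/$port/$tpg/attrib
 *   echo 65536 > srp_max_rdma_size   # [DEFAULT_MAX_RDMA_SIZE, MAX_SRPT_RDMA_SIZE]
 *   echo 4096  > srp_max_rsp_size    # [MIN_MAX_RSP_SIZE, MAX_SRPT_RSP_SIZE]
 *   echo 4096  > srp_sq_size         # [MIN_SRPT_SRQ_SIZE, MAX_SRPT_SRQ_SIZE]
 */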
3251
Christoph Hellwig2eafd722015-10-03 15:32:55 +02003252static ssize_t srpt_tpg_enable_show(struct config_item *item, char *page)
Bart Van Asschea42d9852011-10-14 01:30:46 +00003253{
Christoph Hellwig2eafd722015-10-03 15:32:55 +02003254 struct se_portal_group *se_tpg = to_tpg(item);
Bart Van Asschea42d9852011-10-14 01:30:46 +00003255 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3256
3257	return snprintf(page, PAGE_SIZE, "%d\n", sport->enabled ? 1 : 0);
3258}
3259
Christoph Hellwig2eafd722015-10-03 15:32:55 +02003260static ssize_t srpt_tpg_enable_store(struct config_item *item,
3261 const char *page, size_t count)
Bart Van Asschea42d9852011-10-14 01:30:46 +00003262{
Christoph Hellwig2eafd722015-10-03 15:32:55 +02003263 struct se_portal_group *se_tpg = to_tpg(item);
Bart Van Asschea42d9852011-10-14 01:30:46 +00003264 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3265 unsigned long tmp;
3266 int ret;
3267
Jingoo Han9d8abf42014-02-05 11:22:05 +09003268 ret = kstrtoul(page, 0, &tmp);
Bart Van Asschea42d9852011-10-14 01:30:46 +00003269 if (ret < 0) {
Doug Ledford9f5d32a2014-10-20 18:25:15 -04003270		pr_err("Unable to extract a numeric value for the enable attribute\n");
Bart Van Asschea42d9852011-10-14 01:30:46 +00003271 return -EINVAL;
3272 }
3273
3274 if ((tmp != 0) && (tmp != 1)) {
Doug Ledford9f5d32a2014-10-20 18:25:15 -04003275		pr_err("Illegal value for the enable attribute: %lu\n", tmp);
Bart Van Asschea42d9852011-10-14 01:30:46 +00003276 return -EINVAL;
3277 }
3278 if (tmp == 1)
3279 sport->enabled = true;
3280 else
3281 sport->enabled = false;
3282
3283 return count;
3284}
3285
Christoph Hellwig2eafd722015-10-03 15:32:55 +02003286CONFIGFS_ATTR(srpt_tpg_, enable);
Bart Van Asschea42d9852011-10-14 01:30:46 +00003287
3288static struct configfs_attribute *srpt_tpg_attrs[] = {
Christoph Hellwig2eafd722015-10-03 15:32:55 +02003289 &srpt_tpg_attr_enable,
Bart Van Asschea42d9852011-10-14 01:30:46 +00003290 NULL,
3291};
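
/*
 * Illustrative example (not part of the original source): enabling a target
 * port group through the attribute defined above. srpt_tpg_enable_store()
 * only accepts the values 0 and 1.
 *
 *   echo 1 > /sys/kernel/config/target/srpt/$port/$tpg/enable
 */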
3292
3293/**
3294 * configfs callback invoked for
3295 * mkdir /sys/kernel/config/target/$driver/$port/$tpg
3296 */
3297static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
3298 struct config_group *group,
3299 const char *name)
3300{
3301 struct srpt_port *sport = container_of(wwn, struct srpt_port, port_wwn);
3302 int res;
3303
3304 /* Initialize sport->port_wwn and sport->port_tpg_1 */
Nicholas Bellingerbc0c94b2015-05-20 21:48:03 -07003305 res = core_tpg_register(&sport->port_wwn, &sport->port_tpg_1, SCSI_PROTOCOL_SRP);
Bart Van Asschea42d9852011-10-14 01:30:46 +00003306 if (res)
3307 return ERR_PTR(res);
3308
3309 return &sport->port_tpg_1;
3310}
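
/*
 * Example (illustrative, not part of the original source) of the mkdir that
 * invokes srpt_make_tpg(). srpt supports a single TPG per port: whatever name
 * is passed in, the function returns &sport->port_tpg_1, which carries tag 1.
 *
 *   mkdir /sys/kernel/config/target/srpt/$port/tpg1
 */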
3311
3312/**
3313 * configfs callback invoked for
3314 * rmdir /sys/kernel/config/target/$driver/$port/$tpg
3315 */
3316static void srpt_drop_tpg(struct se_portal_group *tpg)
3317{
3318 struct srpt_port *sport = container_of(tpg,
3319 struct srpt_port, port_tpg_1);
3320
3321 sport->enabled = false;
3322 core_tpg_deregister(&sport->port_tpg_1);
3323}
3324
3325/**
3326 * configfs callback invoked for
3327 * mkdir /sys/kernel/config/target/$driver/$port
3328 */
3329static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf,
3330 struct config_group *group,
3331 const char *name)
3332{
3333 struct srpt_port *sport;
3334 int ret;
3335
3336 sport = srpt_lookup_port(name);
3337 pr_debug("make_tport(%s)\n", name);
3338 ret = -EINVAL;
3339 if (!sport)
3340 goto err;
3341
3342 return &sport->port_wwn;
3343
3344err:
3345 return ERR_PTR(ret);
3346}
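
/*
 * Example (illustrative, not part of the original source) of the mkdir that
 * invokes srpt_make_tport(). The directory name must match a port known to
 * srpt_lookup_port(); otherwise the mkdir fails with EINVAL. The port name
 * below is hypothetical.
 *
 *   mkdir /sys/kernel/config/target/srpt/0xfe800000000000000002c90300a06a52
 */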
3347
3348/**
3349 * configfs callback invoked for
3350 * rmdir /sys/kernel/config/target/$driver/$port
3351 */
3352static void srpt_drop_tport(struct se_wwn *wwn)
3353{
3354 struct srpt_port *sport = container_of(wwn, struct srpt_port, port_wwn);
3355
3356	pr_debug("drop_tport(%s)\n", config_item_name(&sport->port_wwn.wwn_group.cg_item));
3357}
3358
Christoph Hellwig2eafd722015-10-03 15:32:55 +02003359static ssize_t srpt_wwn_version_show(struct config_item *item, char *buf)
Bart Van Asschea42d9852011-10-14 01:30:46 +00003360{
3361 return scnprintf(buf, PAGE_SIZE, "%s\n", DRV_VERSION);
3362}
3363
Christoph Hellwig2eafd722015-10-03 15:32:55 +02003364CONFIGFS_ATTR_RO(srpt_wwn_, version);
Bart Van Asschea42d9852011-10-14 01:30:46 +00003365
3366static struct configfs_attribute *srpt_wwn_attrs[] = {
Christoph Hellwig2eafd722015-10-03 15:32:55 +02003367 &srpt_wwn_attr_version,
Bart Van Asschea42d9852011-10-14 01:30:46 +00003368 NULL,
3369};
3370
Christoph Hellwig9ac89282015-04-08 20:01:35 +02003371static const struct target_core_fabric_ops srpt_template = {
3372 .module = THIS_MODULE,
3373 .name = "srpt",
Bart Van Asschea42d9852011-10-14 01:30:46 +00003374 .get_fabric_name = srpt_get_fabric_name,
Bart Van Asschea42d9852011-10-14 01:30:46 +00003375 .tpg_get_wwn = srpt_get_fabric_wwn,
3376 .tpg_get_tag = srpt_get_tag,
Bart Van Asschea42d9852011-10-14 01:30:46 +00003377 .tpg_check_demo_mode = srpt_check_false,
3378 .tpg_check_demo_mode_cache = srpt_check_true,
3379 .tpg_check_demo_mode_write_protect = srpt_check_true,
3380 .tpg_check_prod_mode_write_protect = srpt_check_false,
Bart Van Asschea42d9852011-10-14 01:30:46 +00003381 .tpg_get_inst_index = srpt_tpg_get_inst_index,
3382 .release_cmd = srpt_release_cmd,
3383 .check_stop_free = srpt_check_stop_free,
3384 .shutdown_session = srpt_shutdown_session,
3385 .close_session = srpt_close_session,
Bart Van Asschea42d9852011-10-14 01:30:46 +00003386 .sess_get_index = srpt_sess_get_index,
3387 .sess_get_initiator_sid = NULL,
3388 .write_pending = srpt_write_pending,
3389 .write_pending_status = srpt_write_pending_status,
3390 .set_default_node_attributes = srpt_set_default_node_attrs,
Bart Van Asschea42d9852011-10-14 01:30:46 +00003391 .get_cmd_state = srpt_get_tcm_cmd_state,
Joern Engelb79fafa2013-07-03 11:22:17 -04003392 .queue_data_in = srpt_queue_data_in,
Bart Van Asschea42d9852011-10-14 01:30:46 +00003393 .queue_status = srpt_queue_status,
Joern Engelb79fafa2013-07-03 11:22:17 -04003394 .queue_tm_rsp = srpt_queue_tm_rsp,
Nicholas Bellinger131e6ab2014-03-22 14:55:56 -07003395 .aborted_task = srpt_aborted_task,
Bart Van Asschea42d9852011-10-14 01:30:46 +00003396 /*
3397 * Setup function pointers for generic logic in
3398 * target_core_fabric_configfs.c
3399 */
3400 .fabric_make_wwn = srpt_make_tport,
3401 .fabric_drop_wwn = srpt_drop_tport,
3402 .fabric_make_tpg = srpt_make_tpg,
3403 .fabric_drop_tpg = srpt_drop_tpg,
Christoph Hellwigc7d6a802015-04-13 19:51:14 +02003404 .fabric_init_nodeacl = srpt_init_nodeacl,
Christoph Hellwig9ac89282015-04-08 20:01:35 +02003405
3406 .tfc_wwn_attrs = srpt_wwn_attrs,
3407 .tfc_tpg_base_attrs = srpt_tpg_attrs,
3408 .tfc_tpg_attrib_attrs = srpt_tpg_attrib_attrs,
Bart Van Asschea42d9852011-10-14 01:30:46 +00003409};
3410
3411/**
3412 * srpt_init_module() - Kernel module initialization.
3413 *
3414 * Note: Since ib_register_client() registers callback functions, and since at
3415 * least one of these callback functions (srpt_add_one()) calls target core
3416 * functions, this driver must be registered with the target core before
3417 * ib_register_client() is called.
3418 */
3419static int __init srpt_init_module(void)
3420{
3421 int ret;
3422
3423 ret = -EINVAL;
3424 if (srp_max_req_size < MIN_MAX_REQ_SIZE) {
Doug Ledford9f5d32a2014-10-20 18:25:15 -04003425 pr_err("invalid value %d for kernel module parameter"
Bart Van Asschea42d9852011-10-14 01:30:46 +00003426 " srp_max_req_size -- must be at least %d.\n",
3427 srp_max_req_size, MIN_MAX_REQ_SIZE);
3428 goto out;
3429 }
3430
3431 if (srpt_srq_size < MIN_SRPT_SRQ_SIZE
3432 || srpt_srq_size > MAX_SRPT_SRQ_SIZE) {
Doug Ledford9f5d32a2014-10-20 18:25:15 -04003433 pr_err("invalid value %d for kernel module parameter"
Bart Van Asschea42d9852011-10-14 01:30:46 +00003434 " srpt_srq_size -- must be in the range [%d..%d].\n",
3435 srpt_srq_size, MIN_SRPT_SRQ_SIZE, MAX_SRPT_SRQ_SIZE);
3436 goto out;
3437 }
3438
Christoph Hellwig9ac89282015-04-08 20:01:35 +02003439 ret = target_register_template(&srpt_template);
3440 if (ret)
Bart Van Asschea42d9852011-10-14 01:30:46 +00003441 goto out;
Bart Van Asschea42d9852011-10-14 01:30:46 +00003442
3443 ret = ib_register_client(&srpt_client);
3444 if (ret) {
Doug Ledford9f5d32a2014-10-20 18:25:15 -04003445 pr_err("couldn't register IB client\n");
Bart Van Asschea42d9852011-10-14 01:30:46 +00003446 goto out_unregister_target;
3447 }
3448
3449 return 0;
3450
3451out_unregister_target:
Christoph Hellwig9ac89282015-04-08 20:01:35 +02003452 target_unregister_template(&srpt_template);
Bart Van Asschea42d9852011-10-14 01:30:46 +00003453out:
3454 return ret;
3455}
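
/*
 * Illustrative example (not part of the original source): loading the module
 * with explicit values for the two parameters validated above. The values
 * shown are arbitrary; srp_max_req_size must be at least MIN_MAX_REQ_SIZE and
 * srpt_srq_size must lie within [MIN_SRPT_SRQ_SIZE, MAX_SRPT_SRQ_SIZE],
 * otherwise srpt_init_module() fails with -EINVAL.
 *
 *   modprobe ib_srpt srp_max_req_size=4096 srpt_srq_size=4095
 */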
3456
3457static void __exit srpt_cleanup_module(void)
3458{
3459 ib_unregister_client(&srpt_client);
Christoph Hellwig9ac89282015-04-08 20:01:35 +02003460 target_unregister_template(&srpt_template);
Bart Van Asschea42d9852011-10-14 01:30:46 +00003461}
3462
3463module_init(srpt_init_module);
3464module_exit(srpt_cleanup_module);