/*******************************************************************************
 * This file contains iSCSI extensions for RDMA (iSER) Verbs
 *
 * (c) Copyright 2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 ****************************************************************************/

#include <linux/string.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>
#include <linux/semaphore.h>

#include "isert_proto.h"
#include "ib_isert.h"

#define ISERT_MAX_CONN		8
#define ISER_MAX_RX_CQ_LEN	(ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
#define ISER_MAX_TX_CQ_LEN	(ISERT_QP_MAX_REQ_DTOS  * ISERT_MAX_CONN)
#define ISER_MAX_CQ_LEN		(ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
				 ISERT_MAX_CONN)

static int isert_debug_level;
module_param_named(debug_level, isert_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)");

static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(device_list);
static struct workqueue_struct *isert_comp_wq;
static struct workqueue_struct *isert_release_wq;

static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
               struct isert_rdma_wr *wr);
static void
isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
               struct isert_rdma_wr *wr);
static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
static int
isert_rdma_post_recvl(struct isert_conn *isert_conn);
static int
isert_rdma_accept(struct isert_conn *isert_conn);
struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);

static inline bool
isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
{
        return (conn->pi_support &&
                cmd->prot_op != TARGET_PROT_NORMAL);
}

static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
        struct isert_conn *isert_conn = (struct isert_conn *)context;

        isert_err("conn %p event: %d\n", isert_conn, e->event);
        switch (e->event) {
        case IB_EVENT_COMM_EST:
                rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST);
                break;
        case IB_EVENT_QP_LAST_WQE_REACHED:
                isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n");
                break;
        default:
                break;
        }
}

static int
isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
{
        int ret;

        ret = ib_query_device(ib_dev, devattr);
        if (ret) {
                isert_err("ib_query_device() failed: %d\n", ret);
                return ret;
        }
        isert_dbg("devattr->max_sge: %d\n", devattr->max_sge);
        isert_dbg("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);

        return 0;
}

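/*
 * Create the RC queue-pair for a new connection: pick the completion
 * context with the fewest active QPs, size the send/recv queues from
 * ISERT_QP_MAX_REQ_DTOS/ISERT_QP_MAX_RECV_DTOS, and request signature
 * offload (IB_QP_CREATE_SIGNATURE_EN) when the device is pi_capable.
 */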
static int
isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
{
        struct isert_device *device = isert_conn->conn_device;
        struct ib_qp_init_attr attr;
        struct isert_comp *comp;
        int ret, i, min = 0;

        mutex_lock(&device_list_mutex);
        for (i = 0; i < device->comps_used; i++)
                if (device->comps[i].active_qps <
                    device->comps[min].active_qps)
                        min = i;
        comp = &device->comps[min];
        comp->active_qps++;
        isert_info("conn %p, using comp %p min_index: %d\n",
                   isert_conn, comp, min);
        mutex_unlock(&device_list_mutex);

        memset(&attr, 0, sizeof(struct ib_qp_init_attr));
        attr.event_handler = isert_qp_event_callback;
        attr.qp_context = isert_conn;
        attr.send_cq = comp->cq;
        attr.recv_cq = comp->cq;
        attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
        attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
        /*
         * FIXME: Use devattr.max_sge - 2 for max_send_sge as
         * work-around for RDMA_READs with ConnectX-2.
         *
         * Also, still make sure to have at least two SGEs for
         * outgoing control PDU responses.
         */
        attr.cap.max_send_sge = max(2, device->dev_attr.max_sge - 2);
        isert_conn->max_sge = attr.cap.max_send_sge;

        attr.cap.max_recv_sge = 1;
        attr.sq_sig_type = IB_SIGNAL_REQ_WR;
        attr.qp_type = IB_QPT_RC;
        if (device->pi_capable)
                attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;

        ret = rdma_create_qp(cma_id, device->pd, &attr);
        if (ret) {
                isert_err("rdma_create_qp failed for cma_id %d\n", ret);
                goto err;
        }
        isert_conn->conn_qp = cma_id->qp;

        return 0;
err:
        mutex_lock(&device_list_mutex);
        comp->active_qps--;
        mutex_unlock(&device_list_mutex);

        return ret;
}

static void
isert_cq_event_callback(struct ib_event *e, void *context)
{
        isert_dbg("event: %d\n", e->event);
}

static int
isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
{
        struct isert_device *device = isert_conn->conn_device;
        struct ib_device *ib_dev = device->ib_device;
        struct iser_rx_desc *rx_desc;
        struct ib_sge *rx_sg;
        u64 dma_addr;
        int i, j;

        isert_conn->conn_rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
                                sizeof(struct iser_rx_desc), GFP_KERNEL);
        if (!isert_conn->conn_rx_descs)
                goto fail;

        rx_desc = isert_conn->conn_rx_descs;

        for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
                dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
                                        ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
                if (ib_dma_mapping_error(ib_dev, dma_addr))
                        goto dma_map_fail;

                rx_desc->dma_addr = dma_addr;

                rx_sg = &rx_desc->rx_sg;
                rx_sg->addr = rx_desc->dma_addr;
                rx_sg->length = ISER_RX_PAYLOAD_SIZE;
                rx_sg->lkey = device->mr->lkey;
        }

        isert_conn->conn_rx_desc_head = 0;

        return 0;

dma_map_fail:
        rx_desc = isert_conn->conn_rx_descs;
        for (j = 0; j < i; j++, rx_desc++) {
                ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
                                    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
        }
        kfree(isert_conn->conn_rx_descs);
        isert_conn->conn_rx_descs = NULL;
fail:
        isert_err("conn %p failed to allocate rx descriptors\n", isert_conn);

        return -ENOMEM;
}

static void
isert_free_rx_descriptors(struct isert_conn *isert_conn)
{
        struct ib_device *ib_dev = isert_conn->conn_device->ib_device;
        struct iser_rx_desc *rx_desc;
        int i;

        if (!isert_conn->conn_rx_descs)
                return;

        rx_desc = isert_conn->conn_rx_descs;
        for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
                ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
                                    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
        }

        kfree(isert_conn->conn_rx_descs);
        isert_conn->conn_rx_descs = NULL;
}

static void isert_cq_work(struct work_struct *);
static void isert_cq_callback(struct ib_cq *, void *);

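/*
 * Per ib_device setup: query the device attributes, choose between the
 * fastreg and dma-map RDMA handlers based on the reported capability
 * flags, allocate one completion context per online CPU (capped by the
 * device's completion vectors and ISERT_MAX_CQ), and set up the PD and
 * local DMA MR shared by all connections on this device.
 */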
static int
isert_create_device_ib_res(struct isert_device *device)
{
        struct ib_device *ib_dev = device->ib_device;
        struct ib_device_attr *dev_attr;
        int ret = 0, i;
        int max_cqe;

        dev_attr = &device->dev_attr;
        ret = isert_query_device(ib_dev, dev_attr);
        if (ret)
                return ret;

        max_cqe = min(ISER_MAX_CQ_LEN, dev_attr->max_cqe);

        /* assign function handlers */
        if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
            dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) {
                device->use_fastreg = 1;
                device->reg_rdma_mem = isert_reg_rdma;
                device->unreg_rdma_mem = isert_unreg_rdma;
        } else {
                device->use_fastreg = 0;
                device->reg_rdma_mem = isert_map_rdma;
                device->unreg_rdma_mem = isert_unmap_cmd;
        }

        /* Check signature cap */
        device->pi_capable = dev_attr->device_cap_flags &
                             IB_DEVICE_SIGNATURE_HANDOVER ? true : false;

        device->comps_used = min(ISERT_MAX_CQ, min_t(int, num_online_cpus(),
                                        device->ib_device->num_comp_vectors));
        isert_info("Using %d CQs, %s supports %d vectors support "
                   "Fast registration %d pi_capable %d\n",
                   device->comps_used, device->ib_device->name,
                   device->ib_device->num_comp_vectors, device->use_fastreg,
                   device->pi_capable);

        device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp),
                                GFP_KERNEL);
        if (!device->comps) {
                isert_err("Unable to allocate completion contexts\n");
                return -ENOMEM;
        }

        for (i = 0; i < device->comps_used; i++) {
                struct isert_comp *comp = &device->comps[i];

                comp->device = device;
                INIT_WORK(&comp->work, isert_cq_work);
                comp->cq = ib_create_cq(device->ib_device,
                                        isert_cq_callback,
                                        isert_cq_event_callback,
                                        (void *)comp,
                                        max_cqe, i);
                if (IS_ERR(comp->cq)) {
                        ret = PTR_ERR(comp->cq);
                        comp->cq = NULL;
                        goto out_cq;
                }

                ret = ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
                if (ret)
                        goto out_cq;
        }

        device->pd = ib_alloc_pd(device->ib_device);
        if (IS_ERR(device->pd)) {
                ret = PTR_ERR(device->pd);
                isert_err("failed to allocate pd, device %p, ret=%d\n",
                          device, ret);
                goto out_cq;
        }

        device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE);
        if (IS_ERR(device->mr)) {
                ret = PTR_ERR(device->mr);
                isert_err("failed to create dma mr, device %p, ret=%d\n",
                          device, ret);
                goto out_mr;
        }

        return 0;

out_mr:
        ib_dealloc_pd(device->pd);
out_cq:
        for (i = 0; i < device->comps_used; i++) {
                struct isert_comp *comp = &device->comps[i];

                if (comp->cq) {
                        cancel_work_sync(&comp->work);
                        ib_destroy_cq(comp->cq);
                }
        }
        kfree(device->comps);

        return ret;
}

static void
isert_free_device_ib_res(struct isert_device *device)
{
        int i;

        isert_info("device %p\n", device);

        ib_dereg_mr(device->mr);
        ib_dealloc_pd(device->pd);
        for (i = 0; i < device->comps_used; i++) {
                struct isert_comp *comp = &device->comps[i];

                cancel_work_sync(&comp->work);
                ib_destroy_cq(comp->cq);
                comp->cq = NULL;
        }
        kfree(device->comps);
}

static void
isert_device_try_release(struct isert_device *device)
{
        mutex_lock(&device_list_mutex);
        device->refcount--;
        isert_info("device %p refcount %d\n", device, device->refcount);
        if (!device->refcount) {
                isert_free_device_ib_res(device);
                list_del(&device->dev_node);
                kfree(device);
        }
        mutex_unlock(&device_list_mutex);
}

static struct isert_device *
isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
{
        struct isert_device *device;
        int ret;

        mutex_lock(&device_list_mutex);
        list_for_each_entry(device, &device_list, dev_node) {
                if (device->ib_device->node_guid == cma_id->device->node_guid) {
                        device->refcount++;
                        isert_info("Found iser device %p refcount %d\n",
                                   device, device->refcount);
                        mutex_unlock(&device_list_mutex);
                        return device;
                }
        }

        device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
        if (!device) {
                mutex_unlock(&device_list_mutex);
                return ERR_PTR(-ENOMEM);
        }

        INIT_LIST_HEAD(&device->dev_node);

        device->ib_device = cma_id->device;
        ret = isert_create_device_ib_res(device);
        if (ret) {
                kfree(device);
                mutex_unlock(&device_list_mutex);
                return ERR_PTR(ret);
        }

        device->refcount++;
        list_add_tail(&device->dev_node, &device_list);
        isert_info("Created a new iser device %p refcount %d\n",
                   device, device->refcount);
        mutex_unlock(&device_list_mutex);

        return device;
}

static void
isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
{
        struct fast_reg_descriptor *fr_desc, *tmp;
        int i = 0;

        if (list_empty(&isert_conn->conn_fr_pool))
                return;

        isert_info("Freeing conn %p fastreg pool", isert_conn);

        list_for_each_entry_safe(fr_desc, tmp,
                                 &isert_conn->conn_fr_pool, list) {
                list_del(&fr_desc->list);
                ib_free_fast_reg_page_list(fr_desc->data_frpl);
                ib_dereg_mr(fr_desc->data_mr);
                if (fr_desc->pi_ctx) {
                        ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl);
                        ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
                        ib_destroy_mr(fr_desc->pi_ctx->sig_mr);
                        kfree(fr_desc->pi_ctx);
                }
                kfree(fr_desc);
                ++i;
        }

        if (i < isert_conn->conn_fr_pool_size)
                isert_warn("Pool still has %d regions registered\n",
                           isert_conn->conn_fr_pool_size - i);
}

static int
isert_create_pi_ctx(struct fast_reg_descriptor *desc,
                    struct ib_device *device,
                    struct ib_pd *pd)
{
        struct ib_mr_init_attr mr_init_attr;
        struct pi_context *pi_ctx;
        int ret;

        pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
        if (!pi_ctx) {
                isert_err("Failed to allocate pi context\n");
                return -ENOMEM;
        }

        pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(device,
                                            ISCSI_ISER_SG_TABLESIZE);
        if (IS_ERR(pi_ctx->prot_frpl)) {
                isert_err("Failed to allocate prot frpl err=%ld\n",
                          PTR_ERR(pi_ctx->prot_frpl));
                ret = PTR_ERR(pi_ctx->prot_frpl);
                goto err_pi_ctx;
        }

        pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
        if (IS_ERR(pi_ctx->prot_mr)) {
                isert_err("Failed to allocate prot frmr err=%ld\n",
                          PTR_ERR(pi_ctx->prot_mr));
                ret = PTR_ERR(pi_ctx->prot_mr);
                goto err_prot_frpl;
        }
        desc->ind |= ISERT_PROT_KEY_VALID;

        memset(&mr_init_attr, 0, sizeof(mr_init_attr));
        mr_init_attr.max_reg_descriptors = 2;
        mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
        pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
        if (IS_ERR(pi_ctx->sig_mr)) {
                isert_err("Failed to allocate signature enabled mr err=%ld\n",
                          PTR_ERR(pi_ctx->sig_mr));
                ret = PTR_ERR(pi_ctx->sig_mr);
                goto err_prot_mr;
        }

        desc->pi_ctx = pi_ctx;
        desc->ind |= ISERT_SIG_KEY_VALID;
        desc->ind &= ~ISERT_PROTECTED;

        return 0;

err_prot_mr:
        ib_dereg_mr(desc->pi_ctx->prot_mr);
err_prot_frpl:
        ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
err_pi_ctx:
        kfree(desc->pi_ctx);

        return ret;
}

static int
isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
                     struct fast_reg_descriptor *fr_desc)
{
        int ret;

        fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
                                                         ISCSI_ISER_SG_TABLESIZE);
        if (IS_ERR(fr_desc->data_frpl)) {
                isert_err("Failed to allocate data frpl err=%ld\n",
                          PTR_ERR(fr_desc->data_frpl));
                return PTR_ERR(fr_desc->data_frpl);
        }

        fr_desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
        if (IS_ERR(fr_desc->data_mr)) {
                isert_err("Failed to allocate data frmr err=%ld\n",
                          PTR_ERR(fr_desc->data_mr));
                ret = PTR_ERR(fr_desc->data_mr);
                goto err_data_frpl;
        }
        fr_desc->ind |= ISERT_DATA_KEY_VALID;

        isert_dbg("Created fr_desc %p\n", fr_desc);

        return 0;

err_data_frpl:
        ib_free_fast_reg_page_list(fr_desc->data_frpl);

        return ret;
}

static int
isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
{
        struct fast_reg_descriptor *fr_desc;
        struct isert_device *device = isert_conn->conn_device;
        struct se_session *se_sess = isert_conn->conn->sess->se_sess;
        struct se_node_acl *se_nacl = se_sess->se_node_acl;
        int i, ret, tag_num;
        /*
         * Setup the number of FRMRs based upon the number of tags
         * available to session in iscsi_target_locate_portal().
         */
        tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth);
        tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;

        isert_conn->conn_fr_pool_size = 0;
        for (i = 0; i < tag_num; i++) {
                fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
                if (!fr_desc) {
                        isert_err("Failed to allocate fast_reg descriptor\n");
                        ret = -ENOMEM;
                        goto err;
                }

                ret = isert_create_fr_desc(device->ib_device,
                                           device->pd, fr_desc);
                if (ret) {
                        isert_err("Failed to create fastreg descriptor err=%d\n",
                                  ret);
                        kfree(fr_desc);
                        goto err;
                }

                list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
                isert_conn->conn_fr_pool_size++;
        }

        isert_dbg("Creating conn %p fastreg pool size=%d",
                  isert_conn, isert_conn->conn_fr_pool_size);

        return 0;

err:
        isert_conn_free_fastreg_pool(isert_conn);
        return ret;
}

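/*
 * RDMA_CM_EVENT_CONNECT_REQUEST handler: reject the request if the
 * iscsi_np is not enabled, otherwise allocate and initialize an
 * isert_conn, DMA-map the login request/response buffers, look up (or
 * create) the isert_device, create the QP, post the first login recv
 * and accept the connection before waking up accept_np.
 */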
static int
isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
        struct isert_np *isert_np = cma_id->context;
        struct iscsi_np *np = isert_np->np;
        struct isert_conn *isert_conn;
        struct isert_device *device;
        struct ib_device *ib_dev = cma_id->device;
        int ret = 0;

        spin_lock_bh(&np->np_thread_lock);
        if (!np->enabled) {
                spin_unlock_bh(&np->np_thread_lock);
                isert_dbg("iscsi_np is not enabled, reject connect request\n");
                return rdma_reject(cma_id, NULL, 0);
        }
        spin_unlock_bh(&np->np_thread_lock);

        isert_dbg("cma_id: %p, portal: %p\n",
                  cma_id, cma_id->context);

        isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
        if (!isert_conn) {
                isert_err("Unable to allocate isert_conn\n");
                return -ENOMEM;
        }
        isert_conn->state = ISER_CONN_INIT;
        INIT_LIST_HEAD(&isert_conn->conn_accept_node);
        init_completion(&isert_conn->conn_login_comp);
        init_completion(&isert_conn->login_req_comp);
        init_completion(&isert_conn->conn_wait);
        kref_init(&isert_conn->conn_kref);
        mutex_init(&isert_conn->conn_mutex);
        spin_lock_init(&isert_conn->conn_lock);
        INIT_LIST_HEAD(&isert_conn->conn_fr_pool);

        isert_conn->conn_cm_id = cma_id;

        isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
                                        ISER_RX_LOGIN_SIZE, GFP_KERNEL);
        if (!isert_conn->login_buf) {
                isert_err("Unable to allocate isert_conn->login_buf\n");
                ret = -ENOMEM;
                goto out;
        }

        isert_conn->login_req_buf = isert_conn->login_buf;
        isert_conn->login_rsp_buf = isert_conn->login_buf +
                ISCSI_DEF_MAX_RECV_SEG_LEN;
        isert_dbg("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
                  isert_conn->login_buf, isert_conn->login_req_buf,
                  isert_conn->login_rsp_buf);

        isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
                                (void *)isert_conn->login_req_buf,
                                ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);

        ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
        if (ret) {
                isert_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
                          ret);
                isert_conn->login_req_dma = 0;
                goto out_login_buf;
        }

        isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
                                        (void *)isert_conn->login_rsp_buf,
                                        ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);

        ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
        if (ret) {
                isert_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
                          ret);
                isert_conn->login_rsp_dma = 0;
                goto out_req_dma_map;
        }

        device = isert_device_find_by_ib_dev(cma_id);
        if (IS_ERR(device)) {
                ret = PTR_ERR(device);
                goto out_rsp_dma_map;
        }

        /* Set max inflight RDMA READ requests */
        isert_conn->initiator_depth = min_t(u8,
                                event->param.conn.initiator_depth,
                                device->dev_attr.max_qp_init_rd_atom);
        isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth);

        isert_conn->conn_device = device;

        ret = isert_conn_setup_qp(isert_conn, cma_id);
        if (ret)
                goto out_conn_dev;

        ret = isert_rdma_post_recvl(isert_conn);
        if (ret)
                goto out_conn_dev;

        ret = isert_rdma_accept(isert_conn);
        if (ret)
                goto out_conn_dev;

        mutex_lock(&isert_np->np_accept_mutex);
        list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
        mutex_unlock(&isert_np->np_accept_mutex);

        isert_info("np %p: Allow accept_np to continue\n", np);
        up(&isert_np->np_sem);
        return 0;

out_conn_dev:
        isert_device_try_release(device);
out_rsp_dma_map:
        ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
                            ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
out_req_dma_map:
        ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
                            ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
out_login_buf:
        kfree(isert_conn->login_buf);
out:
        kfree(isert_conn);
        rdma_reject(cma_id, NULL, 0);
        return ret;
}

static void
isert_connect_release(struct isert_conn *isert_conn)
{
        struct isert_device *device = isert_conn->conn_device;
        struct ib_device *ib_dev = device->ib_device;

        isert_dbg("conn %p\n", isert_conn);

        if (device && device->use_fastreg)
                isert_conn_free_fastreg_pool(isert_conn);

        isert_free_rx_descriptors(isert_conn);
        if (isert_conn->conn_cm_id)
                rdma_destroy_id(isert_conn->conn_cm_id);

        if (isert_conn->conn_qp) {
                struct isert_comp *comp = isert_conn->conn_qp->recv_cq->cq_context;

                isert_dbg("dec completion context %p active_qps\n", comp);
                mutex_lock(&device_list_mutex);
                comp->active_qps--;
                mutex_unlock(&device_list_mutex);

                ib_destroy_qp(isert_conn->conn_qp);
        }

        if (isert_conn->login_buf) {
                ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
                                    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
                ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
                                    ISCSI_DEF_MAX_RECV_SEG_LEN,
                                    DMA_FROM_DEVICE);
                kfree(isert_conn->login_buf);
        }
        kfree(isert_conn);

        if (device)
                isert_device_try_release(device);
}

static void
isert_connected_handler(struct rdma_cm_id *cma_id)
{
        struct isert_conn *isert_conn = cma_id->qp->qp_context;

        isert_info("conn %p\n", isert_conn);

        if (!kref_get_unless_zero(&isert_conn->conn_kref)) {
                isert_warn("conn %p connect_release is running\n", isert_conn);
                return;
        }

        mutex_lock(&isert_conn->conn_mutex);
        if (isert_conn->state != ISER_CONN_FULL_FEATURE)
                isert_conn->state = ISER_CONN_UP;
        mutex_unlock(&isert_conn->conn_mutex);
}

static void
isert_release_conn_kref(struct kref *kref)
{
        struct isert_conn *isert_conn = container_of(kref,
                                struct isert_conn, conn_kref);

        isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm,
                   current->pid);

        isert_connect_release(isert_conn);
}

static void
isert_put_conn(struct isert_conn *isert_conn)
{
        kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
}

/**
 * isert_conn_terminate() - Initiate connection termination
 * @isert_conn: isert connection struct
 *
 * Notes:
 * In case the connection state is FULL_FEATURE, move state
 * to TERMINATING and start teardown sequence (rdma_disconnect).
 * In case the connection state is UP, complete flush as well.
 *
 * This routine must be called with conn_mutex held. Thus it is
 * safe to call multiple times.
 */
static void
isert_conn_terminate(struct isert_conn *isert_conn)
{
        int err;

        switch (isert_conn->state) {
        case ISER_CONN_TERMINATING:
                break;
        case ISER_CONN_UP:
        case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
                isert_info("Terminating conn %p state %d\n",
                           isert_conn, isert_conn->state);
                isert_conn->state = ISER_CONN_TERMINATING;
                err = rdma_disconnect(isert_conn->conn_cm_id);
                if (err)
                        isert_warn("Failed rdma_disconnect isert_conn %p\n",
                                   isert_conn);
                break;
        default:
                isert_warn("conn %p terminating in state %d\n",
                           isert_conn, isert_conn->state);
        }
}

static int
isert_np_cma_handler(struct isert_np *isert_np,
                     enum rdma_cm_event_type event)
{
        isert_dbg("isert np %p, handling event %d\n", isert_np, event);

        switch (event) {
        case RDMA_CM_EVENT_DEVICE_REMOVAL:
                isert_np->np_cm_id = NULL;
                break;
        case RDMA_CM_EVENT_ADDR_CHANGE:
                isert_np->np_cm_id = isert_setup_id(isert_np);
                if (IS_ERR(isert_np->np_cm_id)) {
                        isert_err("isert np %p setup id failed: %ld\n",
                                  isert_np, PTR_ERR(isert_np->np_cm_id));
                        isert_np->np_cm_id = NULL;
                }
                break;
        default:
                isert_err("isert np %p Unexpected event %d\n",
                          isert_np, event);
        }

        return -1;
}

static int
isert_disconnected_handler(struct rdma_cm_id *cma_id,
                           enum rdma_cm_event_type event)
{
        struct isert_np *isert_np = cma_id->context;
        struct isert_conn *isert_conn;

        if (isert_np->np_cm_id == cma_id)
                return isert_np_cma_handler(cma_id->context, event);

        isert_conn = cma_id->qp->qp_context;

        mutex_lock(&isert_conn->conn_mutex);
        isert_conn_terminate(isert_conn);
        mutex_unlock(&isert_conn->conn_mutex);

        isert_info("conn %p completing conn_wait\n", isert_conn);
        complete(&isert_conn->conn_wait);

        return 0;
}

static int
isert_connect_error(struct rdma_cm_id *cma_id)
{
        struct isert_conn *isert_conn = cma_id->qp->qp_context;

        isert_conn->conn_cm_id = NULL;
        isert_put_conn(isert_conn);

        return -1;
}

static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
        int ret = 0;

        isert_info("event %d status %d id %p np %p\n", event->event,
                   event->status, cma_id, cma_id->context);

        switch (event->event) {
        case RDMA_CM_EVENT_CONNECT_REQUEST:
                ret = isert_connect_request(cma_id, event);
                if (ret)
                        isert_err("failed handle connect request %d\n", ret);
                break;
        case RDMA_CM_EVENT_ESTABLISHED:
                isert_connected_handler(cma_id);
                break;
        case RDMA_CM_EVENT_ADDR_CHANGE:    /* FALLTHRU */
        case RDMA_CM_EVENT_DISCONNECTED:   /* FALLTHRU */
        case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
        case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
                ret = isert_disconnected_handler(cma_id, event->event);
                break;
        case RDMA_CM_EVENT_REJECTED:       /* FALLTHRU */
        case RDMA_CM_EVENT_UNREACHABLE:    /* FALLTHRU */
        case RDMA_CM_EVENT_CONNECT_ERROR:
                ret = isert_connect_error(cma_id);
                break;
        default:
                isert_err("Unhandled RDMA CMA event: %d\n", event->event);
                break;
        }

        return ret;
}

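/*
 * Chain 'count' receive work requests over the rx descriptor ring
 * (conn_rx_desc_head) and post them to the QP in a single call.
 */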
static int
isert_post_recv(struct isert_conn *isert_conn, u32 count)
{
        struct ib_recv_wr *rx_wr, *rx_wr_failed;
        int i, ret;
        unsigned int rx_head = isert_conn->conn_rx_desc_head;
        struct iser_rx_desc *rx_desc;

        for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) {
                rx_desc = &isert_conn->conn_rx_descs[rx_head];
                rx_wr->wr_id = (uintptr_t)rx_desc;
                rx_wr->sg_list = &rx_desc->rx_sg;
                rx_wr->num_sge = 1;
                rx_wr->next = rx_wr + 1;
                rx_head = (rx_head + 1) & (ISERT_QP_MAX_RECV_DTOS - 1);
        }

        rx_wr--;
        rx_wr->next = NULL; /* mark end of work requests list */

        isert_conn->post_recv_buf_count += count;
        ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr,
                           &rx_wr_failed);
        if (ret) {
                isert_err("ib_post_recv() failed with ret: %d\n", ret);
                isert_conn->post_recv_buf_count -= count;
        } else {
                isert_dbg("Posted %d RX buffers\n", count);
                isert_conn->conn_rx_desc_head = rx_head;
        }
        return ret;
}

static int
isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
{
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        struct ib_send_wr send_wr, *send_wr_failed;
        int ret;

        ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
                                      ISER_HEADERS_LEN, DMA_TO_DEVICE);

        send_wr.next = NULL;
        send_wr.wr_id = (uintptr_t)tx_desc;
        send_wr.sg_list = tx_desc->tx_sg;
        send_wr.num_sge = tx_desc->num_sge;
        send_wr.opcode = IB_WR_SEND;
        send_wr.send_flags = IB_SEND_SIGNALED;

        ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed);
        if (ret)
                isert_err("ib_post_send() failed, ret: %d\n", ret);

        return ret;
}

static void
isert_create_send_desc(struct isert_conn *isert_conn,
                       struct isert_cmd *isert_cmd,
                       struct iser_tx_desc *tx_desc)
{
        struct isert_device *device = isert_conn->conn_device;
        struct ib_device *ib_dev = device->ib_device;

        ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
                                   ISER_HEADERS_LEN, DMA_TO_DEVICE);

        memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
        tx_desc->iser_header.flags = ISER_VER;

        tx_desc->num_sge = 1;
        tx_desc->isert_cmd = isert_cmd;

        if (tx_desc->tx_sg[0].lkey != device->mr->lkey) {
                tx_desc->tx_sg[0].lkey = device->mr->lkey;
                isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc);
        }
}

static int
isert_init_tx_hdrs(struct isert_conn *isert_conn,
                   struct iser_tx_desc *tx_desc)
{
        struct isert_device *device = isert_conn->conn_device;
        struct ib_device *ib_dev = device->ib_device;
        u64 dma_addr;

        dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
                                     ISER_HEADERS_LEN, DMA_TO_DEVICE);
        if (ib_dma_mapping_error(ib_dev, dma_addr)) {
                isert_err("ib_dma_mapping_error() failed\n");
                return -ENOMEM;
        }

        tx_desc->dma_addr = dma_addr;
        tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
        tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
        tx_desc->tx_sg[0].lkey = device->mr->lkey;

        isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n",
                  tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length,
                  tx_desc->tx_sg[0].lkey);

        return 0;
}

static void
isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
                   struct ib_send_wr *send_wr)
{
        struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;

        isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
        send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
        send_wr->opcode = IB_WR_SEND;
        send_wr->sg_list = &tx_desc->tx_sg[0];
        send_wr->num_sge = isert_cmd->tx_desc.num_sge;
        send_wr->send_flags = IB_SEND_SIGNALED;
}

static int
isert_rdma_post_recvl(struct isert_conn *isert_conn)
{
        struct ib_recv_wr rx_wr, *rx_wr_fail;
        struct ib_sge sge;
        int ret;

        memset(&sge, 0, sizeof(struct ib_sge));
        sge.addr = isert_conn->login_req_dma;
        sge.length = ISER_RX_LOGIN_SIZE;
        sge.lkey = isert_conn->conn_device->mr->lkey;

        isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
                  sge.addr, sge.length, sge.lkey);

        memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
        rx_wr.wr_id = (uintptr_t)isert_conn->login_req_buf;
        rx_wr.sg_list = &sge;
        rx_wr.num_sge = 1;

        isert_conn->post_recv_buf_count++;
        ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail);
        if (ret) {
                isert_err("ib_post_recv() failed: %d\n", ret);
                isert_conn->post_recv_buf_count--;
        }

        return ret;
}

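/*
 * Send a login response PDU. Once the login completes, allocate the
 * fastreg pool (for normal sessions on fastreg-capable devices) and the
 * rx descriptor ring, post the initial ISERT_MIN_POSTED_RX receives and
 * move the connection to ISER_CONN_FULL_FEATURE; otherwise just repost
 * the single login receive buffer.
 */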
static int
isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
                   u32 length)
{
        struct isert_conn *isert_conn = conn->context;
        struct isert_device *device = isert_conn->conn_device;
        struct ib_device *ib_dev = device->ib_device;
        struct iser_tx_desc *tx_desc = &isert_conn->conn_login_tx_desc;
        int ret;

        isert_create_send_desc(isert_conn, NULL, tx_desc);

        memcpy(&tx_desc->iscsi_header, &login->rsp[0],
               sizeof(struct iscsi_hdr));

        isert_init_tx_hdrs(isert_conn, tx_desc);

        if (length > 0) {
                struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];

                ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
                                           length, DMA_TO_DEVICE);

                memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);

                ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
                                              length, DMA_TO_DEVICE);

                tx_dsg->addr = isert_conn->login_rsp_dma;
                tx_dsg->length = length;
                tx_dsg->lkey = isert_conn->conn_device->mr->lkey;
                tx_desc->num_sge = 2;
        }
        if (!login->login_failed) {
                if (login->login_complete) {
                        if (!conn->sess->sess_ops->SessionType &&
                            isert_conn->conn_device->use_fastreg) {
                                ret = isert_conn_create_fastreg_pool(isert_conn);
                                if (ret) {
                                        isert_err("Conn: %p failed to create"
                                               " fastreg pool\n", isert_conn);
                                        return ret;
                                }
                        }

                        ret = isert_alloc_rx_descriptors(isert_conn);
                        if (ret)
                                return ret;

                        ret = isert_post_recv(isert_conn, ISERT_MIN_POSTED_RX);
                        if (ret)
                                return ret;

                        /* Now we are in FULL_FEATURE phase */
                        mutex_lock(&isert_conn->conn_mutex);
                        isert_conn->state = ISER_CONN_FULL_FEATURE;
                        mutex_unlock(&isert_conn->conn_mutex);
                        goto post_send;
                }

                ret = isert_rdma_post_recvl(isert_conn);
                if (ret)
                        return ret;
        }
post_send:
        ret = isert_post_send(isert_conn, tx_desc);
        if (ret)
                return ret;

        return 0;
}

static void
isert_rx_login_req(struct isert_conn *isert_conn)
{
        struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf;
        int rx_buflen = isert_conn->login_req_len;
        struct iscsi_conn *conn = isert_conn->conn;
        struct iscsi_login *login = conn->conn_login;
        int size;

        isert_info("conn %p\n", isert_conn);

        WARN_ON_ONCE(!login);

        if (login->first_request) {
                struct iscsi_login_req *login_req =
                        (struct iscsi_login_req *)&rx_desc->iscsi_header;
                /*
                 * Setup the initial iscsi_login values from the leading
                 * login request PDU.
                 */
                login->leading_connection = (!login_req->tsih) ? 1 : 0;
                login->current_stage =
                        (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
                         >> 2;
                login->version_min = login_req->min_version;
                login->version_max = login_req->max_version;
                memcpy(login->isid, login_req->isid, 6);
                login->cmd_sn = be32_to_cpu(login_req->cmdsn);
                login->init_task_tag = login_req->itt;
                login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
                login->cid = be16_to_cpu(login_req->cid);
                login->tsih = be16_to_cpu(login_req->tsih);
        }

        memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);

        size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
        isert_dbg("Using login payload size: %d, rx_buflen: %d "
                  "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen,
                  MAX_KEY_VALUE_PAIRS);
        memcpy(login->req_buf, &rx_desc->data[0], size);

        if (login->first_request) {
                complete(&isert_conn->conn_login_comp);
                return;
        }
        schedule_delayed_work(&conn->login_work, 0);
}

Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001200static struct iscsi_cmd
Nicholas Bellinger676687c2014-01-20 03:36:44 +00001201*isert_allocate_cmd(struct iscsi_conn *conn)
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001202{
1203 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1204 struct isert_cmd *isert_cmd;
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001205 struct iscsi_cmd *cmd;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001206
Nicholas Bellinger676687c2014-01-20 03:36:44 +00001207 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001208 if (!cmd) {
Sagi Grimberg24f412d2014-12-07 13:12:02 +02001209 isert_err("Unable to allocate iscsi_cmd + isert_cmd\n");
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001210 return NULL;
1211 }
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001212 isert_cmd = iscsit_priv_cmd(cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001213 isert_cmd->conn = isert_conn;
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001214 isert_cmd->iscsi_cmd = cmd;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001215
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001216 return cmd;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001217}
1218
1219static int
1220isert_handle_scsi_cmd(struct isert_conn *isert_conn,
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001221 struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd,
1222 struct iser_rx_desc *rx_desc, unsigned char *buf)
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001223{
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001224 struct iscsi_conn *conn = isert_conn->conn;
1225 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
1226 struct scatterlist *sg;
1227 int imm_data, imm_data_len, unsol_data, sg_nents, rc;
1228 bool dump_payload = false;
1229
1230 rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
1231 if (rc < 0)
1232 return rc;
1233
1234 imm_data = cmd->immediate_data;
1235 imm_data_len = cmd->first_burst_len;
1236 unsol_data = cmd->unsolicited_data;
1237
1238 rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
1239 if (rc < 0) {
1240 return 0;
1241 } else if (rc > 0) {
1242 dump_payload = true;
1243 goto sequence_cmd;
1244 }
1245
1246 if (!imm_data)
1247 return 0;
1248
1249 sg = &cmd->se_cmd.t_data_sg[0];
1250 sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));
1251
Sagi Grimberg24f412d2014-12-07 13:12:02 +02001252 isert_dbg("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
Sagi Grimberg4c22e072014-12-07 13:12:03 +02001253 sg, sg_nents, &rx_desc->data[0], imm_data_len);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001254
1255 sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len);
1256
1257 cmd->write_data_done += imm_data_len;
1258
1259 if (cmd->write_data_done == cmd->se_cmd.data_length) {
1260 spin_lock_bh(&cmd->istate_lock);
1261 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
1262 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
1263 spin_unlock_bh(&cmd->istate_lock);
1264 }
1265
1266sequence_cmd:
Nicholas Bellinger561bf152013-07-03 03:58:58 -07001267 rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001268
1269 if (!rc && dump_payload == false && unsol_data)
1270 iscsit_set_unsoliticed_dataout(cmd);
Nicholas Bellinger6cc44a62014-05-23 00:48:35 -07001271 else if (dump_payload && imm_data)
1272 target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001273
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001274 return 0;
1275}
1276
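/*
 * Handle a received DataOut PDU carrying unsolicited data: copy the
 * payload from the RX descriptor into the command scatterlist at the
 * current write_data_done offset.
 */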
1277static int
1278isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
1279 struct iser_rx_desc *rx_desc, unsigned char *buf)
1280{
1281 struct scatterlist *sg_start;
1282 struct iscsi_conn *conn = isert_conn->conn;
1283 struct iscsi_cmd *cmd = NULL;
1284 struct iscsi_data *hdr = (struct iscsi_data *)buf;
1285 u32 unsol_data_len = ntoh24(hdr->dlength);
1286 int rc, sg_nents, sg_off, page_off;
1287
1288 rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
1289 if (rc < 0)
1290 return rc;
1291 else if (!cmd)
1292 return 0;
1293 /*
1294 * FIXME: Unexpected unsolicited_data out
1295 */
1296 if (!cmd->unsolicited_data) {
Sagi Grimberg24f412d2014-12-07 13:12:02 +02001297 isert_err("Received unexpected solicited data payload\n");
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001298 dump_stack();
1299 return -1;
1300 }
1301
Sagi Grimberg4c22e072014-12-07 13:12:03 +02001302 isert_dbg("Unsolicited DataOut unsol_data_len: %u, "
1303 "write_data_done: %u, data_length: %u\n",
1304 unsol_data_len, cmd->write_data_done,
1305 cmd->se_cmd.data_length);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001306
1307 sg_off = cmd->write_data_done / PAGE_SIZE;
1308 sg_start = &cmd->se_cmd.t_data_sg[sg_off];
1309 sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
1310 page_off = cmd->write_data_done % PAGE_SIZE;
1311 /*
1312 * FIXME: Non page-aligned unsolicited_data out
1313 */
1314 if (page_off) {
Sagi Grimberg4c22e072014-12-07 13:12:03 +02001315 isert_err("unexpected non-page aligned data payload\n");
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001316 dump_stack();
1317 return -1;
1318 }
Sagi Grimberg4c22e072014-12-07 13:12:03 +02001319 isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u "
1320 "sg_nents: %u from %p %u\n", sg_start, sg_off,
1321 sg_nents, &rx_desc->data[0], unsol_data_len);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001322
1323 sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
1324 unsol_data_len);
1325
1326 rc = iscsit_check_dataout_payload(cmd, hdr, false);
1327 if (rc < 0)
1328 return rc;
1329
1330 return 0;
1331}
1332
1333static int
Nicholas Bellinger778de362013-06-14 16:07:47 -07001334isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001335 struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
1336 unsigned char *buf)
Nicholas Bellinger778de362013-06-14 16:07:47 -07001337{
Nicholas Bellinger778de362013-06-14 16:07:47 -07001338 struct iscsi_conn *conn = isert_conn->conn;
1339 struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
1340 int rc;
1341
1342 rc = iscsit_setup_nop_out(conn, cmd, hdr);
1343 if (rc < 0)
1344 return rc;
1345 /*
1346 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
1347 */
1348
1349 return iscsit_process_nop_out(conn, cmd, hdr);
1350}
1351
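/*
 * Handle a received Text Command PDU, allocating a text_in buffer for
 * any key=value payload before handing it to the core.
 */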
1352static int
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07001353isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001354 struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
1355 struct iscsi_text *hdr)
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07001356{
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07001357 struct iscsi_conn *conn = isert_conn->conn;
1358 u32 payload_length = ntoh24(hdr->dlength);
1359 int rc;
Sagi Grimbergb44a2b62015-01-26 12:49:08 +02001360 unsigned char *text_in = NULL;
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07001361
1362 rc = iscsit_setup_text_cmd(conn, cmd, hdr);
1363 if (rc < 0)
1364 return rc;
1365
Sagi Grimbergb44a2b62015-01-26 12:49:08 +02001366 if (payload_length) {
1367 text_in = kzalloc(payload_length, GFP_KERNEL);
1368 if (!text_in) {
1369 isert_err("Unable to allocate text_in of payload_length: %u\n",
1370 payload_length);
1371 return -ENOMEM;
1372 }
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07001373 }
1374 cmd->text_in_ptr = text_in;
1375
 1376	if (payload_length)
		memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);
1377
1378 return iscsit_process_text_cmd(conn, cmd, hdr);
1379}
1380
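/*
 * Demultiplex a received iSCSI PDU by opcode and dispatch it to the
 * matching handler, allocating a new command where one is required.
 */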
1381static int
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001382isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
1383 uint32_t read_stag, uint64_t read_va,
1384 uint32_t write_stag, uint64_t write_va)
1385{
1386 struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
1387 struct iscsi_conn *conn = isert_conn->conn;
Nicholas Bellingerca40d242013-07-07 17:45:08 -07001388 struct iscsi_session *sess = conn->sess;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001389 struct iscsi_cmd *cmd;
1390 struct isert_cmd *isert_cmd;
1391 int ret = -EINVAL;
1392 u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);
1393
Nicholas Bellingerca40d242013-07-07 17:45:08 -07001394 if (sess->sess_ops->SessionType &&
1395 (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) {
Sagi Grimberg24f412d2014-12-07 13:12:02 +02001396 isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
Sagi Grimberg4c22e072014-12-07 13:12:03 +02001397 " ignoring\n", opcode);
Nicholas Bellingerca40d242013-07-07 17:45:08 -07001398 return 0;
1399 }
1400
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001401 switch (opcode) {
1402 case ISCSI_OP_SCSI_CMD:
Nicholas Bellinger676687c2014-01-20 03:36:44 +00001403 cmd = isert_allocate_cmd(conn);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001404 if (!cmd)
1405 break;
1406
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001407 isert_cmd = iscsit_priv_cmd(cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001408 isert_cmd->read_stag = read_stag;
1409 isert_cmd->read_va = read_va;
1410 isert_cmd->write_stag = write_stag;
1411 isert_cmd->write_va = write_va;
1412
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001413 ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001414 rx_desc, (unsigned char *)hdr);
1415 break;
1416 case ISCSI_OP_NOOP_OUT:
Nicholas Bellinger676687c2014-01-20 03:36:44 +00001417 cmd = isert_allocate_cmd(conn);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001418 if (!cmd)
1419 break;
1420
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001421 isert_cmd = iscsit_priv_cmd(cmd);
1422 ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
Nicholas Bellinger778de362013-06-14 16:07:47 -07001423 rx_desc, (unsigned char *)hdr);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001424 break;
1425 case ISCSI_OP_SCSI_DATA_OUT:
1426 ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
1427 (unsigned char *)hdr);
1428 break;
1429 case ISCSI_OP_SCSI_TMFUNC:
Nicholas Bellinger676687c2014-01-20 03:36:44 +00001430 cmd = isert_allocate_cmd(conn);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001431 if (!cmd)
1432 break;
1433
1434 ret = iscsit_handle_task_mgt_cmd(conn, cmd,
1435 (unsigned char *)hdr);
1436 break;
1437 case ISCSI_OP_LOGOUT:
Nicholas Bellinger676687c2014-01-20 03:36:44 +00001438 cmd = isert_allocate_cmd(conn);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001439 if (!cmd)
1440 break;
1441
1442 ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001443 break;
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07001444 case ISCSI_OP_TEXT:
Sagi Grimberge4f4e802015-02-09 18:07:25 +02001445 if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF) {
1446 cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
1447 if (!cmd)
1448 break;
1449 } else {
1450 cmd = isert_allocate_cmd(conn);
1451 if (!cmd)
1452 break;
1453 }
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07001454
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001455 isert_cmd = iscsit_priv_cmd(cmd);
1456 ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07001457 rx_desc, (struct iscsi_text *)hdr);
1458 break;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001459 default:
Sagi Grimberg24f412d2014-12-07 13:12:02 +02001460 isert_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001461 dump_stack();
1462 break;
1463 }
1464
1465 return ret;
1466}
1467
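/*
 * Parse the iSER header of a received descriptor to extract the remote
 * read/write stags and virtual addresses advertised by the initiator,
 * then process the encapsulated iSCSI PDU.
 */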
1468static void
1469isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
1470{
1471 struct iser_hdr *iser_hdr = &rx_desc->iser_header;
1472 uint64_t read_va = 0, write_va = 0;
1473 uint32_t read_stag = 0, write_stag = 0;
1474 int rc;
1475
1476 switch (iser_hdr->flags & 0xF0) {
1477 case ISCSI_CTRL:
1478 if (iser_hdr->flags & ISER_RSV) {
1479 read_stag = be32_to_cpu(iser_hdr->read_stag);
1480 read_va = be64_to_cpu(iser_hdr->read_va);
Sagi Grimberg4c22e072014-12-07 13:12:03 +02001481 isert_dbg("ISER_RSV: read_stag: 0x%x read_va: 0x%llx\n",
1482 read_stag, (unsigned long long)read_va);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001483 }
1484 if (iser_hdr->flags & ISER_WSV) {
1485 write_stag = be32_to_cpu(iser_hdr->write_stag);
1486 write_va = be64_to_cpu(iser_hdr->write_va);
Sagi Grimberg4c22e072014-12-07 13:12:03 +02001487 isert_dbg("ISER_WSV: write_stag: 0x%x write_va: 0x%llx\n",
1488 write_stag, (unsigned long long)write_va);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001489 }
1490
Sagi Grimberg24f412d2014-12-07 13:12:02 +02001491 isert_dbg("ISER ISCSI_CTRL PDU\n");
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001492 break;
1493 case ISER_HELLO:
Sagi Grimberg24f412d2014-12-07 13:12:02 +02001494 isert_err("iSER Hello message\n");
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001495 break;
1496 default:
Sagi Grimberg24f412d2014-12-07 13:12:02 +02001497 isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001498 break;
1499 }
1500
1501 rc = isert_rx_opcode(isert_conn, rx_desc,
1502 read_stag, read_va, write_stag, write_va);
1503}
1504
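/*
 * RX completion path: hand login requests over to the login machinery,
 * process regular PDUs, and repost receive buffers in batches as they
 * are consumed.
 */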
1505static void
1506isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
Sagi Grimberg4a295ba2014-12-02 16:57:40 +02001507 u32 xfer_len)
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001508{
1509 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1510 struct iscsi_hdr *hdr;
1511 u64 rx_dma;
1512 int rx_buflen, outstanding;
1513
1514 if ((char *)desc == isert_conn->login_req_buf) {
1515 rx_dma = isert_conn->login_req_dma;
1516 rx_buflen = ISER_RX_LOGIN_SIZE;
Sagi Grimberg4c22e072014-12-07 13:12:03 +02001517 isert_dbg("login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001518 rx_dma, rx_buflen);
1519 } else {
1520 rx_dma = desc->dma_addr;
1521 rx_buflen = ISER_RX_PAYLOAD_SIZE;
Sagi Grimberg4c22e072014-12-07 13:12:03 +02001522 isert_dbg("req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001523 rx_dma, rx_buflen);
1524 }
1525
1526 ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);
1527
1528 hdr = &desc->iscsi_header;
Sagi Grimberg24f412d2014-12-07 13:12:02 +02001529 isert_dbg("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001530 hdr->opcode, hdr->itt, hdr->flags,
1531 (int)(xfer_len - ISER_HEADERS_LEN));
1532
Sagi Grimberg2371e5d2014-12-02 16:57:21 +02001533 if ((char *)desc == isert_conn->login_req_buf) {
1534 isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN;
1535 if (isert_conn->conn) {
1536 struct iscsi_login *login = isert_conn->conn->conn_login;
1537
1538 if (login && !login->first_request)
1539 isert_rx_login_req(isert_conn);
1540 }
1541 mutex_lock(&isert_conn->conn_mutex);
1542 complete(&isert_conn->login_req_comp);
1543 mutex_unlock(&isert_conn->conn_mutex);
1544 } else {
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001545 isert_rx_do_work(desc, isert_conn);
Sagi Grimberg2371e5d2014-12-02 16:57:21 +02001546 }
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001547
1548 ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
1549 DMA_FROM_DEVICE);
1550
1551 isert_conn->post_recv_buf_count--;
Sagi Grimberg4c22e072014-12-07 13:12:03 +02001552 isert_dbg("Decremented post_recv_buf_count: %d\n",
1553 isert_conn->post_recv_buf_count);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001554
1555 if ((char *)desc == isert_conn->login_req_buf)
1556 return;
1557
1558 outstanding = isert_conn->post_recv_buf_count;
1559 if (outstanding + ISERT_MIN_POSTED_RX <= ISERT_QP_MAX_RECV_DTOS) {
1560 int err, count = min(ISERT_QP_MAX_RECV_DTOS - outstanding,
1561 ISERT_MIN_POSTED_RX);
1562 err = isert_post_recv(isert_conn, count);
1563 if (err) {
Sagi Grimberg24f412d2014-12-07 13:12:02 +02001564 isert_err("isert_post_recv() count: %d failed, %d\n",
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001565 count, err);
1566 }
1567 }
1568}
1569
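/*
 * DMA-map the command scatterlist from the given offset (capped at
 * ISCSI_ISER_SG_TABLESIZE entries) for the requested RDMA direction
 * and describe the mapping in *data.
 */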
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02001570static int
1571isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1572 struct scatterlist *sg, u32 nents, u32 length, u32 offset,
1573 enum iser_ib_op_code op, struct isert_data_buf *data)
1574{
1575 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1576
1577 data->dma_dir = op == ISER_IB_RDMA_WRITE ?
1578 DMA_TO_DEVICE : DMA_FROM_DEVICE;
1579
1580 data->len = length - offset;
1581 data->offset = offset;
1582 data->sg_off = data->offset / PAGE_SIZE;
1583
1584 data->sg = &sg[data->sg_off];
1585 data->nents = min_t(unsigned int, nents - data->sg_off,
1586 ISCSI_ISER_SG_TABLESIZE);
1587 data->len = min_t(unsigned int, data->len, ISCSI_ISER_SG_TABLESIZE *
1588 PAGE_SIZE);
1589
1590 data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents,
1591 data->dma_dir);
1592 if (unlikely(!data->dma_nents)) {
Sagi Grimberg24f412d2014-12-07 13:12:02 +02001593 isert_err("Cmd: unable to dma map SGs %p\n", sg);
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02001594 return -EINVAL;
1595 }
1596
Sagi Grimberg24f412d2014-12-07 13:12:02 +02001597 isert_dbg("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
Sagi Grimberg4c22e072014-12-07 13:12:03 +02001598 isert_cmd, data->dma_nents, data->sg, data->nents, data->len);
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02001599
1600 return 0;
1601}
1602
1603static void
1604isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data)
1605{
1606 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1607
1608 ib_dma_unmap_sg(ib_dev, data->sg, data->nents, data->dma_dir);
1609 memset(data, 0, sizeof(*data));
1610}
 1611
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001614static void
1615isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
1616{
1617 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001618
Sagi Grimberg4c22e072014-12-07 13:12:03 +02001619 isert_dbg("Cmd %p\n", isert_cmd);
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02001620
1621 if (wr->data.sg) {
Sagi Grimberg4c22e072014-12-07 13:12:03 +02001622 isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02001623 isert_unmap_data_buf(isert_conn, &wr->data);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001624 }
1625
Vu Pham90ecc6e2013-08-28 23:23:33 +03001626 if (wr->send_wr) {
Sagi Grimberg4c22e072014-12-07 13:12:03 +02001627 isert_dbg("Cmd %p free send_wr\n", isert_cmd);
Vu Pham90ecc6e2013-08-28 23:23:33 +03001628 kfree(wr->send_wr);
1629 wr->send_wr = NULL;
1630 }
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001631
Vu Pham90ecc6e2013-08-28 23:23:33 +03001632 if (wr->ib_sge) {
Sagi Grimberg4c22e072014-12-07 13:12:03 +02001633 isert_dbg("Cmd %p free ib_sge\n", isert_cmd);
Vu Pham90ecc6e2013-08-28 23:23:33 +03001634 kfree(wr->ib_sge);
1635 wr->ib_sge = NULL;
1636 }
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001637}
1638
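/*
 * Fastreg counterpart of isert_unmap_cmd: return the fast registration
 * descriptor to the connection pool and unmap the data (and protection)
 * buffers.
 */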
1639static void
Sagi Grimberga3a5a822014-01-09 18:40:50 +02001640isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
Vu Pham59464ef2013-08-28 23:23:35 +03001641{
1642 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
Vu Pham59464ef2013-08-28 23:23:35 +03001643 LIST_HEAD(unmap_list);
1644
Sagi Grimberg4c22e072014-12-07 13:12:03 +02001645 isert_dbg("Cmd %p\n", isert_cmd);
Vu Pham59464ef2013-08-28 23:23:35 +03001646
1647 if (wr->fr_desc) {
Sagi Grimberg4c22e072014-12-07 13:12:03 +02001648 isert_dbg("Cmd %p free fr_desc %p\n", isert_cmd, wr->fr_desc);
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02001649 if (wr->fr_desc->ind & ISERT_PROTECTED) {
1650 isert_unmap_data_buf(isert_conn, &wr->prot);
1651 wr->fr_desc->ind &= ~ISERT_PROTECTED;
1652 }
Vu Pham59464ef2013-08-28 23:23:35 +03001653 spin_lock_bh(&isert_conn->conn_lock);
Sagi Grimberga3a5a822014-01-09 18:40:50 +02001654 list_add_tail(&wr->fr_desc->list, &isert_conn->conn_fr_pool);
Vu Pham59464ef2013-08-28 23:23:35 +03001655 spin_unlock_bh(&isert_conn->conn_lock);
1656 wr->fr_desc = NULL;
1657 }
1658
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02001659 if (wr->data.sg) {
Sagi Grimberg4c22e072014-12-07 13:12:03 +02001660 isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02001661 isert_unmap_data_buf(isert_conn, &wr->data);
Vu Pham59464ef2013-08-28 23:23:35 +03001662 }
1663
1664 wr->ib_sge = NULL;
1665 wr->send_wr = NULL;
1666}
1667
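/*
 * Release a command from the TX completion (or error) path, undoing
 * any remaining RDMA registration and dropping the command references
 * appropriate for its iSCSI opcode.
 */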
1668static void
Nicholas Bellinger03e78482014-03-30 15:50:03 -07001669isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001670{
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001671 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001672 struct isert_conn *isert_conn = isert_cmd->conn;
Nicholas Bellinger186a9642013-07-03 03:11:48 -07001673 struct iscsi_conn *conn = isert_conn->conn;
Vu Phamd40945d2013-08-28 23:23:34 +03001674 struct isert_device *device = isert_conn->conn_device;
Sagi Grimberge4f4e802015-02-09 18:07:25 +02001675 struct iscsi_text_rsp *hdr;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001676
Sagi Grimberg4c22e072014-12-07 13:12:03 +02001677 isert_dbg("Cmd %p\n", isert_cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001678
1679 switch (cmd->iscsi_opcode) {
1680 case ISCSI_OP_SCSI_CMD:
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001681 spin_lock_bh(&conn->cmd_lock);
1682 if (!list_empty(&cmd->i_conn_node))
Nicholas Bellinger5159d762014-02-03 12:53:51 -08001683 list_del_init(&cmd->i_conn_node);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001684 spin_unlock_bh(&conn->cmd_lock);
1685
Nicholas Bellinger03e78482014-03-30 15:50:03 -07001686 if (cmd->data_direction == DMA_TO_DEVICE) {
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001687 iscsit_stop_dataout_timer(cmd);
Nicholas Bellinger03e78482014-03-30 15:50:03 -07001688 /*
1689 * Check for special case during comp_err where
1690 * WRITE_PENDING has been handed off from core,
1691 * but requires an extra target_put_sess_cmd()
1692 * before transport_generic_free_cmd() below.
1693 */
1694 if (comp_err &&
1695 cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
1696 struct se_cmd *se_cmd = &cmd->se_cmd;
1697
1698 target_put_sess_cmd(se_cmd->se_sess, se_cmd);
1699 }
1700 }
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001701
Vu Phamd40945d2013-08-28 23:23:34 +03001702 device->unreg_rdma_mem(isert_cmd, isert_conn);
Nicholas Bellinger186a9642013-07-03 03:11:48 -07001703 transport_generic_free_cmd(&cmd->se_cmd, 0);
1704 break;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001705 case ISCSI_OP_SCSI_TMFUNC:
Nicholas Bellinger186a9642013-07-03 03:11:48 -07001706 spin_lock_bh(&conn->cmd_lock);
1707 if (!list_empty(&cmd->i_conn_node))
Nicholas Bellinger5159d762014-02-03 12:53:51 -08001708 list_del_init(&cmd->i_conn_node);
Nicholas Bellinger186a9642013-07-03 03:11:48 -07001709 spin_unlock_bh(&conn->cmd_lock);
1710
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001711 transport_generic_free_cmd(&cmd->se_cmd, 0);
1712 break;
1713 case ISCSI_OP_REJECT:
1714 case ISCSI_OP_NOOP_OUT:
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07001715 case ISCSI_OP_TEXT:
Sagi Grimberge4f4e802015-02-09 18:07:25 +02001716 hdr = (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
1717 /* If the continue bit is on, keep the command alive */
1718 if (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)
1719 break;
1720
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001721 spin_lock_bh(&conn->cmd_lock);
1722 if (!list_empty(&cmd->i_conn_node))
Nicholas Bellinger5159d762014-02-03 12:53:51 -08001723 list_del_init(&cmd->i_conn_node);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001724 spin_unlock_bh(&conn->cmd_lock);
1725
1726 /*
1727 * Handle special case for REJECT when iscsi_add_reject*() has
1728 * overwritten the original iscsi_opcode assignment, and the
1729 * associated cmd->se_cmd needs to be released.
1730 */
1731 if (cmd->se_cmd.se_tfo != NULL) {
Rasmus Villemoes11378cd2015-02-06 01:09:05 +01001732 isert_dbg("Calling transport_generic_free_cmd for 0x%02x\n",
Nicholas Bellinger3df8f682013-06-26 02:31:42 -07001733 cmd->iscsi_opcode);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001734 transport_generic_free_cmd(&cmd->se_cmd, 0);
1735 break;
1736 }
1737 /*
1738 * Fall-through
1739 */
1740 default:
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001741 iscsit_release_cmd(cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001742 break;
1743 }
1744}
1745
1746static void
1747isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
1748{
1749 if (tx_desc->dma_addr != 0) {
Sagi Grimberg4c22e072014-12-07 13:12:03 +02001750 isert_dbg("unmap single for tx_desc->dma_addr\n");
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001751 ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
1752 ISER_HEADERS_LEN, DMA_TO_DEVICE);
1753 tx_desc->dma_addr = 0;
1754 }
1755}
1756
1757static void
1758isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
Nicholas Bellinger03e78482014-03-30 15:50:03 -07001759 struct ib_device *ib_dev, bool comp_err)
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001760{
Nicholas Bellingerdbbc5d12013-07-03 19:39:37 -07001761 if (isert_cmd->pdu_buf_dma != 0) {
Sagi Grimberg4c22e072014-12-07 13:12:03 +02001762 isert_dbg("unmap single for isert_cmd->pdu_buf_dma\n");
Nicholas Bellingerdbbc5d12013-07-03 19:39:37 -07001763 ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
1764 isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
1765 isert_cmd->pdu_buf_dma = 0;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001766 }
1767
1768 isert_unmap_tx_desc(tx_desc, ib_dev);
Nicholas Bellinger03e78482014-03-30 15:50:03 -07001769 isert_put_cmd(isert_cmd, comp_err);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001770}
1771
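/*
 * Query the signature MR for T10-PI errors and, if one is found,
 * translate it into the matching sense code and bad sector on se_cmd.
 */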
Sagi Grimberg96b79732014-03-17 12:52:18 +02001772static int
1773isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
1774{
1775 struct ib_mr_status mr_status;
1776 int ret;
1777
1778 ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
1779 if (ret) {
Sagi Grimberg24f412d2014-12-07 13:12:02 +02001780 isert_err("ib_check_mr_status failed, ret %d\n", ret);
Sagi Grimberg96b79732014-03-17 12:52:18 +02001781 goto fail_mr_status;
1782 }
1783
1784 if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
1785 u64 sec_offset_err;
1786 u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8;
1787
1788 switch (mr_status.sig_err.err_type) {
1789 case IB_SIG_BAD_GUARD:
1790 se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
1791 break;
1792 case IB_SIG_BAD_REFTAG:
1793 se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
1794 break;
1795 case IB_SIG_BAD_APPTAG:
1796 se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
1797 break;
1798 }
1799 sec_offset_err = mr_status.sig_err.sig_err_offset;
1800 do_div(sec_offset_err, block_size);
1801 se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba;
1802
Sagi Grimberg4c22e072014-12-07 13:12:03 +02001803 isert_err("PI error found type %d at sector 0x%llx "
1804 "expected 0x%x vs actual 0x%x\n",
1805 mr_status.sig_err.err_type,
1806 (unsigned long long)se_cmd->bad_sector,
1807 mr_status.sig_err.expected,
1808 mr_status.sig_err.actual);
Sagi Grimberg96b79732014-03-17 12:52:18 +02001809 ret = 1;
1810 }
1811
1812fail_mr_status:
1813 return ret;
1814}
1815
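/*
 * RDMA_WRITE (READ data-in) completion: check protection status when
 * PI is in use, release the registration, then send either the SCSI
 * response or a check condition.
 */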
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001816static void
Sagi Grimbergf93f3a72014-02-19 17:50:24 +02001817isert_completion_rdma_write(struct iser_tx_desc *tx_desc,
1818 struct isert_cmd *isert_cmd)
1819{
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02001820 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
Sagi Grimbergf93f3a72014-02-19 17:50:24 +02001821 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02001822 struct se_cmd *se_cmd = &cmd->se_cmd;
Sagi Grimbergf93f3a72014-02-19 17:50:24 +02001823 struct isert_conn *isert_conn = isert_cmd->conn;
1824 struct isert_device *device = isert_conn->conn_device;
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02001825 int ret = 0;
Sagi Grimbergf93f3a72014-02-19 17:50:24 +02001826
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02001827 if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
Sagi Grimberg96b79732014-03-17 12:52:18 +02001828 ret = isert_check_pi_status(se_cmd,
1829 wr->fr_desc->pi_ctx->sig_mr);
1830 wr->fr_desc->ind &= ~ISERT_PROTECTED;
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02001831 }
1832
Sagi Grimbergf93f3a72014-02-19 17:50:24 +02001833 device->unreg_rdma_mem(isert_cmd, isert_conn);
Sagi Grimberg897bb2c2014-03-17 12:52:17 +02001834 wr->send_wr_num = 0;
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02001835 if (ret)
1836 transport_send_check_condition_and_sense(se_cmd,
1837 se_cmd->pi_err, 0);
1838 else
1839 isert_put_response(isert_conn->conn, cmd);
Sagi Grimbergf93f3a72014-02-19 17:50:24 +02001840}
1841
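/*
 * RDMA_READ (WRITE data-out) completion: check protection status when
 * PI is in use, release the registration, then hand the command to the
 * backend for execution.
 */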
1842static void
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001843isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
1844 struct isert_cmd *isert_cmd)
1845{
1846 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001847 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001848 struct se_cmd *se_cmd = &cmd->se_cmd;
Vu Pham90ecc6e2013-08-28 23:23:33 +03001849 struct isert_conn *isert_conn = isert_cmd->conn;
Vu Phamd40945d2013-08-28 23:23:34 +03001850 struct isert_device *device = isert_conn->conn_device;
Sagi Grimberg5bac4b12014-03-18 14:58:27 +02001851 int ret = 0;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001852
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02001853 if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
Sagi Grimberg96b79732014-03-17 12:52:18 +02001854 ret = isert_check_pi_status(se_cmd,
1855 wr->fr_desc->pi_ctx->sig_mr);
1856 wr->fr_desc->ind &= ~ISERT_PROTECTED;
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02001857 }
1858
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001859 iscsit_stop_dataout_timer(cmd);
Vu Phamd40945d2013-08-28 23:23:34 +03001860 device->unreg_rdma_mem(isert_cmd, isert_conn);
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02001861 cmd->write_data_done = wr->data.len;
Nicholas Bellingerb6b87a12014-02-27 09:05:03 -08001862 wr->send_wr_num = 0;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001863
Sagi Grimberg24f412d2014-12-07 13:12:02 +02001864 isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001865 spin_lock_bh(&cmd->istate_lock);
1866 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
1867 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
1868 spin_unlock_bh(&cmd->istate_lock);
1869
Sagi Grimberg364189f2015-03-29 15:52:03 +03001870 if (ret) {
1871 target_put_sess_cmd(se_cmd->se_sess, se_cmd);
Sagi Grimberg5bac4b12014-03-18 14:58:27 +02001872 transport_send_check_condition_and_sense(se_cmd,
1873 se_cmd->pi_err, 0);
Sagi Grimberg364189f2015-03-29 15:52:03 +03001874 } else {
Sagi Grimberg5bac4b12014-03-18 14:58:27 +02001875 target_execute_cmd(se_cmd);
Sagi Grimberg364189f2015-03-29 15:52:03 +03001876 }
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001877}
1878
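/*
 * Deferred completion work for control responses (task management,
 * reject, text and logout) that must run in process context.
 */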
1879static void
1880isert_do_control_comp(struct work_struct *work)
1881{
1882 struct isert_cmd *isert_cmd = container_of(work,
1883 struct isert_cmd, comp_work);
1884 struct isert_conn *isert_conn = isert_cmd->conn;
1885 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001886 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001887
Sagi Grimberg4c22e072014-12-07 13:12:03 +02001888 isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state);
1889
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001890 switch (cmd->i_state) {
1891 case ISTATE_SEND_TASKMGTRSP:
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001892		iscsit_tmr_post_handler(cmd, cmd->conn); /* FALLTHRU */
Sagi Grimberg10633c32014-12-07 13:12:04 +02001893 case ISTATE_SEND_REJECT: /* FALLTHRU */
1894 case ISTATE_SEND_TEXTRSP: /* FALLTHRU */
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001895 cmd->i_state = ISTATE_SENT_STATUS;
Sagi Grimberg4c22e072014-12-07 13:12:03 +02001896 isert_completion_put(&isert_cmd->tx_desc, isert_cmd,
1897 ib_dev, false);
Nicholas Bellinger3df8f682013-06-26 02:31:42 -07001898 break;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001899 case ISTATE_SEND_LOGOUTRSP:
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001900 iscsit_logout_post_handler(cmd, cmd->conn);
1901 break;
1902 default:
Sagi Grimberg4c22e072014-12-07 13:12:03 +02001903 isert_err("Unknown i_state %d\n", cmd->i_state);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001904 dump_stack();
1905 break;
1906 }
1907}
1908
1909static void
1910isert_response_completion(struct iser_tx_desc *tx_desc,
1911 struct isert_cmd *isert_cmd,
1912 struct isert_conn *isert_conn,
1913 struct ib_device *ib_dev)
1914{
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001915 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001916
1917 if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
Nicholas Bellinger3df8f682013-06-26 02:31:42 -07001918 cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07001919 cmd->i_state == ISTATE_SEND_REJECT ||
1920 cmd->i_state == ISTATE_SEND_TEXTRSP) {
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001921 isert_unmap_tx_desc(tx_desc, ib_dev);
1922
1923 INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
1924 queue_work(isert_comp_wq, &isert_cmd->comp_work);
1925 return;
1926 }
Sagi Grimberg897bb2c2014-03-17 12:52:17 +02001927
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001928 cmd->i_state = ISTATE_SENT_STATUS;
Nicholas Bellinger03e78482014-03-30 15:50:03 -07001929 isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001930}
1931
1932static void
Sagi Grimberg68a86de2014-12-02 16:57:37 +02001933isert_send_completion(struct iser_tx_desc *tx_desc,
1934 struct isert_conn *isert_conn)
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001935{
1936 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1937 struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
1938 struct isert_rdma_wr *wr;
1939
1940 if (!isert_cmd) {
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001941 isert_unmap_tx_desc(tx_desc, ib_dev);
1942 return;
1943 }
1944 wr = &isert_cmd->rdma_wr;
1945
Sagi Grimberg4c22e072014-12-07 13:12:03 +02001946 isert_dbg("Cmd %p iser_ib_op %d\n", isert_cmd, wr->iser_ib_op);
1947
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001948 switch (wr->iser_ib_op) {
1949 case ISER_IB_RECV:
Sagi Grimberg4c22e072014-12-07 13:12:03 +02001950 isert_err("Got ISER_IB_RECV\n");
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001951 dump_stack();
1952 break;
1953 case ISER_IB_SEND:
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001954 isert_response_completion(tx_desc, isert_cmd,
1955 isert_conn, ib_dev);
1956 break;
1957 case ISER_IB_RDMA_WRITE:
Sagi Grimbergf93f3a72014-02-19 17:50:24 +02001958 isert_completion_rdma_write(tx_desc, isert_cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001959 break;
1960 case ISER_IB_RDMA_READ:
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001961 isert_completion_rdma_read(tx_desc, isert_cmd);
1962 break;
1963 default:
Sagi Grimberg4c22e072014-12-07 13:12:03 +02001964 isert_err("Unknown wr->iser_ib_op: 0x%x\n", wr->iser_ib_op);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001965 dump_stack();
1966 break;
1967 }
1968}
1969
Sagi Grimberg6f0fae32014-12-02 16:57:41 +02001970/**
1971 * is_isert_tx_desc() - Indicate if the completion wr_id
1972 * is a TX descriptor or not.
1973 * @isert_conn: iser connection
1974 * @wr_id: completion WR identifier
1975 *
1976 * Since we cannot rely on wc opcode in FLUSH errors
1977 * we must work around it by checking if the wr_id address
1978 * falls in the iser connection rx_descs buffer. If so
 1979 * it is an RX descriptor, otherwise it is a TX.
1980 */
1981static inline bool
1982is_isert_tx_desc(struct isert_conn *isert_conn, void *wr_id)
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001983{
Sagi Grimberg6f0fae32014-12-02 16:57:41 +02001984 void *start = isert_conn->conn_rx_descs;
1985 int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->conn_rx_descs);
1986
1987 if (wr_id >= start && wr_id < start + len)
1988 return false;
1989
1990 return true;
1991}
1992
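/*
 * Error/flush completion handling: complete the teardown beacon,
 * release TX descriptors, or trigger connection reinstatement once the
 * last posted RX buffer has drained.
 */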
1993static void
1994isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc)
1995{
Sagi Grimbergbdf20e72014-12-02 16:57:43 +02001996 if (wc->wr_id == ISER_BEACON_WRID) {
Sagi Grimberg24f412d2014-12-07 13:12:02 +02001997 isert_info("conn %p completing conn_wait_comp_err\n",
Sagi Grimbergbdf20e72014-12-02 16:57:43 +02001998 isert_conn);
1999 complete(&isert_conn->conn_wait_comp_err);
Nicholas Bellingered4520a2014-12-07 13:12:05 +02002000 } else if (is_isert_tx_desc(isert_conn, (void *)(uintptr_t)wc->wr_id)) {
Sagi Grimbergdf43deb2014-12-02 16:57:38 +02002001 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2002 struct isert_cmd *isert_cmd;
Sagi Grimberg6f0fae32014-12-02 16:57:41 +02002003 struct iser_tx_desc *desc;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002004
Sagi Grimberg6f0fae32014-12-02 16:57:41 +02002005 desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
2006 isert_cmd = desc->isert_cmd;
Sagi Grimbergdf43deb2014-12-02 16:57:38 +02002007 if (!isert_cmd)
2008 isert_unmap_tx_desc(desc, ib_dev);
2009 else
2010 isert_completion_put(desc, isert_cmd, ib_dev, true);
Sagi Grimbergdf43deb2014-12-02 16:57:38 +02002011 } else {
2012 isert_conn->post_recv_buf_count--;
Sagi Grimbergbdf20e72014-12-02 16:57:43 +02002013 if (!isert_conn->post_recv_buf_count)
2014 iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
Sagi Grimbergdf43deb2014-12-02 16:57:38 +02002015 }
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002016}
2017
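/*
 * Dispatch a single work completion to the RX or TX path on success,
 * or to the error path otherwise.
 */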
2018static void
Sagi Grimberg6f0fae32014-12-02 16:57:41 +02002019isert_handle_wc(struct ib_wc *wc)
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002020{
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002021 struct isert_conn *isert_conn;
2022 struct iser_tx_desc *tx_desc;
Sagi Grimberg6f0fae32014-12-02 16:57:41 +02002023 struct iser_rx_desc *rx_desc;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002024
Sagi Grimberg6f0fae32014-12-02 16:57:41 +02002025 isert_conn = wc->qp->qp_context;
2026 if (likely(wc->status == IB_WC_SUCCESS)) {
2027 if (wc->opcode == IB_WC_RECV) {
2028 rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id;
2029 isert_rx_completion(rx_desc, isert_conn, wc->byte_len);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002030 } else {
Sagi Grimberg6f0fae32014-12-02 16:57:41 +02002031 tx_desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
2032 isert_send_completion(tx_desc, isert_conn);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002033 }
Sagi Grimberg6f0fae32014-12-02 16:57:41 +02002034 } else {
2035 if (wc->status != IB_WC_WR_FLUSH_ERR)
Sagi Grimberg24f412d2014-12-07 13:12:02 +02002036 isert_err("wr id %llx status %d vend_err %x\n",
Sagi Grimberg6f0fae32014-12-02 16:57:41 +02002037 wc->wr_id, wc->status, wc->vendor_err);
2038 else
Sagi Grimberg24f412d2014-12-07 13:12:02 +02002039 isert_dbg("flush error: wr id %llx\n", wc->wr_id);
Sagi Grimberg6f0fae32014-12-02 16:57:41 +02002040
2041 if (wc->wr_id != ISER_FASTREG_LI_WRID)
2042 isert_cq_comp_err(isert_conn, wc);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002043 }
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002044}
2045
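/*
 * CQ work handler: poll completions in batches up to a fixed budget,
 * then re-arm the completion queue.
 */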
2046static void
Sagi Grimberg6f0fae32014-12-02 16:57:41 +02002047isert_cq_work(struct work_struct *work)
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002048{
Sagi Grimberg37d9fe82014-12-02 16:57:44 +02002049 enum { isert_poll_budget = 65536 };
Sagi Grimberg4a295ba2014-12-02 16:57:40 +02002050 struct isert_comp *comp = container_of(work, struct isert_comp,
Sagi Grimberg6f0fae32014-12-02 16:57:41 +02002051 work);
Sagi Grimberg36ea63b2014-12-02 16:57:45 +02002052 struct ib_wc *const wcs = comp->wcs;
2053 int i, n, completed = 0;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002054
Sagi Grimberg36ea63b2014-12-02 16:57:45 +02002055 while ((n = ib_poll_cq(comp->cq, ARRAY_SIZE(comp->wcs), wcs)) > 0) {
2056 for (i = 0; i < n; i++)
2057 isert_handle_wc(&wcs[i]);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002058
Sagi Grimberg36ea63b2014-12-02 16:57:45 +02002059 completed += n;
2060 if (completed >= isert_poll_budget)
Sagi Grimberg37d9fe82014-12-02 16:57:44 +02002061 break;
2062 }
2063
Sagi Grimberg6f0fae32014-12-02 16:57:41 +02002064 ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002065}
2066
2067static void
Sagi Grimberg6f0fae32014-12-02 16:57:41 +02002068isert_cq_callback(struct ib_cq *cq, void *context)
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002069{
Sagi Grimberg4a295ba2014-12-02 16:57:40 +02002070 struct isert_comp *comp = context;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002071
Sagi Grimberg6f0fae32014-12-02 16:57:41 +02002072 queue_work(isert_comp_wq, &comp->work);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002073}
2074
2075static int
2076isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
2077{
2078 struct ib_send_wr *wr_failed;
2079 int ret;
2080
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002081 ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr,
2082 &wr_failed);
2083 if (ret) {
Sagi Grimberg24f412d2014-12-07 13:12:02 +02002084 isert_err("ib_post_send failed with %d\n", ret);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002085 return ret;
2086 }
2087 return ret;
2088}
2089
2090static int
2091isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2092{
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07002093 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002094 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2095 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2096 struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
2097 &isert_cmd->tx_desc.iscsi_header;
2098
2099 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2100 iscsit_build_rsp_pdu(cmd, conn, true, hdr);
2101 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2102 /*
2103 * Attach SENSE DATA payload to iSCSI Response PDU
2104 */
2105 if (cmd->se_cmd.sense_buffer &&
2106 ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
2107 (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
Sagi Grimberg67cb3942015-03-29 15:52:05 +03002108 struct isert_device *device = isert_conn->conn_device;
2109 struct ib_device *ib_dev = device->ib_device;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002110 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
Nicholas Bellingerdbbc5d12013-07-03 19:39:37 -07002111 u32 padding, pdu_len;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002112
2113 put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
2114 cmd->sense_buffer);
2115 cmd->se_cmd.scsi_sense_length += sizeof(__be16);
2116
2117 padding = -(cmd->se_cmd.scsi_sense_length) & 3;
2118 hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
Nicholas Bellingerdbbc5d12013-07-03 19:39:37 -07002119 pdu_len = cmd->se_cmd.scsi_sense_length + padding;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002120
Nicholas Bellingerdbbc5d12013-07-03 19:39:37 -07002121 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
2122 (void *)cmd->sense_buffer, pdu_len,
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002123 DMA_TO_DEVICE);
2124
Nicholas Bellingerdbbc5d12013-07-03 19:39:37 -07002125 isert_cmd->pdu_buf_len = pdu_len;
2126 tx_dsg->addr = isert_cmd->pdu_buf_dma;
2127 tx_dsg->length = pdu_len;
Sagi Grimberg67cb3942015-03-29 15:52:05 +03002128 tx_dsg->lkey = device->mr->lkey;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002129 isert_cmd->tx_desc.num_sge = 2;
2130 }
2131
Sagi Grimberg68a86de2014-12-02 16:57:37 +02002132 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002133
Sagi Grimberg4c22e072014-12-07 13:12:03 +02002134 isert_dbg("Posting SCSI Response\n");
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002135
2136 return isert_post_response(isert_conn, isert_cmd);
2137}
2138
Nicholas Bellinger131e6ab2014-03-22 14:55:56 -07002139static void
2140isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2141{
2142 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2143 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2144 struct isert_device *device = isert_conn->conn_device;
2145
2146 spin_lock_bh(&conn->cmd_lock);
2147 if (!list_empty(&cmd->i_conn_node))
2148 list_del_init(&cmd->i_conn_node);
2149 spin_unlock_bh(&conn->cmd_lock);
2150
2151 if (cmd->data_direction == DMA_TO_DEVICE)
2152 iscsit_stop_dataout_timer(cmd);
2153
2154 device->unreg_rdma_mem(isert_cmd, isert_conn);
2155}
2156
Nicholas Bellingere70beee2014-04-02 12:52:38 -07002157static enum target_prot_op
2158isert_get_sup_prot_ops(struct iscsi_conn *conn)
2159{
2160 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2161 struct isert_device *device = isert_conn->conn_device;
2162
Sagi Grimberg23a548e2014-12-02 16:57:35 +02002163 if (conn->tpg->tpg_attrib.t10_pi) {
2164 if (device->pi_capable) {
Sagi Grimberg24f412d2014-12-07 13:12:02 +02002165 isert_info("conn %p PI offload enabled\n", isert_conn);
Sagi Grimberg23a548e2014-12-02 16:57:35 +02002166 isert_conn->pi_support = true;
2167 return TARGET_PROT_ALL;
2168 }
2169 }
2170
Sagi Grimberg24f412d2014-12-07 13:12:02 +02002171 isert_info("conn %p PI offload disabled\n", isert_conn);
Sagi Grimberg23a548e2014-12-02 16:57:35 +02002172 isert_conn->pi_support = false;
Nicholas Bellingere70beee2014-04-02 12:52:38 -07002173
2174 return TARGET_PROT_NORMAL;
2175}
2176
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002177static int
2178isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
2179 bool nopout_response)
2180{
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07002181 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002182 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2183 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2184
2185 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2186 iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
2187 &isert_cmd->tx_desc.iscsi_header,
2188 nopout_response);
2189 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
Sagi Grimberg68a86de2014-12-02 16:57:37 +02002190 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002191
Sagi Grimberg4c22e072014-12-07 13:12:03 +02002192 isert_dbg("conn %p Posting NOPIN Response\n", isert_conn);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002193
2194 return isert_post_response(isert_conn, isert_cmd);
2195}
2196
2197static int
2198isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2199{
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07002200 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002201 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2202 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2203
2204 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2205 iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
2206 &isert_cmd->tx_desc.iscsi_header);
2207 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
Sagi Grimberg68a86de2014-12-02 16:57:37 +02002208 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002209
Sagi Grimberg4c22e072014-12-07 13:12:03 +02002210 isert_dbg("conn %p Posting Logout Response\n", isert_conn);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002211
2212 return isert_post_response(isert_conn, isert_cmd);
2213}
2214
2215static int
2216isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2217{
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07002218 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002219 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2220 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2221
2222 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2223 iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
2224 &isert_cmd->tx_desc.iscsi_header);
2225 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
Sagi Grimberg68a86de2014-12-02 16:57:37 +02002226 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002227
Sagi Grimberg4c22e072014-12-07 13:12:03 +02002228 isert_dbg("conn %p Posting Task Management Response\n", isert_conn);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002229
2230 return isert_post_response(isert_conn, isert_cmd);
2231}
2232
2233static int
2234isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2235{
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07002236 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002237 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2238 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
Sagi Grimberg67cb3942015-03-29 15:52:05 +03002239 struct isert_device *device = isert_conn->conn_device;
2240 struct ib_device *ib_dev = device->ib_device;
Nicholas Bellinger3df8f682013-06-26 02:31:42 -07002241 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
2242 struct iscsi_reject *hdr =
2243 (struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002244
2245 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
Nicholas Bellinger3df8f682013-06-26 02:31:42 -07002246 iscsit_build_reject(cmd, conn, hdr);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002247 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
Nicholas Bellinger3df8f682013-06-26 02:31:42 -07002248
2249 hton24(hdr->dlength, ISCSI_HDR_LEN);
Nicholas Bellingerdbbc5d12013-07-03 19:39:37 -07002250 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
Nicholas Bellinger3df8f682013-06-26 02:31:42 -07002251 (void *)cmd->buf_ptr, ISCSI_HDR_LEN,
2252 DMA_TO_DEVICE);
Nicholas Bellingerdbbc5d12013-07-03 19:39:37 -07002253 isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
2254 tx_dsg->addr = isert_cmd->pdu_buf_dma;
Nicholas Bellinger3df8f682013-06-26 02:31:42 -07002255 tx_dsg->length = ISCSI_HDR_LEN;
Sagi Grimberg67cb3942015-03-29 15:52:05 +03002256 tx_dsg->lkey = device->mr->lkey;
Nicholas Bellinger3df8f682013-06-26 02:31:42 -07002257 isert_cmd->tx_desc.num_sge = 2;
2258
Sagi Grimberg68a86de2014-12-02 16:57:37 +02002259 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002260
Sagi Grimberg4c22e072014-12-07 13:12:03 +02002261 isert_dbg("conn %p Posting Reject\n", isert_conn);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002262
2263 return isert_post_response(isert_conn, isert_cmd);
2264}
2265
2266static int
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07002267isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2268{
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07002269 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07002270 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2271 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2272 struct iscsi_text_rsp *hdr =
2273 (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
2274 u32 txt_rsp_len;
2275 int rc;
2276
2277 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
Sagi Grimberg22c7aaa2014-06-10 18:27:59 +03002278 rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND);
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07002279 if (rc < 0)
2280 return rc;
2281
2282 txt_rsp_len = rc;
2283 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2284
2285 if (txt_rsp_len) {
Sagi Grimberg67cb3942015-03-29 15:52:05 +03002286 struct isert_device *device = isert_conn->conn_device;
2287 struct ib_device *ib_dev = device->ib_device;
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07002288 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
2289 void *txt_rsp_buf = cmd->buf_ptr;
2290
2291 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
2292 txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);
2293
2294 isert_cmd->pdu_buf_len = txt_rsp_len;
2295 tx_dsg->addr = isert_cmd->pdu_buf_dma;
2296 tx_dsg->length = txt_rsp_len;
Sagi Grimberg67cb3942015-03-29 15:52:05 +03002297 tx_dsg->lkey = device->mr->lkey;
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07002298 isert_cmd->tx_desc.num_sge = 2;
2299 }
Sagi Grimberg68a86de2014-12-02 16:57:37 +02002300 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07002301
Sagi Grimbergf64d2792015-01-25 19:11:20 +02002302 isert_dbg("conn %p Text Response\n", isert_conn);
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07002303
2304 return isert_post_response(isert_conn, isert_cmd);
2305}
2306
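/*
 * Fill one RDMA work request with ib_sge entries built from the
 * command scatterlist starting at the given offset; returns the number
 * of SG entries consumed.
 */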
2307static int
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002308isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
2309 struct ib_sge *ib_sge, struct ib_send_wr *send_wr,
2310 u32 data_left, u32 offset)
2311{
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07002312 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002313 struct scatterlist *sg_start, *tmp_sg;
Sagi Grimberg67cb3942015-03-29 15:52:05 +03002314 struct isert_device *device = isert_conn->conn_device;
2315 struct ib_device *ib_dev = device->ib_device;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002316 u32 sg_off, page_off;
2317 int i = 0, sg_nents;
2318
2319 sg_off = offset / PAGE_SIZE;
2320 sg_start = &cmd->se_cmd.t_data_sg[sg_off];
2321 sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
2322 page_off = offset % PAGE_SIZE;
2323
2324 send_wr->sg_list = ib_sge;
2325 send_wr->num_sge = sg_nents;
Sagi Grimbergb0a191e2014-12-02 16:57:39 +02002326 send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002327 /*
2328 * Perform mapping of TCM scatterlist memory ib_sge dma_addr.
2329 */
2330 for_each_sg(sg_start, tmp_sg, sg_nents, i) {
Sagi Grimberg4c22e072014-12-07 13:12:03 +02002331 isert_dbg("RDMA from SGL dma_addr: 0x%llx dma_len: %u, "
2332 "page_off: %u\n",
2333 (unsigned long long)tmp_sg->dma_address,
2334 tmp_sg->length, page_off);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002335
2336 ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
2337 ib_sge->length = min_t(u32, data_left,
2338 ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
Sagi Grimberg67cb3942015-03-29 15:52:05 +03002339 ib_sge->lkey = device->mr->lkey;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002340
Sagi Grimberg4c22e072014-12-07 13:12:03 +02002341 isert_dbg("RDMA ib_sge: addr: 0x%llx length: %u lkey: %x\n",
2342 ib_sge->addr, ib_sge->length, ib_sge->lkey);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002343 page_off = 0;
2344 data_left -= ib_sge->length;
2345 ib_sge++;
Sagi Grimberg24f412d2014-12-07 13:12:02 +02002346 isert_dbg("Incrementing ib_sge pointer to %p\n", ib_sge);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002347 }
2348
Sagi Grimberg24f412d2014-12-07 13:12:02 +02002349 isert_dbg("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
Sagi Grimberg4c22e072014-12-07 13:12:03 +02002350 send_wr->sg_list, send_wr->num_sge);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002351
2352 return sg_nents;
2353}
2354
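/*
 * Non-fastreg RDMA setup: map the data buffer and build a chain of
 * RDMA_READ/RDMA_WRITE work requests covering it, each bounded by
 * max_sge scatter entries.
 */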
2355static int
Vu Pham90ecc6e2013-08-28 23:23:33 +03002356isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2357 struct isert_rdma_wr *wr)
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002358{
2359 struct se_cmd *se_cmd = &cmd->se_cmd;
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07002360 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002361 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002362 struct isert_data_buf *data = &wr->data;
Vu Pham90ecc6e2013-08-28 23:23:33 +03002363 struct ib_send_wr *send_wr;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002364 struct ib_sge *ib_sge;
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002365 u32 offset, data_len, data_left, rdma_write_max, va_offset = 0;
2366 int ret = 0, i, ib_sge_cnt;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002367
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002368 isert_cmd->tx_desc.isert_cmd = isert_cmd;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002369
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002370 offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
2371 ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
2372 se_cmd->t_data_nents, se_cmd->data_length,
2373 offset, wr->iser_ib_op, &wr->data);
2374 if (ret)
2375 return ret;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002376
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002377 data_left = data->len;
2378 offset = data->offset;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002379
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002380 ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002381 if (!ib_sge) {
Sagi Grimberg24f412d2014-12-07 13:12:02 +02002382 isert_warn("Unable to allocate ib_sge\n");
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002383 ret = -ENOMEM;
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002384 goto unmap_cmd;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002385 }
Vu Pham90ecc6e2013-08-28 23:23:33 +03002386 wr->ib_sge = ib_sge;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002387
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002388 wr->send_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002389 wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
2390 GFP_KERNEL);
2391 if (!wr->send_wr) {
Sagi Grimberg24f412d2014-12-07 13:12:02 +02002392 isert_dbg("Unable to allocate wr->send_wr\n");
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002393 ret = -ENOMEM;
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002394 goto unmap_cmd;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002395 }
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002396
2397 wr->isert_cmd = isert_cmd;
2398 rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002399
2400 for (i = 0; i < wr->send_wr_num; i++) {
2401 send_wr = &isert_cmd->rdma_wr.send_wr[i];
2402 data_len = min(data_left, rdma_write_max);
2403
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002404 send_wr->send_flags = 0;
Vu Pham90ecc6e2013-08-28 23:23:33 +03002405 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
2406 send_wr->opcode = IB_WR_RDMA_WRITE;
2407 send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset;
2408 send_wr->wr.rdma.rkey = isert_cmd->read_stag;
2409 if (i + 1 == wr->send_wr_num)
2410 send_wr->next = &isert_cmd->tx_desc.send_wr;
2411 else
2412 send_wr->next = &wr->send_wr[i + 1];
2413 } else {
2414 send_wr->opcode = IB_WR_RDMA_READ;
2415 send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset;
2416 send_wr->wr.rdma.rkey = isert_cmd->write_stag;
2417 if (i + 1 == wr->send_wr_num)
2418 send_wr->send_flags = IB_SEND_SIGNALED;
2419 else
2420 send_wr->next = &wr->send_wr[i + 1];
2421 }
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002422
2423 ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
2424 send_wr, data_len, offset);
2425 ib_sge += ib_sge_cnt;
2426
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002427 offset += data_len;
Vu Pham90ecc6e2013-08-28 23:23:33 +03002428 va_offset += data_len;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002429 data_left -= data_len;
2430 }
Vu Pham90ecc6e2013-08-28 23:23:33 +03002431
2432 return 0;
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002433unmap_cmd:
2434 isert_unmap_data_buf(isert_conn, data);
2435
Vu Pham90ecc6e2013-08-28 23:23:33 +03002436 return ret;
2437}
2438
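/*
 * Walk the scatterlist and flatten it into a fast registration page
 * list: physically contiguous entries are merged into one chunk, and
 * every PAGE_SIZE-aligned address covered by a chunk is recorded in
 * fr_pl[].  Returns the number of pages written.
 */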
2439static int
Vu Pham59464ef2013-08-28 23:23:35 +03002440isert_map_fr_pagelist(struct ib_device *ib_dev,
2441 struct scatterlist *sg_start, int sg_nents, u64 *fr_pl)
2442{
2443 u64 start_addr, end_addr, page, chunk_start = 0;
2444 struct scatterlist *tmp_sg;
2445 int i = 0, new_chunk, last_ent, n_pages;
2446
2447 n_pages = 0;
2448 new_chunk = 1;
2449 last_ent = sg_nents - 1;
2450 for_each_sg(sg_start, tmp_sg, sg_nents, i) {
2451 start_addr = ib_sg_dma_address(ib_dev, tmp_sg);
2452 if (new_chunk)
2453 chunk_start = start_addr;
2454 end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg);
2455
Sagi Grimberg4c22e072014-12-07 13:12:03 +02002456 isert_dbg("SGL[%d] dma_addr: 0x%llx len: %u\n",
2457 i, (unsigned long long)tmp_sg->dma_address,
2458 tmp_sg->length);
Vu Pham59464ef2013-08-28 23:23:35 +03002459
2460 if ((end_addr & ~PAGE_MASK) && i < last_ent) {
2461 new_chunk = 0;
2462 continue;
2463 }
2464 new_chunk = 1;
2465
2466 page = chunk_start & PAGE_MASK;
2467 do {
2468 fr_pl[n_pages++] = page;
Sagi Grimberg4c22e072014-12-07 13:12:03 +02002469 isert_dbg("Mapped page_list[%d] page_addr: 0x%llx\n",
2470 n_pages - 1, page);
Vu Pham59464ef2013-08-28 23:23:35 +03002471 page += PAGE_SIZE;
2472 } while (page < end_addr);
2473 }
2474
2475 return n_pages;
2476}
2477
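/*
 * Prepare a LOCAL_INV work request that invalidates the current rkey
 * of @mr, and bump the key so the next fast registration of this MR
 * uses a fresh rkey.
 */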
Sagi Grimberg10633c32014-12-07 13:12:04 +02002478static inline void
2479isert_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
2480{
2481 u32 rkey;
2482
2483 memset(inv_wr, 0, sizeof(*inv_wr));
2484 inv_wr->wr_id = ISER_FASTREG_LI_WRID;
2485 inv_wr->opcode = IB_WR_LOCAL_INV;
2486 inv_wr->ex.invalidate_rkey = mr->rkey;
2487
2488 /* Bump the key */
2489 rkey = ib_inc_rkey(mr->rkey);
2490 ib_update_fast_reg_key(mr, rkey);
2491}
2492
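/*
 * Register @mem (the data or protection buffer, selected by @ind) and
 * return the resulting SGE in @sge.  A buffer with a single DMA entry
 * is described directly with the device's global MR and needs no
 * registration.  Otherwise the descriptor's page list is built and an
 * IB_WR_FAST_REG_MR is posted, preceded by a LOCAL_INV whenever the
 * corresponding key is no longer marked valid in fr_desc->ind.
 */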
Vu Pham59464ef2013-08-28 23:23:35 +03002493static int
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002494isert_fast_reg_mr(struct isert_conn *isert_conn,
2495 struct fast_reg_descriptor *fr_desc,
2496 struct isert_data_buf *mem,
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002497 enum isert_indicator ind,
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002498 struct ib_sge *sge)
Vu Pham59464ef2013-08-28 23:23:35 +03002499{
Sagi Grimberg67cb3942015-03-29 15:52:05 +03002500 struct isert_device *device = isert_conn->conn_device;
2501 struct ib_device *ib_dev = device->ib_device;
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002502 struct ib_mr *mr;
2503 struct ib_fast_reg_page_list *frpl;
Vu Pham59464ef2013-08-28 23:23:35 +03002504 struct ib_send_wr fr_wr, inv_wr;
2505 struct ib_send_wr *bad_wr, *wr = NULL;
Sagi Grimberg9bd626e2014-01-09 18:40:54 +02002506 int ret, pagelist_len;
2507 u32 page_off;
Vu Pham59464ef2013-08-28 23:23:35 +03002508
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002509 if (mem->dma_nents == 1) {
Sagi Grimberg67cb3942015-03-29 15:52:05 +03002510 sge->lkey = device->mr->lkey;
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002511 sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
2512 sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
Sagi Grimberg4c22e072014-12-07 13:12:03 +02002513 isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
2514 sge->addr, sge->length, sge->lkey);
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002515 return 0;
2516 }
2517
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002518 if (ind == ISERT_DATA_KEY_VALID) {
2519 /* Registering data buffer */
2520 mr = fr_desc->data_mr;
2521 frpl = fr_desc->data_frpl;
2522 } else {
2523 /* Registering protection buffer */
2524 mr = fr_desc->pi_ctx->prot_mr;
2525 frpl = fr_desc->pi_ctx->prot_frpl;
2526 }
2527
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002528 page_off = mem->offset % PAGE_SIZE;
Vu Pham59464ef2013-08-28 23:23:35 +03002529
Sagi Grimberg24f412d2014-12-07 13:12:02 +02002530 isert_dbg("Use fr_desc %p sg_nents %d offset %u\n",
Sagi Grimberg4c22e072014-12-07 13:12:03 +02002531 fr_desc, mem->nents, mem->offset);
Vu Pham59464ef2013-08-28 23:23:35 +03002532
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002533 pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents,
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002534 &frpl->page_list[0]);
Vu Pham59464ef2013-08-28 23:23:35 +03002535
Sagi Grimberg10633c32014-12-07 13:12:04 +02002536 if (!(fr_desc->ind & ind)) {
2537 isert_inv_rkey(&inv_wr, mr);
Vu Pham59464ef2013-08-28 23:23:35 +03002538 wr = &inv_wr;
Vu Pham59464ef2013-08-28 23:23:35 +03002539 }
2540
2541 /* Prepare FASTREG WR */
2542 memset(&fr_wr, 0, sizeof(fr_wr));
Nicholas Bellinger9bb4ca62014-02-27 07:02:48 -08002543 fr_wr.wr_id = ISER_FASTREG_LI_WRID;
Vu Pham59464ef2013-08-28 23:23:35 +03002544 fr_wr.opcode = IB_WR_FAST_REG_MR;
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002545 fr_wr.wr.fast_reg.iova_start = frpl->page_list[0] + page_off;
2546 fr_wr.wr.fast_reg.page_list = frpl;
Vu Pham59464ef2013-08-28 23:23:35 +03002547 fr_wr.wr.fast_reg.page_list_len = pagelist_len;
2548 fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002549 fr_wr.wr.fast_reg.length = mem->len;
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002550 fr_wr.wr.fast_reg.rkey = mr->rkey;
Vu Pham59464ef2013-08-28 23:23:35 +03002551 fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;
2552
2553 if (!wr)
2554 wr = &fr_wr;
2555 else
2556 wr->next = &fr_wr;
2557
2558 ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
2559 if (ret) {
Sagi Grimberg24f412d2014-12-07 13:12:02 +02002560 isert_err("fast registration failed, ret:%d\n", ret);
Vu Pham59464ef2013-08-28 23:23:35 +03002561 return ret;
2562 }
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002563 fr_desc->ind &= ~ind;
Vu Pham59464ef2013-08-28 23:23:35 +03002564
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002565 sge->lkey = mr->lkey;
2566 sge->addr = frpl->page_list[0] + page_off;
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002567 sge->length = mem->len;
Vu Pham59464ef2013-08-28 23:23:35 +03002568
Sagi Grimberg4c22e072014-12-07 13:12:03 +02002569 isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
2570 sge->addr, sge->length, sge->lkey);
Vu Pham59464ef2013-08-28 23:23:35 +03002571
2572 return ret;
2573}
2574
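/*
 * Fill a T10-DIF signature domain from the command: CRC block guard,
 * protection interval equal to the backend block size, and the
 * command's reference tag seed.  TYPE1/TYPE2 protection additionally
 * enables reference tag remapping.
 */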
Sagi Grimberg3d73cf12014-08-13 19:54:34 +03002575static inline void
2576isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs,
2577 struct ib_sig_domain *domain)
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002578{
Sagi Grimberg78eda2b2014-08-13 19:54:35 +03002579 domain->sig_type = IB_SIG_TYPE_T10_DIF;
Sagi Grimberg3d73cf12014-08-13 19:54:34 +03002580 domain->sig.dif.bg_type = IB_T10DIF_CRC;
2581 domain->sig.dif.pi_interval = se_cmd->se_dev->dev_attrib.block_size;
2582 domain->sig.dif.ref_tag = se_cmd->reftag_seed;
Sagi Grimberg78eda2b2014-08-13 19:54:35 +03002583 /*
2584	 * At the moment we hard-code these values, but if in the future
2585	 * the target core wants to control them, we will take them
2586	 * from se_cmd.
2587 */
2588 domain->sig.dif.apptag_check_mask = 0xffff;
2589 domain->sig.dif.app_escape = true;
2590 domain->sig.dif.ref_escape = true;
2591 if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT ||
2592 se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)
2593 domain->sig.dif.ref_remap = true;
Sagi Grimberg3d73cf12014-08-13 19:54:34 +03002594}
2595
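/*
 * Translate the target-core PI operation into wire/memory signature
 * domains: for INSERT/STRIP operations only one side carries T10-DIF
 * (the other is IB_SIG_TYPE_NONE), while PASS operations carry it on
 * both sides.
 */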
2596static int
2597isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
2598{
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002599 switch (se_cmd->prot_op) {
2600 case TARGET_PROT_DIN_INSERT:
2601 case TARGET_PROT_DOUT_STRIP:
Sagi Grimberg78eda2b2014-08-13 19:54:35 +03002602 sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
Sagi Grimberg3d73cf12014-08-13 19:54:34 +03002603 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002604 break;
2605 case TARGET_PROT_DOUT_INSERT:
2606 case TARGET_PROT_DIN_STRIP:
Sagi Grimberg78eda2b2014-08-13 19:54:35 +03002607 sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
Sagi Grimberg3d73cf12014-08-13 19:54:34 +03002608 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002609 break;
2610 case TARGET_PROT_DIN_PASS:
2611 case TARGET_PROT_DOUT_PASS:
Sagi Grimberg3d73cf12014-08-13 19:54:34 +03002612 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
2613 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002614 break;
2615 default:
Sagi Grimberg24f412d2014-12-07 13:12:02 +02002616 isert_err("Unsupported PI operation %d\n", se_cmd->prot_op);
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002617 return -EINVAL;
2618 }
2619
2620 return 0;
2621}
2622
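/*
 * Build the ib_sig_attrs check_mask from the target-core DIF check
 * flags.  Note that, as written, both of the lower bit groups are
 * driven by TARGET_DIF_CHECK_REFTAG.
 */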
2623static inline u8
2624isert_set_prot_checks(u8 prot_checks)
2625{
2626 return (prot_checks & TARGET_DIF_CHECK_GUARD ? 0xc0 : 0) |
2627 (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
2628 (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
2629}
2630
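/*
 * Register the signature MR for a PI command: build the signature
 * attributes from se_cmd, optionally chain a LOCAL_INV of the old
 * signature key, post an IB_WR_REG_SIG_MR over the already registered
 * data (and protection) SGEs, and describe the result in ib_sg[SIG].
 * When guards travel on the wire the signature SGE length covers the
 * protection bytes as well.
 */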
2631static int
Sagi Grimberg570db172014-12-02 16:57:31 +02002632isert_reg_sig_mr(struct isert_conn *isert_conn,
2633 struct se_cmd *se_cmd,
2634 struct isert_rdma_wr *rdma_wr,
2635 struct fast_reg_descriptor *fr_desc)
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002636{
2637 struct ib_send_wr sig_wr, inv_wr;
2638 struct ib_send_wr *bad_wr, *wr = NULL;
2639 struct pi_context *pi_ctx = fr_desc->pi_ctx;
2640 struct ib_sig_attrs sig_attrs;
2641 int ret;
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002642
2643 memset(&sig_attrs, 0, sizeof(sig_attrs));
2644 ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
2645 if (ret)
2646 goto err;
2647
2648 sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks);
2649
2650 if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) {
Sagi Grimberg10633c32014-12-07 13:12:04 +02002651 isert_inv_rkey(&inv_wr, pi_ctx->sig_mr);
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002652 wr = &inv_wr;
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002653 }
2654
2655 memset(&sig_wr, 0, sizeof(sig_wr));
2656 sig_wr.opcode = IB_WR_REG_SIG_MR;
Sagi Grimbergc2caa202014-03-17 12:52:16 +02002657 sig_wr.wr_id = ISER_FASTREG_LI_WRID;
Sagi Grimberg570db172014-12-02 16:57:31 +02002658 sig_wr.sg_list = &rdma_wr->ib_sg[DATA];
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002659 sig_wr.num_sge = 1;
2660 sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE;
2661 sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
2662 sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
2663 if (se_cmd->t_prot_sg)
Sagi Grimberg570db172014-12-02 16:57:31 +02002664 sig_wr.wr.sig_handover.prot = &rdma_wr->ib_sg[PROT];
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002665
2666 if (!wr)
2667 wr = &sig_wr;
2668 else
2669 wr->next = &sig_wr;
2670
2671 ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
2672 if (ret) {
Sagi Grimberg24f412d2014-12-07 13:12:02 +02002673 isert_err("fast registration failed, ret:%d\n", ret);
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002674 goto err;
2675 }
2676 fr_desc->ind &= ~ISERT_SIG_KEY_VALID;
2677
Sagi Grimberg570db172014-12-02 16:57:31 +02002678 rdma_wr->ib_sg[SIG].lkey = pi_ctx->sig_mr->lkey;
2679 rdma_wr->ib_sg[SIG].addr = 0;
2680 rdma_wr->ib_sg[SIG].length = se_cmd->data_length;
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002681 if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
2682 se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
2683 /*
2684 * We have protection guards on the wire
2685		 * so we need to set a larger transfer
2686 */
Sagi Grimberg570db172014-12-02 16:57:31 +02002687 rdma_wr->ib_sg[SIG].length += se_cmd->prot_length;
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002688
Sagi Grimberg24f412d2014-12-07 13:12:02 +02002689 isert_dbg("sig_sge: addr: 0x%llx length: %u lkey: %x\n",
Sagi Grimberg570db172014-12-02 16:57:31 +02002690 rdma_wr->ib_sg[SIG].addr, rdma_wr->ib_sg[SIG].length,
2691 rdma_wr->ib_sg[SIG].lkey);
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002692err:
2693 return ret;
2694}
2695
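/*
 * Set up protection information for a PI command: allocate the
 * descriptor's pi_ctx on first use, map and fast-register the
 * protection scatterlist if the command carries one, then register
 * the signature MR and mark the descriptor ISERT_PROTECTED.
 */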
Vu Pham59464ef2013-08-28 23:23:35 +03002696static int
Sagi Grimberg570db172014-12-02 16:57:31 +02002697isert_handle_prot_cmd(struct isert_conn *isert_conn,
2698 struct isert_cmd *isert_cmd,
2699 struct isert_rdma_wr *wr)
2700{
2701 struct isert_device *device = isert_conn->conn_device;
2702 struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
2703 int ret;
2704
2705 if (!wr->fr_desc->pi_ctx) {
2706 ret = isert_create_pi_ctx(wr->fr_desc,
2707 device->ib_device,
Sagi Grimberg67cb3942015-03-29 15:52:05 +03002708 device->pd);
Sagi Grimberg570db172014-12-02 16:57:31 +02002709 if (ret) {
Sagi Grimberg24f412d2014-12-07 13:12:02 +02002710 isert_err("conn %p failed to allocate pi_ctx\n",
Sagi Grimberg570db172014-12-02 16:57:31 +02002711 isert_conn);
2712 return ret;
2713 }
2714 }
2715
2716 if (se_cmd->t_prot_sg) {
2717 ret = isert_map_data_buf(isert_conn, isert_cmd,
2718 se_cmd->t_prot_sg,
2719 se_cmd->t_prot_nents,
2720 se_cmd->prot_length,
2721 0, wr->iser_ib_op, &wr->prot);
2722 if (ret) {
Sagi Grimberg24f412d2014-12-07 13:12:02 +02002723 isert_err("conn %p failed to map protection buffer\n",
Sagi Grimberg570db172014-12-02 16:57:31 +02002724 isert_conn);
2725 return ret;
2726 }
2727
2728 memset(&wr->ib_sg[PROT], 0, sizeof(wr->ib_sg[PROT]));
2729 ret = isert_fast_reg_mr(isert_conn, wr->fr_desc, &wr->prot,
2730 ISERT_PROT_KEY_VALID, &wr->ib_sg[PROT]);
2731 if (ret) {
Sagi Grimberg24f412d2014-12-07 13:12:02 +02002732			isert_err("conn %p failed to fast reg protection mr\n",
Sagi Grimberg570db172014-12-02 16:57:31 +02002733 isert_conn);
2734 goto unmap_prot_cmd;
2735 }
2736 }
2737
2738 ret = isert_reg_sig_mr(isert_conn, se_cmd, wr, wr->fr_desc);
2739 if (ret) {
Sagi Grimberg24f412d2014-12-07 13:12:02 +02002740		isert_err("conn %p failed to register signature mr\n",
Sagi Grimberg570db172014-12-02 16:57:31 +02002741 isert_conn);
2742 goto unmap_prot_cmd;
2743 }
2744 wr->fr_desc->ind |= ISERT_PROTECTED;
2745
2746 return 0;
2747
2748unmap_prot_cmd:
2749 if (se_cmd->t_prot_sg)
2750 isert_unmap_data_buf(isert_conn, &wr->prot);
2751
2752 return ret;
2753}
2754
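/*
 * reg_rdma_mem backend for the fast registration path: map the data
 * buffer, take a fast_reg_descriptor from the connection pool when the
 * buffer spans more than one DMA entry or the command needs PI,
 * register the data (and protection) memory, and build a single RDMA
 * READ/WRITE work request over the resulting SGE.  The RDMA_WRITE is
 * signaled only for PI commands; for non-PI commands the response PDU
 * is chained after it by isert_put_datain().
 */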
2755static int
Sagi Grimberga3a5a822014-01-09 18:40:50 +02002756isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2757 struct isert_rdma_wr *wr)
Vu Pham59464ef2013-08-28 23:23:35 +03002758{
2759 struct se_cmd *se_cmd = &cmd->se_cmd;
2760 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002761 struct isert_conn *isert_conn = conn->context;
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002762 struct fast_reg_descriptor *fr_desc = NULL;
Sagi Grimberg570db172014-12-02 16:57:31 +02002763 struct ib_send_wr *send_wr;
2764 struct ib_sge *ib_sg;
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002765 u32 offset;
2766 int ret = 0;
Vu Pham59464ef2013-08-28 23:23:35 +03002767 unsigned long flags;
2768
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002769 isert_cmd->tx_desc.isert_cmd = isert_cmd;
2770
2771 offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
2772 ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
2773 se_cmd->t_data_nents, se_cmd->data_length,
2774 offset, wr->iser_ib_op, &wr->data);
2775 if (ret)
2776 return ret;
2777
Sagi Grimberg302cc7c2014-12-02 16:57:34 +02002778 if (wr->data.dma_nents != 1 || isert_prot_cmd(isert_conn, se_cmd)) {
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002779 spin_lock_irqsave(&isert_conn->conn_lock, flags);
2780 fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
2781 struct fast_reg_descriptor, list);
2782 list_del(&fr_desc->list);
2783 spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
2784 wr->fr_desc = fr_desc;
Vu Pham59464ef2013-08-28 23:23:35 +03002785 }
2786
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002787 ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data,
Sagi Grimberg570db172014-12-02 16:57:31 +02002788 ISERT_DATA_KEY_VALID, &wr->ib_sg[DATA]);
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002789 if (ret)
2790 goto unmap_cmd;
Vu Pham59464ef2013-08-28 23:23:35 +03002791
Sagi Grimberg302cc7c2014-12-02 16:57:34 +02002792 if (isert_prot_cmd(isert_conn, se_cmd)) {
Sagi Grimberg570db172014-12-02 16:57:31 +02002793 ret = isert_handle_prot_cmd(isert_conn, isert_cmd, wr);
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002794 if (ret)
Sagi Grimberg570db172014-12-02 16:57:31 +02002795 goto unmap_cmd;
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002796
Sagi Grimberg570db172014-12-02 16:57:31 +02002797 ib_sg = &wr->ib_sg[SIG];
2798 } else {
2799 ib_sg = &wr->ib_sg[DATA];
2800 }
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002801
Sagi Grimberg570db172014-12-02 16:57:31 +02002802 memcpy(&wr->s_ib_sge, ib_sg, sizeof(*ib_sg));
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002803 wr->ib_sge = &wr->s_ib_sge;
Vu Pham59464ef2013-08-28 23:23:35 +03002804 wr->send_wr_num = 1;
2805 memset(&wr->s_send_wr, 0, sizeof(*send_wr));
2806 wr->send_wr = &wr->s_send_wr;
Vu Pham59464ef2013-08-28 23:23:35 +03002807 wr->isert_cmd = isert_cmd;
Vu Pham59464ef2013-08-28 23:23:35 +03002808
2809 send_wr = &isert_cmd->rdma_wr.s_send_wr;
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002810 send_wr->sg_list = &wr->s_ib_sge;
Vu Pham59464ef2013-08-28 23:23:35 +03002811 send_wr->num_sge = 1;
Sagi Grimbergb0a191e2014-12-02 16:57:39 +02002812 send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
Vu Pham59464ef2013-08-28 23:23:35 +03002813 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
2814 send_wr->opcode = IB_WR_RDMA_WRITE;
2815 send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
2816 send_wr->wr.rdma.rkey = isert_cmd->read_stag;
Sagi Grimberg302cc7c2014-12-02 16:57:34 +02002817 send_wr->send_flags = !isert_prot_cmd(isert_conn, se_cmd) ?
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002818 0 : IB_SEND_SIGNALED;
Vu Pham59464ef2013-08-28 23:23:35 +03002819 } else {
2820 send_wr->opcode = IB_WR_RDMA_READ;
2821 send_wr->wr.rdma.remote_addr = isert_cmd->write_va;
2822 send_wr->wr.rdma.rkey = isert_cmd->write_stag;
2823 send_wr->send_flags = IB_SEND_SIGNALED;
2824 }
2825
Vu Pham59464ef2013-08-28 23:23:35 +03002826 return 0;
Sagi Grimberg570db172014-12-02 16:57:31 +02002827
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002828unmap_cmd:
2829 if (fr_desc) {
2830 spin_lock_irqsave(&isert_conn->conn_lock, flags);
2831 list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
2832 spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
2833 }
2834 isert_unmap_data_buf(isert_conn, &wr->data);
Vu Pham59464ef2013-08-28 23:23:35 +03002835
Vu Pham59464ef2013-08-28 23:23:35 +03002836 return ret;
2837}
2838
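/*
 * queue_data_in callback: register memory for an RDMA_WRITE of the
 * Data-In payload.  For non-PI commands the SCSI response PDU is built
 * here as well and chained after the RDMA_WRITE, so a single
 * ib_post_send() pushes both the data and the response.
 */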
2839static int
Vu Pham90ecc6e2013-08-28 23:23:33 +03002840isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2841{
2842 struct se_cmd *se_cmd = &cmd->se_cmd;
Vu Pham59464ef2013-08-28 23:23:35 +03002843 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
Vu Pham90ecc6e2013-08-28 23:23:33 +03002844 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
2845 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
Vu Phamd40945d2013-08-28 23:23:34 +03002846 struct isert_device *device = isert_conn->conn_device;
Vu Pham90ecc6e2013-08-28 23:23:33 +03002847 struct ib_send_wr *wr_failed;
2848 int rc;
2849
Sagi Grimberg24f412d2014-12-07 13:12:02 +02002850 isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n",
Vu Pham90ecc6e2013-08-28 23:23:33 +03002851 isert_cmd, se_cmd->data_length);
Sagi Grimberg4c22e072014-12-07 13:12:03 +02002852
Vu Pham90ecc6e2013-08-28 23:23:33 +03002853 wr->iser_ib_op = ISER_IB_RDMA_WRITE;
Vu Phamd40945d2013-08-28 23:23:34 +03002854 rc = device->reg_rdma_mem(conn, cmd, wr);
Vu Pham90ecc6e2013-08-28 23:23:33 +03002855 if (rc) {
Sagi Grimberg24f412d2014-12-07 13:12:02 +02002856 isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
Vu Pham90ecc6e2013-08-28 23:23:33 +03002857 return rc;
2858 }
2859
Sagi Grimberg302cc7c2014-12-02 16:57:34 +02002860 if (!isert_prot_cmd(isert_conn, se_cmd)) {
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002861 /*
2862		 * Build isert_cmd->tx_desc for iSCSI response PDU and attach
2863 */
2864 isert_create_send_desc(isert_conn, isert_cmd,
2865 &isert_cmd->tx_desc);
2866 iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
2867 &isert_cmd->tx_desc.iscsi_header);
2868 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2869 isert_init_send_wr(isert_conn, isert_cmd,
Sagi Grimberg68a86de2014-12-02 16:57:37 +02002870 &isert_cmd->tx_desc.send_wr);
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002871 isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr;
Sagi Grimberg897bb2c2014-03-17 12:52:17 +02002872 wr->send_wr_num += 1;
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002873 }
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002874
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002875 rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
Sagi Grimbergbdf20e72014-12-02 16:57:43 +02002876 if (rc)
Sagi Grimberg24f412d2014-12-07 13:12:02 +02002877 isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002878
Sagi Grimberg302cc7c2014-12-02 16:57:34 +02002879 if (!isert_prot_cmd(isert_conn, se_cmd))
Sagi Grimberg24f412d2014-12-07 13:12:02 +02002880 isert_dbg("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002881 "READ\n", isert_cmd);
2882 else
Sagi Grimberg24f412d2014-12-07 13:12:02 +02002883 isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n",
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002884 isert_cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002885
Vu Pham90ecc6e2013-08-28 23:23:33 +03002886 return 1;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002887}
2888
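/*
 * get_dataout callback: register memory for an RDMA_READ that pulls
 * the solicited Data-Out payload from the initiator into the command's
 * data buffer, starting at write_data_done.
 */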
2889static int
2890isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
2891{
2892 struct se_cmd *se_cmd = &cmd->se_cmd;
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07002893 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002894 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
2895 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
Vu Phamd40945d2013-08-28 23:23:34 +03002896 struct isert_device *device = isert_conn->conn_device;
Vu Pham90ecc6e2013-08-28 23:23:33 +03002897 struct ib_send_wr *wr_failed;
2898 int rc;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002899
Sagi Grimberg24f412d2014-12-07 13:12:02 +02002900 isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
Vu Pham90ecc6e2013-08-28 23:23:33 +03002901 isert_cmd, se_cmd->data_length, cmd->write_data_done);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002902 wr->iser_ib_op = ISER_IB_RDMA_READ;
Vu Phamd40945d2013-08-28 23:23:34 +03002903 rc = device->reg_rdma_mem(conn, cmd, wr);
Vu Pham90ecc6e2013-08-28 23:23:33 +03002904 if (rc) {
Sagi Grimberg24f412d2014-12-07 13:12:02 +02002905 isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
Vu Pham90ecc6e2013-08-28 23:23:33 +03002906 return rc;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002907 }
2908
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002909 rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
Sagi Grimbergbdf20e72014-12-02 16:57:43 +02002910 if (rc)
Sagi Grimberg24f412d2014-12-07 13:12:02 +02002911 isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
Sagi Grimbergbdf20e72014-12-02 16:57:43 +02002912
Sagi Grimberg24f412d2014-12-07 13:12:02 +02002913	isert_dbg("Cmd: %p posted RDMA_READ memory for iSER Data WRITE\n",
Vu Pham90ecc6e2013-08-28 23:23:33 +03002914 isert_cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002915
Vu Pham90ecc6e2013-08-28 23:23:33 +03002916 return 0;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002917}
2918
2919static int
2920isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
2921{
2922 int ret;
2923
2924 switch (state) {
2925 case ISTATE_SEND_NOPIN_WANT_RESPONSE:
2926 ret = isert_put_nopin(cmd, conn, false);
2927 break;
2928 default:
Sagi Grimberg24f412d2014-12-07 13:12:02 +02002929 isert_err("Unknown immediate state: 0x%02x\n", state);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002930 ret = -EINVAL;
2931 break;
2932 }
2933
2934 return ret;
2935}
2936
2937static int
2938isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
2939{
Sagi Grimberg991bb762014-12-07 13:12:01 +02002940 struct isert_conn *isert_conn = conn->context;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002941 int ret;
2942
2943 switch (state) {
2944 case ISTATE_SEND_LOGOUTRSP:
2945 ret = isert_put_logout_rsp(cmd, conn);
Sagi Grimberg991bb762014-12-07 13:12:01 +02002946 if (!ret)
2947 isert_conn->logout_posted = true;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002948 break;
2949 case ISTATE_SEND_NOPIN:
2950 ret = isert_put_nopin(cmd, conn, true);
2951 break;
2952 case ISTATE_SEND_TASKMGTRSP:
2953 ret = isert_put_tm_rsp(cmd, conn);
2954 break;
2955 case ISTATE_SEND_REJECT:
2956 ret = isert_put_reject(cmd, conn);
2957 break;
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07002958 case ISTATE_SEND_TEXTRSP:
2959 ret = isert_put_text_rsp(cmd, conn);
2960 break;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002961 case ISTATE_SEND_STATUS:
2962 /*
2963		 * Special case for sending non-GOOD SCSI status from TX thread
2964		 * context during pre se_cmd execution failure.
2965 */
2966 ret = isert_put_response(conn, cmd);
2967 break;
2968 default:
Sagi Grimberg24f412d2014-12-07 13:12:02 +02002969 isert_err("Unknown response state: 0x%02x\n", state);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002970 ret = -EINVAL;
2971 break;
2972 }
2973
2974 return ret;
2975}
2976
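/*
 * Create an rdma_cm listener for the network portal: allocate a CM ID
 * with isert_cma_handler as the event handler, bind it to the portal
 * sockaddr, and start listening with ISERT_RDMA_LISTEN_BACKLOG.
 */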
Sagi Grimbergca6c1d82014-12-02 16:57:27 +02002977struct rdma_cm_id *
2978isert_setup_id(struct isert_np *isert_np)
2979{
2980 struct iscsi_np *np = isert_np->np;
2981 struct rdma_cm_id *id;
2982 struct sockaddr *sa;
2983 int ret;
2984
2985 sa = (struct sockaddr *)&np->np_sockaddr;
Sagi Grimberg24f412d2014-12-07 13:12:02 +02002986 isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);
Sagi Grimbergca6c1d82014-12-02 16:57:27 +02002987
2988 id = rdma_create_id(isert_cma_handler, isert_np,
2989 RDMA_PS_TCP, IB_QPT_RC);
2990 if (IS_ERR(id)) {
Sagi Grimberg24f412d2014-12-07 13:12:02 +02002991 isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
Sagi Grimbergca6c1d82014-12-02 16:57:27 +02002992 ret = PTR_ERR(id);
2993 goto out;
2994 }
Sagi Grimberg24f412d2014-12-07 13:12:02 +02002995 isert_dbg("id %p context %p\n", id, id->context);
Sagi Grimbergca6c1d82014-12-02 16:57:27 +02002996
2997 ret = rdma_bind_addr(id, sa);
2998 if (ret) {
Sagi Grimberg24f412d2014-12-07 13:12:02 +02002999 isert_err("rdma_bind_addr() failed: %d\n", ret);
Sagi Grimbergca6c1d82014-12-02 16:57:27 +02003000 goto out_id;
3001 }
3002
3003 ret = rdma_listen(id, ISERT_RDMA_LISTEN_BACKLOG);
3004 if (ret) {
Sagi Grimberg24f412d2014-12-07 13:12:02 +02003005 isert_err("rdma_listen() failed: %d\n", ret);
Sagi Grimbergca6c1d82014-12-02 16:57:27 +02003006 goto out_id;
3007 }
3008
3009 return id;
3010out_id:
3011 rdma_destroy_id(id);
3012out:
3013 return ERR_PTR(ret);
3014}
3015
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003016static int
3017isert_setup_np(struct iscsi_np *np,
3018 struct __kernel_sockaddr_storage *ksockaddr)
3019{
3020 struct isert_np *isert_np;
3021 struct rdma_cm_id *isert_lid;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003022 int ret;
3023
3024 isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
3025 if (!isert_np) {
Sagi Grimberg24f412d2014-12-07 13:12:02 +02003026 isert_err("Unable to allocate struct isert_np\n");
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003027 return -ENOMEM;
3028 }
Sagi Grimberg531b7bf2014-04-29 13:13:45 +03003029 sema_init(&isert_np->np_sem, 0);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003030 mutex_init(&isert_np->np_accept_mutex);
3031 INIT_LIST_HEAD(&isert_np->np_accept_list);
3032 init_completion(&isert_np->np_login_comp);
Sagi Grimbergca6c1d82014-12-02 16:57:27 +02003033 isert_np->np = np;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003034
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003035 /*
3036 * Setup the np->np_sockaddr from the passed sockaddr setup
3037 * in iscsi_target_configfs.c code..
3038 */
3039 memcpy(&np->np_sockaddr, ksockaddr,
3040 sizeof(struct __kernel_sockaddr_storage));
3041
Sagi Grimbergca6c1d82014-12-02 16:57:27 +02003042 isert_lid = isert_setup_id(isert_np);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003043 if (IS_ERR(isert_lid)) {
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003044 ret = PTR_ERR(isert_lid);
3045 goto out;
3046 }
3047
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003048 isert_np->np_cm_id = isert_lid;
3049 np->np_context = isert_np;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003050
3051 return 0;
3052
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003053out:
3054 kfree(isert_np);
Sagi Grimbergca6c1d82014-12-02 16:57:27 +02003055
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003056 return ret;
3057}
3058
3059static int
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003060isert_rdma_accept(struct isert_conn *isert_conn)
3061{
3062 struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
3063 struct rdma_conn_param cp;
3064 int ret;
3065
3066 memset(&cp, 0, sizeof(struct rdma_conn_param));
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003067 cp.initiator_depth = isert_conn->initiator_depth;
3068 cp.retry_count = 7;
3069 cp.rnr_retry_count = 7;
3070
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003071 ret = rdma_accept(cm_id, &cp);
3072 if (ret) {
Sagi Grimberg24f412d2014-12-07 13:12:02 +02003073 isert_err("rdma_accept() failed with: %d\n", ret);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003074 return ret;
3075 }
3076
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003077 return 0;
3078}
3079
3080static int
3081isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
3082{
3083 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
3084 int ret;
3085
Sagi Grimberg24f412d2014-12-07 13:12:02 +02003086 isert_info("before login_req comp conn: %p\n", isert_conn);
Sagi Grimberg2371e5d2014-12-02 16:57:21 +02003087 ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
3088 if (ret) {
Sagi Grimberg24f412d2014-12-07 13:12:02 +02003089 isert_err("isert_conn %p interrupted before got login req\n",
Sagi Grimberg2371e5d2014-12-02 16:57:21 +02003090 isert_conn);
3091 return ret;
3092 }
3093 reinit_completion(&isert_conn->login_req_comp);
3094
Nicholas Bellinger6faaa852013-08-18 16:35:46 -07003095 /*
3096 * For login requests after the first PDU, isert_rx_login_req() will
3097 * kick schedule_delayed_work(&conn->login_work) as the packet is
3098 * received, which turns this callback from iscsi_target_do_login_rx()
3099 * into a NOP.
3100 */
3101 if (!login->first_request)
3102 return 0;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003103
Sagi Grimberg2371e5d2014-12-02 16:57:21 +02003104 isert_rx_login_req(isert_conn);
3105
Sagi Grimberg24f412d2014-12-07 13:12:02 +02003106 isert_info("before conn_login_comp conn: %p\n", conn);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003107 ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
3108 if (ret)
3109 return ret;
3110
Sagi Grimberg24f412d2014-12-07 13:12:02 +02003111 isert_info("processing login->req: %p\n", login->req);
Sagi Grimberg2371e5d2014-12-02 16:57:21 +02003112
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003113 return 0;
3114}
3115
3116static void
3117isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
3118 struct isert_conn *isert_conn)
3119{
3120 struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
3121 struct rdma_route *cm_route = &cm_id->route;
3122 struct sockaddr_in *sock_in;
3123 struct sockaddr_in6 *sock_in6;
3124
3125 conn->login_family = np->np_sockaddr.ss_family;
3126
3127 if (np->np_sockaddr.ss_family == AF_INET6) {
3128 sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.dst_addr;
3129 snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
3130 &sock_in6->sin6_addr.in6_u);
3131 conn->login_port = ntohs(sock_in6->sin6_port);
3132
3133 sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.src_addr;
3134 snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
3135 &sock_in6->sin6_addr.in6_u);
3136 conn->local_port = ntohs(sock_in6->sin6_port);
3137 } else {
3138 sock_in = (struct sockaddr_in *)&cm_route->addr.dst_addr;
3139		snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI4",
3140			 &sock_in->sin_addr.s_addr);
3141 conn->login_port = ntohs(sock_in->sin_port);
3142
3143 sock_in = (struct sockaddr_in *)&cm_route->addr.src_addr;
3144		snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI4",
3145			 &sock_in->sin_addr.s_addr);
3146 conn->local_port = ntohs(sock_in->sin_port);
3147 }
3148}
3149
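/*
 * accept_np callback: sleep on np_sem until a new isert_conn shows up
 * on np_accept_list (bailing out if the np thread is being reset or
 * after several empty wakeups), then bind the iscsi_conn and the
 * isert_conn together and fill in the login address information.
 */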
3150static int
3151isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
3152{
3153 struct isert_np *isert_np = (struct isert_np *)np->np_context;
3154 struct isert_conn *isert_conn;
3155 int max_accept = 0, ret;
3156
3157accept_wait:
Sagi Grimberg531b7bf2014-04-29 13:13:45 +03003158 ret = down_interruptible(&isert_np->np_sem);
Sagi Grimberg1acff632014-10-02 21:40:34 -07003159 if (ret || max_accept > 5)
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003160 return -ENODEV;
3161
3162 spin_lock_bh(&np->np_thread_lock);
Sagi Grimberge346ab32014-05-19 17:44:22 +03003163 if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003164 spin_unlock_bh(&np->np_thread_lock);
Rasmus Villemoes11378cd2015-02-06 01:09:05 +01003165 isert_dbg("np_thread_state %d\n",
Sagi Grimberge346ab32014-05-19 17:44:22 +03003166 np->np_thread_state);
3167		/*
3168 * No point in stalling here when np_thread
3169 * is in state RESET/SHUTDOWN/EXIT - bail
3170		 */
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003171 return -ENODEV;
3172 }
3173 spin_unlock_bh(&np->np_thread_lock);
3174
3175 mutex_lock(&isert_np->np_accept_mutex);
3176 if (list_empty(&isert_np->np_accept_list)) {
3177 mutex_unlock(&isert_np->np_accept_mutex);
3178 max_accept++;
3179 goto accept_wait;
3180 }
3181 isert_conn = list_first_entry(&isert_np->np_accept_list,
3182 struct isert_conn, conn_accept_node);
3183 list_del_init(&isert_conn->conn_accept_node);
3184 mutex_unlock(&isert_np->np_accept_mutex);
3185
3186 conn->context = isert_conn;
3187 isert_conn->conn = conn;
3188 max_accept = 0;
3189
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003190 isert_set_conn_info(np, conn, isert_conn);
3191
Sagi Grimberg24f412d2014-12-07 13:12:02 +02003192 isert_dbg("Processing isert_conn: %p\n", isert_conn);
Sagi Grimberg2371e5d2014-12-02 16:57:21 +02003193
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003194 return 0;
3195}
3196
3197static void
3198isert_free_np(struct iscsi_np *np)
3199{
3200 struct isert_np *isert_np = (struct isert_np *)np->np_context;
Sagi Grimberg268e6812014-12-02 16:57:36 +02003201 struct isert_conn *isert_conn, *n;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003202
Sagi Grimberg3b726ae2014-10-28 13:45:03 -07003203 if (isert_np->np_cm_id)
3204 rdma_destroy_id(isert_np->np_cm_id);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003205
Sagi Grimberg268e6812014-12-02 16:57:36 +02003206 /*
3207	 * FIXME: We don't have a good way to ensure that at this point
3208	 * there are no hanging connections that completed RDMA
3209	 * establishment but never started the iscsi login process.
3210	 * Work around this by cleaning up whatever piled up in
3211	 * np_accept_list.
3212 */
3213 mutex_lock(&isert_np->np_accept_mutex);
3214 if (!list_empty(&isert_np->np_accept_list)) {
Sagi Grimberg24f412d2014-12-07 13:12:02 +02003215 isert_info("Still have isert connections, cleaning up...\n");
Sagi Grimberg268e6812014-12-02 16:57:36 +02003216 list_for_each_entry_safe(isert_conn, n,
3217 &isert_np->np_accept_list,
3218 conn_accept_node) {
Sagi Grimberg24f412d2014-12-07 13:12:02 +02003219 isert_info("cleaning isert_conn %p state (%d)\n",
Sagi Grimberg268e6812014-12-02 16:57:36 +02003220 isert_conn, isert_conn->state);
3221 isert_connect_release(isert_conn);
3222 }
3223 }
3224 mutex_unlock(&isert_np->np_accept_mutex);
3225
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003226 np->np_context = NULL;
3227 kfree(isert_np);
3228}
3229
Sagi Grimbergb02efbfc2014-12-02 16:57:29 +02003230static void isert_release_work(struct work_struct *work)
3231{
3232 struct isert_conn *isert_conn = container_of(work,
3233 struct isert_conn,
3234 release_work);
3235
Sagi Grimberg24f412d2014-12-07 13:12:02 +02003236 isert_info("Starting release conn %p\n", isert_conn);
Sagi Grimbergb02efbfc2014-12-02 16:57:29 +02003237
3238 wait_for_completion(&isert_conn->conn_wait);
3239
3240 mutex_lock(&isert_conn->conn_mutex);
3241 isert_conn->state = ISER_CONN_DOWN;
3242 mutex_unlock(&isert_conn->conn_mutex);
3243
Sagi Grimberg24f412d2014-12-07 13:12:02 +02003244 isert_info("Destroying conn %p\n", isert_conn);
Sagi Grimbergb02efbfc2014-12-02 16:57:29 +02003245 isert_put_conn(isert_conn);
3246}
3247
Sagi Grimbergbdf20e72014-12-02 16:57:43 +02003248static void
Sagi Grimberg991bb762014-12-07 13:12:01 +02003249isert_wait4logout(struct isert_conn *isert_conn)
3250{
3251 struct iscsi_conn *conn = isert_conn->conn;
3252
Sagi Grimberg4c22e072014-12-07 13:12:03 +02003253 isert_info("conn %p\n", isert_conn);
3254
Sagi Grimberg991bb762014-12-07 13:12:01 +02003255 if (isert_conn->logout_posted) {
Sagi Grimberg24f412d2014-12-07 13:12:02 +02003256 isert_info("conn %p wait for conn_logout_comp\n", isert_conn);
Sagi Grimberg991bb762014-12-07 13:12:01 +02003257 wait_for_completion_timeout(&conn->conn_logout_comp,
3258 SECONDS_FOR_LOGOUT_COMP * HZ);
3259 }
3260}
3261
3262static void
Sagi Grimbergc7e160e2014-12-02 16:57:46 +02003263isert_wait4cmds(struct iscsi_conn *conn)
3264{
Sagi Grimberg4c22e072014-12-07 13:12:03 +02003265 isert_info("iscsi_conn %p\n", conn);
3266
Sagi Grimbergc7e160e2014-12-02 16:57:46 +02003267 if (conn->sess) {
3268 target_sess_cmd_list_set_waiting(conn->sess->se_sess);
3269 target_wait_for_sess_cmds(conn->sess->se_sess);
3270 }
3271}
3272
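/*
 * Drain flush errors on the QP: post the connection's beacon receive
 * work request (ISER_BEACON_WRID) and wait for conn_wait_comp_err,
 * which is completed once all flush errors have been consumed.
 */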
3273static void
Sagi Grimbergbdf20e72014-12-02 16:57:43 +02003274isert_wait4flush(struct isert_conn *isert_conn)
3275{
3276 struct ib_recv_wr *bad_wr;
3277
Sagi Grimberg4c22e072014-12-07 13:12:03 +02003278 isert_info("conn %p\n", isert_conn);
3279
Sagi Grimbergbdf20e72014-12-02 16:57:43 +02003280 init_completion(&isert_conn->conn_wait_comp_err);
3281 isert_conn->beacon.wr_id = ISER_BEACON_WRID;
3282 /* post an indication that all flush errors were consumed */
3283 if (ib_post_recv(isert_conn->conn_qp, &isert_conn->beacon, &bad_wr)) {
Sagi Grimberg24f412d2014-12-07 13:12:02 +02003284		isert_err("conn %p failed to post beacon\n", isert_conn);
Sagi Grimbergbdf20e72014-12-02 16:57:43 +02003285 return;
3286 }
3287
3288 wait_for_completion(&isert_conn->conn_wait_comp_err);
3289}
3290
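/*
 * iscsit_wait_conn callback: terminate the RDMA connection (unless it
 * never left ISER_CONN_INIT), then wait for outstanding se_cmds, flush
 * errors and, if a logout response was posted, its completion, before
 * handing the final teardown to isert_release_wq.
 */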
Nicholas Bellingerdefd8842014-02-03 12:54:39 -08003291static void isert_wait_conn(struct iscsi_conn *conn)
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003292{
3293 struct isert_conn *isert_conn = conn->context;
3294
Sagi Grimberg4c22e072014-12-07 13:12:03 +02003295 isert_info("Starting conn %p\n", isert_conn);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003296
Sagi Grimberg9d49f5e2014-05-19 17:44:23 +03003297 mutex_lock(&isert_conn->conn_mutex);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003298 /*
3299 * Only wait for conn_wait_comp_err if the isert_conn made it
3300	 * into full feature phase.
3301 */
Nicholas Bellingerb2cb9642013-07-03 03:05:37 -07003302 if (isert_conn->state == ISER_CONN_INIT) {
3303 mutex_unlock(&isert_conn->conn_mutex);
Nicholas Bellingerb2cb9642013-07-03 03:05:37 -07003304 return;
3305 }
Sagi Grimberg954f2372014-12-02 16:57:17 +02003306 isert_conn_terminate(isert_conn);
Nicholas Bellingerb2cb9642013-07-03 03:05:37 -07003307 mutex_unlock(&isert_conn->conn_mutex);
3308
Sagi Grimbergc7e160e2014-12-02 16:57:46 +02003309 isert_wait4cmds(conn);
Sagi Grimbergbdf20e72014-12-02 16:57:43 +02003310 isert_wait4flush(isert_conn);
Sagi Grimberg991bb762014-12-07 13:12:01 +02003311 isert_wait4logout(isert_conn);
Sagi Grimberg954f2372014-12-02 16:57:17 +02003312
Sagi Grimbergb02efbfc2014-12-02 16:57:29 +02003313 INIT_WORK(&isert_conn->release_work, isert_release_work);
3314 queue_work(isert_release_wq, &isert_conn->release_work);
Nicholas Bellingerdefd8842014-02-03 12:54:39 -08003315}
3316
3317static void isert_free_conn(struct iscsi_conn *conn)
3318{
3319 struct isert_conn *isert_conn = conn->context;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003320
3321 isert_put_conn(isert_conn);
3322}
3323
3324static struct iscsit_transport iser_target_transport = {
3325 .name = "IB/iSER",
3326 .transport_type = ISCSI_INFINIBAND,
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07003327 .priv_size = sizeof(struct isert_cmd),
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003328 .owner = THIS_MODULE,
3329 .iscsit_setup_np = isert_setup_np,
3330 .iscsit_accept_np = isert_accept_np,
3331 .iscsit_free_np = isert_free_np,
Nicholas Bellingerdefd8842014-02-03 12:54:39 -08003332 .iscsit_wait_conn = isert_wait_conn,
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003333 .iscsit_free_conn = isert_free_conn,
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003334 .iscsit_get_login_rx = isert_get_login_rx,
3335 .iscsit_put_login_tx = isert_put_login_tx,
3336 .iscsit_immediate_queue = isert_immediate_queue,
3337 .iscsit_response_queue = isert_response_queue,
3338 .iscsit_get_dataout = isert_get_dataout,
3339 .iscsit_queue_data_in = isert_put_datain,
3340 .iscsit_queue_status = isert_put_response,
Nicholas Bellinger131e6ab2014-03-22 14:55:56 -07003341 .iscsit_aborted_task = isert_aborted_task,
Nicholas Bellingere70beee2014-04-02 12:52:38 -07003342 .iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003343};
3344
3345static int __init isert_init(void)
3346{
3347 int ret;
3348
Sagi Grimberg631af552015-01-25 19:09:50 +02003349 isert_comp_wq = alloc_workqueue("isert_comp_wq",
3350 WQ_UNBOUND | WQ_HIGHPRI, 0);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003351 if (!isert_comp_wq) {
Sagi Grimberg24f412d2014-12-07 13:12:02 +02003352 isert_err("Unable to allocate isert_comp_wq\n");
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003353 ret = -ENOMEM;
Sagi Grimberg6f0fae32014-12-02 16:57:41 +02003354		return ret;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003355 }
3356
Sagi Grimbergb02efbfc2014-12-02 16:57:29 +02003357 isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
3358 WQ_UNBOUND_MAX_ACTIVE);
3359 if (!isert_release_wq) {
Sagi Grimberg24f412d2014-12-07 13:12:02 +02003360 isert_err("Unable to allocate isert_release_wq\n");
Sagi Grimbergb02efbfc2014-12-02 16:57:29 +02003361 ret = -ENOMEM;
3362 goto destroy_comp_wq;
3363 }
3364
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003365 iscsit_register_transport(&iser_target_transport);
Sagi Grimberg24f412d2014-12-07 13:12:02 +02003366 isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n");
Sagi Grimbergb02efbfc2014-12-02 16:57:29 +02003367
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003368 return 0;
3369
Sagi Grimbergb02efbfc2014-12-02 16:57:29 +02003370destroy_comp_wq:
3371 destroy_workqueue(isert_comp_wq);
Sagi Grimberg6f0fae32014-12-02 16:57:41 +02003372
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003373 return ret;
3374}
3375
3376static void __exit isert_exit(void)
3377{
Sagi Grimbergf5ebec92014-05-19 17:44:25 +03003378 flush_scheduled_work();
Sagi Grimbergb02efbfc2014-12-02 16:57:29 +02003379 destroy_workqueue(isert_release_wq);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003380 destroy_workqueue(isert_comp_wq);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003381 iscsit_unregister_transport(&iser_target_transport);
Sagi Grimberg4c22e072014-12-07 13:12:03 +02003382 isert_info("iSER_TARGET[0] - Released iser_target_transport\n");
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003383}
3384
3385MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
3386MODULE_VERSION("0.1");
3387MODULE_AUTHOR("nab@Linux-iSCSI.org");
3388MODULE_LICENSE("GPL");
3389
3390module_init(isert_init);
3391module_exit(isert_exit);