/*******************************************************************************
 * This file contains iSCSI extensions for RDMA (iSER) Verbs
 *
 * (c) Copyright 2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 ****************************************************************************/

#include <linux/string.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/llist.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>
#include <linux/semaphore.h>

#include "isert_proto.h"
#include "ib_isert.h"

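/*
 * Worst-case completion queue depths: each connection may post up to
 * ISERT_QP_MAX_RECV_DTOS receives and ISERT_QP_MAX_REQ_DTOS sends, and
 * up to ISERT_MAX_CONN connections can share a single CQ.
 */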
#define ISERT_MAX_CONN		8
#define ISER_MAX_RX_CQ_LEN	(ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
#define ISER_MAX_TX_CQ_LEN	(ISERT_QP_MAX_REQ_DTOS * ISERT_MAX_CONN)

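/*
 * Registry of isert_device instances, one per underlying IB device
 * (matched by node GUID), refcounted and shared by all connections.
 * Protected by device_list_mutex.
 */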
static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(device_list);
static struct workqueue_struct *isert_rx_wq;
static struct workqueue_struct *isert_comp_wq;

static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr);
static void
isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr);
static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
static int
isert_rdma_post_recvl(struct isert_conn *isert_conn);
static int
isert_rdma_accept(struct isert_conn *isert_conn);
struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);

static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
	struct isert_conn *isert_conn = (struct isert_conn *)context;

	pr_err("isert_qp_event_callback event: %d\n", e->event);
	switch (e->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST);
		break;
	case IB_EVENT_QP_LAST_WQE_REACHED:
		pr_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED:\n");
		break;
	default:
		break;
	}
}

static int
isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
{
	int ret;

	ret = ib_query_device(ib_dev, devattr);
	if (ret) {
		pr_err("ib_query_device() failed: %d\n", ret);
		return ret;
	}
	pr_debug("devattr->max_sge: %d\n", devattr->max_sge);
	pr_debug("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);

	return 0;
}

static int
isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id,
		    u8 protection)
{
	struct isert_device *device = isert_conn->conn_device;
	struct ib_qp_init_attr attr;
	int ret, index, min_index = 0;

	mutex_lock(&device_list_mutex);
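	/* Spread QPs across completion vectors: pick the least loaded CQ. */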
	for (index = 0; index < device->cqs_used; index++)
		if (device->cq_active_qps[index] <
		    device->cq_active_qps[min_index])
			min_index = index;
	device->cq_active_qps[min_index]++;
	pr_debug("isert_conn_setup_qp: Using min_index: %d\n", min_index);
	mutex_unlock(&device_list_mutex);

	memset(&attr, 0, sizeof(struct ib_qp_init_attr));
	attr.event_handler = isert_qp_event_callback;
	attr.qp_context = isert_conn;
	attr.send_cq = device->dev_tx_cq[min_index];
	attr.recv_cq = device->dev_rx_cq[min_index];
	attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
	attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
	/*
	 * FIXME: Use devattr.max_sge - 2 for max_send_sge as
	 * work-around for RDMA_READs with ConnectX-2.
	 *
	 * Also, still make sure to have at least two SGEs for
	 * outgoing control PDU responses.
	 */
	attr.cap.max_send_sge = max(2, device->dev_attr.max_sge - 2);
	isert_conn->max_sge = attr.cap.max_send_sge;

	attr.cap.max_recv_sge = 1;
	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	attr.qp_type = IB_QPT_RC;
	if (protection)
		attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;

	pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
		 cma_id->device);
	pr_debug("isert_conn_setup_qp conn_pd->device: %p\n",
		 isert_conn->conn_pd->device);

	ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
	if (ret) {
		pr_err("rdma_create_qp failed for cma_id %d\n", ret);
		goto err;
	}
	isert_conn->conn_qp = cma_id->qp;
	pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");

	return 0;
err:
	mutex_lock(&device_list_mutex);
	device->cq_active_qps[min_index]--;
	mutex_unlock(&device_list_mutex);

	return ret;
}

static void
isert_cq_event_callback(struct ib_event *e, void *context)
{
	pr_debug("isert_cq_event_callback event: %d\n", e->event);
}

static int
isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iser_rx_desc *rx_desc;
	struct ib_sge *rx_sg;
	u64 dma_addr;
	int i, j;

	isert_conn->conn_rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
				sizeof(struct iser_rx_desc), GFP_KERNEL);
	if (!isert_conn->conn_rx_descs)
		goto fail;

	rx_desc = isert_conn->conn_rx_descs;

	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
					ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ib_dev, dma_addr))
			goto dma_map_fail;

		rx_desc->dma_addr = dma_addr;

		rx_sg = &rx_desc->rx_sg;
		rx_sg->addr = rx_desc->dma_addr;
		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
		rx_sg->lkey = isert_conn->conn_mr->lkey;
	}

	isert_conn->conn_rx_desc_head = 0;
	return 0;

dma_map_fail:
	rx_desc = isert_conn->conn_rx_descs;
	for (j = 0; j < i; j++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}
	kfree(isert_conn->conn_rx_descs);
	isert_conn->conn_rx_descs = NULL;
fail:
	return -ENOMEM;
}

static void
isert_free_rx_descriptors(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iser_rx_desc *rx_desc;
	int i;

	if (!isert_conn->conn_rx_descs)
		return;

	rx_desc = isert_conn->conn_rx_descs;
	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}

	kfree(isert_conn->conn_rx_descs);
	isert_conn->conn_rx_descs = NULL;
}

static void isert_cq_tx_work(struct work_struct *);
static void isert_cq_tx_callback(struct ib_cq *, void *);
static void isert_cq_rx_work(struct work_struct *);
static void isert_cq_rx_callback(struct ib_cq *, void *);

static int
isert_create_device_ib_res(struct isert_device *device)
{
	struct ib_device *ib_dev = device->ib_device;
	struct isert_cq_desc *cq_desc;
	struct ib_device_attr *dev_attr;
	int ret = 0, i, j;
	int max_rx_cqe, max_tx_cqe;

	dev_attr = &device->dev_attr;
	ret = isert_query_device(ib_dev, dev_attr);
	if (ret)
		return ret;

	max_rx_cqe = min(ISER_MAX_RX_CQ_LEN, dev_attr->max_cqe);
	max_tx_cqe = min(ISER_MAX_TX_CQ_LEN, dev_attr->max_cqe);

	/* assign function handlers */
	if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
	    dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) {
		device->use_fastreg = 1;
		device->reg_rdma_mem = isert_reg_rdma;
		device->unreg_rdma_mem = isert_unreg_rdma;
	} else {
		device->use_fastreg = 0;
		device->reg_rdma_mem = isert_map_rdma;
		device->unreg_rdma_mem = isert_unmap_cmd;
	}

	/* Check signature cap */
	device->pi_capable = dev_attr->device_cap_flags &
			     IB_DEVICE_SIGNATURE_HANDOVER ? true : false;

	device->cqs_used = min_t(int, num_online_cpus(),
				 device->ib_device->num_comp_vectors);
	device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
	pr_debug("Using %d CQs, device %s supports %d vectors, "
		 "fastreg %d pi_capable %d\n",
		 device->cqs_used, device->ib_device->name,
		 device->ib_device->num_comp_vectors, device->use_fastreg,
		 device->pi_capable);
	device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
				  device->cqs_used, GFP_KERNEL);
	if (!device->cq_desc) {
		pr_err("Unable to allocate device->cq_desc\n");
		return -ENOMEM;
	}
	cq_desc = device->cq_desc;

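	/*
	 * Create one RX and one TX completion queue per vector in use;
	 * completions are handled by the cq_rx_work/cq_tx_work items
	 * initialized below.
	 */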
	for (i = 0; i < device->cqs_used; i++) {
		cq_desc[i].device = device;
		cq_desc[i].cq_index = i;

		INIT_WORK(&cq_desc[i].cq_rx_work, isert_cq_rx_work);
		device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
						isert_cq_rx_callback,
						isert_cq_event_callback,
						(void *)&cq_desc[i],
						max_rx_cqe, i);
		if (IS_ERR(device->dev_rx_cq[i])) {
			ret = PTR_ERR(device->dev_rx_cq[i]);
			device->dev_rx_cq[i] = NULL;
			goto out_cq;
		}

		INIT_WORK(&cq_desc[i].cq_tx_work, isert_cq_tx_work);
		device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
						isert_cq_tx_callback,
						isert_cq_event_callback,
						(void *)&cq_desc[i],
						max_tx_cqe, i);
		if (IS_ERR(device->dev_tx_cq[i])) {
			ret = PTR_ERR(device->dev_tx_cq[i]);
			device->dev_tx_cq[i] = NULL;
			goto out_cq;
		}

		ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
		if (ret)
			goto out_cq;

		ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
		if (ret)
			goto out_cq;
	}

	return 0;

out_cq:
	for (j = 0; j < i; j++) {
		cq_desc = &device->cq_desc[j];

		if (device->dev_rx_cq[j]) {
			cancel_work_sync(&cq_desc->cq_rx_work);
			ib_destroy_cq(device->dev_rx_cq[j]);
		}
		if (device->dev_tx_cq[j]) {
			cancel_work_sync(&cq_desc->cq_tx_work);
			ib_destroy_cq(device->dev_tx_cq[j]);
		}
	}
	kfree(device->cq_desc);

	return ret;
}

static void
isert_free_device_ib_res(struct isert_device *device)
{
	struct isert_cq_desc *cq_desc;
	int i;

	for (i = 0; i < device->cqs_used; i++) {
		cq_desc = &device->cq_desc[i];

		cancel_work_sync(&cq_desc->cq_rx_work);
		cancel_work_sync(&cq_desc->cq_tx_work);
		ib_destroy_cq(device->dev_rx_cq[i]);
		ib_destroy_cq(device->dev_tx_cq[i]);
		device->dev_rx_cq[i] = NULL;
		device->dev_tx_cq[i] = NULL;
	}

	kfree(device->cq_desc);
}

static void
isert_device_try_release(struct isert_device *device)
{
	mutex_lock(&device_list_mutex);
	device->refcount--;
	if (!device->refcount) {
		isert_free_device_ib_res(device);
		list_del(&device->dev_node);
		kfree(device);
	}
	mutex_unlock(&device_list_mutex);
}

static struct isert_device *
isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
{
	struct isert_device *device;
	int ret;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(device, &device_list, dev_node) {
		if (device->ib_device->node_guid == cma_id->device->node_guid) {
			device->refcount++;
			mutex_unlock(&device_list_mutex);
			return device;
		}
	}

	device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
	if (!device) {
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&device->dev_node);

	device->ib_device = cma_id->device;
	ret = isert_create_device_ib_res(device);
	if (ret) {
		kfree(device);
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(ret);
	}

	device->refcount++;
	list_add_tail(&device->dev_node, &device_list);
	mutex_unlock(&device_list_mutex);

	return device;
}

static void
isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
{
	struct fast_reg_descriptor *fr_desc, *tmp;
	int i = 0;

	if (list_empty(&isert_conn->conn_fr_pool))
		return;

	pr_debug("Freeing conn %p fastreg pool\n", isert_conn);

	list_for_each_entry_safe(fr_desc, tmp,
				 &isert_conn->conn_fr_pool, list) {
		list_del(&fr_desc->list);
		ib_free_fast_reg_page_list(fr_desc->data_frpl);
		ib_dereg_mr(fr_desc->data_mr);
		if (fr_desc->pi_ctx) {
			ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl);
			ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
			ib_destroy_mr(fr_desc->pi_ctx->sig_mr);
			kfree(fr_desc->pi_ctx);
		}
		kfree(fr_desc);
		++i;
	}

	if (i < isert_conn->conn_fr_pool_size)
		pr_warn("Pool still has %d regions registered\n",
			isert_conn->conn_fr_pool_size - i);
}

static int
isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
		     struct fast_reg_descriptor *fr_desc, u8 protection)
{
	int ret;

	fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
							 ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(fr_desc->data_frpl)) {
		pr_err("Failed to allocate data frpl err=%ld\n",
		       PTR_ERR(fr_desc->data_frpl));
		return PTR_ERR(fr_desc->data_frpl);
	}

	fr_desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(fr_desc->data_mr)) {
		pr_err("Failed to allocate data frmr err=%ld\n",
		       PTR_ERR(fr_desc->data_mr));
		ret = PTR_ERR(fr_desc->data_mr);
		goto err_data_frpl;
	}
	pr_debug("Create fr_desc %p page_list %p\n",
		 fr_desc, fr_desc->data_frpl->page_list);
	fr_desc->ind |= ISERT_DATA_KEY_VALID;

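	/*
	 * For T10-PI support, allocate a second fast-reg page list and MR
	 * for the protection scatterlist, plus a signature-enabled MR that
	 * links the data and protection domains.
	 */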
	if (protection) {
		struct ib_mr_init_attr mr_init_attr = {0};
		struct pi_context *pi_ctx;

		fr_desc->pi_ctx = kzalloc(sizeof(*fr_desc->pi_ctx), GFP_KERNEL);
		if (!fr_desc->pi_ctx) {
			pr_err("Failed to allocate pi context\n");
			ret = -ENOMEM;
			goto err_data_mr;
		}
		pi_ctx = fr_desc->pi_ctx;

		pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device,
						ISCSI_ISER_SG_TABLESIZE);
		if (IS_ERR(pi_ctx->prot_frpl)) {
			pr_err("Failed to allocate prot frpl err=%ld\n",
			       PTR_ERR(pi_ctx->prot_frpl));
			ret = PTR_ERR(pi_ctx->prot_frpl);
			goto err_pi_ctx;
		}

		pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
		if (IS_ERR(pi_ctx->prot_mr)) {
			pr_err("Failed to allocate prot frmr err=%ld\n",
			       PTR_ERR(pi_ctx->prot_mr));
			ret = PTR_ERR(pi_ctx->prot_mr);
			goto err_prot_frpl;
		}
		fr_desc->ind |= ISERT_PROT_KEY_VALID;

		mr_init_attr.max_reg_descriptors = 2;
		mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
		pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
		if (IS_ERR(pi_ctx->sig_mr)) {
			pr_err("Failed to allocate signature enabled mr err=%ld\n",
			       PTR_ERR(pi_ctx->sig_mr));
			ret = PTR_ERR(pi_ctx->sig_mr);
			goto err_prot_mr;
		}
		fr_desc->ind |= ISERT_SIG_KEY_VALID;
	}
	fr_desc->ind &= ~ISERT_PROTECTED;

	return 0;
err_prot_mr:
	ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
err_prot_frpl:
	ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl);
err_pi_ctx:
	kfree(fr_desc->pi_ctx);
err_data_mr:
	ib_dereg_mr(fr_desc->data_mr);
err_data_frpl:
	ib_free_fast_reg_page_list(fr_desc->data_frpl);

	return ret;
}

static int
isert_conn_create_fastreg_pool(struct isert_conn *isert_conn, u8 pi_support)
{
	struct fast_reg_descriptor *fr_desc;
	struct isert_device *device = isert_conn->conn_device;
	struct se_session *se_sess = isert_conn->conn->sess->se_sess;
	struct se_node_acl *se_nacl = se_sess->se_node_acl;
	int i, ret, tag_num;
	/*
	 * Setup the number of FRMRs based upon the number of tags
	 * available to session in iscsi_target_locate_portal().
	 */
	tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth);
	tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;

	isert_conn->conn_fr_pool_size = 0;
	for (i = 0; i < tag_num; i++) {
		fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
		if (!fr_desc) {
			pr_err("Failed to allocate fast_reg descriptor\n");
			ret = -ENOMEM;
			goto err;
		}

		ret = isert_create_fr_desc(device->ib_device,
					   isert_conn->conn_pd, fr_desc,
					   pi_support);
		if (ret) {
			pr_err("Failed to create fastreg descriptor err=%d\n",
			       ret);
			kfree(fr_desc);
			goto err;
		}

		list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
		isert_conn->conn_fr_pool_size++;
	}

	pr_debug("Creating conn %p fastreg pool size=%d\n",
		 isert_conn, isert_conn->conn_fr_pool_size);

	return 0;

err:
	isert_conn_free_fastreg_pool(isert_conn);
	return ret;
}

static int
isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	struct isert_np *isert_np = cma_id->context;
	struct iscsi_np *np = isert_np->np;
	struct isert_conn *isert_conn;
	struct isert_device *device;
	struct ib_device *ib_dev = cma_id->device;
	int ret = 0;
	u8 pi_support;

	spin_lock_bh(&np->np_thread_lock);
	if (!np->enabled) {
		spin_unlock_bh(&np->np_thread_lock);
		pr_debug("iscsi_np is not enabled, reject connect request\n");
		return rdma_reject(cma_id, NULL, 0);
	}
	spin_unlock_bh(&np->np_thread_lock);

	pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
		 cma_id, cma_id->context);

	isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
	if (!isert_conn) {
		pr_err("Unable to allocate isert_conn\n");
		return -ENOMEM;
	}
	isert_conn->state = ISER_CONN_INIT;
	INIT_LIST_HEAD(&isert_conn->conn_accept_node);
	init_completion(&isert_conn->conn_login_comp);
	init_completion(&isert_conn->login_req_comp);
	init_completion(&isert_conn->conn_wait);
	init_completion(&isert_conn->conn_wait_comp_err);
	kref_init(&isert_conn->conn_kref);
	mutex_init(&isert_conn->conn_mutex);
	spin_lock_init(&isert_conn->conn_lock);
	INIT_LIST_HEAD(&isert_conn->conn_fr_pool);

	isert_conn->conn_cm_id = cma_id;

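	/*
	 * Single allocation carved into an RX login request buffer
	 * followed by a TX login response buffer.
	 */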
	isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
					ISER_RX_LOGIN_SIZE, GFP_KERNEL);
	if (!isert_conn->login_buf) {
		pr_err("Unable to allocate isert_conn->login_buf\n");
		ret = -ENOMEM;
		goto out;
	}

	isert_conn->login_req_buf = isert_conn->login_buf;
	isert_conn->login_rsp_buf = isert_conn->login_buf +
				    ISCSI_DEF_MAX_RECV_SEG_LEN;
	pr_debug("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
		 isert_conn->login_buf, isert_conn->login_req_buf,
		 isert_conn->login_rsp_buf);

	isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
				(void *)isert_conn->login_req_buf,
				ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);

	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
	if (ret) {
		pr_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
		       ret);
		isert_conn->login_req_dma = 0;
		goto out_login_buf;
	}

	isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
					(void *)isert_conn->login_rsp_buf,
					ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);

	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
	if (ret) {
		pr_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
		       ret);
		isert_conn->login_rsp_dma = 0;
		goto out_req_dma_map;
	}

	device = isert_device_find_by_ib_dev(cma_id);
	if (IS_ERR(device)) {
		ret = PTR_ERR(device);
		goto out_rsp_dma_map;
	}

	/* Set max inflight RDMA READ requests */
	isert_conn->initiator_depth = min_t(u8,
				event->param.conn.initiator_depth,
				device->dev_attr.max_qp_init_rd_atom);
	pr_debug("Using initiator_depth: %u\n", isert_conn->initiator_depth);

	isert_conn->conn_device = device;
	isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device);
	if (IS_ERR(isert_conn->conn_pd)) {
		ret = PTR_ERR(isert_conn->conn_pd);
		pr_err("ib_alloc_pd failed for conn %p: ret=%d\n",
		       isert_conn, ret);
		goto out_pd;
	}

	isert_conn->conn_mr = ib_get_dma_mr(isert_conn->conn_pd,
					    IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(isert_conn->conn_mr)) {
		ret = PTR_ERR(isert_conn->conn_mr);
		pr_err("ib_get_dma_mr failed for conn %p: ret=%d\n",
		       isert_conn, ret);
		goto out_mr;
	}

	pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi;
	if (pi_support && !device->pi_capable) {
		pr_err("Protection information requested but not supported, "
		       "rejecting connect request\n");
		ret = rdma_reject(cma_id, NULL, 0);
		goto out_mr;
	}

	ret = isert_conn_setup_qp(isert_conn, cma_id, pi_support);
	if (ret)
		goto out_conn_dev;

	ret = isert_rdma_post_recvl(isert_conn);
	if (ret)
		goto out_conn_dev;

	ret = isert_rdma_accept(isert_conn);
	if (ret)
		goto out_conn_dev;

	mutex_lock(&isert_np->np_accept_mutex);
	list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
	mutex_unlock(&isert_np->np_accept_mutex);

	pr_debug("isert_connect_request() up np_sem np: %p\n", np);
	up(&isert_np->np_sem);
	return 0;

out_conn_dev:
	ib_dereg_mr(isert_conn->conn_mr);
out_mr:
	ib_dealloc_pd(isert_conn->conn_pd);
out_pd:
	isert_device_try_release(device);
out_rsp_dma_map:
	ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
			    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
out_req_dma_map:
	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
			    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
out_login_buf:
	kfree(isert_conn->login_buf);
out:
	kfree(isert_conn);
	rdma_reject(cma_id, NULL, 0);
	return ret;
}

static void
isert_connect_release(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct isert_device *device = isert_conn->conn_device;
	int cq_index;

	pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");

	if (device && device->use_fastreg)
		isert_conn_free_fastreg_pool(isert_conn);

	isert_free_rx_descriptors(isert_conn);
	rdma_destroy_id(isert_conn->conn_cm_id);

	if (isert_conn->conn_qp) {
		cq_index = ((struct isert_cq_desc *)
			isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
		pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
		mutex_lock(&device_list_mutex);
		isert_conn->conn_device->cq_active_qps[cq_index]--;
		mutex_unlock(&device_list_mutex);

		ib_destroy_qp(isert_conn->conn_qp);
	}

	ib_dereg_mr(isert_conn->conn_mr);
	ib_dealloc_pd(isert_conn->conn_pd);

	if (isert_conn->login_buf) {
		ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
				    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
		ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
				    ISCSI_DEF_MAX_RECV_SEG_LEN,
				    DMA_FROM_DEVICE);
		kfree(isert_conn->login_buf);
	}
	kfree(isert_conn);

	if (device)
		isert_device_try_release(device);

	pr_debug("Leaving isert_connect_release >>>>>>>>>>>>\n");
}

static void
isert_connected_handler(struct rdma_cm_id *cma_id)
{
	struct isert_conn *isert_conn = cma_id->qp->qp_context;

	pr_info("conn %p\n", isert_conn);

	if (!kref_get_unless_zero(&isert_conn->conn_kref)) {
		pr_warn("conn %p connect_release is running\n", isert_conn);
		return;
	}

	mutex_lock(&isert_conn->conn_mutex);
	if (isert_conn->state != ISER_CONN_FULL_FEATURE)
		isert_conn->state = ISER_CONN_UP;
	mutex_unlock(&isert_conn->conn_mutex);
}

static void
isert_release_conn_kref(struct kref *kref)
{
	struct isert_conn *isert_conn = container_of(kref,
				struct isert_conn, conn_kref);

	pr_debug("Calling isert_connect_release for final kref %s/%d\n",
		 current->comm, current->pid);

	isert_connect_release(isert_conn);
}

static void
isert_put_conn(struct isert_conn *isert_conn)
{
	kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
}

/**
 * isert_conn_terminate() - Initiate connection termination
 * @isert_conn: isert connection struct
 *
 * Notes:
 * In case the connection state is FULL_FEATURE, move state
 * to TERMINATING and start teardown sequence (rdma_disconnect).
 * In case the connection state is UP, complete flush as well.
 *
 * This routine must be called with conn_mutex held. Thus it is
 * safe to call multiple times.
 */
static void
isert_conn_terminate(struct isert_conn *isert_conn)
{
	int err;

	switch (isert_conn->state) {
	case ISER_CONN_TERMINATING:
		break;
	case ISER_CONN_UP:
		/*
		 * No flush completions will occur as we didn't
		 * get to ISER_CONN_FULL_FEATURE yet, complete
		 * to allow teardown progress.
		 */
		complete(&isert_conn->conn_wait_comp_err);
	case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
		pr_info("Terminating conn %p state %d\n",
			isert_conn, isert_conn->state);
		isert_conn->state = ISER_CONN_TERMINATING;
		err = rdma_disconnect(isert_conn->conn_cm_id);
		if (err)
			pr_warn("Failed rdma_disconnect isert_conn %p\n",
				isert_conn);
		break;
	default:
		pr_warn("conn %p terminating in state %d\n",
			isert_conn, isert_conn->state);
	}
}

static int
isert_np_cma_handler(struct isert_np *isert_np,
		     enum rdma_cm_event_type event)
{
	pr_debug("isert np %p, handling event %d\n", isert_np, event);

	switch (event) {
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		isert_np->np_cm_id = NULL;
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:
		isert_np->np_cm_id = isert_setup_id(isert_np);
		if (IS_ERR(isert_np->np_cm_id)) {
			pr_err("isert np %p setup id failed: %ld\n",
			       isert_np, PTR_ERR(isert_np->np_cm_id));
			isert_np->np_cm_id = NULL;
		}
		break;
	default:
		pr_err("isert np %p Unexpected event %d\n",
		       isert_np, event);
	}

	return -1;
}

static int
isert_disconnected_handler(struct rdma_cm_id *cma_id,
			   enum rdma_cm_event_type event)
{
	struct isert_np *isert_np = cma_id->context;
	struct isert_conn *isert_conn;

	if (isert_np->np_cm_id == cma_id)
		return isert_np_cma_handler(cma_id->context, event);

	isert_conn = cma_id->qp->qp_context;

	mutex_lock(&isert_conn->conn_mutex);
	isert_conn_terminate(isert_conn);
	mutex_unlock(&isert_conn->conn_mutex);

	pr_info("conn %p completing conn_wait\n", isert_conn);
	complete(&isert_conn->conn_wait);

	return 0;
}

static void
isert_connect_error(struct rdma_cm_id *cma_id)
{
	struct isert_conn *isert_conn = cma_id->qp->qp_context;

	isert_put_conn(isert_conn);
}

static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	int ret = 0;

	pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
		 event->event, event->status, cma_id->context, cma_id);

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		ret = isert_connect_request(cma_id, event);
		if (ret)
			pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
			       event->event, ret);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		isert_connected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:    /* FALLTHRU */
	case RDMA_CM_EVENT_DISCONNECTED:   /* FALLTHRU */
	case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
		ret = isert_disconnected_handler(cma_id, event->event);
		break;
	case RDMA_CM_EVENT_REJECTED:       /* FALLTHRU */
	case RDMA_CM_EVENT_UNREACHABLE:    /* FALLTHRU */
	case RDMA_CM_EVENT_CONNECT_ERROR:
		isert_connect_error(cma_id);
		break;
	default:
		pr_err("Unhandled RDMA CMA event: %d\n", event->event);
		break;
	}

	return ret;
}

static int
isert_post_recv(struct isert_conn *isert_conn, u32 count)
{
	struct ib_recv_wr *rx_wr, *rx_wr_failed;
	int i, ret;
	unsigned int rx_head = isert_conn->conn_rx_desc_head;
	struct iser_rx_desc *rx_desc;

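	/*
	 * Build a chained list of recv WRs over the circular descriptor
	 * ring; the index mask below requires ISERT_QP_MAX_RECV_DTOS to
	 * be a power of two.
	 */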
	for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) {
		rx_desc = &isert_conn->conn_rx_descs[rx_head];
		rx_wr->wr_id = (unsigned long)rx_desc;
		rx_wr->sg_list = &rx_desc->rx_sg;
		rx_wr->num_sge = 1;
		rx_wr->next = rx_wr + 1;
		rx_head = (rx_head + 1) & (ISERT_QP_MAX_RECV_DTOS - 1);
	}

	rx_wr--;
	rx_wr->next = NULL; /* mark end of work requests list */

	isert_conn->post_recv_buf_count += count;
	ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr,
			   &rx_wr_failed);
	if (ret) {
		pr_err("ib_post_recv() failed with ret: %d\n", ret);
		isert_conn->post_recv_buf_count -= count;
	} else {
		pr_debug("isert_post_recv(): Posted %d RX buffers\n", count);
		isert_conn->conn_rx_desc_head = rx_head;
	}
	return ret;
}

static int
isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct ib_send_wr send_wr, *send_wr_failed;
	int ret;

	ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
				      ISER_HEADERS_LEN, DMA_TO_DEVICE);

	send_wr.next = NULL;
	send_wr.wr_id = (unsigned long)tx_desc;
	send_wr.sg_list = tx_desc->tx_sg;
	send_wr.num_sge = tx_desc->num_sge;
	send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	atomic_inc(&isert_conn->post_send_buf_count);

	ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed);
	if (ret) {
		pr_err("ib_post_send() failed, ret: %d\n", ret);
		atomic_dec(&isert_conn->post_send_buf_count);
	}

	return ret;
}

static void
isert_create_send_desc(struct isert_conn *isert_conn,
		       struct isert_cmd *isert_cmd,
		       struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;

	ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
				   ISER_HEADERS_LEN, DMA_TO_DEVICE);

	memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
	tx_desc->iser_header.flags = ISER_VER;

	tx_desc->num_sge = 1;
	tx_desc->isert_cmd = isert_cmd;

	if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) {
		tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
		pr_debug("tx_desc %p lkey mismatch, fixing\n", tx_desc);
	}
}

static int
isert_init_tx_hdrs(struct isert_conn *isert_conn,
		   struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	u64 dma_addr;

	dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
				     ISER_HEADERS_LEN, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ib_dev, dma_addr)) {
		pr_err("ib_dma_mapping_error() failed\n");
		return -ENOMEM;
	}

	tx_desc->dma_addr = dma_addr;
	tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
	tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
	tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;

	pr_debug("isert_init_tx_hdrs: Setup tx_sg[0].addr: 0x%llx length: %u"
		 " lkey: 0x%08x\n", tx_desc->tx_sg[0].addr,
		 tx_desc->tx_sg[0].length, tx_desc->tx_sg[0].lkey);

	return 0;
}

static void
isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		   struct ib_send_wr *send_wr, bool coalesce)
{
	struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;

	isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
	send_wr->opcode = IB_WR_SEND;
	send_wr->sg_list = &tx_desc->tx_sg[0];
	send_wr->num_sge = isert_cmd->tx_desc.num_sge;
	/*
	 * Coalesce send completion interrupts by only setting IB_SEND_SIGNALED
	 * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls.
	 */
	mutex_lock(&isert_conn->conn_mutex);
	if (coalesce && isert_conn->state == ISER_CONN_FULL_FEATURE &&
	    ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) {
		tx_desc->llnode_active = true;
		llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist);
		mutex_unlock(&isert_conn->conn_mutex);
		return;
	}
	isert_conn->conn_comp_batch = 0;
	tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist);
	mutex_unlock(&isert_conn->conn_mutex);

	send_wr->send_flags = IB_SEND_SIGNALED;
}

static int
isert_rdma_post_recvl(struct isert_conn *isert_conn)
{
	struct ib_recv_wr rx_wr, *rx_wr_fail;
	struct ib_sge sge;
	int ret;

	memset(&sge, 0, sizeof(struct ib_sge));
	sge.addr = isert_conn->login_req_dma;
	sge.length = ISER_RX_LOGIN_SIZE;
	sge.lkey = isert_conn->conn_mr->lkey;

	pr_debug("Setup sge: addr: %llx length: %d 0x%08x\n",
		 sge.addr, sge.length, sge.lkey);

	memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
	rx_wr.wr_id = (unsigned long)isert_conn->login_req_buf;
	rx_wr.sg_list = &sge;
	rx_wr.num_sge = 1;

	isert_conn->post_recv_buf_count++;
	ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail);
	if (ret) {
		pr_err("ib_post_recv() failed: %d\n", ret);
		isert_conn->post_recv_buf_count--;
	}

	pr_debug("ib_post_recv(): returned success >>>>>>>>>>>>>>>>>>>>>>>>\n");
	return ret;
}

static int
isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
		   u32 length)
{
	struct isert_conn *isert_conn = conn->context;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iser_tx_desc *tx_desc = &isert_conn->conn_login_tx_desc;
	int ret;

	isert_create_send_desc(isert_conn, NULL, tx_desc);

	memcpy(&tx_desc->iscsi_header, &login->rsp[0],
	       sizeof(struct iscsi_hdr));

	isert_init_tx_hdrs(isert_conn, tx_desc);

	if (length > 0) {
		struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];

		ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
					   length, DMA_TO_DEVICE);

		memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);

		ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
					      length, DMA_TO_DEVICE);

		tx_dsg->addr = isert_conn->login_rsp_dma;
		tx_dsg->length = length;
		tx_dsg->lkey = isert_conn->conn_mr->lkey;
		tx_desc->num_sge = 2;
	}
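	/*
	 * On the final successful login response, stand up the full RX
	 * descriptor ring and move to FULL_FEATURE before posting the
	 * response; otherwise repost the single login receive buffer.
	 */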
	if (!login->login_failed) {
		if (login->login_complete) {
			if (!conn->sess->sess_ops->SessionType &&
			    isert_conn->conn_device->use_fastreg) {
				/* Normal Session and fastreg is used */
				u8 pi_support = login->np->tpg_np->tpg->tpg_attrib.t10_pi;

				ret = isert_conn_create_fastreg_pool(isert_conn,
								     pi_support);
				if (ret) {
					pr_err("Conn: %p failed to create"
					       " fastreg pool\n", isert_conn);
					return ret;
				}
			}

			ret = isert_alloc_rx_descriptors(isert_conn);
			if (ret)
				return ret;

			ret = isert_post_recv(isert_conn, ISERT_MIN_POSTED_RX);
			if (ret)
				return ret;

			/* Now we are in FULL_FEATURE phase */
			mutex_lock(&isert_conn->conn_mutex);
			isert_conn->state = ISER_CONN_FULL_FEATURE;
			mutex_unlock(&isert_conn->conn_mutex);
			goto post_send;
		}

		ret = isert_rdma_post_recvl(isert_conn);
		if (ret)
			return ret;
	}
post_send:
	ret = isert_post_send(isert_conn, tx_desc);
	if (ret)
		return ret;

	return 0;
}

static void
isert_rx_login_req(struct isert_conn *isert_conn)
{
	struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf;
	int rx_buflen = isert_conn->login_req_len;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_login *login = conn->conn_login;
	int size;

	pr_info("conn %p\n", isert_conn);

	WARN_ON_ONCE(!login);

	if (login->first_request) {
		struct iscsi_login_req *login_req =
			(struct iscsi_login_req *)&rx_desc->iscsi_header;
		/*
		 * Setup the initial iscsi_login values from the leading
		 * login request PDU.
		 */
		login->leading_connection = (!login_req->tsih) ? 1 : 0;
		login->current_stage =
			(login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
			 >> 2;
		login->version_min = login_req->min_version;
		login->version_max = login_req->max_version;
		memcpy(login->isid, login_req->isid, 6);
		login->cmd_sn = be32_to_cpu(login_req->cmdsn);
		login->init_task_tag = login_req->itt;
		login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
		login->cid = be16_to_cpu(login_req->cid);
		login->tsih = be16_to_cpu(login_req->tsih);
	}

	memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);

	size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
	pr_debug("Using login payload size: %d, rx_buflen: %d MAX_KEY_VALUE_PAIRS: %d\n",
		 size, rx_buflen, MAX_KEY_VALUE_PAIRS);
	memcpy(login->req_buf, &rx_desc->data[0], size);

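	/*
	 * The first login request completes conn_login_comp to wake the
	 * thread waiting in isert_get_login_rx(); subsequent requests go
	 * straight to the login work.
	 */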
	if (login->first_request) {
		complete(&isert_conn->conn_login_comp);
		return;
	}
	schedule_delayed_work(&conn->login_work, 0);
}

static struct iscsi_cmd
*isert_allocate_cmd(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct isert_cmd *isert_cmd;
	struct iscsi_cmd *cmd;

	cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
	if (!cmd) {
		pr_err("Unable to allocate iscsi_cmd + isert_cmd\n");
		return NULL;
	}
	isert_cmd = iscsit_priv_cmd(cmd);
	isert_cmd->conn = isert_conn;
	isert_cmd->iscsi_cmd = cmd;

	return cmd;
}

static int
isert_handle_scsi_cmd(struct isert_conn *isert_conn,
		      struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd,
		      struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
	struct scatterlist *sg;
	int imm_data, imm_data_len, unsol_data, sg_nents, rc;
	bool dump_payload = false;

	rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
	if (rc < 0)
		return rc;

	imm_data = cmd->immediate_data;
	imm_data_len = cmd->first_burst_len;
	unsol_data = cmd->unsolicited_data;

	rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
	if (rc < 0) {
		return 0;
	} else if (rc > 0) {
		dump_payload = true;
		goto sequence_cmd;
	}

	if (!imm_data)
		return 0;

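	/*
	 * Immediate data arrived inline in the rx descriptor; copy it
	 * into the command's data scatterlist.
	 */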
	sg = &cmd->se_cmd.t_data_sg[0];
	sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));

	pr_debug("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
		 sg, sg_nents, &rx_desc->data[0], imm_data_len);

	sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len);

	cmd->write_data_done += imm_data_len;

	if (cmd->write_data_done == cmd->se_cmd.data_length) {
		spin_lock_bh(&cmd->istate_lock);
		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
		spin_unlock_bh(&cmd->istate_lock);
	}

sequence_cmd:
	rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);

	if (!rc && dump_payload == false && unsol_data)
		iscsit_set_unsoliticed_dataout(cmd);
	else if (dump_payload && imm_data)
		target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);

	return 0;
}

static int
isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
			   struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct scatterlist *sg_start;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_cmd *cmd = NULL;
	struct iscsi_data *hdr = (struct iscsi_data *)buf;
	u32 unsol_data_len = ntoh24(hdr->dlength);
	int rc, sg_nents, sg_off, page_off;

	rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
	if (rc < 0)
		return rc;
	else if (!cmd)
		return 0;
	/*
	 * FIXME: Unexpected unsolicited_data out
	 */
	if (!cmd->unsolicited_data) {
		pr_err("Received unexpected solicited data payload\n");
		dump_stack();
		return -1;
	}

	pr_debug("Unsolicited DataOut unsol_data_len: %u, write_data_done: %u, data_length: %u\n",
		 unsol_data_len, cmd->write_data_done, cmd->se_cmd.data_length);

	sg_off = cmd->write_data_done / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
	page_off = cmd->write_data_done % PAGE_SIZE;
	/*
	 * FIXME: Non page-aligned unsolicited_data out
	 */
	if (page_off) {
		pr_err("Received unexpected non-page aligned data payload\n");
		dump_stack();
		return -1;
	}
	pr_debug("Copying DataOut: sg_start: %p, sg_off: %u sg_nents: %u from %p %u\n",
		 sg_start, sg_off, sg_nents, &rx_desc->data[0], unsol_data_len);

	sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
			    unsol_data_len);

	rc = iscsit_check_dataout_payload(cmd, hdr, false);
	if (rc < 0)
		return rc;

	return 0;
}

static int
isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		     struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
		     unsigned char *buf)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
	int rc;

	rc = iscsit_setup_nop_out(conn, cmd, hdr);
	if (rc < 0)
		return rc;
	/*
	 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
	 */

	return iscsit_process_nop_out(conn, cmd, hdr);
}

static int
isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		      struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
		      struct iscsi_text *hdr)
{
	struct iscsi_conn *conn = isert_conn->conn;
	u32 payload_length = ntoh24(hdr->dlength);
	int rc;
	unsigned char *text_in;

	rc = iscsit_setup_text_cmd(conn, cmd, hdr);
	if (rc < 0)
		return rc;

	text_in = kzalloc(payload_length, GFP_KERNEL);
	if (!text_in) {
		pr_err("Unable to allocate text_in of payload_length: %u\n",
		       payload_length);
		return -ENOMEM;
	}
	cmd->text_in_ptr = text_in;

	memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);

	return iscsit_process_text_cmd(conn, cmd, hdr);
}

static int
isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
		uint32_t read_stag, uint64_t read_va,
		uint32_t write_stag, uint64_t write_va)
{
	struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_session *sess = conn->sess;
	struct iscsi_cmd *cmd;
	struct isert_cmd *isert_cmd;
	int ret = -EINVAL;
	u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);

	/*
	 * Discovery sessions may only carry text and logout PDUs; note
	 * that iSCSI opcodes are plain values, not flag bits, so compare
	 * for equality instead of masking.
	 */
	if (sess->sess_ops->SessionType &&
	    opcode != ISCSI_OP_TEXT && opcode != ISCSI_OP_LOGOUT) {
		pr_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
		       " ignoring\n", opcode);
		return 0;
	}

Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001436 switch (opcode) {
1437 case ISCSI_OP_SCSI_CMD:
Nicholas Bellinger676687c2014-01-20 03:36:44 +00001438 cmd = isert_allocate_cmd(conn);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001439 if (!cmd)
1440 break;
1441
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001442 isert_cmd = iscsit_priv_cmd(cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001443 isert_cmd->read_stag = read_stag;
1444 isert_cmd->read_va = read_va;
1445 isert_cmd->write_stag = write_stag;
1446 isert_cmd->write_va = write_va;
1447
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001448 ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001449 rx_desc, (unsigned char *)hdr);
1450 break;
1451 case ISCSI_OP_NOOP_OUT:
Nicholas Bellinger676687c2014-01-20 03:36:44 +00001452 cmd = isert_allocate_cmd(conn);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001453 if (!cmd)
1454 break;
1455
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001456 isert_cmd = iscsit_priv_cmd(cmd);
1457 ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
Nicholas Bellinger778de362013-06-14 16:07:47 -07001458 rx_desc, (unsigned char *)hdr);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001459 break;
1460 case ISCSI_OP_SCSI_DATA_OUT:
1461 ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
1462 (unsigned char *)hdr);
1463 break;
1464 case ISCSI_OP_SCSI_TMFUNC:
Nicholas Bellinger676687c2014-01-20 03:36:44 +00001465 cmd = isert_allocate_cmd(conn);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001466 if (!cmd)
1467 break;
1468
1469 ret = iscsit_handle_task_mgt_cmd(conn, cmd,
1470 (unsigned char *)hdr);
1471 break;
1472 case ISCSI_OP_LOGOUT:
Nicholas Bellinger676687c2014-01-20 03:36:44 +00001473 cmd = isert_allocate_cmd(conn);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001474 if (!cmd)
1475 break;
1476
1477 ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
1478 if (ret > 0)
1479 wait_for_completion_timeout(&conn->conn_logout_comp,
1480 SECONDS_FOR_LOGOUT_COMP *
1481 HZ);
1482 break;
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07001483 case ISCSI_OP_TEXT:
Nicholas Bellinger676687c2014-01-20 03:36:44 +00001484 cmd = isert_allocate_cmd(conn);
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07001485 if (!cmd)
1486 break;
1487
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001488 isert_cmd = iscsit_priv_cmd(cmd);
1489 ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07001490 rx_desc, (struct iscsi_text *)hdr);
1491 break;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001492 default:
1493 pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
1494 dump_stack();
1495 break;
1496 }
1497
1498 return ret;
1499}
1500
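/*
 * Decode the iSER header: ISER_RSV/ISER_WSV flag the presence of the
 * initiator's read/write STag and VA, which are extracted here and
 * passed to isert_rx_opcode() for use in later RDMA work requests.
 */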
1501static void
1502isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
1503{
1504 struct iser_hdr *iser_hdr = &rx_desc->iser_header;
1505 uint64_t read_va = 0, write_va = 0;
1506 uint32_t read_stag = 0, write_stag = 0;
1507 int rc;
1508
1509 switch (iser_hdr->flags & 0xF0) {
1510 case ISCSI_CTRL:
1511 if (iser_hdr->flags & ISER_RSV) {
1512 read_stag = be32_to_cpu(iser_hdr->read_stag);
1513 read_va = be64_to_cpu(iser_hdr->read_va);
1514 pr_debug("ISER_RSV: read_stag: 0x%08x read_va: 0x%16llx\n",
1515 read_stag, (unsigned long long)read_va);
1516 }
1517 if (iser_hdr->flags & ISER_WSV) {
1518 write_stag = be32_to_cpu(iser_hdr->write_stag);
1519 write_va = be64_to_cpu(iser_hdr->write_va);
1520 pr_debug("ISER_WSV: write__stag: 0x%08x write_va: 0x%16llx\n",
1521 write_stag, (unsigned long long)write_va);
1522 }
1523
1524 pr_debug("ISER ISCSI_CTRL PDU\n");
1525 break;
1526 case ISER_HELLO:
1527 pr_err("iSER Hello message\n");
1528 break;
1529 default:
1530 pr_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
1531 break;
1532 }
1533
1534 rc = isert_rx_opcode(isert_conn, rx_desc,
1535 read_stag, read_va, write_stag, write_va);
1536}
1537
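/*
 * Per-receive completion: login PDUs landing in login_req_buf wake the
 * login path via login_req_comp, everything else goes through
 * isert_rx_do_work().  The receive queue is then replenished in
 * batches once enough posted buffers have been consumed.
 */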
1538static void
1539isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
1540 unsigned long xfer_len)
1541{
1542 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1543 struct iscsi_hdr *hdr;
1544 u64 rx_dma;
1545 int rx_buflen, outstanding;
1546
1547 if ((char *)desc == isert_conn->login_req_buf) {
1548 rx_dma = isert_conn->login_req_dma;
1549 rx_buflen = ISER_RX_LOGIN_SIZE;
1550 pr_debug("ISER login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
1551 rx_dma, rx_buflen);
1552 } else {
1553 rx_dma = desc->dma_addr;
1554 rx_buflen = ISER_RX_PAYLOAD_SIZE;
1555 pr_debug("ISER req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
1556 rx_dma, rx_buflen);
1557 }
1558
1559 ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);
1560
1561 hdr = &desc->iscsi_header;
1562 pr_debug("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
1563 hdr->opcode, hdr->itt, hdr->flags,
1564 (int)(xfer_len - ISER_HEADERS_LEN));
1565
Sagi Grimberg2371e5d2014-12-02 16:57:21 +02001566 if ((char *)desc == isert_conn->login_req_buf) {
1567 isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN;
1568 if (isert_conn->conn) {
1569 struct iscsi_login *login = isert_conn->conn->conn_login;
1570
1571 if (login && !login->first_request)
1572 isert_rx_login_req(isert_conn);
1573 }
1574 mutex_lock(&isert_conn->conn_mutex);
1575 complete(&isert_conn->login_req_comp);
1576 mutex_unlock(&isert_conn->conn_mutex);
1577 } else {
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001578 isert_rx_do_work(desc, isert_conn);
Sagi Grimberg2371e5d2014-12-02 16:57:21 +02001579 }
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001580
1581 ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
1582 DMA_FROM_DEVICE);
1583
1584 isert_conn->post_recv_buf_count--;
1585 pr_debug("iSERT: Decremented post_recv_buf_count: %d\n",
1586 isert_conn->post_recv_buf_count);
1587
1588 if ((char *)desc == isert_conn->login_req_buf)
1589 return;
1590
1591 outstanding = isert_conn->post_recv_buf_count;
1592 if (outstanding + ISERT_MIN_POSTED_RX <= ISERT_QP_MAX_RECV_DTOS) {
1593 int err, count = min(ISERT_QP_MAX_RECV_DTOS - outstanding,
1594 ISERT_MIN_POSTED_RX);
1595 err = isert_post_recv(isert_conn, count);
1596 if (err) {
1597 pr_err("isert_post_recv() count: %d failed, %d\n",
1598 count, err);
1599 }
1600 }
1601}
1602
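/*
 * DMA-map the command's scatterlist window for RDMA.  The DMA
 * direction follows the transfer direction (ISER_IB_RDMA_WRITE maps
 * DMA_TO_DEVICE), offset selects the starting sg entry, and both
 * nents and length are clamped to what one ISCSI_ISER_SG_TABLESIZE
 * registration can cover.
 */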
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02001603static int
1604isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1605 struct scatterlist *sg, u32 nents, u32 length, u32 offset,
1606 enum iser_ib_op_code op, struct isert_data_buf *data)
1607{
1608 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1609
1610 data->dma_dir = op == ISER_IB_RDMA_WRITE ?
1611 DMA_TO_DEVICE : DMA_FROM_DEVICE;
1612
1613 data->len = length - offset;
1614 data->offset = offset;
1615 data->sg_off = data->offset / PAGE_SIZE;
1616
1617 data->sg = &sg[data->sg_off];
1618 data->nents = min_t(unsigned int, nents - data->sg_off,
1619 ISCSI_ISER_SG_TABLESIZE);
1620 data->len = min_t(unsigned int, data->len, ISCSI_ISER_SG_TABLESIZE *
1621 PAGE_SIZE);
1622
1623 data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents,
1624 data->dma_dir);
1625 if (unlikely(!data->dma_nents)) {
1626 pr_err("Cmd: unable to dma map SGs %p\n", sg);
1627 return -EINVAL;
1628 }
1629
1630 pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
1631 isert_cmd, data->dma_nents, data->sg, data->nents, data->len);
1632
1633 return 0;
1634}
1635
1636static void
1637isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data)
1638{
1639 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1640
1641 ib_dma_unmap_sg(ib_dev, data->sg, data->nents, data->dma_dir);
1642 memset(data, 0, sizeof(*data));
1643}
1644
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001647static void
1648isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
1649{
1650 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001651
Vu Pham90ecc6e2013-08-28 23:23:33 +03001652 pr_debug("isert_unmap_cmd: %p\n", isert_cmd);
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02001653
1654 if (wr->data.sg) {
Vu Pham90ecc6e2013-08-28 23:23:33 +03001655 pr_debug("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd);
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02001656 isert_unmap_data_buf(isert_conn, &wr->data);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001657 }
1658
Vu Pham90ecc6e2013-08-28 23:23:33 +03001659 if (wr->send_wr) {
1660 pr_debug("isert_unmap_cmd: %p free send_wr\n", isert_cmd);
1661 kfree(wr->send_wr);
1662 wr->send_wr = NULL;
1663 }
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001664
Vu Pham90ecc6e2013-08-28 23:23:33 +03001665 if (wr->ib_sge) {
1666 pr_debug("isert_unmap_cmd: %p free ib_sge\n", isert_cmd);
1667 kfree(wr->ib_sge);
1668 wr->ib_sge = NULL;
1669 }
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001670}
1671
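/*
 * Fastreg counterpart of isert_unmap_cmd(): unmap the protection
 * buffer if one was registered, return the fast registration
 * descriptor to the connection's pool, and unmap the data buffer.
 */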
1672static void
Sagi Grimberga3a5a822014-01-09 18:40:50 +02001673isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
Vu Pham59464ef2013-08-28 23:23:35 +03001674{
1675 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
Vu Pham59464ef2013-08-28 23:23:35 +03001676 LIST_HEAD(unmap_list);
1677
Sagi Grimberga3a5a822014-01-09 18:40:50 +02001678 pr_debug("unreg_fastreg_cmd: %p\n", isert_cmd);
Vu Pham59464ef2013-08-28 23:23:35 +03001679
1680 if (wr->fr_desc) {
Sagi Grimberga3a5a822014-01-09 18:40:50 +02001681 pr_debug("unreg_fastreg_cmd: %p free fr_desc %p\n",
Vu Pham59464ef2013-08-28 23:23:35 +03001682 isert_cmd, wr->fr_desc);
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02001683 if (wr->fr_desc->ind & ISERT_PROTECTED) {
1684 isert_unmap_data_buf(isert_conn, &wr->prot);
1685 wr->fr_desc->ind &= ~ISERT_PROTECTED;
1686 }
Vu Pham59464ef2013-08-28 23:23:35 +03001687 spin_lock_bh(&isert_conn->conn_lock);
Sagi Grimberga3a5a822014-01-09 18:40:50 +02001688 list_add_tail(&wr->fr_desc->list, &isert_conn->conn_fr_pool);
Vu Pham59464ef2013-08-28 23:23:35 +03001689 spin_unlock_bh(&isert_conn->conn_lock);
1690 wr->fr_desc = NULL;
1691 }
1692
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02001693 if (wr->data.sg) {
Sagi Grimberga3a5a822014-01-09 18:40:50 +02001694 pr_debug("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd);
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02001695 isert_unmap_data_buf(isert_conn, &wr->data);
Vu Pham59464ef2013-08-28 23:23:35 +03001696 }
1697
1698 wr->ib_sge = NULL;
1699 wr->send_wr = NULL;
1700}
1701
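/*
 * Final put for a command: unlink it from the connection's command
 * list, release its RDMA registrations (SCSI commands only) via
 * device->unreg_rdma_mem(), and free it through the target core or
 * iscsit_release_cmd() as the opcode dictates.
 */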
1702static void
Nicholas Bellinger03e78482014-03-30 15:50:03 -07001703isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001704{
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001705 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001706 struct isert_conn *isert_conn = isert_cmd->conn;
Nicholas Bellinger186a9642013-07-03 03:11:48 -07001707 struct iscsi_conn *conn = isert_conn->conn;
Vu Phamd40945d2013-08-28 23:23:34 +03001708 struct isert_device *device = isert_conn->conn_device;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001709
1710 pr_debug("Entering isert_put_cmd: %p\n", isert_cmd);
1711
1712 switch (cmd->iscsi_opcode) {
1713 case ISCSI_OP_SCSI_CMD:
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001714 spin_lock_bh(&conn->cmd_lock);
1715 if (!list_empty(&cmd->i_conn_node))
Nicholas Bellinger5159d762014-02-03 12:53:51 -08001716 list_del_init(&cmd->i_conn_node);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001717 spin_unlock_bh(&conn->cmd_lock);
1718
Nicholas Bellinger03e78482014-03-30 15:50:03 -07001719 if (cmd->data_direction == DMA_TO_DEVICE) {
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001720 iscsit_stop_dataout_timer(cmd);
Nicholas Bellinger03e78482014-03-30 15:50:03 -07001721 /*
1722 * Check for special case during comp_err where
1723 * WRITE_PENDING has been handed off from core,
1724 * but requires an extra target_put_sess_cmd()
1725 * before transport_generic_free_cmd() below.
1726 */
1727 if (comp_err &&
1728 cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
1729 struct se_cmd *se_cmd = &cmd->se_cmd;
1730
1731 target_put_sess_cmd(se_cmd->se_sess, se_cmd);
1732 }
1733 }
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001734
Vu Phamd40945d2013-08-28 23:23:34 +03001735 device->unreg_rdma_mem(isert_cmd, isert_conn);
Nicholas Bellinger186a9642013-07-03 03:11:48 -07001736 transport_generic_free_cmd(&cmd->se_cmd, 0);
1737 break;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001738 case ISCSI_OP_SCSI_TMFUNC:
Nicholas Bellinger186a9642013-07-03 03:11:48 -07001739 spin_lock_bh(&conn->cmd_lock);
1740 if (!list_empty(&cmd->i_conn_node))
Nicholas Bellinger5159d762014-02-03 12:53:51 -08001741 list_del_init(&cmd->i_conn_node);
Nicholas Bellinger186a9642013-07-03 03:11:48 -07001742 spin_unlock_bh(&conn->cmd_lock);
1743
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001744 transport_generic_free_cmd(&cmd->se_cmd, 0);
1745 break;
1746 case ISCSI_OP_REJECT:
1747 case ISCSI_OP_NOOP_OUT:
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07001748 case ISCSI_OP_TEXT:
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001749 spin_lock_bh(&conn->cmd_lock);
1750 if (!list_empty(&cmd->i_conn_node))
Nicholas Bellinger5159d762014-02-03 12:53:51 -08001751 list_del_init(&cmd->i_conn_node);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001752 spin_unlock_bh(&conn->cmd_lock);
1753
1754 /*
1755 * Handle special case for REJECT when iscsi_add_reject*() has
1756 * overwritten the original iscsi_opcode assignment, and the
1757 * associated cmd->se_cmd needs to be released.
1758 */
1759 if (cmd->se_cmd.se_tfo != NULL) {
Nicholas Bellinger3df8f682013-06-26 02:31:42 -07001760 pr_debug("Calling transport_generic_free_cmd from"
1761 " isert_put_cmd for 0x%02x\n",
1762 cmd->iscsi_opcode);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001763 transport_generic_free_cmd(&cmd->se_cmd, 0);
1764 break;
1765 }
1766 /*
1767 * Fall-through
1768 */
1769 default:
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001770 iscsit_release_cmd(cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001771 break;
1772 }
1773}
1774
1775static void
1776isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
1777{
1778 if (tx_desc->dma_addr != 0) {
1779 pr_debug("Calling ib_dma_unmap_single for tx_desc->dma_addr\n");
1780 ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
1781 ISER_HEADERS_LEN, DMA_TO_DEVICE);
1782 tx_desc->dma_addr = 0;
1783 }
1784}
1785
1786static void
1787isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
Nicholas Bellinger03e78482014-03-30 15:50:03 -07001788 struct ib_device *ib_dev, bool comp_err)
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001789{
Nicholas Bellingerdbbc5d12013-07-03 19:39:37 -07001790 if (isert_cmd->pdu_buf_dma != 0) {
1791 pr_debug("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n");
1792 ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
1793 isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
1794 isert_cmd->pdu_buf_dma = 0;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001795 }
1796
1797 isert_unmap_tx_desc(tx_desc, ib_dev);
Nicholas Bellinger03e78482014-03-30 15:50:03 -07001798 isert_put_cmd(isert_cmd, comp_err);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001799}
1800
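/*
 * Query the signature MR for T10-PI errors after a protected transfer.
 * On a guard/app-tag/ref-tag mismatch the matching TCM sense code and
 * the offending sector are recorded in se_cmd, and 1 is returned so
 * the caller sends a check condition instead of completing the
 * command normally.
 */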
Sagi Grimberg96b79732014-03-17 12:52:18 +02001801static int
1802isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
1803{
1804 struct ib_mr_status mr_status;
1805 int ret;
1806
1807 ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
1808 if (ret) {
1809 pr_err("ib_check_mr_status failed, ret %d\n", ret);
1810 goto fail_mr_status;
1811 }
1812
1813 if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
1814 u64 sec_offset_err;
1815 u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8;
1816
1817 switch (mr_status.sig_err.err_type) {
1818 case IB_SIG_BAD_GUARD:
1819 se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
1820 break;
1821 case IB_SIG_BAD_REFTAG:
1822 se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
1823 break;
1824 case IB_SIG_BAD_APPTAG:
1825 se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
1826 break;
1827 }
1828 sec_offset_err = mr_status.sig_err.sig_err_offset;
1829 do_div(sec_offset_err, block_size);
1830 se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba;
1831
1832 pr_err("isert: PI error found type %d at sector 0x%llx "
1833 "expected 0x%x vs actual 0x%x\n",
1834 mr_status.sig_err.err_type,
1835 (unsigned long long)se_cmd->bad_sector,
1836 mr_status.sig_err.expected,
1837 mr_status.sig_err.actual);
1838 ret = 1;
1839 }
1840
1841fail_mr_status:
1842 return ret;
1843}
1844
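/*
 * RDMA_WRITE completion: verify T10-PI status if the transfer was
 * protected, release the RDMA resources, and either post the SCSI
 * response or send a check condition on a PI failure.
 */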
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001845static void
Sagi Grimbergf93f3a72014-02-19 17:50:24 +02001846isert_completion_rdma_write(struct iser_tx_desc *tx_desc,
1847 struct isert_cmd *isert_cmd)
1848{
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02001849 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
Sagi Grimbergf93f3a72014-02-19 17:50:24 +02001850 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02001851 struct se_cmd *se_cmd = &cmd->se_cmd;
Sagi Grimbergf93f3a72014-02-19 17:50:24 +02001852 struct isert_conn *isert_conn = isert_cmd->conn;
1853 struct isert_device *device = isert_conn->conn_device;
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02001854 int ret = 0;
Sagi Grimbergf93f3a72014-02-19 17:50:24 +02001855
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02001856 if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
Sagi Grimberg96b79732014-03-17 12:52:18 +02001857 ret = isert_check_pi_status(se_cmd,
1858 wr->fr_desc->pi_ctx->sig_mr);
1859 wr->fr_desc->ind &= ~ISERT_PROTECTED;
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02001860 }
1861
Sagi Grimbergf93f3a72014-02-19 17:50:24 +02001862 device->unreg_rdma_mem(isert_cmd, isert_conn);
Sagi Grimberg897bb2c2014-03-17 12:52:17 +02001863 wr->send_wr_num = 0;
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02001864 if (ret)
1865 transport_send_check_condition_and_sense(se_cmd,
1866 se_cmd->pi_err, 0);
1867 else
1868 isert_put_response(isert_conn->conn, cmd);
Sagi Grimbergf93f3a72014-02-19 17:50:24 +02001869}
1870
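/*
 * RDMA_READ completion: all solicited Data-Out has arrived, so stop
 * the dataout timer, mark the write done, and kick the command into
 * execution (or raise a PI error).
 */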
1871static void
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001872isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
1873 struct isert_cmd *isert_cmd)
1874{
1875 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001876 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001877 struct se_cmd *se_cmd = &cmd->se_cmd;
Vu Pham90ecc6e2013-08-28 23:23:33 +03001878 struct isert_conn *isert_conn = isert_cmd->conn;
Vu Phamd40945d2013-08-28 23:23:34 +03001879 struct isert_device *device = isert_conn->conn_device;
Sagi Grimberg5bac4b12014-03-18 14:58:27 +02001880 int ret = 0;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001881
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02001882 if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
Sagi Grimberg96b79732014-03-17 12:52:18 +02001883 ret = isert_check_pi_status(se_cmd,
1884 wr->fr_desc->pi_ctx->sig_mr);
1885 wr->fr_desc->ind &= ~ISERT_PROTECTED;
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02001886 }
1887
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001888 iscsit_stop_dataout_timer(cmd);
Vu Phamd40945d2013-08-28 23:23:34 +03001889 device->unreg_rdma_mem(isert_cmd, isert_conn);
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02001890 cmd->write_data_done = wr->data.len;
Nicholas Bellingerb6b87a12014-02-27 09:05:03 -08001891 wr->send_wr_num = 0;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001892
Vu Pham90ecc6e2013-08-28 23:23:33 +03001893 pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001894 spin_lock_bh(&cmd->istate_lock);
1895 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
1896 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
1897 spin_unlock_bh(&cmd->istate_lock);
1898
Sagi Grimberg5bac4b12014-03-18 14:58:27 +02001899 if (ret)
1900 transport_send_check_condition_and_sense(se_cmd,
1901 se_cmd->pi_err, 0);
1902 else
1903 target_execute_cmd(se_cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001904}
1905
1906static void
1907isert_do_control_comp(struct work_struct *work)
1908{
1909 struct isert_cmd *isert_cmd = container_of(work,
1910 struct isert_cmd, comp_work);
1911 struct isert_conn *isert_conn = isert_cmd->conn;
1912 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001913 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001914
1915 switch (cmd->i_state) {
1916 case ISTATE_SEND_TASKMGTRSP:
1917 pr_debug("Calling iscsit_tmr_post_handler >>>>>>>>>>>>>>>>>\n");
1918
1919 atomic_dec(&isert_conn->post_send_buf_count);
1920 iscsit_tmr_post_handler(cmd, cmd->conn);
1921
1922 cmd->i_state = ISTATE_SENT_STATUS;
Nicholas Bellinger03e78482014-03-30 15:50:03 -07001923 isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001924 break;
1925 case ISTATE_SEND_REJECT:
1926 pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n");
1927 atomic_dec(&isert_conn->post_send_buf_count);
1928
1929 cmd->i_state = ISTATE_SENT_STATUS;
Nicholas Bellinger03e78482014-03-30 15:50:03 -07001930 isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
Nicholas Bellinger3df8f682013-06-26 02:31:42 -07001931 break;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001932 case ISTATE_SEND_LOGOUTRSP:
1933 pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
Sagi Grimberg9d49f5e2014-05-19 17:44:23 +03001934
1935 atomic_dec(&isert_conn->post_send_buf_count);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001936 iscsit_logout_post_handler(cmd, cmd->conn);
1937 break;
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07001938 case ISTATE_SEND_TEXTRSP:
1939 atomic_dec(&isert_conn->post_send_buf_count);
1940 cmd->i_state = ISTATE_SENT_STATUS;
Nicholas Bellinger03e78482014-03-30 15:50:03 -07001941 isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07001942 break;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001943 default:
1944 pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state);
1945 dump_stack();
1946 break;
1947 }
1948}
1949
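/*
 * Completion handler for response sends: control PDUs are finished in
 * workqueue context via isert_do_control_comp(), while data-path
 * responses are completed inline.
 */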
1950static void
1951isert_response_completion(struct iser_tx_desc *tx_desc,
1952 struct isert_cmd *isert_cmd,
1953 struct isert_conn *isert_conn,
1954 struct ib_device *ib_dev)
1955{
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001956 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
Nicholas Bellingerb6b87a12014-02-27 09:05:03 -08001957 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001958
1959 if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
Nicholas Bellinger3df8f682013-06-26 02:31:42 -07001960 cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07001961 cmd->i_state == ISTATE_SEND_REJECT ||
1962 cmd->i_state == ISTATE_SEND_TEXTRSP) {
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001963 isert_unmap_tx_desc(tx_desc, ib_dev);
1964
1965 INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
1966 queue_work(isert_comp_wq, &isert_cmd->comp_work);
1967 return;
1968 }
Sagi Grimberg897bb2c2014-03-17 12:52:17 +02001969
1970	/*
1971	 * If send_wr_num is 0, the RDMA completion has already
1972	 * arrived and cleared it, so only the response post itself
1973	 * remains to be decremented; otherwise the response post is
1974	 * accounted for inside send_wr_num, and the whole batch is
1975	 * subtracted at once.
1976	 */
1977 if (wr->send_wr_num)
1978 atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
1979 else
1980 atomic_dec(&isert_conn->post_send_buf_count);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001981
1982 cmd->i_state = ISTATE_SENT_STATUS;
Nicholas Bellinger03e78482014-03-30 15:50:03 -07001983 isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001984}
1985
1986static void
Nicholas Bellinger95b60f02013-11-05 13:16:12 -08001987__isert_send_completion(struct iser_tx_desc *tx_desc,
1988 struct isert_conn *isert_conn)
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001989{
1990 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1991 struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
1992 struct isert_rdma_wr *wr;
1993
1994 if (!isert_cmd) {
1995 atomic_dec(&isert_conn->post_send_buf_count);
1996 isert_unmap_tx_desc(tx_desc, ib_dev);
1997 return;
1998 }
1999 wr = &isert_cmd->rdma_wr;
2000
2001 switch (wr->iser_ib_op) {
2002 case ISER_IB_RECV:
2003 pr_err("isert_send_completion: Got ISER_IB_RECV\n");
2004 dump_stack();
2005 break;
2006 case ISER_IB_SEND:
2007 pr_debug("isert_send_completion: Got ISER_IB_SEND\n");
2008 isert_response_completion(tx_desc, isert_cmd,
2009 isert_conn, ib_dev);
2010 break;
2011 case ISER_IB_RDMA_WRITE:
Sagi Grimbergf93f3a72014-02-19 17:50:24 +02002012 pr_debug("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
Sagi Grimberg897bb2c2014-03-17 12:52:17 +02002013 atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
Sagi Grimbergf93f3a72014-02-19 17:50:24 +02002014 isert_completion_rdma_write(tx_desc, isert_cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002015 break;
2016 case ISER_IB_RDMA_READ:
2017 pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");
2018
Nicholas Bellingerb6b87a12014-02-27 09:05:03 -08002019 atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002020 isert_completion_rdma_read(tx_desc, isert_cmd);
2021 break;
2022 default:
2023 pr_err("Unknown wr->iser_ib_op: 0x%02x\n", wr->iser_ib_op);
2024 dump_stack();
2025 break;
2026 }
2027}
2028
2029static void
Nicholas Bellinger95b60f02013-11-05 13:16:12 -08002030isert_send_completion(struct iser_tx_desc *tx_desc,
2031 struct isert_conn *isert_conn)
2032{
2033 struct llist_node *llnode = tx_desc->comp_llnode_batch;
2034 struct iser_tx_desc *t;
2035 /*
2036 * Drain coalesced completion llist starting from comp_llnode_batch
2037	 * set up in isert_init_send_wr(), and then complete the trailing tx_desc.
2038 */
2039 while (llnode) {
2040 t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
2041 llnode = llist_next(llnode);
2042 __isert_send_completion(t, isert_conn);
2043 }
2044 __isert_send_completion(tx_desc, isert_conn);
2045}
2046
2047static void
Nicholas Bellingerebbe4422014-03-02 14:51:12 -08002048isert_cq_drain_comp_llist(struct isert_conn *isert_conn, struct ib_device *ib_dev)
2049{
2050 struct llist_node *llnode;
2051 struct isert_rdma_wr *wr;
2052 struct iser_tx_desc *t;
2053
2054 mutex_lock(&isert_conn->conn_mutex);
2055 llnode = llist_del_all(&isert_conn->conn_comp_llist);
2056 isert_conn->conn_comp_batch = 0;
2057 mutex_unlock(&isert_conn->conn_mutex);
2058
2059 while (llnode) {
2060 t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
2061 llnode = llist_next(llnode);
2062 wr = &t->isert_cmd->rdma_wr;
2063
Sagi Grimberg897bb2c2014-03-17 12:52:17 +02002064		/* Same send_wr_num accounting as in isert_response_completion() */
2071 if (wr->send_wr_num)
2072 atomic_sub(wr->send_wr_num,
2073 &isert_conn->post_send_buf_count);
2074 else
2075 atomic_dec(&isert_conn->post_send_buf_count);
Nicholas Bellinger03e78482014-03-30 15:50:03 -07002076
2077 isert_completion_put(t, t->isert_cmd, ib_dev, true);
Nicholas Bellingerebbe4422014-03-02 14:51:12 -08002078 }
2079}
2080
2081static void
Nicholas Bellingerdefd8842014-02-03 12:54:39 -08002082isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002083{
2084 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
Nicholas Bellingerdefd8842014-02-03 12:54:39 -08002085 struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
Nicholas Bellingerebbe4422014-03-02 14:51:12 -08002086 struct llist_node *llnode = tx_desc->comp_llnode_batch;
2087 struct isert_rdma_wr *wr;
2088 struct iser_tx_desc *t;
2089
2090 while (llnode) {
2091 t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
2092 llnode = llist_next(llnode);
2093 wr = &t->isert_cmd->rdma_wr;
2094
Sagi Grimberg897bb2c2014-03-17 12:52:17 +02002095		/* Same send_wr_num accounting as in isert_response_completion() */
2102 if (wr->send_wr_num)
2103 atomic_sub(wr->send_wr_num,
2104 &isert_conn->post_send_buf_count);
2105 else
2106 atomic_dec(&isert_conn->post_send_buf_count);
Nicholas Bellinger03e78482014-03-30 15:50:03 -07002107
2108 isert_completion_put(t, t->isert_cmd, ib_dev, true);
Nicholas Bellingerebbe4422014-03-02 14:51:12 -08002109 }
2110 tx_desc->comp_llnode_batch = NULL;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002111
Nicholas Bellingerdefd8842014-02-03 12:54:39 -08002112 if (!isert_cmd)
2113 isert_unmap_tx_desc(tx_desc, ib_dev);
2114 else
Nicholas Bellinger03e78482014-03-30 15:50:03 -07002115 isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
Nicholas Bellingerdefd8842014-02-03 12:54:39 -08002116}
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002117
Nicholas Bellingerdefd8842014-02-03 12:54:39 -08002118static void
2119isert_cq_rx_comp_err(struct isert_conn *isert_conn)
2120{
2121 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2122 struct iscsi_conn *conn = isert_conn->conn;
2123
2124 if (isert_conn->post_recv_buf_count)
2125 return;
2126
Nicholas Bellingerebbe4422014-03-02 14:51:12 -08002127 isert_cq_drain_comp_llist(isert_conn, ib_dev);
2128
Nicholas Bellingerdefd8842014-02-03 12:54:39 -08002129 if (conn->sess) {
2130 target_sess_cmd_list_set_waiting(conn->sess->se_sess);
2131 target_wait_for_sess_cmds(conn->sess->se_sess);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002132 }
2133
Nicholas Bellingerdefd8842014-02-03 12:54:39 -08002134 while (atomic_read(&isert_conn->post_send_buf_count))
2135 msleep(3000);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002136
Nicholas Bellingerdefd8842014-02-03 12:54:39 -08002137 mutex_lock(&isert_conn->conn_mutex);
Sagi Grimberg954f2372014-12-02 16:57:17 +02002138 isert_conn_terminate(isert_conn);
Nicholas Bellingerdefd8842014-02-03 12:54:39 -08002139 mutex_unlock(&isert_conn->conn_mutex);
Nicholas Bellingerb2cb9642013-07-03 03:05:37 -07002140
Sagi Grimberg9d49f5e2014-05-19 17:44:23 +03002141 iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
2142
Nicholas Bellingerdefd8842014-02-03 12:54:39 -08002143 complete(&isert_conn->conn_wait_comp_err);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002144}
2145
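/*
 * TX CQ work: poll send completions one at a time.  Successful sends
 * run isert_send_completion(); flush/errors release their descriptors
 * through isert_cq_tx_comp_err(), except fast registration WRs, which
 * carry no tx descriptor.
 */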
2146static void
2147isert_cq_tx_work(struct work_struct *work)
2148{
2149 struct isert_cq_desc *cq_desc = container_of(work,
2150 struct isert_cq_desc, cq_tx_work);
2151 struct isert_device *device = cq_desc->device;
2152 int cq_index = cq_desc->cq_index;
2153 struct ib_cq *tx_cq = device->dev_tx_cq[cq_index];
2154 struct isert_conn *isert_conn;
2155 struct iser_tx_desc *tx_desc;
2156 struct ib_wc wc;
2157
2158 while (ib_poll_cq(tx_cq, 1, &wc) == 1) {
2159 tx_desc = (struct iser_tx_desc *)(unsigned long)wc.wr_id;
2160 isert_conn = wc.qp->qp_context;
2161
2162 if (wc.status == IB_WC_SUCCESS) {
2163 isert_send_completion(tx_desc, isert_conn);
2164 } else {
2165 pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
2166 pr_debug("TX wc.status: 0x%08x\n", wc.status);
Nicholas Bellingerc5a2adb2013-07-01 15:11:21 -07002167 pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err);
Nicholas Bellingerdefd8842014-02-03 12:54:39 -08002168
Nicholas Bellinger9bb4ca62014-02-27 07:02:48 -08002169 if (wc.wr_id != ISER_FASTREG_LI_WRID) {
Nicholas Bellingerebbe4422014-03-02 14:51:12 -08002170 if (tx_desc->llnode_active)
2171 continue;
2172
Nicholas Bellinger9bb4ca62014-02-27 07:02:48 -08002173 atomic_dec(&isert_conn->post_send_buf_count);
2174 isert_cq_tx_comp_err(tx_desc, isert_conn);
2175 }
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002176 }
2177 }
2178
2179 ib_req_notify_cq(tx_cq, IB_CQ_NEXT_COMP);
2180}
2181
2182static void
2183isert_cq_tx_callback(struct ib_cq *cq, void *context)
2184{
2185 struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
2186
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002187 queue_work(isert_comp_wq, &cq_desc->cq_tx_work);
2188}
2189
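/*
 * RX CQ work: successful receives feed isert_rx_completion(); on a
 * flush or error the posted-buffer count is dropped, and once it
 * reaches zero isert_cq_rx_comp_err() runs the connection error path.
 */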
2190static void
2191isert_cq_rx_work(struct work_struct *work)
2192{
2193 struct isert_cq_desc *cq_desc = container_of(work,
2194 struct isert_cq_desc, cq_rx_work);
2195 struct isert_device *device = cq_desc->device;
2196 int cq_index = cq_desc->cq_index;
2197 struct ib_cq *rx_cq = device->dev_rx_cq[cq_index];
2198 struct isert_conn *isert_conn;
2199 struct iser_rx_desc *rx_desc;
2200 struct ib_wc wc;
2201 unsigned long xfer_len;
2202
2203 while (ib_poll_cq(rx_cq, 1, &wc) == 1) {
2204 rx_desc = (struct iser_rx_desc *)(unsigned long)wc.wr_id;
2205 isert_conn = wc.qp->qp_context;
2206
2207 if (wc.status == IB_WC_SUCCESS) {
2208 xfer_len = (unsigned long)wc.byte_len;
2209 isert_rx_completion(rx_desc, isert_conn, xfer_len);
2210 } else {
2211 pr_debug("RX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
Nicholas Bellingerc5a2adb2013-07-01 15:11:21 -07002212 if (wc.status != IB_WC_WR_FLUSH_ERR) {
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002213 pr_debug("RX wc.status: 0x%08x\n", wc.status);
Nicholas Bellingerc5a2adb2013-07-01 15:11:21 -07002214 pr_debug("RX wc.vendor_err: 0x%08x\n",
2215 wc.vendor_err);
2216 }
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002217 isert_conn->post_recv_buf_count--;
Nicholas Bellingerdefd8842014-02-03 12:54:39 -08002218 isert_cq_rx_comp_err(isert_conn);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002219 }
2220 }
2221
2222 ib_req_notify_cq(rx_cq, IB_CQ_NEXT_COMP);
2223}
2224
2225static void
2226isert_cq_rx_callback(struct ib_cq *cq, void *context)
2227{
2228 struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
2229
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002230 queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
2231}
2232
2233static int
2234isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
2235{
2236 struct ib_send_wr *wr_failed;
2237 int ret;
2238
2239 atomic_inc(&isert_conn->post_send_buf_count);
2240
2241 ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr,
2242 &wr_failed);
2243 if (ret) {
2244 pr_err("ib_post_send failed with %d\n", ret);
2245 atomic_dec(&isert_conn->post_send_buf_count);
2246 return ret;
2247 }
2248 return ret;
2249}
2250
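/*
 * Queue a SCSI response PDU: the iSCSI header rides in the first SGE
 * of the send WR, and any sense data is DMA-mapped into a second SGE.
 */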
2251static int
2252isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2253{
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07002254 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002255 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2256 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2257 struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
2258 &isert_cmd->tx_desc.iscsi_header;
2259
2260 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2261 iscsit_build_rsp_pdu(cmd, conn, true, hdr);
2262 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2263 /*
2264 * Attach SENSE DATA payload to iSCSI Response PDU
2265 */
2266 if (cmd->se_cmd.sense_buffer &&
2267 ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
2268 (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
2269 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2270 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
Nicholas Bellingerdbbc5d12013-07-03 19:39:37 -07002271 u32 padding, pdu_len;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002272
2273 put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
2274 cmd->sense_buffer);
2275 cmd->se_cmd.scsi_sense_length += sizeof(__be16);
2276
2277 padding = -(cmd->se_cmd.scsi_sense_length) & 3;
2278 hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
Nicholas Bellingerdbbc5d12013-07-03 19:39:37 -07002279 pdu_len = cmd->se_cmd.scsi_sense_length + padding;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002280
Nicholas Bellingerdbbc5d12013-07-03 19:39:37 -07002281 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
2282 (void *)cmd->sense_buffer, pdu_len,
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002283 DMA_TO_DEVICE);
2284
Nicholas Bellingerdbbc5d12013-07-03 19:39:37 -07002285 isert_cmd->pdu_buf_len = pdu_len;
2286 tx_dsg->addr = isert_cmd->pdu_buf_dma;
2287 tx_dsg->length = pdu_len;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002288 tx_dsg->lkey = isert_conn->conn_mr->lkey;
2289 isert_cmd->tx_desc.num_sge = 2;
2290 }
2291
Nicholas Bellinger0d0f6602014-10-05 02:13:03 -07002292 isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002293
2294 pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
2295
2296 return isert_post_response(isert_conn, isert_cmd);
2297}
2298
Nicholas Bellinger131e6ab2014-03-22 14:55:56 -07002299static void
2300isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2301{
2302 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2303 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2304 struct isert_device *device = isert_conn->conn_device;
2305
2306 spin_lock_bh(&conn->cmd_lock);
2307 if (!list_empty(&cmd->i_conn_node))
2308 list_del_init(&cmd->i_conn_node);
2309 spin_unlock_bh(&conn->cmd_lock);
2310
2311 if (cmd->data_direction == DMA_TO_DEVICE)
2312 iscsit_stop_dataout_timer(cmd);
2313
2314 device->unreg_rdma_mem(isert_cmd, isert_conn);
2315}
2316
Nicholas Bellingere70beee2014-04-02 12:52:38 -07002317static enum target_prot_op
2318isert_get_sup_prot_ops(struct iscsi_conn *conn)
2319{
2320 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2321 struct isert_device *device = isert_conn->conn_device;
2322
2323 if (device->pi_capable)
2324 return TARGET_PROT_ALL;
2325
2326 return TARGET_PROT_NORMAL;
2327}
2328
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002329static int
2330isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
2331 bool nopout_response)
2332{
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07002333 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002334 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2335 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2336
2337 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2338 iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
2339 &isert_cmd->tx_desc.iscsi_header,
2340 nopout_response);
2341 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
Nicholas Bellinger95b60f02013-11-05 13:16:12 -08002342 isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002343
Masanari Iida8b513d02013-05-21 23:13:12 +09002344 pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002345
2346 return isert_post_response(isert_conn, isert_cmd);
2347}
2348
2349static int
2350isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2351{
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07002352 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002353 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2354 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2355
2356 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2357 iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
2358 &isert_cmd->tx_desc.iscsi_header);
2359 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
Nicholas Bellinger95b60f02013-11-05 13:16:12 -08002360 isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002361
2362 pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
2363
2364 return isert_post_response(isert_conn, isert_cmd);
2365}
2366
2367static int
2368isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2369{
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07002370 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002371 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2372 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2373
2374 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2375 iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
2376 &isert_cmd->tx_desc.iscsi_header);
2377 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
Nicholas Bellinger95b60f02013-11-05 13:16:12 -08002378 isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002379
2380 pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
2381
2382 return isert_post_response(isert_conn, isert_cmd);
2383}
2384
2385static int
2386isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2387{
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07002388 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002389 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2390 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
Nicholas Bellinger3df8f682013-06-26 02:31:42 -07002391 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2392 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
2393 struct iscsi_reject *hdr =
2394 (struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002395
2396 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
Nicholas Bellinger3df8f682013-06-26 02:31:42 -07002397 iscsit_build_reject(cmd, conn, hdr);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002398 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
Nicholas Bellinger3df8f682013-06-26 02:31:42 -07002399
2400 hton24(hdr->dlength, ISCSI_HDR_LEN);
Nicholas Bellingerdbbc5d12013-07-03 19:39:37 -07002401 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
Nicholas Bellinger3df8f682013-06-26 02:31:42 -07002402 (void *)cmd->buf_ptr, ISCSI_HDR_LEN,
2403 DMA_TO_DEVICE);
Nicholas Bellingerdbbc5d12013-07-03 19:39:37 -07002404 isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
2405 tx_dsg->addr = isert_cmd->pdu_buf_dma;
Nicholas Bellinger3df8f682013-06-26 02:31:42 -07002406 tx_dsg->length = ISCSI_HDR_LEN;
2407 tx_dsg->lkey = isert_conn->conn_mr->lkey;
2408 isert_cmd->tx_desc.num_sge = 2;
2409
Nicholas Bellinger95b60f02013-11-05 13:16:12 -08002410 isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002411
2412 pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
2413
2414 return isert_post_response(isert_conn, isert_cmd);
2415}
2416
2417static int
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07002418isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2419{
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07002420 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07002421 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2422 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2423 struct iscsi_text_rsp *hdr =
2424 (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
2425 u32 txt_rsp_len;
2426 int rc;
2427
2428 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
Sagi Grimberg22c7aaa2014-06-10 18:27:59 +03002429 rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND);
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07002430 if (rc < 0)
2431 return rc;
2432
2433 txt_rsp_len = rc;
2434 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2435
2436 if (txt_rsp_len) {
2437 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2438 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
2439 void *txt_rsp_buf = cmd->buf_ptr;
2440
2441 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
2442 txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);
2443
2444 isert_cmd->pdu_buf_len = txt_rsp_len;
2445 tx_dsg->addr = isert_cmd->pdu_buf_dma;
2446 tx_dsg->length = txt_rsp_len;
2447 tx_dsg->lkey = isert_conn->conn_mr->lkey;
2448 isert_cmd->tx_desc.num_sge = 2;
2449 }
Nicholas Bellinger95b60f02013-11-05 13:16:12 -08002450 isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07002451
2452 pr_debug("Posting Text Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
2453
2454 return isert_post_response(isert_conn, isert_cmd);
2455}
2456
2457static int
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002458isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
2459 struct ib_sge *ib_sge, struct ib_send_wr *send_wr,
2460 u32 data_left, u32 offset)
2461{
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07002462 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002463 struct scatterlist *sg_start, *tmp_sg;
2464 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2465 u32 sg_off, page_off;
2466 int i = 0, sg_nents;
2467
2468 sg_off = offset / PAGE_SIZE;
2469 sg_start = &cmd->se_cmd.t_data_sg[sg_off];
2470 sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
2471 page_off = offset % PAGE_SIZE;
2472
2473 send_wr->sg_list = ib_sge;
2474 send_wr->num_sge = sg_nents;
2475 send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
2476 /*
2477	 * Map the TCM scatterlist entries into ib_sge address/length pairs.
2478 */
2479 for_each_sg(sg_start, tmp_sg, sg_nents, i) {
2480 pr_debug("ISER RDMA from SGL dma_addr: 0x%16llx dma_len: %u, page_off: %u\n",
2481 (unsigned long long)tmp_sg->dma_address,
2482 tmp_sg->length, page_off);
2483
2484 ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
2485 ib_sge->length = min_t(u32, data_left,
2486 ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
2487 ib_sge->lkey = isert_conn->conn_mr->lkey;
2488
Vu Pham90ecc6e2013-08-28 23:23:33 +03002489 pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n",
2490 ib_sge->addr, ib_sge->length, ib_sge->lkey);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002491 page_off = 0;
2492 data_left -= ib_sge->length;
2493 ib_sge++;
2494 pr_debug("Incrementing ib_sge pointer to %p\n", ib_sge);
2495 }
2496
2497 pr_debug("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
2498 send_wr->sg_list, send_wr->num_sge);
2499
2500 return sg_nents;
2501}
2502
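/*
 * Map-based (non-fastreg) RDMA setup: DMA-map the data buffer, then
 * build a chain of RDMA_WRITE/RDMA_READ work requests, each carrying
 * up to max_sge scatterlist entries and advancing through the remote
 * VA advertised by the initiator.
 */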
2503static int
Vu Pham90ecc6e2013-08-28 23:23:33 +03002504isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2505 struct isert_rdma_wr *wr)
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002506{
2507 struct se_cmd *se_cmd = &cmd->se_cmd;
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07002508 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002509 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002510 struct isert_data_buf *data = &wr->data;
Vu Pham90ecc6e2013-08-28 23:23:33 +03002511 struct ib_send_wr *send_wr;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002512 struct ib_sge *ib_sge;
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002513 u32 offset, data_len, data_left, rdma_write_max, va_offset = 0;
2514 int ret = 0, i, ib_sge_cnt;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002515
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002516 isert_cmd->tx_desc.isert_cmd = isert_cmd;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002517
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002518 offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
2519 ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
2520 se_cmd->t_data_nents, se_cmd->data_length,
2521 offset, wr->iser_ib_op, &wr->data);
2522 if (ret)
2523 return ret;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002524
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002525 data_left = data->len;
2526 offset = data->offset;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002527
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002528 ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002529 if (!ib_sge) {
Vu Pham90ecc6e2013-08-28 23:23:33 +03002530 pr_warn("Unable to allocate ib_sge\n");
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002531 ret = -ENOMEM;
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002532 goto unmap_cmd;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002533 }
Vu Pham90ecc6e2013-08-28 23:23:33 +03002534 wr->ib_sge = ib_sge;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002535
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002536 wr->send_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002537 wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
2538 GFP_KERNEL);
2539 if (!wr->send_wr) {
Vu Pham90ecc6e2013-08-28 23:23:33 +03002540 pr_debug("Unable to allocate wr->send_wr\n");
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002541 ret = -ENOMEM;
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002542 goto unmap_cmd;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002543 }
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002544
2545 wr->isert_cmd = isert_cmd;
2546 rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002547
2548 for (i = 0; i < wr->send_wr_num; i++) {
2549 send_wr = &isert_cmd->rdma_wr.send_wr[i];
2550 data_len = min(data_left, rdma_write_max);
2551
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002552 send_wr->send_flags = 0;
Vu Pham90ecc6e2013-08-28 23:23:33 +03002553 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
2554 send_wr->opcode = IB_WR_RDMA_WRITE;
2555 send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset;
2556 send_wr->wr.rdma.rkey = isert_cmd->read_stag;
2557 if (i + 1 == wr->send_wr_num)
2558 send_wr->next = &isert_cmd->tx_desc.send_wr;
2559 else
2560 send_wr->next = &wr->send_wr[i + 1];
2561 } else {
2562 send_wr->opcode = IB_WR_RDMA_READ;
2563 send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset;
2564 send_wr->wr.rdma.rkey = isert_cmd->write_stag;
2565 if (i + 1 == wr->send_wr_num)
2566 send_wr->send_flags = IB_SEND_SIGNALED;
2567 else
2568 send_wr->next = &wr->send_wr[i + 1];
2569 }
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002570
2571 ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
2572 send_wr, data_len, offset);
2573 ib_sge += ib_sge_cnt;
2574
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002575 offset += data_len;
Vu Pham90ecc6e2013-08-28 23:23:33 +03002576 va_offset += data_len;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002577 data_left -= data_len;
2578 }
Vu Pham90ecc6e2013-08-28 23:23:33 +03002579
2580 return 0;
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002581unmap_cmd:
2582 isert_unmap_data_buf(isert_conn, data);
2583
Vu Pham90ecc6e2013-08-28 23:23:33 +03002584 return ret;
2585}
2586
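/*
 * Flatten a DMA-mapped scatterlist into the page array consumed by a
 * fast registration WR: physically contiguous entries are merged into
 * chunks, then each chunk is emitted one PAGE_SIZE page at a time.
 */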
2587static int
Vu Pham59464ef2013-08-28 23:23:35 +03002588isert_map_fr_pagelist(struct ib_device *ib_dev,
2589 struct scatterlist *sg_start, int sg_nents, u64 *fr_pl)
2590{
2591 u64 start_addr, end_addr, page, chunk_start = 0;
2592 struct scatterlist *tmp_sg;
2593 int i = 0, new_chunk, last_ent, n_pages;
2594
2595 n_pages = 0;
2596 new_chunk = 1;
2597 last_ent = sg_nents - 1;
2598 for_each_sg(sg_start, tmp_sg, sg_nents, i) {
2599 start_addr = ib_sg_dma_address(ib_dev, tmp_sg);
2600 if (new_chunk)
2601 chunk_start = start_addr;
2602 end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg);
2603
2604 pr_debug("SGL[%d] dma_addr: 0x%16llx len: %u\n",
2605 i, (unsigned long long)tmp_sg->dma_address,
2606 tmp_sg->length);
2607
2608 if ((end_addr & ~PAGE_MASK) && i < last_ent) {
2609 new_chunk = 0;
2610 continue;
2611 }
2612 new_chunk = 1;
2613
2614 page = chunk_start & PAGE_MASK;
2615 do {
2616 fr_pl[n_pages++] = page;
2617 pr_debug("Mapped page_list[%d] page_addr: 0x%16llx\n",
2618 n_pages - 1, page);
2619 page += PAGE_SIZE;
2620 } while (page < end_addr);
2621 }
2622
2623 return n_pages;
2624}
2625
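/*
 * Register a mapped buffer through a fast registration MR.  A
 * single-entry mapping takes the cheap path via the local DMA lkey;
 * otherwise the page list is built, a stale key is invalidated and
 * bumped, and an IB_WR_FAST_REG_MR is posted ahead of the transfer.
 */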
2626static int
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002627isert_fast_reg_mr(struct isert_conn *isert_conn,
2628 struct fast_reg_descriptor *fr_desc,
2629 struct isert_data_buf *mem,
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002630 enum isert_indicator ind,
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002631 struct ib_sge *sge)
Vu Pham59464ef2013-08-28 23:23:35 +03002632{
Vu Pham59464ef2013-08-28 23:23:35 +03002633 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002634 struct ib_mr *mr;
2635 struct ib_fast_reg_page_list *frpl;
Vu Pham59464ef2013-08-28 23:23:35 +03002636 struct ib_send_wr fr_wr, inv_wr;
2637 struct ib_send_wr *bad_wr, *wr = NULL;
Sagi Grimberg9bd626e2014-01-09 18:40:54 +02002638 int ret, pagelist_len;
2639 u32 page_off;
Vu Pham59464ef2013-08-28 23:23:35 +03002640 u8 key;
Vu Pham59464ef2013-08-28 23:23:35 +03002641
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002642 if (mem->dma_nents == 1) {
2643 sge->lkey = isert_conn->conn_mr->lkey;
2644 sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
2645 sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002646 pr_debug("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n",
2647 __func__, __LINE__, sge->addr, sge->length,
2648 sge->lkey);
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002649 return 0;
2650 }
2651
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002652 if (ind == ISERT_DATA_KEY_VALID) {
2653 /* Registering data buffer */
2654 mr = fr_desc->data_mr;
2655 frpl = fr_desc->data_frpl;
2656 } else {
2657 /* Registering protection buffer */
2658 mr = fr_desc->pi_ctx->prot_mr;
2659 frpl = fr_desc->pi_ctx->prot_frpl;
2660 }
2661
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002662 page_off = mem->offset % PAGE_SIZE;
Vu Pham59464ef2013-08-28 23:23:35 +03002663
Sagi Grimberg9bd626e2014-01-09 18:40:54 +02002664 pr_debug("Use fr_desc %p sg_nents %d offset %u\n",
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002665 fr_desc, mem->nents, mem->offset);
Vu Pham59464ef2013-08-28 23:23:35 +03002666
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002667 pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents,
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002668 &frpl->page_list[0]);
Vu Pham59464ef2013-08-28 23:23:35 +03002669
	if (!(fr_desc->ind & ind)) {
		memset(&inv_wr, 0, sizeof(inv_wr));
		inv_wr.wr_id = ISER_FASTREG_LI_WRID;
		inv_wr.opcode = IB_WR_LOCAL_INV;
		inv_wr.ex.invalidate_rkey = mr->rkey;
		wr = &inv_wr;
		/* Bump the key */
		key = (u8)(mr->rkey & 0x000000FF);
		ib_update_fast_reg_key(mr, ++key);
	}

	/* Prepare FASTREG WR */
	memset(&fr_wr, 0, sizeof(fr_wr));
	fr_wr.wr_id = ISER_FASTREG_LI_WRID;
	fr_wr.opcode = IB_WR_FAST_REG_MR;
	fr_wr.wr.fast_reg.iova_start = frpl->page_list[0] + page_off;
	fr_wr.wr.fast_reg.page_list = frpl;
	fr_wr.wr.fast_reg.page_list_len = pagelist_len;
	fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
	fr_wr.wr.fast_reg.length = mem->len;
	fr_wr.wr.fast_reg.rkey = mr->rkey;
	fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;

	if (!wr)
		wr = &fr_wr;
	else
		wr->next = &fr_wr;

	ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
	if (ret) {
		pr_err("fast registration failed, ret:%d\n", ret);
		return ret;
	}
	fr_desc->ind &= ~ind;

	sge->lkey = mr->lkey;
	sge->addr = frpl->page_list[0] + page_off;
	sge->length = mem->len;

	pr_debug("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n",
		 __func__, __LINE__, sge->addr, sge->length,
		 sge->lkey);

	return ret;
}

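/*
 * Fill one T10-DIF signature domain (wire or memory) from the se_cmd
 * protection attributes: CRC block guard, the backend block size as
 * the protection interval, and the command's reference tag seed.
 * Type 1/2 formats increment the reference tag per block, so
 * ref_remap is enabled for those.
 */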
static inline void
isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs,
		     struct ib_sig_domain *domain)
{
	domain->sig_type = IB_SIG_TYPE_T10_DIF;
	domain->sig.dif.bg_type = IB_T10DIF_CRC;
	domain->sig.dif.pi_interval = se_cmd->se_dev->dev_attrib.block_size;
	domain->sig.dif.ref_tag = se_cmd->reftag_seed;
	/*
	 * At the moment we hard code these values, but if in the
	 * future the target core wants to control them, we will take
	 * them from se_cmd.
	 */
	domain->sig.dif.apptag_check_mask = 0xffff;
	domain->sig.dif.app_escape = true;
	domain->sig.dif.ref_escape = true;
	if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT ||
	    se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)
		domain->sig.dif.ref_remap = true;
}

static int
isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
{
	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_STRIP:
		sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
		break;
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_STRIP:
		sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
		break;
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
		break;
	default:
		pr_err("Unsupported PI operation %d\n", se_cmd->prot_op);
		return -EINVAL;
	}

	return 0;
}

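/*
 * Translate the target core DIF check flags into the signature MR
 * per-byte check mask over the 8-byte DIF tuple (guard bytes 0xc0,
 * app-tag bytes 0x30, ref-tag bytes 0x0f). Note that both the app-tag
 * and ref-tag bytes are keyed off TARGET_DIF_CHECK_REFTAG here;
 * TARGET_DIF_CHECK_APPTAG is not consulted.
 */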
static inline u8
isert_set_prot_checks(u8 prot_checks)
{
	return (prot_checks & TARGET_DIF_CHECK_GUARD ? 0xc0 : 0) |
	       (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
	       (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
}

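/*
 * Register a signature MR over the already-registered data (and, when
 * present, protection) buffers with an IB_WR_REG_SIG_MR work request.
 * The HCA then generates, strips, or verifies T10-PI guards according
 * to sig_attrs as data flows through the resulting sig_sge. As in the
 * plain fast registration path, a stale signature key is first
 * invalidated and bumped.
 */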
static int
isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd,
		 struct fast_reg_descriptor *fr_desc,
		 struct ib_sge *data_sge, struct ib_sge *prot_sge,
		 struct ib_sge *sig_sge)
{
	struct ib_send_wr sig_wr, inv_wr;
	struct ib_send_wr *bad_wr, *wr = NULL;
	struct pi_context *pi_ctx = fr_desc->pi_ctx;
	struct ib_sig_attrs sig_attrs;
	int ret;
	u32 key;

	memset(&sig_attrs, 0, sizeof(sig_attrs));
	ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
	if (ret)
		goto err;

	sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks);

	if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) {
		memset(&inv_wr, 0, sizeof(inv_wr));
		inv_wr.opcode = IB_WR_LOCAL_INV;
		inv_wr.wr_id = ISER_FASTREG_LI_WRID;
		inv_wr.ex.invalidate_rkey = pi_ctx->sig_mr->rkey;
		wr = &inv_wr;
		/* Bump the key */
		key = (u8)(pi_ctx->sig_mr->rkey & 0x000000FF);
		ib_update_fast_reg_key(pi_ctx->sig_mr, ++key);
	}

	memset(&sig_wr, 0, sizeof(sig_wr));
	sig_wr.opcode = IB_WR_REG_SIG_MR;
	sig_wr.wr_id = ISER_FASTREG_LI_WRID;
	sig_wr.sg_list = data_sge;
	sig_wr.num_sge = 1;
	sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE;
	sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
	sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
	if (se_cmd->t_prot_sg)
		sig_wr.wr.sig_handover.prot = prot_sge;

	if (!wr)
		wr = &sig_wr;
	else
		wr->next = &sig_wr;

	ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
	if (ret) {
		pr_err("fast registration failed, ret:%d\n", ret);
		goto err;
	}
	fr_desc->ind &= ~ISERT_SIG_KEY_VALID;

	sig_sge->lkey = pi_ctx->sig_mr->lkey;
	sig_sge->addr = 0;
	sig_sge->length = se_cmd->data_length;
	if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
	    se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
		/*
		 * We have protection guards on the wire
		 * so we need to set a larger transfer
		 */
		sig_sge->length += se_cmd->prot_length;

	pr_debug("sig_sge: addr: 0x%llx length: %u lkey: %x\n",
		 sig_sge->addr, sig_sge->length,
		 sig_sge->lkey);
err:
	return ret;
}

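/*
 * Fast registration RDMA setup path: map the command's data
 * scatterlist, and when more than one DMA entry or T10-PI is
 * involved, take a fast_reg_descriptor from the connection pool and
 * register the buffers (plus the signature MR for protected commands)
 * before building the single RDMA_WRITE/RDMA_READ work request.
 */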
static int
isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_sge data_sge;
	struct ib_send_wr *send_wr;
	struct fast_reg_descriptor *fr_desc = NULL;
	u32 offset;
	int ret = 0;
	unsigned long flags;

	isert_cmd->tx_desc.isert_cmd = isert_cmd;

	offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
	ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
				 se_cmd->t_data_nents, se_cmd->data_length,
				 offset, wr->iser_ib_op, &wr->data);
	if (ret)
		return ret;

	if (wr->data.dma_nents != 1 ||
	    se_cmd->prot_op != TARGET_PROT_NORMAL) {
		spin_lock_irqsave(&isert_conn->conn_lock, flags);
		fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
					   struct fast_reg_descriptor, list);
		list_del(&fr_desc->list);
		spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
		wr->fr_desc = fr_desc;
	}

	ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data,
				ISERT_DATA_KEY_VALID, &data_sge);
	if (ret)
		goto unmap_cmd;

	if (se_cmd->prot_op != TARGET_PROT_NORMAL) {
		struct ib_sge prot_sge, sig_sge;

		if (se_cmd->t_prot_sg) {
			ret = isert_map_data_buf(isert_conn, isert_cmd,
						 se_cmd->t_prot_sg,
						 se_cmd->t_prot_nents,
						 se_cmd->prot_length,
						 0, wr->iser_ib_op, &wr->prot);
			if (ret)
				goto unmap_cmd;

			ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->prot,
						ISERT_PROT_KEY_VALID, &prot_sge);
			if (ret)
				goto unmap_prot_cmd;
		}

		ret = isert_reg_sig_mr(isert_conn, se_cmd, fr_desc,
				       &data_sge, &prot_sge, &sig_sge);
		if (ret)
			goto unmap_prot_cmd;

		fr_desc->ind |= ISERT_PROTECTED;
		memcpy(&wr->s_ib_sge, &sig_sge, sizeof(sig_sge));
	} else
		memcpy(&wr->s_ib_sge, &data_sge, sizeof(data_sge));

	wr->ib_sge = &wr->s_ib_sge;
	wr->send_wr_num = 1;
	memset(&wr->s_send_wr, 0, sizeof(*send_wr));
	wr->send_wr = &wr->s_send_wr;
	wr->isert_cmd = isert_cmd;

	send_wr = &isert_cmd->rdma_wr.s_send_wr;
	send_wr->sg_list = &wr->s_ib_sge;
	send_wr->num_sge = 1;
	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
		send_wr->opcode = IB_WR_RDMA_WRITE;
		send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
		send_wr->wr.rdma.rkey = isert_cmd->read_stag;
		send_wr->send_flags = se_cmd->prot_op == TARGET_PROT_NORMAL ?
				      0 : IB_SEND_SIGNALED;
	} else {
		send_wr->opcode = IB_WR_RDMA_READ;
		send_wr->wr.rdma.remote_addr = isert_cmd->write_va;
		send_wr->wr.rdma.rkey = isert_cmd->write_stag;
		send_wr->send_flags = IB_SEND_SIGNALED;
	}

	return 0;
unmap_prot_cmd:
	if (se_cmd->t_prot_sg)
		isert_unmap_data_buf(isert_conn, &wr->prot);
unmap_cmd:
	if (fr_desc) {
		spin_lock_irqsave(&isert_conn->conn_lock, flags);
		list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
		spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
	}
	isert_unmap_data_buf(isert_conn, &wr->data);

	return ret;
}

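/*
 * Queue an RDMA_WRITE of READ payload to the initiator. For
 * unprotected commands the iSCSI response PDU is chained behind the
 * RDMA_WRITE so both post with a single ib_post_send(); for protected
 * commands the response is sent separately once the signature status
 * has been verified at completion time.
 */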
static int
isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct isert_device *device = isert_conn->conn_device;
	struct ib_send_wr *wr_failed;
	int rc;

	pr_debug("Cmd: %p RDMA_WRITE data_length: %u\n",
		 isert_cmd, se_cmd->data_length);
	wr->iser_ib_op = ISER_IB_RDMA_WRITE;
	rc = device->reg_rdma_mem(conn, cmd, wr);
	if (rc) {
		pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
		return rc;
	}

	if (se_cmd->prot_op == TARGET_PROT_NORMAL) {
		/*
		 * Build isert_conn->tx_desc for iSCSI response PDU and attach
		 */
		isert_create_send_desc(isert_conn, isert_cmd,
				       &isert_cmd->tx_desc);
		iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
				     &isert_cmd->tx_desc.iscsi_header);
		isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
		isert_init_send_wr(isert_conn, isert_cmd,
				   &isert_cmd->tx_desc.send_wr, false);
		isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr;
		wr->send_wr_num += 1;
	}

	atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);

	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
	if (rc) {
		pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
	}

	if (se_cmd->prot_op == TARGET_PROT_NORMAL)
		pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
			 "READ\n", isert_cmd);
	else
		pr_debug("Cmd: %p posted RDMA_WRITE for iSER Data READ\n",
			 isert_cmd);

	return 1;
}

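/*
 * Queue an RDMA_READ to pull WRITE payload from the initiator,
 * restarting at write_data_done when invoked for recovery. Completion
 * processing hands the received data back to the target core.
 */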
static int
isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct isert_device *device = isert_conn->conn_device;
	struct ib_send_wr *wr_failed;
	int rc;

	pr_debug("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
		 isert_cmd, se_cmd->data_length, cmd->write_data_done);
	wr->iser_ib_op = ISER_IB_RDMA_READ;
	rc = device->reg_rdma_mem(conn, cmd, wr);
	if (rc) {
		pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
		return rc;
	}

	atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);

	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
	if (rc) {
		pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
	}
	pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
		 isert_cmd);

	return 0;
}

static int
isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	int ret;

	switch (state) {
	case ISTATE_SEND_NOPIN_WANT_RESPONSE:
		ret = isert_put_nopin(cmd, conn, false);
		break;
	default:
		pr_err("Unknown immediate state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int
isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	int ret;

	switch (state) {
	case ISTATE_SEND_LOGOUTRSP:
		ret = isert_put_logout_rsp(cmd, conn);
		if (!ret) {
			pr_debug("Returning iSER Logout -EAGAIN\n");
			ret = -EAGAIN;
		}
		break;
	case ISTATE_SEND_NOPIN:
		ret = isert_put_nopin(cmd, conn, true);
		break;
	case ISTATE_SEND_TASKMGTRSP:
		ret = isert_put_tm_rsp(cmd, conn);
		break;
	case ISTATE_SEND_REJECT:
		ret = isert_put_reject(cmd, conn);
		break;
	case ISTATE_SEND_TEXTRSP:
		ret = isert_put_text_rsp(cmd, conn);
		break;
	case ISTATE_SEND_STATUS:
		/*
		 * Special case for sending non GOOD SCSI status from TX thread
		 * context during pre se_cmd execution failure.
		 */
		ret = isert_put_response(conn, cmd);
		break;
	default:
		pr_err("Unknown response state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}

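/*
 * Create the RDMA CM listener for a network portal: allocate an RC
 * rdma_cm_id with isert_cma_handler as the event callback, bind it to
 * the portal address, and start listening with the iSER backlog.
 */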
struct rdma_cm_id *
isert_setup_id(struct isert_np *isert_np)
{
	struct iscsi_np *np = isert_np->np;
	struct rdma_cm_id *id;
	struct sockaddr *sa;
	int ret;

	sa = (struct sockaddr *)&np->np_sockaddr;
	pr_debug("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);

	id = rdma_create_id(isert_cma_handler, isert_np,
			    RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id)) {
		pr_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
		ret = PTR_ERR(id);
		goto out;
	}
	pr_debug("id %p context %p\n", id, id->context);

	ret = rdma_bind_addr(id, sa);
	if (ret) {
		pr_err("rdma_bind_addr() failed: %d\n", ret);
		goto out_id;
	}

	ret = rdma_listen(id, ISERT_RDMA_LISTEN_BACKLOG);
	if (ret) {
		pr_err("rdma_listen() failed: %d\n", ret);
		goto out_id;
	}

	return id;
out_id:
	rdma_destroy_id(id);
out:
	return ERR_PTR(ret);
}

static int
isert_setup_np(struct iscsi_np *np,
	       struct __kernel_sockaddr_storage *ksockaddr)
{
	struct isert_np *isert_np;
	struct rdma_cm_id *isert_lid;
	int ret;

	isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
	if (!isert_np) {
		pr_err("Unable to allocate struct isert_np\n");
		return -ENOMEM;
	}
	sema_init(&isert_np->np_sem, 0);
	mutex_init(&isert_np->np_accept_mutex);
	INIT_LIST_HEAD(&isert_np->np_accept_list);
	init_completion(&isert_np->np_login_comp);
	isert_np->np = np;

	/*
	 * Setup np->np_sockaddr from the sockaddr set up in the
	 * iscsi_target_configfs.c code.
	 */
	memcpy(&np->np_sockaddr, ksockaddr,
	       sizeof(struct __kernel_sockaddr_storage));

	isert_lid = isert_setup_id(isert_np);
	if (IS_ERR(isert_lid)) {
		ret = PTR_ERR(isert_lid);
		goto out;
	}

	isert_np->np_cm_id = isert_lid;
	np->np_context = isert_np;

	return 0;

out:
	kfree(isert_np);

	return ret;
}

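/*
 * Accept the initiator's RDMA CM connection request, advertising the
 * initiator_depth negotiated at connect time and allowing up to seven
 * transport retries and RNR retries.
 */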
static int
isert_rdma_accept(struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
	struct rdma_conn_param cp;
	int ret;

	memset(&cp, 0, sizeof(struct rdma_conn_param));
	cp.initiator_depth = isert_conn->initiator_depth;
	cp.retry_count = 7;
	cp.rnr_retry_count = 7;

	pr_debug("Before rdma_accept >>>>>>>>>>>>>>>>>>>>.\n");

	ret = rdma_accept(cm_id, &cp);
	if (ret) {
		pr_err("rdma_accept() failed with: %d\n", ret);
		return ret;
	}

	pr_debug("After rdma_accept >>>>>>>>>>>>>>>>>>>>>.\n");

	return 0;
}

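/*
 * Login receive path: block until the receive path signals that a
 * login request PDU has arrived (login_req_comp). Only the first PDU
 * is processed here via isert_rx_login_req(); for later PDUs the
 * receive path schedules conn->login_work itself, turning this
 * callback into a NOP.
 */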
static int
isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
{
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	int ret;

	pr_info("before login_req comp conn: %p\n", isert_conn);
	ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
	if (ret) {
		pr_err("isert_conn %p interrupted before got login req\n",
		       isert_conn);
		return ret;
	}
	reinit_completion(&isert_conn->login_req_comp);

	/*
	 * For login requests after the first PDU, isert_rx_login_req() will
	 * kick schedule_delayed_work(&conn->login_work) as the packet is
	 * received, which turns this callback from iscsi_target_do_login_rx()
	 * into a NOP.
	 */
	if (!login->first_request)
		return 0;

	isert_rx_login_req(isert_conn);

	pr_info("before conn_login_comp conn: %p\n", conn);
	ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
	if (ret)
		return ret;

	pr_info("processing login->req: %p\n", login->req);

	return 0;
}

static void
isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
		    struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
	struct rdma_route *cm_route = &cm_id->route;
	struct sockaddr_in *sock_in;
	struct sockaddr_in6 *sock_in6;

	conn->login_family = np->np_sockaddr.ss_family;

	if (np->np_sockaddr.ss_family == AF_INET6) {
		sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.dst_addr;
		snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
			 &sock_in6->sin6_addr.in6_u);
		conn->login_port = ntohs(sock_in6->sin6_port);

		sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.src_addr;
		snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
			 &sock_in6->sin6_addr.in6_u);
		conn->local_port = ntohs(sock_in6->sin6_port);
	} else {
		sock_in = (struct sockaddr_in *)&cm_route->addr.dst_addr;
		sprintf(conn->login_ip, "%pI4",
			&sock_in->sin_addr.s_addr);
		conn->login_port = ntohs(sock_in->sin_port);

		sock_in = (struct sockaddr_in *)&cm_route->addr.src_addr;
		sprintf(conn->local_ip, "%pI4",
			&sock_in->sin_addr.s_addr);
		conn->local_port = ntohs(sock_in->sin_port);
	}
}

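/*
 * iscsit accept callback: sleep on np_sem until a connect request
 * queues a pending isert_conn on np_accept_list, bail out if the
 * portal thread is resetting, then bind the first queued connection
 * to the new iscsi_conn. The max_accept counter gives up after
 * several consecutive wakeups that find the accept list empty.
 */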
static int
isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
	struct isert_np *isert_np = (struct isert_np *)np->np_context;
	struct isert_conn *isert_conn;
	int max_accept = 0, ret;

accept_wait:
	ret = down_interruptible(&isert_np->np_sem);
	if (ret || max_accept > 5)
		return -ENODEV;

	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
		spin_unlock_bh(&np->np_thread_lock);
		pr_debug("np_thread_state %d for isert_accept_np\n",
			 np->np_thread_state);
		/*
		 * No point in stalling here when np_thread
		 * is in state RESET/SHUTDOWN/EXIT - bail
		 */
		return -ENODEV;
	}
	spin_unlock_bh(&np->np_thread_lock);

	mutex_lock(&isert_np->np_accept_mutex);
	if (list_empty(&isert_np->np_accept_list)) {
		mutex_unlock(&isert_np->np_accept_mutex);
		max_accept++;
		goto accept_wait;
	}
	isert_conn = list_first_entry(&isert_np->np_accept_list,
				      struct isert_conn, conn_accept_node);
	list_del_init(&isert_conn->conn_accept_node);
	mutex_unlock(&isert_np->np_accept_mutex);

	conn->context = isert_conn;
	isert_conn->conn = conn;
	max_accept = 0;

	isert_set_conn_info(np, conn, isert_conn);

	pr_debug("Processing isert_conn: %p\n", isert_conn);

	return 0;
}

static void
isert_free_np(struct iscsi_np *np)
{
	struct isert_np *isert_np = (struct isert_np *)np->np_context;

	if (isert_np->np_cm_id)
		rdma_destroy_id(isert_np->np_cm_id);

	np->np_context = NULL;
	kfree(isert_np);
}

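/*
 * Connection shutdown ordering: terminate the CM connection while
 * holding conn_mutex, wait for flush errors to drain
 * (conn_wait_comp_err) and for the disconnect to complete
 * (conn_wait), then mark the connection DOWN and drop this path's
 * reference. Connections still in ISER_CONN_INIT never reached full
 * feature phase and return immediately.
 */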
static void isert_wait_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	pr_debug("isert_wait_conn: Starting\n");

	mutex_lock(&isert_conn->conn_mutex);
	/*
	 * Only wait for conn_wait_comp_err if the isert_conn made it
	 * into full feature phase.
	 */
	if (isert_conn->state == ISER_CONN_INIT) {
		mutex_unlock(&isert_conn->conn_mutex);
		return;
	}
	isert_conn_terminate(isert_conn);
	mutex_unlock(&isert_conn->conn_mutex);

	wait_for_completion(&isert_conn->conn_wait_comp_err);
	wait_for_completion(&isert_conn->conn_wait);

	mutex_lock(&isert_conn->conn_mutex);
	isert_conn->state = ISER_CONN_DOWN;
	mutex_unlock(&isert_conn->conn_mutex);

	pr_info("Destroying conn %p\n", isert_conn);
	isert_put_conn(isert_conn);
}

static void isert_free_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	isert_put_conn(isert_conn);
}

static struct iscsit_transport iser_target_transport = {
	.name			= "IB/iSER",
	.transport_type		= ISCSI_INFINIBAND,
	.priv_size		= sizeof(struct isert_cmd),
	.owner			= THIS_MODULE,
	.iscsit_setup_np	= isert_setup_np,
	.iscsit_accept_np	= isert_accept_np,
	.iscsit_free_np		= isert_free_np,
	.iscsit_wait_conn	= isert_wait_conn,
	.iscsit_free_conn	= isert_free_conn,
	.iscsit_get_login_rx	= isert_get_login_rx,
	.iscsit_put_login_tx	= isert_put_login_tx,
	.iscsit_immediate_queue	= isert_immediate_queue,
	.iscsit_response_queue	= isert_response_queue,
	.iscsit_get_dataout	= isert_get_dataout,
	.iscsit_queue_data_in	= isert_put_datain,
	.iscsit_queue_status	= isert_put_response,
	.iscsit_aborted_task	= isert_aborted_task,
	.iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
};

static int __init isert_init(void)
{
	int ret;

	isert_rx_wq = alloc_workqueue("isert_rx_wq", 0, 0);
	if (!isert_rx_wq) {
		pr_err("Unable to allocate isert_rx_wq\n");
		return -ENOMEM;
	}

	isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
	if (!isert_comp_wq) {
		pr_err("Unable to allocate isert_comp_wq\n");
		ret = -ENOMEM;
		goto destroy_rx_wq;
	}

	iscsit_register_transport(&iser_target_transport);
	pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n");
	return 0;

destroy_rx_wq:
	destroy_workqueue(isert_rx_wq);
	return ret;
}

static void __exit isert_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(isert_comp_wq);
	destroy_workqueue(isert_rx_wq);
	iscsit_unregister_transport(&iser_target_transport);
	pr_debug("iSER_TARGET[0] - Released iser_target_transport\n");
}

MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
MODULE_VERSION("0.1");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(isert_init);
module_exit(isert_exit);