blob: b0e58f196d9098d4899fab986e26ad1cf975bd46 [file] [log] [blame]
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001/*******************************************************************************
2 * This file contains iSCSI extentions for RDMA (iSER) Verbs
3 *
Nicholas Bellinger4c762512013-09-05 15:29:12 -07004 * (c) Copyright 2013 Datera, Inc.
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08005 *
6 * Nicholas A. Bellinger <nab@linux-iscsi.org>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 ****************************************************************************/
18
19#include <linux/string.h>
20#include <linux/module.h>
21#include <linux/scatterlist.h>
22#include <linux/socket.h>
23#include <linux/in.h>
24#include <linux/in6.h>
Nicholas Bellinger95b60f02013-11-05 13:16:12 -080025#include <linux/llist.h>
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -080026#include <rdma/ib_verbs.h>
27#include <rdma/rdma_cm.h>
28#include <target/target_core_base.h>
29#include <target/target_core_fabric.h>
30#include <target/iscsi/iscsi_transport.h>
Sagi Grimberg531b7bf2014-04-29 13:13:45 +030031#include <linux/semaphore.h>
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -080032
33#include "isert_proto.h"
34#include "ib_isert.h"
35
36#define ISERT_MAX_CONN 8
37#define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
38#define ISER_MAX_TX_CQ_LEN (ISERT_QP_MAX_REQ_DTOS * ISERT_MAX_CONN)
39
40static DEFINE_MUTEX(device_list_mutex);
41static LIST_HEAD(device_list);
42static struct workqueue_struct *isert_rx_wq;
43static struct workqueue_struct *isert_comp_wq;
Sagi Grimbergb02efbf2014-12-02 16:57:29 +020044static struct workqueue_struct *isert_release_wq;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -080045
46static void
Vu Phamd40945d2013-08-28 23:23:34 +030047isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
48static int
49isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
50 struct isert_rdma_wr *wr);
Vu Pham59464ef2013-08-28 23:23:35 +030051static void
Sagi Grimberga3a5a822014-01-09 18:40:50 +020052isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
Vu Pham59464ef2013-08-28 23:23:35 +030053static int
Sagi Grimberga3a5a822014-01-09 18:40:50 +020054isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
55 struct isert_rdma_wr *wr);
Sagi Grimbergf93f3a72014-02-19 17:50:24 +020056static int
57isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
Sagi Grimberg2371e5d2014-12-02 16:57:21 +020058static int
59isert_rdma_post_recvl(struct isert_conn *isert_conn);
60static int
61isert_rdma_accept(struct isert_conn *isert_conn);
Sagi Grimbergca6c1d82014-12-02 16:57:27 +020062struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
Vu Phamd40945d2013-08-28 23:23:34 +030063
64static void
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -080065isert_qp_event_callback(struct ib_event *e, void *context)
66{
67 struct isert_conn *isert_conn = (struct isert_conn *)context;
68
69 pr_err("isert_qp_event_callback event: %d\n", e->event);
70 switch (e->event) {
71 case IB_EVENT_COMM_EST:
72 rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST);
73 break;
74 case IB_EVENT_QP_LAST_WQE_REACHED:
75 pr_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED:\n");
76 break;
77 default:
78 break;
79 }
80}
81
82static int
83isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
84{
85 int ret;
86
87 ret = ib_query_device(ib_dev, devattr);
88 if (ret) {
89 pr_err("ib_query_device() failed: %d\n", ret);
90 return ret;
91 }
92 pr_debug("devattr->max_sge: %d\n", devattr->max_sge);
93 pr_debug("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);
94
95 return 0;
96}
97
98static int
Sagi Grimberg570db172014-12-02 16:57:31 +020099isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800100{
101 struct isert_device *device = isert_conn->conn_device;
102 struct ib_qp_init_attr attr;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800103 int ret, index, min_index = 0;
104
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800105 mutex_lock(&device_list_mutex);
106 for (index = 0; index < device->cqs_used; index++)
107 if (device->cq_active_qps[index] <
108 device->cq_active_qps[min_index])
109 min_index = index;
110 device->cq_active_qps[min_index]++;
111 pr_debug("isert_conn_setup_qp: Using min_index: %d\n", min_index);
112 mutex_unlock(&device_list_mutex);
113
114 memset(&attr, 0, sizeof(struct ib_qp_init_attr));
115 attr.event_handler = isert_qp_event_callback;
116 attr.qp_context = isert_conn;
117 attr.send_cq = device->dev_tx_cq[min_index];
118 attr.recv_cq = device->dev_rx_cq[min_index];
119 attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
120 attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
121 /*
122 * FIXME: Use devattr.max_sge - 2 for max_send_sge as
Or Gerlitzf57915c2014-10-22 14:55:49 -0700123 * work-around for RDMA_READs with ConnectX-2.
124 *
125 * Also, still make sure to have at least two SGEs for
126 * outgoing control PDU responses.
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800127 */
Or Gerlitzf57915c2014-10-22 14:55:49 -0700128 attr.cap.max_send_sge = max(2, device->dev_attr.max_sge - 2);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800129 isert_conn->max_sge = attr.cap.max_send_sge;
130
131 attr.cap.max_recv_sge = 1;
132 attr.sq_sig_type = IB_SIGNAL_REQ_WR;
133 attr.qp_type = IB_QPT_RC;
Sagi Grimberg570db172014-12-02 16:57:31 +0200134 if (device->pi_capable)
Sagi Grimbergd3e125d2014-02-19 17:50:23 +0200135 attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800136
137 pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
138 cma_id->device);
139 pr_debug("isert_conn_setup_qp conn_pd->device: %p\n",
140 isert_conn->conn_pd->device);
141
142 ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
143 if (ret) {
144 pr_err("rdma_create_qp failed for cma_id %d\n", ret);
Sagi Grimberg19e20902014-12-02 16:57:26 +0200145 goto err;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800146 }
147 isert_conn->conn_qp = cma_id->qp;
148 pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");
149
150 return 0;
Sagi Grimberg19e20902014-12-02 16:57:26 +0200151err:
152 mutex_lock(&device_list_mutex);
153 device->cq_active_qps[min_index]--;
154 mutex_unlock(&device_list_mutex);
155
156 return ret;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800157}
158
159static void
160isert_cq_event_callback(struct ib_event *e, void *context)
161{
162 pr_debug("isert_cq_event_callback event: %d\n", e->event);
163}
164
165static int
166isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
167{
168 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
169 struct iser_rx_desc *rx_desc;
170 struct ib_sge *rx_sg;
171 u64 dma_addr;
172 int i, j;
173
174 isert_conn->conn_rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
175 sizeof(struct iser_rx_desc), GFP_KERNEL);
176 if (!isert_conn->conn_rx_descs)
177 goto fail;
178
179 rx_desc = isert_conn->conn_rx_descs;
180
181 for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
182 dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
183 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
184 if (ib_dma_mapping_error(ib_dev, dma_addr))
185 goto dma_map_fail;
186
187 rx_desc->dma_addr = dma_addr;
188
189 rx_sg = &rx_desc->rx_sg;
190 rx_sg->addr = rx_desc->dma_addr;
191 rx_sg->length = ISER_RX_PAYLOAD_SIZE;
192 rx_sg->lkey = isert_conn->conn_mr->lkey;
193 }
194
195 isert_conn->conn_rx_desc_head = 0;
196 return 0;
197
198dma_map_fail:
199 rx_desc = isert_conn->conn_rx_descs;
200 for (j = 0; j < i; j++, rx_desc++) {
201 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
202 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
203 }
204 kfree(isert_conn->conn_rx_descs);
205 isert_conn->conn_rx_descs = NULL;
206fail:
207 return -ENOMEM;
208}
209
210static void
211isert_free_rx_descriptors(struct isert_conn *isert_conn)
212{
213 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
214 struct iser_rx_desc *rx_desc;
215 int i;
216
217 if (!isert_conn->conn_rx_descs)
218 return;
219
220 rx_desc = isert_conn->conn_rx_descs;
221 for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
222 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
223 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
224 }
225
226 kfree(isert_conn->conn_rx_descs);
227 isert_conn->conn_rx_descs = NULL;
228}
229
Nicholas Bellinger2853c2b2013-12-11 16:20:13 -0800230static void isert_cq_tx_work(struct work_struct *);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800231static void isert_cq_tx_callback(struct ib_cq *, void *);
Nicholas Bellinger2853c2b2013-12-11 16:20:13 -0800232static void isert_cq_rx_work(struct work_struct *);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800233static void isert_cq_rx_callback(struct ib_cq *, void *);
234
235static int
236isert_create_device_ib_res(struct isert_device *device)
237{
238 struct ib_device *ib_dev = device->ib_device;
239 struct isert_cq_desc *cq_desc;
Vu Pham59464ef2013-08-28 23:23:35 +0300240 struct ib_device_attr *dev_attr;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800241 int ret = 0, i, j;
Chris Mooreb1a5ad02014-11-04 16:28:29 +0000242 int max_rx_cqe, max_tx_cqe;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800243
Vu Pham59464ef2013-08-28 23:23:35 +0300244 dev_attr = &device->dev_attr;
245 ret = isert_query_device(ib_dev, dev_attr);
246 if (ret)
247 return ret;
248
Chris Mooreb1a5ad02014-11-04 16:28:29 +0000249 max_rx_cqe = min(ISER_MAX_RX_CQ_LEN, dev_attr->max_cqe);
250 max_tx_cqe = min(ISER_MAX_TX_CQ_LEN, dev_attr->max_cqe);
251
Vu Phamd40945d2013-08-28 23:23:34 +0300252 /* asign function handlers */
Sagi Grimbergf2252252014-03-27 19:22:25 +0200253 if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
254 dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) {
Sagi Grimberga3a5a822014-01-09 18:40:50 +0200255 device->use_fastreg = 1;
256 device->reg_rdma_mem = isert_reg_rdma;
257 device->unreg_rdma_mem = isert_unreg_rdma;
Vu Pham59464ef2013-08-28 23:23:35 +0300258 } else {
Sagi Grimberga3a5a822014-01-09 18:40:50 +0200259 device->use_fastreg = 0;
Vu Pham59464ef2013-08-28 23:23:35 +0300260 device->reg_rdma_mem = isert_map_rdma;
261 device->unreg_rdma_mem = isert_unmap_cmd;
262 }
Vu Phamd40945d2013-08-28 23:23:34 +0300263
Sagi Grimbergd3e125d2014-02-19 17:50:23 +0200264 /* Check signature cap */
265 device->pi_capable = dev_attr->device_cap_flags &
266 IB_DEVICE_SIGNATURE_HANDOVER ? true : false;
267
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800268 device->cqs_used = min_t(int, num_online_cpus(),
269 device->ib_device->num_comp_vectors);
270 device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
Sagi Grimberga3a5a822014-01-09 18:40:50 +0200271 pr_debug("Using %d CQs, device %s supports %d vectors support "
Sagi Grimbergd3e125d2014-02-19 17:50:23 +0200272 "Fast registration %d pi_capable %d\n",
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800273 device->cqs_used, device->ib_device->name,
Sagi Grimbergd3e125d2014-02-19 17:50:23 +0200274 device->ib_device->num_comp_vectors, device->use_fastreg,
275 device->pi_capable);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800276 device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
277 device->cqs_used, GFP_KERNEL);
278 if (!device->cq_desc) {
279 pr_err("Unable to allocate device->cq_desc\n");
280 return -ENOMEM;
281 }
282 cq_desc = device->cq_desc;
283
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800284 for (i = 0; i < device->cqs_used; i++) {
285 cq_desc[i].device = device;
286 cq_desc[i].cq_index = i;
287
Nicholas Bellinger2853c2b2013-12-11 16:20:13 -0800288 INIT_WORK(&cq_desc[i].cq_rx_work, isert_cq_rx_work);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800289 device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
290 isert_cq_rx_callback,
291 isert_cq_event_callback,
292 (void *)&cq_desc[i],
Chris Mooreb1a5ad02014-11-04 16:28:29 +0000293 max_rx_cqe, i);
Wei Yongjun94a71112013-10-29 09:56:34 +0800294 if (IS_ERR(device->dev_rx_cq[i])) {
295 ret = PTR_ERR(device->dev_rx_cq[i]);
296 device->dev_rx_cq[i] = NULL;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800297 goto out_cq;
Wei Yongjun94a71112013-10-29 09:56:34 +0800298 }
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800299
Nicholas Bellinger2853c2b2013-12-11 16:20:13 -0800300 INIT_WORK(&cq_desc[i].cq_tx_work, isert_cq_tx_work);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800301 device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
302 isert_cq_tx_callback,
303 isert_cq_event_callback,
304 (void *)&cq_desc[i],
Chris Mooreb1a5ad02014-11-04 16:28:29 +0000305 max_tx_cqe, i);
Wei Yongjun94a71112013-10-29 09:56:34 +0800306 if (IS_ERR(device->dev_tx_cq[i])) {
307 ret = PTR_ERR(device->dev_tx_cq[i]);
308 device->dev_tx_cq[i] = NULL;
309 goto out_cq;
310 }
311
312 ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
313 if (ret)
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800314 goto out_cq;
315
Wei Yongjun94a71112013-10-29 09:56:34 +0800316 ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
317 if (ret)
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800318 goto out_cq;
319 }
320
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800321 return 0;
322
323out_cq:
324 for (j = 0; j < i; j++) {
325 cq_desc = &device->cq_desc[j];
326
327 if (device->dev_rx_cq[j]) {
328 cancel_work_sync(&cq_desc->cq_rx_work);
329 ib_destroy_cq(device->dev_rx_cq[j]);
330 }
331 if (device->dev_tx_cq[j]) {
332 cancel_work_sync(&cq_desc->cq_tx_work);
333 ib_destroy_cq(device->dev_tx_cq[j]);
334 }
335 }
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800336 kfree(device->cq_desc);
337
338 return ret;
339}
340
341static void
342isert_free_device_ib_res(struct isert_device *device)
343{
344 struct isert_cq_desc *cq_desc;
345 int i;
346
347 for (i = 0; i < device->cqs_used; i++) {
348 cq_desc = &device->cq_desc[i];
349
350 cancel_work_sync(&cq_desc->cq_rx_work);
351 cancel_work_sync(&cq_desc->cq_tx_work);
352 ib_destroy_cq(device->dev_rx_cq[i]);
353 ib_destroy_cq(device->dev_tx_cq[i]);
354 device->dev_rx_cq[i] = NULL;
355 device->dev_tx_cq[i] = NULL;
356 }
357
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800358 kfree(device->cq_desc);
359}
360
361static void
362isert_device_try_release(struct isert_device *device)
363{
364 mutex_lock(&device_list_mutex);
365 device->refcount--;
366 if (!device->refcount) {
367 isert_free_device_ib_res(device);
368 list_del(&device->dev_node);
369 kfree(device);
370 }
371 mutex_unlock(&device_list_mutex);
372}
373
374static struct isert_device *
375isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
376{
377 struct isert_device *device;
378 int ret;
379
380 mutex_lock(&device_list_mutex);
381 list_for_each_entry(device, &device_list, dev_node) {
382 if (device->ib_device->node_guid == cma_id->device->node_guid) {
383 device->refcount++;
384 mutex_unlock(&device_list_mutex);
385 return device;
386 }
387 }
388
389 device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
390 if (!device) {
391 mutex_unlock(&device_list_mutex);
392 return ERR_PTR(-ENOMEM);
393 }
394
395 INIT_LIST_HEAD(&device->dev_node);
396
397 device->ib_device = cma_id->device;
398 ret = isert_create_device_ib_res(device);
399 if (ret) {
400 kfree(device);
401 mutex_unlock(&device_list_mutex);
402 return ERR_PTR(ret);
403 }
404
405 device->refcount++;
406 list_add_tail(&device->dev_node, &device_list);
407 mutex_unlock(&device_list_mutex);
408
409 return device;
410}
411
Vu Pham59464ef2013-08-28 23:23:35 +0300412static void
Sagi Grimberga3a5a822014-01-09 18:40:50 +0200413isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
Vu Pham59464ef2013-08-28 23:23:35 +0300414{
415 struct fast_reg_descriptor *fr_desc, *tmp;
416 int i = 0;
417
Sagi Grimberga3a5a822014-01-09 18:40:50 +0200418 if (list_empty(&isert_conn->conn_fr_pool))
Vu Pham59464ef2013-08-28 23:23:35 +0300419 return;
420
Sagi Grimberga3a5a822014-01-09 18:40:50 +0200421 pr_debug("Freeing conn %p fastreg pool", isert_conn);
Vu Pham59464ef2013-08-28 23:23:35 +0300422
423 list_for_each_entry_safe(fr_desc, tmp,
Sagi Grimberga3a5a822014-01-09 18:40:50 +0200424 &isert_conn->conn_fr_pool, list) {
Vu Pham59464ef2013-08-28 23:23:35 +0300425 list_del(&fr_desc->list);
426 ib_free_fast_reg_page_list(fr_desc->data_frpl);
427 ib_dereg_mr(fr_desc->data_mr);
Sagi Grimbergd3e125d2014-02-19 17:50:23 +0200428 if (fr_desc->pi_ctx) {
429 ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl);
430 ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
431 ib_destroy_mr(fr_desc->pi_ctx->sig_mr);
432 kfree(fr_desc->pi_ctx);
433 }
Vu Pham59464ef2013-08-28 23:23:35 +0300434 kfree(fr_desc);
435 ++i;
436 }
437
Sagi Grimberga3a5a822014-01-09 18:40:50 +0200438 if (i < isert_conn->conn_fr_pool_size)
Vu Pham59464ef2013-08-28 23:23:35 +0300439 pr_warn("Pool still has %d regions registered\n",
Sagi Grimberga3a5a822014-01-09 18:40:50 +0200440 isert_conn->conn_fr_pool_size - i);
Vu Pham59464ef2013-08-28 23:23:35 +0300441}
442
443static int
Sagi Grimberg570db172014-12-02 16:57:31 +0200444isert_create_pi_ctx(struct fast_reg_descriptor *desc,
445 struct ib_device *device,
446 struct ib_pd *pd)
447{
448 struct ib_mr_init_attr mr_init_attr;
449 struct pi_context *pi_ctx;
450 int ret;
451
452 pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
453 if (!pi_ctx) {
454 pr_err("Failed to allocate pi context\n");
455 return -ENOMEM;
456 }
457
458 pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(device,
459 ISCSI_ISER_SG_TABLESIZE);
460 if (IS_ERR(pi_ctx->prot_frpl)) {
461 pr_err("Failed to allocate prot frpl err=%ld\n",
462 PTR_ERR(pi_ctx->prot_frpl));
463 ret = PTR_ERR(pi_ctx->prot_frpl);
464 goto err_pi_ctx;
465 }
466
467 pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
468 if (IS_ERR(pi_ctx->prot_mr)) {
469 pr_err("Failed to allocate prot frmr err=%ld\n",
470 PTR_ERR(pi_ctx->prot_mr));
471 ret = PTR_ERR(pi_ctx->prot_mr);
472 goto err_prot_frpl;
473 }
474 desc->ind |= ISERT_PROT_KEY_VALID;
475
476 memset(&mr_init_attr, 0, sizeof(mr_init_attr));
477 mr_init_attr.max_reg_descriptors = 2;
478 mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
479 pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
480 if (IS_ERR(pi_ctx->sig_mr)) {
481 pr_err("Failed to allocate signature enabled mr err=%ld\n",
482 PTR_ERR(pi_ctx->sig_mr));
483 ret = PTR_ERR(pi_ctx->sig_mr);
484 goto err_prot_mr;
485 }
486
487 desc->pi_ctx = pi_ctx;
488 desc->ind |= ISERT_SIG_KEY_VALID;
489 desc->ind &= ~ISERT_PROTECTED;
490
491 return 0;
492
493err_prot_mr:
494 ib_dereg_mr(desc->pi_ctx->prot_mr);
495err_prot_frpl:
496 ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
497err_pi_ctx:
498 kfree(desc->pi_ctx);
499
500 return ret;
501}
502
503static int
Sagi Grimbergdc87a902014-01-09 18:40:51 +0200504isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
Sagi Grimberg570db172014-12-02 16:57:31 +0200505 struct fast_reg_descriptor *fr_desc)
Sagi Grimbergdc87a902014-01-09 18:40:51 +0200506{
Sagi Grimbergd3e125d2014-02-19 17:50:23 +0200507 int ret;
508
Sagi Grimbergdc87a902014-01-09 18:40:51 +0200509 fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
510 ISCSI_ISER_SG_TABLESIZE);
511 if (IS_ERR(fr_desc->data_frpl)) {
512 pr_err("Failed to allocate data frpl err=%ld\n",
513 PTR_ERR(fr_desc->data_frpl));
514 return PTR_ERR(fr_desc->data_frpl);
515 }
516
517 fr_desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
518 if (IS_ERR(fr_desc->data_mr)) {
519 pr_err("Failed to allocate data frmr err=%ld\n",
520 PTR_ERR(fr_desc->data_mr));
Sagi Grimbergd3e125d2014-02-19 17:50:23 +0200521 ret = PTR_ERR(fr_desc->data_mr);
522 goto err_data_frpl;
Sagi Grimbergdc87a902014-01-09 18:40:51 +0200523 }
Sagi Grimbergd3e125d2014-02-19 17:50:23 +0200524 fr_desc->ind |= ISERT_DATA_KEY_VALID;
Sagi Grimbergdc87a902014-01-09 18:40:51 +0200525
Sagi Grimberg570db172014-12-02 16:57:31 +0200526 pr_debug("Created fr_desc %p\n", fr_desc);
Sagi Grimbergdc87a902014-01-09 18:40:51 +0200527
528 return 0;
Sagi Grimberg570db172014-12-02 16:57:31 +0200529
Sagi Grimbergd3e125d2014-02-19 17:50:23 +0200530err_data_frpl:
531 ib_free_fast_reg_page_list(fr_desc->data_frpl);
532
533 return ret;
Sagi Grimbergdc87a902014-01-09 18:40:51 +0200534}
535
536static int
Sagi Grimberg570db172014-12-02 16:57:31 +0200537isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
Vu Pham59464ef2013-08-28 23:23:35 +0300538{
539 struct fast_reg_descriptor *fr_desc;
540 struct isert_device *device = isert_conn->conn_device;
Nicholas Bellingerf46d6a82014-03-21 18:10:25 -0700541 struct se_session *se_sess = isert_conn->conn->sess->se_sess;
542 struct se_node_acl *se_nacl = se_sess->se_node_acl;
543 int i, ret, tag_num;
544 /*
545 * Setup the number of FRMRs based upon the number of tags
546 * available to session in iscsi_target_locate_portal().
547 */
548 tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth);
549 tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;
Vu Pham59464ef2013-08-28 23:23:35 +0300550
Sagi Grimberga3a5a822014-01-09 18:40:50 +0200551 isert_conn->conn_fr_pool_size = 0;
Nicholas Bellingerf46d6a82014-03-21 18:10:25 -0700552 for (i = 0; i < tag_num; i++) {
Vu Pham59464ef2013-08-28 23:23:35 +0300553 fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
554 if (!fr_desc) {
555 pr_err("Failed to allocate fast_reg descriptor\n");
556 ret = -ENOMEM;
557 goto err;
558 }
559
Sagi Grimbergdc87a902014-01-09 18:40:51 +0200560 ret = isert_create_fr_desc(device->ib_device,
Sagi Grimberg570db172014-12-02 16:57:31 +0200561 isert_conn->conn_pd, fr_desc);
Sagi Grimbergdc87a902014-01-09 18:40:51 +0200562 if (ret) {
563 pr_err("Failed to create fastreg descriptor err=%d\n",
564 ret);
Nicholas Bellingera80e21b2014-02-03 12:59:56 -0800565 kfree(fr_desc);
Vu Pham59464ef2013-08-28 23:23:35 +0300566 goto err;
567 }
568
Sagi Grimberga3a5a822014-01-09 18:40:50 +0200569 list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
570 isert_conn->conn_fr_pool_size++;
Vu Pham59464ef2013-08-28 23:23:35 +0300571 }
572
Sagi Grimberga3a5a822014-01-09 18:40:50 +0200573 pr_debug("Creating conn %p fastreg pool size=%d",
574 isert_conn, isert_conn->conn_fr_pool_size);
Vu Pham59464ef2013-08-28 23:23:35 +0300575
576 return 0;
577
578err:
Sagi Grimberga3a5a822014-01-09 18:40:50 +0200579 isert_conn_free_fastreg_pool(isert_conn);
Vu Pham59464ef2013-08-28 23:23:35 +0300580 return ret;
581}
582
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800583static int
584isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
585{
Sagi Grimbergca6c1d82014-12-02 16:57:27 +0200586 struct isert_np *isert_np = cma_id->context;
587 struct iscsi_np *np = isert_np->np;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800588 struct isert_conn *isert_conn;
589 struct isert_device *device;
590 struct ib_device *ib_dev = cma_id->device;
591 int ret = 0;
Sagi Grimberg14f4b542014-04-29 13:13:47 +0300592
593 spin_lock_bh(&np->np_thread_lock);
594 if (!np->enabled) {
595 spin_unlock_bh(&np->np_thread_lock);
596 pr_debug("iscsi_np is not enabled, reject connect request\n");
597 return rdma_reject(cma_id, NULL, 0);
598 }
599 spin_unlock_bh(&np->np_thread_lock);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800600
601 pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
602 cma_id, cma_id->context);
603
604 isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
605 if (!isert_conn) {
606 pr_err("Unable to allocate isert_conn\n");
607 return -ENOMEM;
608 }
609 isert_conn->state = ISER_CONN_INIT;
610 INIT_LIST_HEAD(&isert_conn->conn_accept_node);
611 init_completion(&isert_conn->conn_login_comp);
Sagi Grimberg2371e5d2014-12-02 16:57:21 +0200612 init_completion(&isert_conn->login_req_comp);
Nicholas Bellingerdefd8842014-02-03 12:54:39 -0800613 init_completion(&isert_conn->conn_wait);
614 init_completion(&isert_conn->conn_wait_comp_err);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800615 kref_init(&isert_conn->conn_kref);
Nicholas Bellingerb2cb9642013-07-03 03:05:37 -0700616 mutex_init(&isert_conn->conn_mutex);
Vu Pham59464ef2013-08-28 23:23:35 +0300617 spin_lock_init(&isert_conn->conn_lock);
Nicholas Bellingerf46d6a82014-03-21 18:10:25 -0700618 INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800619
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800620 isert_conn->conn_cm_id = cma_id;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800621
622 isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
623 ISER_RX_LOGIN_SIZE, GFP_KERNEL);
624 if (!isert_conn->login_buf) {
625 pr_err("Unable to allocate isert_conn->login_buf\n");
626 ret = -ENOMEM;
627 goto out;
628 }
629
630 isert_conn->login_req_buf = isert_conn->login_buf;
631 isert_conn->login_rsp_buf = isert_conn->login_buf +
632 ISCSI_DEF_MAX_RECV_SEG_LEN;
633 pr_debug("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
634 isert_conn->login_buf, isert_conn->login_req_buf,
635 isert_conn->login_rsp_buf);
636
637 isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
638 (void *)isert_conn->login_req_buf,
639 ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
640
641 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
642 if (ret) {
643 pr_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
644 ret);
645 isert_conn->login_req_dma = 0;
646 goto out_login_buf;
647 }
648
649 isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
650 (void *)isert_conn->login_rsp_buf,
651 ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
652
653 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
654 if (ret) {
655 pr_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
656 ret);
657 isert_conn->login_rsp_dma = 0;
658 goto out_req_dma_map;
659 }
660
661 device = isert_device_find_by_ib_dev(cma_id);
662 if (IS_ERR(device)) {
663 ret = PTR_ERR(device);
664 goto out_rsp_dma_map;
665 }
666
Sagi Grimberg1a92e172014-06-19 13:54:19 +0300667 /* Set max inflight RDMA READ requests */
668 isert_conn->initiator_depth = min_t(u8,
669 event->param.conn.initiator_depth,
670 device->dev_attr.max_qp_init_rd_atom);
671 pr_debug("Using initiator_depth: %u\n", isert_conn->initiator_depth);
672
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800673 isert_conn->conn_device = device;
Sagi Grimbergeb6ab132014-01-09 18:40:49 +0200674 isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device);
675 if (IS_ERR(isert_conn->conn_pd)) {
676 ret = PTR_ERR(isert_conn->conn_pd);
677 pr_err("ib_alloc_pd failed for conn %p: ret=%d\n",
678 isert_conn, ret);
679 goto out_pd;
680 }
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800681
Sagi Grimbergeb6ab132014-01-09 18:40:49 +0200682 isert_conn->conn_mr = ib_get_dma_mr(isert_conn->conn_pd,
683 IB_ACCESS_LOCAL_WRITE);
684 if (IS_ERR(isert_conn->conn_mr)) {
685 ret = PTR_ERR(isert_conn->conn_mr);
686 pr_err("ib_get_dma_mr failed for conn %p: ret=%d\n",
687 isert_conn, ret);
688 goto out_mr;
689 }
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800690
Sagi Grimberg570db172014-12-02 16:57:31 +0200691 ret = isert_conn_setup_qp(isert_conn, cma_id);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800692 if (ret)
693 goto out_conn_dev;
694
Sagi Grimberg2371e5d2014-12-02 16:57:21 +0200695 ret = isert_rdma_post_recvl(isert_conn);
696 if (ret)
697 goto out_conn_dev;
698
699 ret = isert_rdma_accept(isert_conn);
700 if (ret)
701 goto out_conn_dev;
702
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800703 mutex_lock(&isert_np->np_accept_mutex);
Sagi Grimberg9fe63c82014-04-29 13:13:44 +0300704 list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800705 mutex_unlock(&isert_np->np_accept_mutex);
706
Sagi Grimberg531b7bf2014-04-29 13:13:45 +0300707 pr_debug("isert_connect_request() up np_sem np: %p\n", np);
708 up(&isert_np->np_sem);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800709 return 0;
710
711out_conn_dev:
Sagi Grimbergeb6ab132014-01-09 18:40:49 +0200712 ib_dereg_mr(isert_conn->conn_mr);
713out_mr:
714 ib_dealloc_pd(isert_conn->conn_pd);
715out_pd:
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800716 isert_device_try_release(device);
717out_rsp_dma_map:
718 ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
719 ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
720out_req_dma_map:
721 ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
722 ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
723out_login_buf:
724 kfree(isert_conn->login_buf);
725out:
726 kfree(isert_conn);
Sagi Grimberg2371e5d2014-12-02 16:57:21 +0200727 rdma_reject(cma_id, NULL, 0);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800728 return ret;
729}
730
731static void
732isert_connect_release(struct isert_conn *isert_conn)
733{
734 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
735 struct isert_device *device = isert_conn->conn_device;
736 int cq_index;
737
738 pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
739
Sagi Grimberga3a5a822014-01-09 18:40:50 +0200740 if (device && device->use_fastreg)
741 isert_conn_free_fastreg_pool(isert_conn);
Vu Pham59464ef2013-08-28 23:23:35 +0300742
Sagi Grimberg19e20902014-12-02 16:57:26 +0200743 isert_free_rx_descriptors(isert_conn);
744 rdma_destroy_id(isert_conn->conn_cm_id);
745
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800746 if (isert_conn->conn_qp) {
747 cq_index = ((struct isert_cq_desc *)
748 isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
749 pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
Sagi Grimberg19e20902014-12-02 16:57:26 +0200750 mutex_lock(&device_list_mutex);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800751 isert_conn->conn_device->cq_active_qps[cq_index]--;
Sagi Grimberg19e20902014-12-02 16:57:26 +0200752 mutex_unlock(&device_list_mutex);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800753
Sagi Grimberg19e20902014-12-02 16:57:26 +0200754 ib_destroy_qp(isert_conn->conn_qp);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800755 }
756
Sagi Grimbergeb6ab132014-01-09 18:40:49 +0200757 ib_dereg_mr(isert_conn->conn_mr);
758 ib_dealloc_pd(isert_conn->conn_pd);
759
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800760 if (isert_conn->login_buf) {
761 ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
762 ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
763 ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
764 ISCSI_DEF_MAX_RECV_SEG_LEN,
765 DMA_FROM_DEVICE);
766 kfree(isert_conn->login_buf);
767 }
768 kfree(isert_conn);
769
770 if (device)
771 isert_device_try_release(device);
772
773 pr_debug("Leaving isert_connect_release >>>>>>>>>>>>\n");
774}
775
776static void
777isert_connected_handler(struct rdma_cm_id *cma_id)
778{
Sagi Grimberg19e20902014-12-02 16:57:26 +0200779 struct isert_conn *isert_conn = cma_id->qp->qp_context;
Sagi Grimbergc2f88b12014-07-02 16:19:24 +0300780
Sagi Grimberg128e9cc2014-12-02 16:57:20 +0200781 pr_info("conn %p\n", isert_conn);
782
Sagi Grimberg2371e5d2014-12-02 16:57:21 +0200783 if (!kref_get_unless_zero(&isert_conn->conn_kref)) {
784 pr_warn("conn %p connect_release is running\n", isert_conn);
785 return;
786 }
787
788 mutex_lock(&isert_conn->conn_mutex);
789 if (isert_conn->state != ISER_CONN_FULL_FEATURE)
790 isert_conn->state = ISER_CONN_UP;
791 mutex_unlock(&isert_conn->conn_mutex);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800792}
793
794static void
795isert_release_conn_kref(struct kref *kref)
796{
797 struct isert_conn *isert_conn = container_of(kref,
798 struct isert_conn, conn_kref);
799
800 pr_debug("Calling isert_connect_release for final kref %s/%d\n",
801 current->comm, current->pid);
802
803 isert_connect_release(isert_conn);
804}
805
/* Drop one connection reference; the final put runs isert_connect_release(). */
static void
isert_put_conn(struct isert_conn *isert_conn)
{
	kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
}
811
/**
 * isert_conn_terminate() - Initiate connection termination
 * @isert_conn: isert connection struct
 *
 * Notes:
 * In case the connection state is FULL_FEATURE, move state
 * to TERMINATING and start teardown sequence (rdma_disconnect).
 * In case the connection state is UP, complete flush as well.
 *
 * This routine must be called with conn_mutex held. Thus it is
 * safe to call multiple times.
 */
static void
isert_conn_terminate(struct isert_conn *isert_conn)
{
	int err;

	switch (isert_conn->state) {
	case ISER_CONN_TERMINATING:
		/* Teardown already started; idempotent no-op. */
		break;
	case ISER_CONN_UP:
		/*
		 * No flush completions will occur as we didn't
		 * get to ISER_CONN_FULL_FEATURE yet, complete
		 * to allow teardown progress.
		 */
		complete(&isert_conn->conn_wait_comp_err);
	case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
		pr_info("Terminating conn %p state %d\n",
			   isert_conn, isert_conn->state);
		isert_conn->state = ISER_CONN_TERMINATING;
		err = rdma_disconnect(isert_conn->conn_cm_id);
		if (err)
			pr_warn("Failed rdma_disconnect isert_conn %p\n",
				isert_conn);
		break;
	default:
		/* e.g. still in INIT: nothing established yet, just warn. */
		pr_warn("conn %p teminating in state %d\n",
			isert_conn, isert_conn->state);
	}
}
853
/*
 * Handle CM events addressed to the listening (network portal) cm_id
 * rather than a per-connection id.  Always returns -1 so the RDMA CM
 * core treats the reported cm_id as no longer usable.
 */
static int
isert_np_cma_handler(struct isert_np *isert_np,
		     enum rdma_cm_event_type event)
{
	pr_debug("isert np %p, handling event %d\n", isert_np, event);

	switch (event) {
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		/* Underlying HCA is going away; drop our listener id. */
		isert_np->np_cm_id = NULL;
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:
		/*
		 * Listen address changed: rebuild the listening cm_id so
		 * new connect requests can still be accepted.
		 */
		isert_np->np_cm_id = isert_setup_id(isert_np);
		if (IS_ERR(isert_np->np_cm_id)) {
			pr_err("isert np %p setup id failed: %ld\n",
			       isert_np, PTR_ERR(isert_np->np_cm_id));
			isert_np->np_cm_id = NULL;
		}
		break;
	default:
		pr_err("isert np %p Unexpected event %d\n",
		       isert_np, event);
	}

	return -1;
}
879
/*
 * Handle disconnect-flavored CM events.  Events on the listening id are
 * routed to isert_np_cma_handler(); for per-connection ids, start the
 * termination sequence and wake up anyone waiting in conn_wait.
 */
static int
isert_disconnected_handler(struct rdma_cm_id *cma_id,
			   enum rdma_cm_event_type event)
{
	struct isert_np *isert_np = cma_id->context;
	struct isert_conn *isert_conn;

	/* Event hit the listener id, not a connection. */
	if (isert_np->np_cm_id == cma_id)
		return isert_np_cma_handler(cma_id->context, event);

	isert_conn = cma_id->qp->qp_context;

	/* isert_conn_terminate() requires conn_mutex and is idempotent. */
	mutex_lock(&isert_conn->conn_mutex);
	isert_conn_terminate(isert_conn);
	mutex_unlock(&isert_conn->conn_mutex);

	pr_info("conn %p completing conn_wait\n", isert_conn);
	complete(&isert_conn->conn_wait);

	return 0;
}
901
Sagi Grimberg954f2372014-12-02 16:57:17 +0200902static void
903isert_connect_error(struct rdma_cm_id *cma_id)
904{
Sagi Grimberg19e20902014-12-02 16:57:26 +0200905 struct isert_conn *isert_conn = cma_id->qp->qp_context;
Sagi Grimberg954f2372014-12-02 16:57:17 +0200906
907 isert_put_conn(isert_conn);
908}
909
/*
 * Top-level RDMA CM event dispatcher for both the listening id and all
 * per-connection ids.  A non-zero return tells the CM core the id failed.
 */
static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	int ret = 0;

	pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
		 event->event, event->status, cma_id->context, cma_id);

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		ret = isert_connect_request(cma_id, event);
		if (ret)
			pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
			       event->event, ret);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		isert_connected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:    /* FALLTHRU */
	case RDMA_CM_EVENT_DISCONNECTED:   /* FALLTHRU */
	case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
		/* All teardown-flavored events share one handler. */
		ret = isert_disconnected_handler(cma_id, event->event);
		break;
	case RDMA_CM_EVENT_REJECTED:       /* FALLTHRU */
	case RDMA_CM_EVENT_UNREACHABLE:    /* FALLTHRU */
	case RDMA_CM_EVENT_CONNECT_ERROR:
		/* Connection never established: drop setup reference. */
		isert_connect_error(cma_id);
		break;
	default:
		pr_err("Unhandled RDMA CMA event: %d\n", event->event);
		break;
	}

	return ret;
}
946
947static int
948isert_post_recv(struct isert_conn *isert_conn, u32 count)
949{
950 struct ib_recv_wr *rx_wr, *rx_wr_failed;
951 int i, ret;
952 unsigned int rx_head = isert_conn->conn_rx_desc_head;
953 struct iser_rx_desc *rx_desc;
954
955 for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) {
956 rx_desc = &isert_conn->conn_rx_descs[rx_head];
957 rx_wr->wr_id = (unsigned long)rx_desc;
958 rx_wr->sg_list = &rx_desc->rx_sg;
959 rx_wr->num_sge = 1;
960 rx_wr->next = rx_wr + 1;
961 rx_head = (rx_head + 1) & (ISERT_QP_MAX_RECV_DTOS - 1);
962 }
963
964 rx_wr--;
965 rx_wr->next = NULL; /* mark end of work requests list */
966
967 isert_conn->post_recv_buf_count += count;
968 ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr,
969 &rx_wr_failed);
970 if (ret) {
971 pr_err("ib_post_recv() failed with ret: %d\n", ret);
972 isert_conn->post_recv_buf_count -= count;
973 } else {
974 pr_debug("isert_post_recv(): Posted %d RX buffers\n", count);
975 isert_conn->conn_rx_desc_head = rx_head;
976 }
977 return ret;
978}
979
980static int
981isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
982{
983 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
984 struct ib_send_wr send_wr, *send_wr_failed;
985 int ret;
986
987 ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
988 ISER_HEADERS_LEN, DMA_TO_DEVICE);
989
990 send_wr.next = NULL;
991 send_wr.wr_id = (unsigned long)tx_desc;
992 send_wr.sg_list = tx_desc->tx_sg;
993 send_wr.num_sge = tx_desc->num_sge;
994 send_wr.opcode = IB_WR_SEND;
995 send_wr.send_flags = IB_SEND_SIGNALED;
996
997 atomic_inc(&isert_conn->post_send_buf_count);
998
999 ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed);
1000 if (ret) {
1001 pr_err("ib_post_send() failed, ret: %d\n", ret);
1002 atomic_dec(&isert_conn->post_send_buf_count);
1003 }
1004
1005 return ret;
1006}
1007
1008static void
1009isert_create_send_desc(struct isert_conn *isert_conn,
1010 struct isert_cmd *isert_cmd,
1011 struct iser_tx_desc *tx_desc)
1012{
1013 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1014
1015 ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
1016 ISER_HEADERS_LEN, DMA_TO_DEVICE);
1017
1018 memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
1019 tx_desc->iser_header.flags = ISER_VER;
1020
1021 tx_desc->num_sge = 1;
1022 tx_desc->isert_cmd = isert_cmd;
1023
1024 if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) {
1025 tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
1026 pr_debug("tx_desc %p lkey mismatch, fixing\n", tx_desc);
1027 }
1028}
1029
1030static int
1031isert_init_tx_hdrs(struct isert_conn *isert_conn,
1032 struct iser_tx_desc *tx_desc)
1033{
1034 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1035 u64 dma_addr;
1036
1037 dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
1038 ISER_HEADERS_LEN, DMA_TO_DEVICE);
1039 if (ib_dma_mapping_error(ib_dev, dma_addr)) {
1040 pr_err("ib_dma_mapping_error() failed\n");
1041 return -ENOMEM;
1042 }
1043
1044 tx_desc->dma_addr = dma_addr;
1045 tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
1046 tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
1047 tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
1048
1049 pr_debug("isert_init_tx_hdrs: Setup tx_sg[0].addr: 0x%llx length: %u"
1050 " lkey: 0x%08x\n", tx_desc->tx_sg[0].addr,
1051 tx_desc->tx_sg[0].length, tx_desc->tx_sg[0].lkey);
1052
1053 return 0;
1054}
1055
1056static void
Nicholas Bellinger95b60f02013-11-05 13:16:12 -08001057isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1058 struct ib_send_wr *send_wr, bool coalesce)
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001059{
Nicholas Bellinger95b60f02013-11-05 13:16:12 -08001060 struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;
1061
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001062 isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
1063 send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
1064 send_wr->opcode = IB_WR_SEND;
Nicholas Bellinger95b60f02013-11-05 13:16:12 -08001065 send_wr->sg_list = &tx_desc->tx_sg[0];
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001066 send_wr->num_sge = isert_cmd->tx_desc.num_sge;
Nicholas Bellinger95b60f02013-11-05 13:16:12 -08001067 /*
1068 * Coalesce send completion interrupts by only setting IB_SEND_SIGNALED
1069 * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls.
1070 */
Nicholas Bellingerebbe4422014-03-02 14:51:12 -08001071 mutex_lock(&isert_conn->conn_mutex);
Sagi Grimberg128e9cc2014-12-02 16:57:20 +02001072 if (coalesce && isert_conn->state == ISER_CONN_FULL_FEATURE &&
Nicholas Bellinger95b60f02013-11-05 13:16:12 -08001073 ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) {
Nicholas Bellingerebbe4422014-03-02 14:51:12 -08001074 tx_desc->llnode_active = true;
Nicholas Bellinger95b60f02013-11-05 13:16:12 -08001075 llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist);
Nicholas Bellingerebbe4422014-03-02 14:51:12 -08001076 mutex_unlock(&isert_conn->conn_mutex);
Nicholas Bellinger95b60f02013-11-05 13:16:12 -08001077 return;
1078 }
1079 isert_conn->conn_comp_batch = 0;
1080 tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist);
Nicholas Bellingerebbe4422014-03-02 14:51:12 -08001081 mutex_unlock(&isert_conn->conn_mutex);
Nicholas Bellinger95b60f02013-11-05 13:16:12 -08001082
1083 send_wr->send_flags = IB_SEND_SIGNALED;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001084}
1085
1086static int
1087isert_rdma_post_recvl(struct isert_conn *isert_conn)
1088{
1089 struct ib_recv_wr rx_wr, *rx_wr_fail;
1090 struct ib_sge sge;
1091 int ret;
1092
1093 memset(&sge, 0, sizeof(struct ib_sge));
1094 sge.addr = isert_conn->login_req_dma;
1095 sge.length = ISER_RX_LOGIN_SIZE;
1096 sge.lkey = isert_conn->conn_mr->lkey;
1097
1098 pr_debug("Setup sge: addr: %llx length: %d 0x%08x\n",
1099 sge.addr, sge.length, sge.lkey);
1100
1101 memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
1102 rx_wr.wr_id = (unsigned long)isert_conn->login_req_buf;
1103 rx_wr.sg_list = &sge;
1104 rx_wr.num_sge = 1;
1105
1106 isert_conn->post_recv_buf_count++;
1107 ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail);
1108 if (ret) {
1109 pr_err("ib_post_recv() failed: %d\n", ret);
1110 isert_conn->post_recv_buf_count--;
1111 }
1112
1113 pr_debug("ib_post_recv(): returned success >>>>>>>>>>>>>>>>>>>>>>>>\n");
1114 return ret;
1115}
1116
/*
 * iscsit transport callback: transmit a login response PDU of @length
 * payload bytes.  On final, successful login this also allocates the
 * fastreg pool / RX descriptor ring and moves the connection to
 * ISER_CONN_FULL_FEATURE before sending the response.
 */
static int
isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
		   u32 length)
{
	struct isert_conn *isert_conn = conn->context;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iser_tx_desc *tx_desc = &isert_conn->conn_login_tx_desc;
	int ret;

	isert_create_send_desc(isert_conn, NULL, tx_desc);

	/* Copy the iSCSI login response header into the TX descriptor. */
	memcpy(&tx_desc->iscsi_header, &login->rsp[0],
	       sizeof(struct iscsi_hdr));

	isert_init_tx_hdrs(isert_conn, tx_desc);

	if (length > 0) {
		/* Attach the login payload as a second SGE. */
		struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];

		ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
					   length, DMA_TO_DEVICE);

		memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);

		ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
					      length, DMA_TO_DEVICE);

		tx_dsg->addr = isert_conn->login_rsp_dma;
		tx_dsg->length = length;
		tx_dsg->lkey = isert_conn->conn_mr->lkey;
		tx_desc->num_sge = 2;
	}
	if (!login->login_failed) {
		if (login->login_complete) {
			/*
			 * Normal (non-discovery) sessions on fastreg-capable
			 * devices need the fastreg descriptor pool before
			 * any data-path RDMA can happen.
			 */
			if (!conn->sess->sess_ops->SessionType &&
			    isert_conn->conn_device->use_fastreg) {
				ret = isert_conn_create_fastreg_pool(isert_conn);
				if (ret) {
					pr_err("Conn: %p failed to create"
					       " fastreg pool\n", isert_conn);
					return ret;
				}
			}

			ret = isert_alloc_rx_descriptors(isert_conn);
			if (ret)
				return ret;

			ret = isert_post_recv(isert_conn, ISERT_MIN_POSTED_RX);
			if (ret)
				return ret;

			/* Now we are in FULL_FEATURE phase */
			mutex_lock(&isert_conn->conn_mutex);
			isert_conn->state = ISER_CONN_FULL_FEATURE;
			mutex_unlock(&isert_conn->conn_mutex);
			goto post_send;
		}

		/* More login exchanges expected: re-arm the login receive. */
		ret = isert_rdma_post_recvl(isert_conn);
		if (ret)
			return ret;
	}
post_send:
	ret = isert_post_send(isert_conn, tx_desc);
	if (ret)
		return ret;

	return 0;
}
1187
1188static void
Sagi Grimberg2371e5d2014-12-02 16:57:21 +02001189isert_rx_login_req(struct isert_conn *isert_conn)
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001190{
Sagi Grimberg2371e5d2014-12-02 16:57:21 +02001191 struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf;
1192 int rx_buflen = isert_conn->login_req_len;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001193 struct iscsi_conn *conn = isert_conn->conn;
1194 struct iscsi_login *login = conn->conn_login;
1195 int size;
1196
Sagi Grimberg2371e5d2014-12-02 16:57:21 +02001197 pr_info("conn %p\n", isert_conn);
1198
1199 WARN_ON_ONCE(!login);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001200
1201 if (login->first_request) {
1202 struct iscsi_login_req *login_req =
1203 (struct iscsi_login_req *)&rx_desc->iscsi_header;
1204 /*
1205 * Setup the initial iscsi_login values from the leading
1206 * login request PDU.
1207 */
1208 login->leading_connection = (!login_req->tsih) ? 1 : 0;
1209 login->current_stage =
1210 (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
1211 >> 2;
1212 login->version_min = login_req->min_version;
1213 login->version_max = login_req->max_version;
1214 memcpy(login->isid, login_req->isid, 6);
1215 login->cmd_sn = be32_to_cpu(login_req->cmdsn);
1216 login->init_task_tag = login_req->itt;
1217 login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
1218 login->cid = be16_to_cpu(login_req->cid);
1219 login->tsih = be16_to_cpu(login_req->tsih);
1220 }
1221
1222 memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);
1223
1224 size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
1225 pr_debug("Using login payload size: %d, rx_buflen: %d MAX_KEY_VALUE_PAIRS: %d\n",
1226 size, rx_buflen, MAX_KEY_VALUE_PAIRS);
1227 memcpy(login->req_buf, &rx_desc->data[0], size);
1228
Nicholas Bellinger6faaa852013-08-18 16:35:46 -07001229 if (login->first_request) {
1230 complete(&isert_conn->conn_login_comp);
1231 return;
1232 }
1233 schedule_delayed_work(&conn->login_work, 0);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001234}
1235
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001236static struct iscsi_cmd
Nicholas Bellinger676687c2014-01-20 03:36:44 +00001237*isert_allocate_cmd(struct iscsi_conn *conn)
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001238{
1239 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1240 struct isert_cmd *isert_cmd;
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001241 struct iscsi_cmd *cmd;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001242
Nicholas Bellinger676687c2014-01-20 03:36:44 +00001243 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001244 if (!cmd) {
1245 pr_err("Unable to allocate iscsi_cmd + isert_cmd\n");
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001246 return NULL;
1247 }
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001248 isert_cmd = iscsit_priv_cmd(cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001249 isert_cmd->conn = isert_conn;
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001250 isert_cmd->iscsi_cmd = cmd;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001251
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001252 return cmd;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001253}
1254
/*
 * Handle a received SCSI command PDU: set up the command, copy any
 * immediate data out of the RX descriptor into the se_cmd scatterlist,
 * and hand the command to the iscsit sequencing machinery.
 */
static int
isert_handle_scsi_cmd(struct isert_conn *isert_conn,
		      struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd,
		      struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
	struct scatterlist *sg;
	int imm_data, imm_data_len, unsol_data, sg_nents, rc;
	bool dump_payload = false;

	rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
	if (rc < 0)
		return rc;

	/* Snapshot these before iscsit_process_scsi_cmd() may change cmd. */
	imm_data = cmd->immediate_data;
	imm_data_len = cmd->first_burst_len;
	unsol_data = cmd->unsolicited_data;

	rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
	if (rc < 0) {
		return 0;
	} else if (rc > 0) {
		/* Command failed setup: payload must be discarded. */
		dump_payload = true;
		goto sequence_cmd;
	}

	if (!imm_data)
		return 0;

	/* Copy immediate data from the RX descriptor into the data SGL. */
	sg = &cmd->se_cmd.t_data_sg[0];
	sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));

	pr_debug("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
		 sg, sg_nents, &rx_desc->data[0], imm_data_len);

	sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len);

	cmd->write_data_done += imm_data_len;

	/* Immediate data may already satisfy the whole WRITE. */
	if (cmd->write_data_done == cmd->se_cmd.data_length) {
		spin_lock_bh(&cmd->istate_lock);
		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
		spin_unlock_bh(&cmd->istate_lock);
	}

sequence_cmd:
	rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);

	if (!rc && dump_payload == false && unsol_data)
		iscsit_set_unsoliticed_dataout(cmd);
	else if (dump_payload && imm_data)
		/* Dumped immediate payload: release the extra cmd ref. */
		target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);

	return 0;
}
1312
/*
 * Handle an unsolicited DataOut PDU: locate the owning command, copy the
 * payload from the RX descriptor into the command's scatterlist at the
 * current write offset, and run the dataout payload checks.
 */
static int
isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
			   struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct scatterlist *sg_start;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_cmd *cmd = NULL;
	struct iscsi_data *hdr = (struct iscsi_data *)buf;
	u32 unsol_data_len = ntoh24(hdr->dlength);
	int rc, sg_nents, sg_off, page_off;

	rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
	if (rc < 0)
		return rc;
	else if (!cmd)
		/* Header checks decided this PDU should be ignored. */
		return 0;
	/*
	 * FIXME: Unexpected unsolicited_data out
	 */
	if (!cmd->unsolicited_data) {
		pr_err("Received unexpected solicited data payload\n");
		dump_stack();
		return -1;
	}

	pr_debug("Unsolicited DataOut unsol_data_len: %u, write_data_done: %u, data_length: %u\n",
		 unsol_data_len, cmd->write_data_done, cmd->se_cmd.data_length);

	/* Resume copying at the page where the previous data left off. */
	sg_off = cmd->write_data_done / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
	page_off = cmd->write_data_done % PAGE_SIZE;
	/*
	 * FIXME: Non page-aligned unsolicited_data out
	 */
	if (page_off) {
		pr_err("Received unexpected non-page aligned data payload\n");
		dump_stack();
		return -1;
	}
	pr_debug("Copying DataOut: sg_start: %p, sg_off: %u sg_nents: %u from %p %u\n",
		 sg_start, sg_off, sg_nents, &rx_desc->data[0], unsol_data_len);

	sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
			    unsol_data_len);

	rc = iscsit_check_dataout_payload(cmd, hdr, false);
	if (rc < 0)
		return rc;

	return 0;
}
1365
1366static int
Nicholas Bellinger778de362013-06-14 16:07:47 -07001367isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001368 struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
1369 unsigned char *buf)
Nicholas Bellinger778de362013-06-14 16:07:47 -07001370{
Nicholas Bellinger778de362013-06-14 16:07:47 -07001371 struct iscsi_conn *conn = isert_conn->conn;
1372 struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
1373 int rc;
1374
1375 rc = iscsit_setup_nop_out(conn, cmd, hdr);
1376 if (rc < 0)
1377 return rc;
1378 /*
1379 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
1380 */
1381
1382 return iscsit_process_nop_out(conn, cmd, hdr);
1383}
1384
1385static int
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07001386isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001387 struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
1388 struct iscsi_text *hdr)
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07001389{
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07001390 struct iscsi_conn *conn = isert_conn->conn;
1391 u32 payload_length = ntoh24(hdr->dlength);
1392 int rc;
1393 unsigned char *text_in;
1394
1395 rc = iscsit_setup_text_cmd(conn, cmd, hdr);
1396 if (rc < 0)
1397 return rc;
1398
1399 text_in = kzalloc(payload_length, GFP_KERNEL);
1400 if (!text_in) {
1401 pr_err("Unable to allocate text_in of payload_length: %u\n",
1402 payload_length);
1403 return -ENOMEM;
1404 }
1405 cmd->text_in_ptr = text_in;
1406
1407 memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);
1408
1409 return iscsit_process_text_cmd(conn, cmd, hdr);
1410}
1411
1412static int
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001413isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
1414 uint32_t read_stag, uint64_t read_va,
1415 uint32_t write_stag, uint64_t write_va)
1416{
1417 struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
1418 struct iscsi_conn *conn = isert_conn->conn;
Nicholas Bellingerca40d242013-07-07 17:45:08 -07001419 struct iscsi_session *sess = conn->sess;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001420 struct iscsi_cmd *cmd;
1421 struct isert_cmd *isert_cmd;
1422 int ret = -EINVAL;
1423 u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);
1424
Nicholas Bellingerca40d242013-07-07 17:45:08 -07001425 if (sess->sess_ops->SessionType &&
1426 (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) {
1427 pr_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
1428 " ignoring\n", opcode);
1429 return 0;
1430 }
1431
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001432 switch (opcode) {
1433 case ISCSI_OP_SCSI_CMD:
Nicholas Bellinger676687c2014-01-20 03:36:44 +00001434 cmd = isert_allocate_cmd(conn);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001435 if (!cmd)
1436 break;
1437
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001438 isert_cmd = iscsit_priv_cmd(cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001439 isert_cmd->read_stag = read_stag;
1440 isert_cmd->read_va = read_va;
1441 isert_cmd->write_stag = write_stag;
1442 isert_cmd->write_va = write_va;
1443
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001444 ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001445 rx_desc, (unsigned char *)hdr);
1446 break;
1447 case ISCSI_OP_NOOP_OUT:
Nicholas Bellinger676687c2014-01-20 03:36:44 +00001448 cmd = isert_allocate_cmd(conn);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001449 if (!cmd)
1450 break;
1451
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001452 isert_cmd = iscsit_priv_cmd(cmd);
1453 ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
Nicholas Bellinger778de362013-06-14 16:07:47 -07001454 rx_desc, (unsigned char *)hdr);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001455 break;
1456 case ISCSI_OP_SCSI_DATA_OUT:
1457 ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
1458 (unsigned char *)hdr);
1459 break;
1460 case ISCSI_OP_SCSI_TMFUNC:
Nicholas Bellinger676687c2014-01-20 03:36:44 +00001461 cmd = isert_allocate_cmd(conn);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001462 if (!cmd)
1463 break;
1464
1465 ret = iscsit_handle_task_mgt_cmd(conn, cmd,
1466 (unsigned char *)hdr);
1467 break;
1468 case ISCSI_OP_LOGOUT:
Nicholas Bellinger676687c2014-01-20 03:36:44 +00001469 cmd = isert_allocate_cmd(conn);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001470 if (!cmd)
1471 break;
1472
1473 ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
1474 if (ret > 0)
1475 wait_for_completion_timeout(&conn->conn_logout_comp,
1476 SECONDS_FOR_LOGOUT_COMP *
1477 HZ);
1478 break;
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07001479 case ISCSI_OP_TEXT:
Nicholas Bellinger676687c2014-01-20 03:36:44 +00001480 cmd = isert_allocate_cmd(conn);
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07001481 if (!cmd)
1482 break;
1483
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001484 isert_cmd = iscsit_priv_cmd(cmd);
1485 ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07001486 rx_desc, (struct iscsi_text *)hdr);
1487 break;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001488 default:
1489 pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
1490 dump_stack();
1491 break;
1492 }
1493
1494 return ret;
1495}
1496
1497static void
1498isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
1499{
1500 struct iser_hdr *iser_hdr = &rx_desc->iser_header;
1501 uint64_t read_va = 0, write_va = 0;
1502 uint32_t read_stag = 0, write_stag = 0;
1503 int rc;
1504
1505 switch (iser_hdr->flags & 0xF0) {
1506 case ISCSI_CTRL:
1507 if (iser_hdr->flags & ISER_RSV) {
1508 read_stag = be32_to_cpu(iser_hdr->read_stag);
1509 read_va = be64_to_cpu(iser_hdr->read_va);
1510 pr_debug("ISER_RSV: read_stag: 0x%08x read_va: 0x%16llx\n",
1511 read_stag, (unsigned long long)read_va);
1512 }
1513 if (iser_hdr->flags & ISER_WSV) {
1514 write_stag = be32_to_cpu(iser_hdr->write_stag);
1515 write_va = be64_to_cpu(iser_hdr->write_va);
1516 pr_debug("ISER_WSV: write__stag: 0x%08x write_va: 0x%16llx\n",
1517 write_stag, (unsigned long long)write_va);
1518 }
1519
1520 pr_debug("ISER ISCSI_CTRL PDU\n");
1521 break;
1522 case ISER_HELLO:
1523 pr_err("iSER Hello message\n");
1524 break;
1525 default:
1526 pr_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
1527 break;
1528 }
1529
1530 rc = isert_rx_opcode(isert_conn, rx_desc,
1531 read_stag, read_va, write_stag, write_va);
1532}
1533
/*
 * RX completion handler: distinguishes the dedicated login buffer from
 * regular data-path descriptors, processes the PDU, and replenishes the
 * receive queue when it runs low.
 */
static void
isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
		    unsigned long xfer_len)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iscsi_hdr *hdr;
	u64 rx_dma;
	int rx_buflen, outstanding;

	/* Pick DMA address/length depending on which buffer completed. */
	if ((char *)desc == isert_conn->login_req_buf) {
		rx_dma = isert_conn->login_req_dma;
		rx_buflen = ISER_RX_LOGIN_SIZE;
		pr_debug("ISER login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			 rx_dma, rx_buflen);
	} else {
		rx_dma = desc->dma_addr;
		rx_buflen = ISER_RX_PAYLOAD_SIZE;
		pr_debug("ISER req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			 rx_dma, rx_buflen);
	}

	ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);

	hdr = &desc->iscsi_header;
	pr_debug("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
		 hdr->opcode, hdr->itt, hdr->flags,
		 (int)(xfer_len - ISER_HEADERS_LEN));

	if ((char *)desc == isert_conn->login_req_buf) {
		/*
		 * Login path: record payload length and either process the
		 * request now (non-first requests) or signal login_req_comp
		 * so the login thread picks it up.
		 */
		isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN;
		if (isert_conn->conn) {
			struct iscsi_login *login = isert_conn->conn->conn_login;

			if (login && !login->first_request)
				isert_rx_login_req(isert_conn);
		}
		mutex_lock(&isert_conn->conn_mutex);
		complete(&isert_conn->login_req_comp);
		mutex_unlock(&isert_conn->conn_mutex);
	} else {
		isert_rx_do_work(desc, isert_conn);
	}

	ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
				      DMA_FROM_DEVICE);

	isert_conn->post_recv_buf_count--;
	pr_debug("iSERT: Decremented post_recv_buf_count: %d\n",
		 isert_conn->post_recv_buf_count);

	/* The login buffer is re-armed separately via rdma_post_recvl(). */
	if ((char *)desc == isert_conn->login_req_buf)
		return;

	/* Top the receive queue back up once it drops below the minimum. */
	outstanding = isert_conn->post_recv_buf_count;
	if (outstanding + ISERT_MIN_POSTED_RX <= ISERT_QP_MAX_RECV_DTOS) {
		int err, count = min(ISERT_QP_MAX_RECV_DTOS - outstanding,
				     ISERT_MIN_POSTED_RX);
		err = isert_post_recv(isert_conn, count);
		if (err) {
			pr_err("isert_post_recv() count: %d failed, %d\n",
			       count, err);
		}
	}
}
1598
/*
 * isert_map_data_buf - DMA-map a command's scatterlist window for RDMA.
 *
 * Maps at most ISCSI_ISER_SG_TABLESIZE entries starting at @offset into
 * @sg, choosing the DMA direction from the iSER opcode, and records the
 * mapping in @data for later isert_unmap_data_buf().
 *
 * Returns 0 on success, -EINVAL if ib_dma_map_sg() maps nothing.
 */
static int
isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		   struct scatterlist *sg, u32 nents, u32 length, u32 offset,
		   enum iser_ib_op_code op, struct isert_data_buf *data)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;

	/* RDMA_WRITE pushes payload toward the initiator; otherwise we
	 * are receiving data from the wire. */
	data->dma_dir = op == ISER_IB_RDMA_WRITE ?
			  DMA_TO_DEVICE : DMA_FROM_DEVICE;

	data->len = length - offset;
	data->offset = offset;
	/* NOTE(review): converting the byte offset to an SG index by
	 * dividing by PAGE_SIZE assumes each SG entry covers exactly one
	 * page — confirm against how the core builds the command SG list. */
	data->sg_off = data->offset / PAGE_SIZE;

	data->sg = &sg[data->sg_off];
	data->nents = min_t(unsigned int, nents - data->sg_off,
					  ISCSI_ISER_SG_TABLESIZE);
	/* Clamp to what one registration descriptor can cover. */
	data->len = min_t(unsigned int, data->len, ISCSI_ISER_SG_TABLESIZE *
					  PAGE_SIZE);

	data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents,
					data->dma_dir);
	if (unlikely(!data->dma_nents)) {
		pr_err("Cmd: unable to dma map SGs %p\n", sg);
		return -EINVAL;
	}

	pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
		 isert_cmd, data->dma_nents, data->sg, data->nents, data->len);

	return 0;
}
1631
1632static void
1633isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data)
1634{
1635 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1636
1637 ib_dma_unmap_sg(ib_dev, data->sg, data->nents, data->dma_dir);
1638 memset(data, 0, sizeof(*data));
1639}
1640
1641
1642
/*
 * isert_unmap_cmd - release per-command RDMA resources (non-fastreg path).
 *
 * Unmaps the data buffer and frees the send WR / SGE arrays allocated for
 * this command's RDMA, NULLing pointers so the teardown is idempotent.
 */
static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;

	pr_debug("isert_unmap_cmd: %p\n", isert_cmd);

	if (wr->data.sg) {
		pr_debug("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd);
		isert_unmap_data_buf(isert_conn, &wr->data);
	}

	if (wr->send_wr) {
		pr_debug("isert_unmap_cmd: %p free send_wr\n", isert_cmd);
		kfree(wr->send_wr);
		wr->send_wr = NULL;
	}

	if (wr->ib_sge) {
		pr_debug("isert_unmap_cmd: %p free ib_sge\n", isert_cmd);
		kfree(wr->ib_sge);
		wr->ib_sge = NULL;
	}
}
1667
1668static void
Sagi Grimberga3a5a822014-01-09 18:40:50 +02001669isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
Vu Pham59464ef2013-08-28 23:23:35 +03001670{
1671 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
Vu Pham59464ef2013-08-28 23:23:35 +03001672 LIST_HEAD(unmap_list);
1673
Sagi Grimberga3a5a822014-01-09 18:40:50 +02001674 pr_debug("unreg_fastreg_cmd: %p\n", isert_cmd);
Vu Pham59464ef2013-08-28 23:23:35 +03001675
1676 if (wr->fr_desc) {
Sagi Grimberga3a5a822014-01-09 18:40:50 +02001677 pr_debug("unreg_fastreg_cmd: %p free fr_desc %p\n",
Vu Pham59464ef2013-08-28 23:23:35 +03001678 isert_cmd, wr->fr_desc);
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02001679 if (wr->fr_desc->ind & ISERT_PROTECTED) {
1680 isert_unmap_data_buf(isert_conn, &wr->prot);
1681 wr->fr_desc->ind &= ~ISERT_PROTECTED;
1682 }
Vu Pham59464ef2013-08-28 23:23:35 +03001683 spin_lock_bh(&isert_conn->conn_lock);
Sagi Grimberga3a5a822014-01-09 18:40:50 +02001684 list_add_tail(&wr->fr_desc->list, &isert_conn->conn_fr_pool);
Vu Pham59464ef2013-08-28 23:23:35 +03001685 spin_unlock_bh(&isert_conn->conn_lock);
1686 wr->fr_desc = NULL;
1687 }
1688
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02001689 if (wr->data.sg) {
Sagi Grimberga3a5a822014-01-09 18:40:50 +02001690 pr_debug("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd);
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02001691 isert_unmap_data_buf(isert_conn, &wr->data);
Vu Pham59464ef2013-08-28 23:23:35 +03001692 }
1693
1694 wr->ib_sge = NULL;
1695 wr->send_wr = NULL;
1696}
1697
/*
 * isert_put_cmd - final put of an iSCSI command on the iSER transport.
 *
 * Unlinks the command from the connection list and hands it back to the
 * target core via the opcode-appropriate release path.  @comp_err is true
 * when called from the flush/error completion path and enables the extra
 * WRITE_PENDING reference drop below.
 */
static void
isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct iscsi_conn *conn = isert_conn->conn;
	struct isert_device *device = isert_conn->conn_device;

	pr_debug("Entering isert_put_cmd: %p\n", isert_cmd);

	switch (cmd->iscsi_opcode) {
	case ISCSI_OP_SCSI_CMD:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		if (cmd->data_direction == DMA_TO_DEVICE) {
			iscsit_stop_dataout_timer(cmd);
			/*
			 * Check for special case during comp_err where
			 * WRITE_PENDING has been handed off from core,
			 * but requires an extra target_put_sess_cmd()
			 * before transport_generic_free_cmd() below.
			 */
			if (comp_err &&
			    cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
				struct se_cmd *se_cmd = &cmd->se_cmd;

				target_put_sess_cmd(se_cmd->se_sess, se_cmd);
			}
		}

		/* Release RDMA state via the device's registered strategy
		 * (isert_unmap_cmd or isert_unreg_rdma). */
		device->unreg_rdma_mem(isert_cmd, isert_conn);
		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_REJECT:
	case ISCSI_OP_NOOP_OUT:
	case ISCSI_OP_TEXT:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		/*
		 * Handle special case for REJECT when iscsi_add_reject*() has
		 * overwritten the original iscsi_opcode assignment, and the
		 * associated cmd->se_cmd needs to be released.
		 */
		if (cmd->se_cmd.se_tfo != NULL) {
			pr_debug("Calling transport_generic_free_cmd from"
				 " isert_put_cmd for 0x%02x\n",
				 cmd->iscsi_opcode);
			transport_generic_free_cmd(&cmd->se_cmd, 0);
			break;
		}
		/*
		 * Fall-through
		 */
	default:
		iscsit_release_cmd(cmd);
		break;
	}
}
1770
1771static void
1772isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
1773{
1774 if (tx_desc->dma_addr != 0) {
1775 pr_debug("Calling ib_dma_unmap_single for tx_desc->dma_addr\n");
1776 ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
1777 ISER_HEADERS_LEN, DMA_TO_DEVICE);
1778 tx_desc->dma_addr = 0;
1779 }
1780}
1781
1782static void
1783isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
Nicholas Bellinger03e78482014-03-30 15:50:03 -07001784 struct ib_device *ib_dev, bool comp_err)
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001785{
Nicholas Bellingerdbbc5d12013-07-03 19:39:37 -07001786 if (isert_cmd->pdu_buf_dma != 0) {
1787 pr_debug("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n");
1788 ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
1789 isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
1790 isert_cmd->pdu_buf_dma = 0;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001791 }
1792
1793 isert_unmap_tx_desc(tx_desc, ib_dev);
Nicholas Bellinger03e78482014-03-30 15:50:03 -07001794 isert_put_cmd(isert_cmd, comp_err);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001795}
1796
Sagi Grimberg96b79732014-03-17 12:52:18 +02001797static int
1798isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
1799{
1800 struct ib_mr_status mr_status;
1801 int ret;
1802
1803 ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
1804 if (ret) {
1805 pr_err("ib_check_mr_status failed, ret %d\n", ret);
1806 goto fail_mr_status;
1807 }
1808
1809 if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
1810 u64 sec_offset_err;
1811 u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8;
1812
1813 switch (mr_status.sig_err.err_type) {
1814 case IB_SIG_BAD_GUARD:
1815 se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
1816 break;
1817 case IB_SIG_BAD_REFTAG:
1818 se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
1819 break;
1820 case IB_SIG_BAD_APPTAG:
1821 se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
1822 break;
1823 }
1824 sec_offset_err = mr_status.sig_err.sig_err_offset;
1825 do_div(sec_offset_err, block_size);
1826 se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba;
1827
1828 pr_err("isert: PI error found type %d at sector 0x%llx "
1829 "expected 0x%x vs actual 0x%x\n",
1830 mr_status.sig_err.err_type,
1831 (unsigned long long)se_cmd->bad_sector,
1832 mr_status.sig_err.expected,
1833 mr_status.sig_err.actual);
1834 ret = 1;
1835 }
1836
1837fail_mr_status:
1838 return ret;
1839}
1840
/*
 * isert_completion_rdma_write - RDMA_WRITE (READ data-in) completed.
 *
 * Checks T10-PI status when the fastreg descriptor was protected, frees
 * the RDMA resources, and either sends the SCSI response or a check
 * condition if a PI error was detected.
 */
static void
isert_completion_rdma_write(struct iser_tx_desc *tx_desc,
			    struct isert_cmd *isert_cmd)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct isert_device *device = isert_conn->conn_device;
	int ret = 0;

	if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
		ret = isert_check_pi_status(se_cmd,
					    wr->fr_desc->pi_ctx->sig_mr);
		wr->fr_desc->ind &= ~ISERT_PROTECTED;
	}

	device->unreg_rdma_mem(isert_cmd, isert_conn);
	/* RDMA WRs are already accounted for by the caller; zero so the
	 * later response completion only decrements once. */
	wr->send_wr_num = 0;
	if (ret)
		transport_send_check_condition_and_sense(se_cmd,
							 se_cmd->pi_err, 0);
	else
		isert_put_response(isert_conn->conn, cmd);
}
1866
/*
 * isert_completion_rdma_read - RDMA_READ (WRITE data-out) completed.
 *
 * Checks T10-PI status when protected, tears down the RDMA resources,
 * marks the data-out phase complete, and either executes the command in
 * the target core or returns a check condition on PI failure.
 */
static void
isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
			   struct isert_cmd *isert_cmd)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct isert_device *device = isert_conn->conn_device;
	int ret = 0;

	if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
		ret = isert_check_pi_status(se_cmd,
					    wr->fr_desc->pi_ctx->sig_mr);
		wr->fr_desc->ind &= ~ISERT_PROTECTED;
	}

	iscsit_stop_dataout_timer(cmd);
	device->unreg_rdma_mem(isert_cmd, isert_conn);
	cmd->write_data_done = wr->data.len;
	/* RDMA WRs were accounted by the caller; see __isert_send_completion. */
	wr->send_wr_num = 0;

	pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
	spin_lock_bh(&cmd->istate_lock);
	cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
	cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
	spin_unlock_bh(&cmd->istate_lock);

	if (ret)
		transport_send_check_condition_and_sense(se_cmd,
							 se_cmd->pi_err, 0);
	else
		target_execute_cmd(se_cmd);
}
1901
/*
 * isert_do_control_comp - workqueue handler finishing control-PDU sends.
 *
 * Runs in isert_comp_wq context (queued by isert_response_completion) so
 * the post-handler callbacks below may sleep.  Each case decrements the
 * connection's post_send_buf_count for the completed send.
 */
static void
isert_do_control_comp(struct work_struct *work)
{
	struct isert_cmd *isert_cmd = container_of(work,
			struct isert_cmd, comp_work);
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;

	switch (cmd->i_state) {
	case ISTATE_SEND_TASKMGTRSP:
		pr_debug("Calling iscsit_tmr_post_handler >>>>>>>>>>>>>>>>>\n");

		atomic_dec(&isert_conn->post_send_buf_count);
		iscsit_tmr_post_handler(cmd, cmd->conn);

		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
		break;
	case ISTATE_SEND_REJECT:
		pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n");
		atomic_dec(&isert_conn->post_send_buf_count);

		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
		break;
	case ISTATE_SEND_LOGOUTRSP:
		pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");

		/* NOTE(review): unlike the other cases this one does not
		 * call isert_completion_put(); the logout post handler
		 * presumably owns the remaining teardown — confirm. */
		atomic_dec(&isert_conn->post_send_buf_count);
		iscsit_logout_post_handler(cmd, cmd->conn);
		break;
	case ISTATE_SEND_TEXTRSP:
		atomic_dec(&isert_conn->post_send_buf_count);
		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
		break;
	default:
		pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state);
		dump_stack();
		break;
	}
}
1945
/*
 * isert_response_completion - handle a completed ISER_IB_SEND response.
 *
 * Control PDUs (TMR/logout/reject/text) are deferred to isert_comp_wq so
 * their post handlers can run in sleepable context; everything else is
 * completed inline after adjusting post_send_buf_count.
 */
static void
isert_response_completion(struct iser_tx_desc *tx_desc,
			  struct isert_cmd *isert_cmd,
			  struct isert_conn *isert_conn,
			  struct ib_device *ib_dev)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;

	if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
	    cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
	    cmd->i_state == ISTATE_SEND_REJECT ||
	    cmd->i_state == ISTATE_SEND_TEXTRSP) {
		isert_unmap_tx_desc(tx_desc, ib_dev);

		INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
		queue_work(isert_comp_wq, &isert_cmd->comp_work);
		return;
	}

	/**
	 * If send_wr_num is 0 this means that we got
	 * RDMA completion and we cleared it and we should
	 * simply decrement the response post. else the
	 * response is incorporated in send_wr_num, just
	 * sub it.
	 **/
	if (wr->send_wr_num)
		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
	else
		atomic_dec(&isert_conn->post_send_buf_count);

	cmd->i_state = ISTATE_SENT_STATUS;
	isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
}
1981
/*
 * __isert_send_completion - dispatch one successful TX completion.
 *
 * Descriptors without an attached isert_cmd (e.g. login responses) are
 * simply unmapped and accounted; otherwise dispatch on the iSER opcode
 * recorded in the command's rdma_wr.
 */
static void
__isert_send_completion(struct iser_tx_desc *tx_desc,
			struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
	struct isert_rdma_wr *wr;

	if (!isert_cmd) {
		atomic_dec(&isert_conn->post_send_buf_count);
		isert_unmap_tx_desc(tx_desc, ib_dev);
		return;
	}
	wr = &isert_cmd->rdma_wr;

	switch (wr->iser_ib_op) {
	case ISER_IB_RECV:
		/* RECVs never complete on the TX CQ — this is a bug. */
		pr_err("isert_send_completion: Got ISER_IB_RECV\n");
		dump_stack();
		break;
	case ISER_IB_SEND:
		pr_debug("isert_send_completion: Got ISER_IB_SEND\n");
		isert_response_completion(tx_desc, isert_cmd,
					  isert_conn, ib_dev);
		break;
	case ISER_IB_RDMA_WRITE:
		pr_debug("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
		/* Account for all WRs of the RDMA before completing. */
		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
		isert_completion_rdma_write(tx_desc, isert_cmd);
		break;
	case ISER_IB_RDMA_READ:
		pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");

		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
		isert_completion_rdma_read(tx_desc, isert_cmd);
		break;
	default:
		pr_err("Unknown wr->iser_ib_op: 0x%02x\n", wr->iser_ib_op);
		dump_stack();
		break;
	}
}
2024
/*
 * isert_send_completion - complete a TX descriptor plus any completions
 * that were coalesced onto it.
 */
static void
isert_send_completion(struct iser_tx_desc *tx_desc,
		      struct isert_conn *isert_conn)
{
	struct llist_node *llnode = tx_desc->comp_llnode_batch;
	struct iser_tx_desc *t;
	/*
	 * Drain coalesced completion llist starting from comp_llnode_batch
	 * setup in isert_init_send_wr(), and then complete trailing tx_desc.
	 */
	while (llnode) {
		t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
		llnode = llist_next(llnode);
		__isert_send_completion(t, isert_conn);
	}
	__isert_send_completion(tx_desc, isert_conn);
}
2042
/*
 * isert_cq_drain_comp_llist - flush all pending coalesced completions on
 * connection teardown.
 *
 * Atomically takes ownership of the whole conn_comp_llist (batch counter
 * reset under conn_mutex), then puts every queued descriptor with
 * comp_err = true.
 */
static void
isert_cq_drain_comp_llist(struct isert_conn *isert_conn, struct ib_device *ib_dev)
{
	struct llist_node *llnode;
	struct isert_rdma_wr *wr;
	struct iser_tx_desc *t;

	mutex_lock(&isert_conn->conn_mutex);
	llnode = llist_del_all(&isert_conn->conn_comp_llist);
	isert_conn->conn_comp_batch = 0;
	mutex_unlock(&isert_conn->conn_mutex);

	while (llnode) {
		t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
		llnode = llist_next(llnode);
		wr = &t->isert_cmd->rdma_wr;

		/**
		 * If send_wr_num is 0 this means that we got
		 * RDMA completion and we cleared it and we should
		 * simply decrement the response post. else the
		 * response is incorporated in send_wr_num, just
		 * sub it.
		 **/
		if (wr->send_wr_num)
			atomic_sub(wr->send_wr_num,
				   &isert_conn->post_send_buf_count);
		else
			atomic_dec(&isert_conn->post_send_buf_count);

		isert_completion_put(t, t->isert_cmd, ib_dev, true);
	}
}
2076
/*
 * isert_cq_tx_comp_err - error path for a failed/flushed TX completion.
 *
 * First puts any completions coalesced onto this descriptor's batch list
 * (each with its own post_send accounting), then releases the descriptor
 * itself with comp_err = true.
 */
static void
isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
	struct llist_node *llnode = tx_desc->comp_llnode_batch;
	struct isert_rdma_wr *wr;
	struct iser_tx_desc *t;

	while (llnode) {
		t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
		llnode = llist_next(llnode);
		wr = &t->isert_cmd->rdma_wr;

		/**
		 * If send_wr_num is 0 this means that we got
		 * RDMA completion and we cleared it and we should
		 * simply decrement the response post. else the
		 * response is incorporated in send_wr_num, just
		 * sub it.
		 **/
		if (wr->send_wr_num)
			atomic_sub(wr->send_wr_num,
				   &isert_conn->post_send_buf_count);
		else
			atomic_dec(&isert_conn->post_send_buf_count);

		isert_completion_put(t, t->isert_cmd, ib_dev, true);
	}
	tx_desc->comp_llnode_batch = NULL;

	/* Descriptors without a command (e.g. login) just need unmapping. */
	if (!isert_cmd)
		isert_unmap_tx_desc(tx_desc, ib_dev);
	else
		isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
}
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002113
/*
 * isert_cq_rx_comp_err - RX error path; tears the connection down once
 * the last posted receive has drained (post_recv_buf_count == 0).
 *
 * Waits for outstanding session commands and for all posted sends to
 * complete before terminating the connection and kicking connection
 * reinstatement.
 */
static void
isert_cq_rx_comp_err(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iscsi_conn *conn = isert_conn->conn;

	/* Only act once every posted receive buffer has flushed back. */
	if (isert_conn->post_recv_buf_count)
		return;

	isert_cq_drain_comp_llist(isert_conn, ib_dev);

	if (conn->sess) {
		target_sess_cmd_list_set_waiting(conn->sess->se_sess);
		target_wait_for_sess_cmds(conn->sess->se_sess);
	}

	/* NOTE(review): coarse 3s polling for send drain — functional but
	 * slow; a completion-based wait would be preferable. */
	while (atomic_read(&isert_conn->post_send_buf_count))
		msleep(3000);

	mutex_lock(&isert_conn->conn_mutex);
	isert_conn_terminate(isert_conn);
	mutex_unlock(&isert_conn->conn_mutex);

	iscsit_cause_connection_reinstatement(isert_conn->conn, 0);

	complete(&isert_conn->conn_wait_comp_err);
}
2141
/*
 * isert_cq_tx_work - workqueue poller for one TX completion queue.
 *
 * Polls the CQ dry one WC at a time, dispatching successes to
 * isert_send_completion() and failures to the error path, then re-arms
 * CQ notification.
 */
static void
isert_cq_tx_work(struct work_struct *work)
{
	struct isert_cq_desc *cq_desc = container_of(work,
				struct isert_cq_desc, cq_tx_work);
	struct isert_device *device = cq_desc->device;
	int cq_index = cq_desc->cq_index;
	struct ib_cq *tx_cq = device->dev_tx_cq[cq_index];
	struct isert_conn *isert_conn;
	struct iser_tx_desc *tx_desc;
	struct ib_wc wc;

	while (ib_poll_cq(tx_cq, 1, &wc) == 1) {
		/* wr_id carries the TX descriptor pointer. */
		tx_desc = (struct iser_tx_desc *)(unsigned long)wc.wr_id;
		isert_conn = wc.qp->qp_context;

		if (wc.status == IB_WC_SUCCESS) {
			isert_send_completion(tx_desc, isert_conn);
		} else {
			pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
			pr_debug("TX wc.status: 0x%08x\n", wc.status);
			pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err);

			/* Local-invalidate WRs carry a sentinel wr_id and
			 * need no descriptor teardown. */
			if (wc.wr_id != ISER_FASTREG_LI_WRID) {
				/* Already queued on a coalescing batch —
				 * its owner will complete it. */
				if (tx_desc->llnode_active)
					continue;

				atomic_dec(&isert_conn->post_send_buf_count);
				isert_cq_tx_comp_err(tx_desc, isert_conn);
			}
		}
	}

	ib_req_notify_cq(tx_cq, IB_CQ_NEXT_COMP);
}
2177
2178static void
2179isert_cq_tx_callback(struct ib_cq *cq, void *context)
2180{
2181 struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
2182
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002183 queue_work(isert_comp_wq, &cq_desc->cq_tx_work);
2184}
2185
/*
 * isert_cq_rx_work - workqueue poller for one RX completion queue.
 *
 * Polls the CQ dry, handing successful receives (with their byte count)
 * to isert_rx_completion() and routing failures to the RX error path,
 * then re-arms CQ notification.
 */
static void
isert_cq_rx_work(struct work_struct *work)
{
	struct isert_cq_desc *cq_desc = container_of(work,
			struct isert_cq_desc, cq_rx_work);
	struct isert_device *device = cq_desc->device;
	int cq_index = cq_desc->cq_index;
	struct ib_cq *rx_cq = device->dev_rx_cq[cq_index];
	struct isert_conn *isert_conn;
	struct iser_rx_desc *rx_desc;
	struct ib_wc wc;
	unsigned long xfer_len;

	while (ib_poll_cq(rx_cq, 1, &wc) == 1) {
		/* wr_id carries the RX descriptor pointer. */
		rx_desc = (struct iser_rx_desc *)(unsigned long)wc.wr_id;
		isert_conn = wc.qp->qp_context;

		if (wc.status == IB_WC_SUCCESS) {
			xfer_len = (unsigned long)wc.byte_len;
			isert_rx_completion(rx_desc, isert_conn, xfer_len);
		} else {
			pr_debug("RX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
			/* Flush errors are expected at teardown; don't spam. */
			if (wc.status != IB_WC_WR_FLUSH_ERR) {
				pr_debug("RX wc.status: 0x%08x\n", wc.status);
				pr_debug("RX wc.vendor_err: 0x%08x\n",
					 wc.vendor_err);
			}
			isert_conn->post_recv_buf_count--;
			isert_cq_rx_comp_err(isert_conn);
		}
	}

	ib_req_notify_cq(rx_cq, IB_CQ_NEXT_COMP);
}
2220
2221static void
2222isert_cq_rx_callback(struct ib_cq *cq, void *context)
2223{
2224 struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
2225
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002226 queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
2227}
2228
2229static int
2230isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
2231{
2232 struct ib_send_wr *wr_failed;
2233 int ret;
2234
2235 atomic_inc(&isert_conn->post_send_buf_count);
2236
2237 ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr,
2238 &wr_failed);
2239 if (ret) {
2240 pr_err("ib_post_send failed with %d\n", ret);
2241 atomic_dec(&isert_conn->post_send_buf_count);
2242 return ret;
2243 }
2244 return ret;
2245}
2246
/*
 * isert_put_response - build and post a SCSI Response PDU.
 *
 * Constructs the iSCSI response header in the command's TX descriptor
 * and, when sense data is present, DMA-maps the (padded) sense buffer as
 * a second SGE so it rides in the same send.
 *
 * Returns the result of isert_post_response().
 */
static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
				&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_rsp_pdu(cmd, conn, true, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	/*
	 * Attach SENSE DATA payload to iSCSI Response PDU
	 */
	if (cmd->se_cmd.sense_buffer &&
	    ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	    (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
		struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		u32 padding, pdu_len;

		/* Sense payload is prefixed by its big-endian length. */
		put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
				   cmd->sense_buffer);
		cmd->se_cmd.scsi_sense_length += sizeof(__be16);

		/* Pad the data segment to a 4-byte boundary per RFC 3720. */
		padding = -(cmd->se_cmd.scsi_sense_length) & 3;
		hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
		pdu_len = cmd->se_cmd.scsi_sense_length + padding;

		/* NOTE(review): mapping result is not checked with
		 * ib_dma_mapping_error() — a failed mapping would be posted
		 * unnoticed; confirm and consider adding the check. */
		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
				(void *)cmd->sense_buffer, pdu_len,
				DMA_TO_DEVICE);

		isert_cmd->pdu_buf_len = pdu_len;
		tx_dsg->addr = isert_cmd->pdu_buf_dma;
		tx_dsg->length = pdu_len;
		tx_dsg->lkey = isert_conn->conn_mr->lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}

	isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);

	pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}
2294
Nicholas Bellinger131e6ab2014-03-22 14:55:56 -07002295static void
2296isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2297{
2298 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2299 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2300 struct isert_device *device = isert_conn->conn_device;
2301
2302 spin_lock_bh(&conn->cmd_lock);
2303 if (!list_empty(&cmd->i_conn_node))
2304 list_del_init(&cmd->i_conn_node);
2305 spin_unlock_bh(&conn->cmd_lock);
2306
2307 if (cmd->data_direction == DMA_TO_DEVICE)
2308 iscsit_stop_dataout_timer(cmd);
2309
2310 device->unreg_rdma_mem(isert_cmd, isert_conn);
2311}
2312
Nicholas Bellingere70beee2014-04-02 12:52:38 -07002313static enum target_prot_op
2314isert_get_sup_prot_ops(struct iscsi_conn *conn)
2315{
2316 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2317 struct isert_device *device = isert_conn->conn_device;
2318
2319 if (device->pi_capable)
2320 return TARGET_PROT_ALL;
2321
2322 return TARGET_PROT_NORMAL;
2323}
2324
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002325static int
2326isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
2327 bool nopout_response)
2328{
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07002329 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002330 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2331 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2332
2333 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2334 iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
2335 &isert_cmd->tx_desc.iscsi_header,
2336 nopout_response);
2337 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
Nicholas Bellinger95b60f02013-11-05 13:16:12 -08002338 isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002339
Masanari Iida8b513d02013-05-21 23:13:12 +09002340 pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002341
2342 return isert_post_response(isert_conn, isert_cmd);
2343}
2344
2345static int
2346isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2347{
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07002348 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002349 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2350 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2351
2352 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2353 iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
2354 &isert_cmd->tx_desc.iscsi_header);
2355 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
Nicholas Bellinger95b60f02013-11-05 13:16:12 -08002356 isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002357
2358 pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
2359
2360 return isert_post_response(isert_conn, isert_cmd);
2361}
2362
2363static int
2364isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2365{
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07002366 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002367 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2368 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2369
2370 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2371 iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
2372 &isert_cmd->tx_desc.iscsi_header);
2373 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
Nicholas Bellinger95b60f02013-11-05 13:16:12 -08002374 isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002375
2376 pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
2377
2378 return isert_post_response(isert_conn, isert_cmd);
2379}
2380
2381static int
2382isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2383{
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07002384 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002385 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2386 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
Nicholas Bellinger3df8f682013-06-26 02:31:42 -07002387 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2388 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
2389 struct iscsi_reject *hdr =
2390 (struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002391
2392 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
Nicholas Bellinger3df8f682013-06-26 02:31:42 -07002393 iscsit_build_reject(cmd, conn, hdr);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002394 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
Nicholas Bellinger3df8f682013-06-26 02:31:42 -07002395
2396 hton24(hdr->dlength, ISCSI_HDR_LEN);
Nicholas Bellingerdbbc5d12013-07-03 19:39:37 -07002397 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
Nicholas Bellinger3df8f682013-06-26 02:31:42 -07002398 (void *)cmd->buf_ptr, ISCSI_HDR_LEN,
2399 DMA_TO_DEVICE);
Nicholas Bellingerdbbc5d12013-07-03 19:39:37 -07002400 isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
2401 tx_dsg->addr = isert_cmd->pdu_buf_dma;
Nicholas Bellinger3df8f682013-06-26 02:31:42 -07002402 tx_dsg->length = ISCSI_HDR_LEN;
2403 tx_dsg->lkey = isert_conn->conn_mr->lkey;
2404 isert_cmd->tx_desc.num_sge = 2;
2405
Nicholas Bellinger95b60f02013-11-05 13:16:12 -08002406 isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002407
2408 pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
2409
2410 return isert_post_response(isert_conn, isert_cmd);
2411}
2412
2413static int
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07002414isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2415{
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07002416 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07002417 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2418 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2419 struct iscsi_text_rsp *hdr =
2420 (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
2421 u32 txt_rsp_len;
2422 int rc;
2423
2424 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
Sagi Grimberg22c7aaa2014-06-10 18:27:59 +03002425 rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND);
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07002426 if (rc < 0)
2427 return rc;
2428
2429 txt_rsp_len = rc;
2430 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2431
2432 if (txt_rsp_len) {
2433 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2434 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
2435 void *txt_rsp_buf = cmd->buf_ptr;
2436
2437 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
2438 txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);
2439
2440 isert_cmd->pdu_buf_len = txt_rsp_len;
2441 tx_dsg->addr = isert_cmd->pdu_buf_dma;
2442 tx_dsg->length = txt_rsp_len;
2443 tx_dsg->lkey = isert_conn->conn_mr->lkey;
2444 isert_cmd->tx_desc.num_sge = 2;
2445 }
Nicholas Bellinger95b60f02013-11-05 13:16:12 -08002446 isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07002447
2448 pr_debug("Posting Text Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
2449
2450 return isert_post_response(isert_conn, isert_cmd);
2451}
2452
/*
 * Fill one RDMA work request with SGEs covering up to max_sge entries
 * of the command's data scatterlist, starting at byte @offset and
 * bounded by @data_left. Used by isert_map_rdma() to chunk a transfer
 * into multiple WRs.
 *
 * Returns the number of scatterlist entries consumed (== num_sge set
 * on @send_wr); the caller advances its ib_sge cursor by that count.
 */
static int
isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		    struct ib_sge *ib_sge, struct ib_send_wr *send_wr,
		    u32 data_left, u32 offset)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct scatterlist *sg_start, *tmp_sg;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	u32 sg_off, page_off;
	int i = 0, sg_nents;

	/* Translate the byte offset into a scatterlist index + intra-page
	 * offset. NOTE(review): this assumes each SG entry is PAGE_SIZE
	 * aligned/sized — presumably guaranteed by the target core's
	 * allocation; confirm before reusing elsewhere. */
	sg_off = offset / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
	page_off = offset % PAGE_SIZE;

	send_wr->sg_list = ib_sge;
	send_wr->num_sge = sg_nents;
	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
	/*
	 * Perform mapping of TCM scatterlist memory ib_sge dma_addr.
	 */
	for_each_sg(sg_start, tmp_sg, sg_nents, i) {
		pr_debug("ISER RDMA from SGL dma_addr: 0x%16llx dma_len: %u, page_off: %u\n",
			 (unsigned long long)tmp_sg->dma_address,
			 tmp_sg->length, page_off);

		ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
		/* Clamp the final SGE so the WR never exceeds data_left. */
		ib_sge->length = min_t(u32, data_left,
				       ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
		ib_sge->lkey = isert_conn->conn_mr->lkey;

		pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n",
			 ib_sge->addr, ib_sge->length, ib_sge->lkey);
		/* Only the first entry may start mid-page. */
		page_off = 0;
		data_left -= ib_sge->length;
		ib_sge++;
		pr_debug("Incrementing ib_sge pointer to %p\n", ib_sge);
	}

	pr_debug("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
		 send_wr->sg_list, send_wr->num_sge);

	return sg_nents;
}
2498
2499static int
Vu Pham90ecc6e2013-08-28 23:23:33 +03002500isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2501 struct isert_rdma_wr *wr)
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002502{
2503 struct se_cmd *se_cmd = &cmd->se_cmd;
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07002504 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002505 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002506 struct isert_data_buf *data = &wr->data;
Vu Pham90ecc6e2013-08-28 23:23:33 +03002507 struct ib_send_wr *send_wr;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002508 struct ib_sge *ib_sge;
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002509 u32 offset, data_len, data_left, rdma_write_max, va_offset = 0;
2510 int ret = 0, i, ib_sge_cnt;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002511
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002512 isert_cmd->tx_desc.isert_cmd = isert_cmd;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002513
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002514 offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
2515 ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
2516 se_cmd->t_data_nents, se_cmd->data_length,
2517 offset, wr->iser_ib_op, &wr->data);
2518 if (ret)
2519 return ret;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002520
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002521 data_left = data->len;
2522 offset = data->offset;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002523
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002524 ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002525 if (!ib_sge) {
Vu Pham90ecc6e2013-08-28 23:23:33 +03002526 pr_warn("Unable to allocate ib_sge\n");
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002527 ret = -ENOMEM;
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002528 goto unmap_cmd;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002529 }
Vu Pham90ecc6e2013-08-28 23:23:33 +03002530 wr->ib_sge = ib_sge;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002531
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002532 wr->send_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002533 wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
2534 GFP_KERNEL);
2535 if (!wr->send_wr) {
Vu Pham90ecc6e2013-08-28 23:23:33 +03002536 pr_debug("Unable to allocate wr->send_wr\n");
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002537 ret = -ENOMEM;
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002538 goto unmap_cmd;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002539 }
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002540
2541 wr->isert_cmd = isert_cmd;
2542 rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002543
2544 for (i = 0; i < wr->send_wr_num; i++) {
2545 send_wr = &isert_cmd->rdma_wr.send_wr[i];
2546 data_len = min(data_left, rdma_write_max);
2547
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002548 send_wr->send_flags = 0;
Vu Pham90ecc6e2013-08-28 23:23:33 +03002549 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
2550 send_wr->opcode = IB_WR_RDMA_WRITE;
2551 send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset;
2552 send_wr->wr.rdma.rkey = isert_cmd->read_stag;
2553 if (i + 1 == wr->send_wr_num)
2554 send_wr->next = &isert_cmd->tx_desc.send_wr;
2555 else
2556 send_wr->next = &wr->send_wr[i + 1];
2557 } else {
2558 send_wr->opcode = IB_WR_RDMA_READ;
2559 send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset;
2560 send_wr->wr.rdma.rkey = isert_cmd->write_stag;
2561 if (i + 1 == wr->send_wr_num)
2562 send_wr->send_flags = IB_SEND_SIGNALED;
2563 else
2564 send_wr->next = &wr->send_wr[i + 1];
2565 }
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002566
2567 ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
2568 send_wr, data_len, offset);
2569 ib_sge += ib_sge_cnt;
2570
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002571 offset += data_len;
Vu Pham90ecc6e2013-08-28 23:23:33 +03002572 va_offset += data_len;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002573 data_left -= data_len;
2574 }
Vu Pham90ecc6e2013-08-28 23:23:33 +03002575
2576 return 0;
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002577unmap_cmd:
2578 isert_unmap_data_buf(isert_conn, data);
2579
Vu Pham90ecc6e2013-08-28 23:23:33 +03002580 return ret;
2581}
2582
2583static int
Vu Pham59464ef2013-08-28 23:23:35 +03002584isert_map_fr_pagelist(struct ib_device *ib_dev,
2585 struct scatterlist *sg_start, int sg_nents, u64 *fr_pl)
2586{
2587 u64 start_addr, end_addr, page, chunk_start = 0;
2588 struct scatterlist *tmp_sg;
2589 int i = 0, new_chunk, last_ent, n_pages;
2590
2591 n_pages = 0;
2592 new_chunk = 1;
2593 last_ent = sg_nents - 1;
2594 for_each_sg(sg_start, tmp_sg, sg_nents, i) {
2595 start_addr = ib_sg_dma_address(ib_dev, tmp_sg);
2596 if (new_chunk)
2597 chunk_start = start_addr;
2598 end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg);
2599
2600 pr_debug("SGL[%d] dma_addr: 0x%16llx len: %u\n",
2601 i, (unsigned long long)tmp_sg->dma_address,
2602 tmp_sg->length);
2603
2604 if ((end_addr & ~PAGE_MASK) && i < last_ent) {
2605 new_chunk = 0;
2606 continue;
2607 }
2608 new_chunk = 1;
2609
2610 page = chunk_start & PAGE_MASK;
2611 do {
2612 fr_pl[n_pages++] = page;
2613 pr_debug("Mapped page_list[%d] page_addr: 0x%16llx\n",
2614 n_pages - 1, page);
2615 page += PAGE_SIZE;
2616 } while (page < end_addr);
2617 }
2618
2619 return n_pages;
2620}
2621
2622static int
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002623isert_fast_reg_mr(struct isert_conn *isert_conn,
2624 struct fast_reg_descriptor *fr_desc,
2625 struct isert_data_buf *mem,
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002626 enum isert_indicator ind,
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002627 struct ib_sge *sge)
Vu Pham59464ef2013-08-28 23:23:35 +03002628{
Vu Pham59464ef2013-08-28 23:23:35 +03002629 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002630 struct ib_mr *mr;
2631 struct ib_fast_reg_page_list *frpl;
Vu Pham59464ef2013-08-28 23:23:35 +03002632 struct ib_send_wr fr_wr, inv_wr;
2633 struct ib_send_wr *bad_wr, *wr = NULL;
Sagi Grimberg9bd626e2014-01-09 18:40:54 +02002634 int ret, pagelist_len;
2635 u32 page_off;
Vu Pham59464ef2013-08-28 23:23:35 +03002636 u8 key;
Vu Pham59464ef2013-08-28 23:23:35 +03002637
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002638 if (mem->dma_nents == 1) {
2639 sge->lkey = isert_conn->conn_mr->lkey;
2640 sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
2641 sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002642 pr_debug("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n",
2643 __func__, __LINE__, sge->addr, sge->length,
2644 sge->lkey);
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002645 return 0;
2646 }
2647
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002648 if (ind == ISERT_DATA_KEY_VALID) {
2649 /* Registering data buffer */
2650 mr = fr_desc->data_mr;
2651 frpl = fr_desc->data_frpl;
2652 } else {
2653 /* Registering protection buffer */
2654 mr = fr_desc->pi_ctx->prot_mr;
2655 frpl = fr_desc->pi_ctx->prot_frpl;
2656 }
2657
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002658 page_off = mem->offset % PAGE_SIZE;
Vu Pham59464ef2013-08-28 23:23:35 +03002659
Sagi Grimberg9bd626e2014-01-09 18:40:54 +02002660 pr_debug("Use fr_desc %p sg_nents %d offset %u\n",
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002661 fr_desc, mem->nents, mem->offset);
Vu Pham59464ef2013-08-28 23:23:35 +03002662
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002663 pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents,
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002664 &frpl->page_list[0]);
Vu Pham59464ef2013-08-28 23:23:35 +03002665
Sagi Grimbergd3e125d2014-02-19 17:50:23 +02002666 if (!(fr_desc->ind & ISERT_DATA_KEY_VALID)) {
Vu Pham59464ef2013-08-28 23:23:35 +03002667 memset(&inv_wr, 0, sizeof(inv_wr));
Nicholas Bellinger9bb4ca62014-02-27 07:02:48 -08002668 inv_wr.wr_id = ISER_FASTREG_LI_WRID;
Vu Pham59464ef2013-08-28 23:23:35 +03002669 inv_wr.opcode = IB_WR_LOCAL_INV;
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002670 inv_wr.ex.invalidate_rkey = mr->rkey;
Vu Pham59464ef2013-08-28 23:23:35 +03002671 wr = &inv_wr;
2672 /* Bump the key */
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002673 key = (u8)(mr->rkey & 0x000000FF);
2674 ib_update_fast_reg_key(mr, ++key);
Vu Pham59464ef2013-08-28 23:23:35 +03002675 }
2676
2677 /* Prepare FASTREG WR */
2678 memset(&fr_wr, 0, sizeof(fr_wr));
Nicholas Bellinger9bb4ca62014-02-27 07:02:48 -08002679 fr_wr.wr_id = ISER_FASTREG_LI_WRID;
Vu Pham59464ef2013-08-28 23:23:35 +03002680 fr_wr.opcode = IB_WR_FAST_REG_MR;
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002681 fr_wr.wr.fast_reg.iova_start = frpl->page_list[0] + page_off;
2682 fr_wr.wr.fast_reg.page_list = frpl;
Vu Pham59464ef2013-08-28 23:23:35 +03002683 fr_wr.wr.fast_reg.page_list_len = pagelist_len;
2684 fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002685 fr_wr.wr.fast_reg.length = mem->len;
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002686 fr_wr.wr.fast_reg.rkey = mr->rkey;
Vu Pham59464ef2013-08-28 23:23:35 +03002687 fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;
2688
2689 if (!wr)
2690 wr = &fr_wr;
2691 else
2692 wr->next = &fr_wr;
2693
2694 ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
2695 if (ret) {
2696 pr_err("fast registration failed, ret:%d\n", ret);
2697 return ret;
2698 }
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002699 fr_desc->ind &= ~ind;
Vu Pham59464ef2013-08-28 23:23:35 +03002700
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002701 sge->lkey = mr->lkey;
2702 sge->addr = frpl->page_list[0] + page_off;
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002703 sge->length = mem->len;
Vu Pham59464ef2013-08-28 23:23:35 +03002704
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002705 pr_debug("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n",
2706 __func__, __LINE__, sge->addr, sge->length,
2707 sge->lkey);
Vu Pham59464ef2013-08-28 23:23:35 +03002708
2709 return ret;
2710}
2711
/*
 * Fill one T10-DIF signature domain (wire or memory side) from the
 * command's protection settings. @sig_attrs is unused here; callers
 * pass it for signature symmetry with the other helpers.
 */
static inline void
isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs,
		     struct ib_sig_domain *domain)
{
	domain->sig_type = IB_SIG_TYPE_T10_DIF;
	domain->sig.dif.bg_type = IB_T10DIF_CRC;
	domain->sig.dif.pi_interval = se_cmd->se_dev->dev_attrib.block_size;
	domain->sig.dif.ref_tag = se_cmd->reftag_seed;
	/*
	 * At the moment we hard code those, but if in the future
	 * the target core would like to use it, we will take it
	 * from se_cmd.
	 */
	domain->sig.dif.apptag_check_mask = 0xffff;
	domain->sig.dif.app_escape = true;
	domain->sig.dif.ref_escape = true;
	/* DIF types 1/2 carry an incrementing reference tag to remap. */
	if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT ||
	    se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)
		domain->sig.dif.ref_remap = true;
};
2732
2733static int
2734isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
2735{
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002736 switch (se_cmd->prot_op) {
2737 case TARGET_PROT_DIN_INSERT:
2738 case TARGET_PROT_DOUT_STRIP:
Sagi Grimberg78eda2b2014-08-13 19:54:35 +03002739 sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
Sagi Grimberg3d73cf12014-08-13 19:54:34 +03002740 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002741 break;
2742 case TARGET_PROT_DOUT_INSERT:
2743 case TARGET_PROT_DIN_STRIP:
Sagi Grimberg78eda2b2014-08-13 19:54:35 +03002744 sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
Sagi Grimberg3d73cf12014-08-13 19:54:34 +03002745 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002746 break;
2747 case TARGET_PROT_DIN_PASS:
2748 case TARGET_PROT_DOUT_PASS:
Sagi Grimberg3d73cf12014-08-13 19:54:34 +03002749 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
2750 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002751 break;
2752 default:
2753 pr_err("Unsupported PI operation %d\n", se_cmd->prot_op);
2754 return -EINVAL;
2755 }
2756
2757 return 0;
2758}
2759
2760static inline u8
2761isert_set_prot_checks(u8 prot_checks)
2762{
2763 return (prot_checks & TARGET_DIF_CHECK_GUARD ? 0xc0 : 0) |
2764 (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
2765 (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
2766}
2767
2768static int
Sagi Grimberg570db172014-12-02 16:57:31 +02002769isert_reg_sig_mr(struct isert_conn *isert_conn,
2770 struct se_cmd *se_cmd,
2771 struct isert_rdma_wr *rdma_wr,
2772 struct fast_reg_descriptor *fr_desc)
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002773{
2774 struct ib_send_wr sig_wr, inv_wr;
2775 struct ib_send_wr *bad_wr, *wr = NULL;
2776 struct pi_context *pi_ctx = fr_desc->pi_ctx;
2777 struct ib_sig_attrs sig_attrs;
2778 int ret;
2779 u32 key;
2780
2781 memset(&sig_attrs, 0, sizeof(sig_attrs));
2782 ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
2783 if (ret)
2784 goto err;
2785
2786 sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks);
2787
2788 if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) {
2789 memset(&inv_wr, 0, sizeof(inv_wr));
2790 inv_wr.opcode = IB_WR_LOCAL_INV;
Sagi Grimbergc2caa202014-03-17 12:52:16 +02002791 inv_wr.wr_id = ISER_FASTREG_LI_WRID;
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002792 inv_wr.ex.invalidate_rkey = pi_ctx->sig_mr->rkey;
2793 wr = &inv_wr;
2794 /* Bump the key */
2795 key = (u8)(pi_ctx->sig_mr->rkey & 0x000000FF);
2796 ib_update_fast_reg_key(pi_ctx->sig_mr, ++key);
2797 }
2798
2799 memset(&sig_wr, 0, sizeof(sig_wr));
2800 sig_wr.opcode = IB_WR_REG_SIG_MR;
Sagi Grimbergc2caa202014-03-17 12:52:16 +02002801 sig_wr.wr_id = ISER_FASTREG_LI_WRID;
Sagi Grimberg570db172014-12-02 16:57:31 +02002802 sig_wr.sg_list = &rdma_wr->ib_sg[DATA];
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002803 sig_wr.num_sge = 1;
2804 sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE;
2805 sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
2806 sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
2807 if (se_cmd->t_prot_sg)
Sagi Grimberg570db172014-12-02 16:57:31 +02002808 sig_wr.wr.sig_handover.prot = &rdma_wr->ib_sg[PROT];
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002809
2810 if (!wr)
2811 wr = &sig_wr;
2812 else
2813 wr->next = &sig_wr;
2814
2815 ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
2816 if (ret) {
2817 pr_err("fast registration failed, ret:%d\n", ret);
2818 goto err;
2819 }
2820 fr_desc->ind &= ~ISERT_SIG_KEY_VALID;
2821
Sagi Grimberg570db172014-12-02 16:57:31 +02002822 rdma_wr->ib_sg[SIG].lkey = pi_ctx->sig_mr->lkey;
2823 rdma_wr->ib_sg[SIG].addr = 0;
2824 rdma_wr->ib_sg[SIG].length = se_cmd->data_length;
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002825 if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
2826 se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
2827 /*
2828 * We have protection guards on the wire
2829 * so we need to set a larget transfer
2830 */
Sagi Grimberg570db172014-12-02 16:57:31 +02002831 rdma_wr->ib_sg[SIG].length += se_cmd->prot_length;
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002832
2833 pr_debug("sig_sge: addr: 0x%llx length: %u lkey: %x\n",
Sagi Grimberg570db172014-12-02 16:57:31 +02002834 rdma_wr->ib_sg[SIG].addr, rdma_wr->ib_sg[SIG].length,
2835 rdma_wr->ib_sg[SIG].lkey);
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002836err:
2837 return ret;
2838}
2839
Vu Pham59464ef2013-08-28 23:23:35 +03002840static int
Sagi Grimberg570db172014-12-02 16:57:31 +02002841isert_handle_prot_cmd(struct isert_conn *isert_conn,
2842 struct isert_cmd *isert_cmd,
2843 struct isert_rdma_wr *wr)
2844{
2845 struct isert_device *device = isert_conn->conn_device;
2846 struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
2847 int ret;
2848
2849 if (!wr->fr_desc->pi_ctx) {
2850 ret = isert_create_pi_ctx(wr->fr_desc,
2851 device->ib_device,
2852 isert_conn->conn_pd);
2853 if (ret) {
2854 pr_err("conn %p failed to allocate pi_ctx\n",
2855 isert_conn);
2856 return ret;
2857 }
2858 }
2859
2860 if (se_cmd->t_prot_sg) {
2861 ret = isert_map_data_buf(isert_conn, isert_cmd,
2862 se_cmd->t_prot_sg,
2863 se_cmd->t_prot_nents,
2864 se_cmd->prot_length,
2865 0, wr->iser_ib_op, &wr->prot);
2866 if (ret) {
2867 pr_err("conn %p failed to map protection buffer\n",
2868 isert_conn);
2869 return ret;
2870 }
2871
2872 memset(&wr->ib_sg[PROT], 0, sizeof(wr->ib_sg[PROT]));
2873 ret = isert_fast_reg_mr(isert_conn, wr->fr_desc, &wr->prot,
2874 ISERT_PROT_KEY_VALID, &wr->ib_sg[PROT]);
2875 if (ret) {
2876 pr_err("conn %p failed to fast reg mr\n",
2877 isert_conn);
2878 goto unmap_prot_cmd;
2879 }
2880 }
2881
2882 ret = isert_reg_sig_mr(isert_conn, se_cmd, wr, wr->fr_desc);
2883 if (ret) {
2884 pr_err("conn %p failed to fast reg mr\n",
2885 isert_conn);
2886 goto unmap_prot_cmd;
2887 }
2888 wr->fr_desc->ind |= ISERT_PROTECTED;
2889
2890 return 0;
2891
2892unmap_prot_cmd:
2893 if (se_cmd->t_prot_sg)
2894 isert_unmap_data_buf(isert_conn, &wr->prot);
2895
2896 return ret;
2897}
2898
2899static int
Sagi Grimberga3a5a822014-01-09 18:40:50 +02002900isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2901 struct isert_rdma_wr *wr)
Vu Pham59464ef2013-08-28 23:23:35 +03002902{
2903 struct se_cmd *se_cmd = &cmd->se_cmd;
2904 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002905 struct isert_conn *isert_conn = conn->context;
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002906 struct fast_reg_descriptor *fr_desc = NULL;
Sagi Grimberg570db172014-12-02 16:57:31 +02002907 struct ib_send_wr *send_wr;
2908 struct ib_sge *ib_sg;
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002909 u32 offset;
2910 int ret = 0;
Vu Pham59464ef2013-08-28 23:23:35 +03002911 unsigned long flags;
2912
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002913 isert_cmd->tx_desc.isert_cmd = isert_cmd;
2914
2915 offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
2916 ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
2917 se_cmd->t_data_nents, se_cmd->data_length,
2918 offset, wr->iser_ib_op, &wr->data);
2919 if (ret)
2920 return ret;
2921
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002922 if (wr->data.dma_nents != 1 ||
2923 se_cmd->prot_op != TARGET_PROT_NORMAL) {
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002924 spin_lock_irqsave(&isert_conn->conn_lock, flags);
2925 fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
2926 struct fast_reg_descriptor, list);
2927 list_del(&fr_desc->list);
2928 spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
2929 wr->fr_desc = fr_desc;
Vu Pham59464ef2013-08-28 23:23:35 +03002930 }
2931
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002932 ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data,
Sagi Grimberg570db172014-12-02 16:57:31 +02002933 ISERT_DATA_KEY_VALID, &wr->ib_sg[DATA]);
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002934 if (ret)
2935 goto unmap_cmd;
Vu Pham59464ef2013-08-28 23:23:35 +03002936
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002937 if (se_cmd->prot_op != TARGET_PROT_NORMAL) {
Sagi Grimberg570db172014-12-02 16:57:31 +02002938 ret = isert_handle_prot_cmd(isert_conn, isert_cmd, wr);
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002939 if (ret)
Sagi Grimberg570db172014-12-02 16:57:31 +02002940 goto unmap_cmd;
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002941
Sagi Grimberg570db172014-12-02 16:57:31 +02002942 ib_sg = &wr->ib_sg[SIG];
2943 } else {
2944 ib_sg = &wr->ib_sg[DATA];
2945 }
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002946
Sagi Grimberg570db172014-12-02 16:57:31 +02002947 memcpy(&wr->s_ib_sge, ib_sg, sizeof(*ib_sg));
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002948 wr->ib_sge = &wr->s_ib_sge;
Vu Pham59464ef2013-08-28 23:23:35 +03002949 wr->send_wr_num = 1;
2950 memset(&wr->s_send_wr, 0, sizeof(*send_wr));
2951 wr->send_wr = &wr->s_send_wr;
Vu Pham59464ef2013-08-28 23:23:35 +03002952 wr->isert_cmd = isert_cmd;
Vu Pham59464ef2013-08-28 23:23:35 +03002953
2954 send_wr = &isert_cmd->rdma_wr.s_send_wr;
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002955 send_wr->sg_list = &wr->s_ib_sge;
Vu Pham59464ef2013-08-28 23:23:35 +03002956 send_wr->num_sge = 1;
2957 send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
2958 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
2959 send_wr->opcode = IB_WR_RDMA_WRITE;
2960 send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
2961 send_wr->wr.rdma.rkey = isert_cmd->read_stag;
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002962 send_wr->send_flags = se_cmd->prot_op == TARGET_PROT_NORMAL ?
2963 0 : IB_SEND_SIGNALED;
Vu Pham59464ef2013-08-28 23:23:35 +03002964 } else {
2965 send_wr->opcode = IB_WR_RDMA_READ;
2966 send_wr->wr.rdma.remote_addr = isert_cmd->write_va;
2967 send_wr->wr.rdma.rkey = isert_cmd->write_stag;
2968 send_wr->send_flags = IB_SEND_SIGNALED;
2969 }
2970
Vu Pham59464ef2013-08-28 23:23:35 +03002971 return 0;
Sagi Grimberg570db172014-12-02 16:57:31 +02002972
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002973unmap_cmd:
2974 if (fr_desc) {
2975 spin_lock_irqsave(&isert_conn->conn_lock, flags);
2976 list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
2977 spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
2978 }
2979 isert_unmap_data_buf(isert_conn, &wr->data);
Vu Pham59464ef2013-08-28 23:23:35 +03002980
Vu Pham59464ef2013-08-28 23:23:35 +03002981 return ret;
2982}
2983
/*
 * iscsit transport callback: push READ data to the initiator via
 * RDMA_WRITE. For unprotected commands the SCSI response PDU is built
 * here and chained behind the last write WR so one post covers both;
 * protected commands send the response separately after PI
 * verification.
 *
 * Returns 1 (response in flight) on success, or the registration
 * error code.
 */
static int
isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct isert_device *device = isert_conn->conn_device;
	struct ib_send_wr *wr_failed;
	int rc;

	pr_debug("Cmd: %p RDMA_WRITE data_length: %u\n",
		 isert_cmd, se_cmd->data_length);
	wr->iser_ib_op = ISER_IB_RDMA_WRITE;
	/* Dispatches to isert_map_rdma() or isert_reg_rdma(). */
	rc = device->reg_rdma_mem(conn, cmd, wr);
	if (rc) {
		pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
		return rc;
	}

	if (se_cmd->prot_op == TARGET_PROT_NORMAL) {
		/*
		 * Build isert_conn->tx_desc for iSCSI response PDU and attach
		 */
		isert_create_send_desc(isert_conn, isert_cmd,
				       &isert_cmd->tx_desc);
		iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
				     &isert_cmd->tx_desc.iscsi_header);
		isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
		isert_init_send_wr(isert_conn, isert_cmd,
				   &isert_cmd->tx_desc.send_wr, false);
		isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr;
		/* Account for the chained response WR as well. */
		wr->send_wr_num += 1;
	}

	atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);

	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
	if (rc) {
		pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
		/* Roll back the accounting for the WRs that never posted. */
		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
	}

	if (se_cmd->prot_op == TARGET_PROT_NORMAL)
		pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
			 "READ\n", isert_cmd);
	else
		pr_debug("Cmd: %p posted RDMA_WRITE for iSER Data READ\n",
			 isert_cmd);

	return 1;
}
3036
/*
 * iscsit transport callback: pull WRITE data from the initiator via
 * RDMA_READ (instead of soliciting DATA-OUT PDUs). The last read WR is
 * signaled; its completion resumes command execution.
 *
 * Returns 0 on success or the registration error code. @recovery is
 * unused here; resume state comes from cmd->write_data_done.
 */
static int
isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct isert_device *device = isert_conn->conn_device;
	struct ib_send_wr *wr_failed;
	int rc;

	pr_debug("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
		 isert_cmd, se_cmd->data_length, cmd->write_data_done);
	wr->iser_ib_op = ISER_IB_RDMA_READ;
	/* Dispatches to isert_map_rdma() or isert_reg_rdma(). */
	rc = device->reg_rdma_mem(conn, cmd, wr);
	if (rc) {
		pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
		return rc;
	}

	atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);

	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
	if (rc) {
		pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
		/* Roll back the accounting for the WRs that never posted. */
		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
	}
	pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
		 isert_cmd);

	return 0;
}
3069
3070static int
3071isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
3072{
3073 int ret;
3074
3075 switch (state) {
3076 case ISTATE_SEND_NOPIN_WANT_RESPONSE:
3077 ret = isert_put_nopin(cmd, conn, false);
3078 break;
3079 default:
3080 pr_err("Unknown immediate state: 0x%02x\n", state);
3081 ret = -EINVAL;
3082 break;
3083 }
3084
3085 return ret;
3086}
3087
3088static int
3089isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
3090{
3091 int ret;
3092
3093 switch (state) {
3094 case ISTATE_SEND_LOGOUTRSP:
3095 ret = isert_put_logout_rsp(cmd, conn);
3096 if (!ret) {
3097 pr_debug("Returning iSER Logout -EAGAIN\n");
3098 ret = -EAGAIN;
3099 }
3100 break;
3101 case ISTATE_SEND_NOPIN:
3102 ret = isert_put_nopin(cmd, conn, true);
3103 break;
3104 case ISTATE_SEND_TASKMGTRSP:
3105 ret = isert_put_tm_rsp(cmd, conn);
3106 break;
3107 case ISTATE_SEND_REJECT:
3108 ret = isert_put_reject(cmd, conn);
3109 break;
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07003110 case ISTATE_SEND_TEXTRSP:
3111 ret = isert_put_text_rsp(cmd, conn);
3112 break;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003113 case ISTATE_SEND_STATUS:
3114 /*
3115 * Special case for sending non GOOD SCSI status from TX thread
3116 * context during pre se_cmd excecution failure.
3117 */
3118 ret = isert_put_response(conn, cmd);
3119 break;
3120 default:
3121 pr_err("Unknown response state: 0x%02x\n", state);
3122 ret = -EINVAL;
3123 break;
3124 }
3125
3126 return ret;
3127}
3128
Sagi Grimbergca6c1d82014-12-02 16:57:27 +02003129struct rdma_cm_id *
3130isert_setup_id(struct isert_np *isert_np)
3131{
3132 struct iscsi_np *np = isert_np->np;
3133 struct rdma_cm_id *id;
3134 struct sockaddr *sa;
3135 int ret;
3136
3137 sa = (struct sockaddr *)&np->np_sockaddr;
3138 pr_debug("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);
3139
3140 id = rdma_create_id(isert_cma_handler, isert_np,
3141 RDMA_PS_TCP, IB_QPT_RC);
3142 if (IS_ERR(id)) {
3143 pr_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
3144 ret = PTR_ERR(id);
3145 goto out;
3146 }
3147 pr_debug("id %p context %p\n", id, id->context);
3148
3149 ret = rdma_bind_addr(id, sa);
3150 if (ret) {
3151 pr_err("rdma_bind_addr() failed: %d\n", ret);
3152 goto out_id;
3153 }
3154
3155 ret = rdma_listen(id, ISERT_RDMA_LISTEN_BACKLOG);
3156 if (ret) {
3157 pr_err("rdma_listen() failed: %d\n", ret);
3158 goto out_id;
3159 }
3160
3161 return id;
3162out_id:
3163 rdma_destroy_id(id);
3164out:
3165 return ERR_PTR(ret);
3166}
3167
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003168static int
3169isert_setup_np(struct iscsi_np *np,
3170 struct __kernel_sockaddr_storage *ksockaddr)
3171{
3172 struct isert_np *isert_np;
3173 struct rdma_cm_id *isert_lid;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003174 int ret;
3175
3176 isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
3177 if (!isert_np) {
3178 pr_err("Unable to allocate struct isert_np\n");
3179 return -ENOMEM;
3180 }
Sagi Grimberg531b7bf2014-04-29 13:13:45 +03003181 sema_init(&isert_np->np_sem, 0);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003182 mutex_init(&isert_np->np_accept_mutex);
3183 INIT_LIST_HEAD(&isert_np->np_accept_list);
3184 init_completion(&isert_np->np_login_comp);
Sagi Grimbergca6c1d82014-12-02 16:57:27 +02003185 isert_np->np = np;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003186
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003187 /*
3188 * Setup the np->np_sockaddr from the passed sockaddr setup
3189 * in iscsi_target_configfs.c code..
3190 */
3191 memcpy(&np->np_sockaddr, ksockaddr,
3192 sizeof(struct __kernel_sockaddr_storage));
3193
Sagi Grimbergca6c1d82014-12-02 16:57:27 +02003194 isert_lid = isert_setup_id(isert_np);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003195 if (IS_ERR(isert_lid)) {
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003196 ret = PTR_ERR(isert_lid);
3197 goto out;
3198 }
3199
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003200 isert_np->np_cm_id = isert_lid;
3201 np->np_context = isert_np;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003202
3203 return 0;
3204
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003205out:
3206 kfree(isert_np);
Sagi Grimbergca6c1d82014-12-02 16:57:27 +02003207
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003208 return ret;
3209}
3210
3211static int
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003212isert_rdma_accept(struct isert_conn *isert_conn)
3213{
3214 struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
3215 struct rdma_conn_param cp;
3216 int ret;
3217
3218 memset(&cp, 0, sizeof(struct rdma_conn_param));
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003219 cp.initiator_depth = isert_conn->initiator_depth;
3220 cp.retry_count = 7;
3221 cp.rnr_retry_count = 7;
3222
3223 pr_debug("Before rdma_accept >>>>>>>>>>>>>>>>>>>>.\n");
3224
3225 ret = rdma_accept(cm_id, &cp);
3226 if (ret) {
3227 pr_err("rdma_accept() failed with: %d\n", ret);
3228 return ret;
3229 }
3230
3231 pr_debug("After rdma_accept >>>>>>>>>>>>>>>>>>>>>.\n");
3232
3233 return 0;
3234}
3235
/*
 * isert_get_login_rx - wait for and process an incoming login request PDU.
 * @conn:  iSCSI connection in login phase
 * @login: login state for this negotiation round
 *
 * Blocks until isert_rx_completion signals login_req_comp, then for the
 * first request only, hands the PDU to isert_rx_login_req() and waits
 * for conn_login_comp.  Returns 0 on success or the interrupted-wait
 * error code.
 */
static int
isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
{
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	int ret;

	pr_info("before login_req comp conn: %p\n", isert_conn);
	ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
	if (ret) {
		pr_err("isert_conn %p interrupted before got login req\n",
			  isert_conn);
		return ret;
	}
	/* Re-arm so the next login PDU can signal us again */
	reinit_completion(&isert_conn->login_req_comp);

	/*
	 * For login requests after the first PDU, isert_rx_login_req() will
	 * kick schedule_delayed_work(&conn->login_work) as the packet is
	 * received, which turns this callback from iscsi_target_do_login_rx()
	 * into a NOP.
	 */
	if (!login->first_request)
		return 0;

	isert_rx_login_req(isert_conn);

	pr_info("before conn_login_comp conn: %p\n", conn);
	ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
	if (ret)
		return ret;

	pr_info("processing login->req: %p\n", login->req);

	return 0;
}
3271
3272static void
3273isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
3274 struct isert_conn *isert_conn)
3275{
3276 struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
3277 struct rdma_route *cm_route = &cm_id->route;
3278 struct sockaddr_in *sock_in;
3279 struct sockaddr_in6 *sock_in6;
3280
3281 conn->login_family = np->np_sockaddr.ss_family;
3282
3283 if (np->np_sockaddr.ss_family == AF_INET6) {
3284 sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.dst_addr;
3285 snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
3286 &sock_in6->sin6_addr.in6_u);
3287 conn->login_port = ntohs(sock_in6->sin6_port);
3288
3289 sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.src_addr;
3290 snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
3291 &sock_in6->sin6_addr.in6_u);
3292 conn->local_port = ntohs(sock_in6->sin6_port);
3293 } else {
3294 sock_in = (struct sockaddr_in *)&cm_route->addr.dst_addr;
3295 sprintf(conn->login_ip, "%pI4",
3296 &sock_in->sin_addr.s_addr);
3297 conn->login_port = ntohs(sock_in->sin_port);
3298
3299 sock_in = (struct sockaddr_in *)&cm_route->addr.src_addr;
3300 sprintf(conn->local_ip, "%pI4",
3301 &sock_in->sin_addr.s_addr);
3302 conn->local_port = ntohs(sock_in->sin_port);
3303 }
3304}
3305
/*
 * isert_accept_np - dequeue the next pending iSER connection for login.
 * @np:   network portal the login thread is servicing
 * @conn: fresh iSCSI connection to attach the accepted isert_conn to
 *
 * Sleeps on np_sem, which is up()'ed when a CONNECT_REQUEST queues a new
 * isert_conn onto np_accept_list.  Returns 0 with conn->context set, or
 * -ENODEV when interrupted, when the np thread is resetting/shutting
 * down, or after too many spurious wakeups.
 */
static int
isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
	struct isert_np *isert_np = (struct isert_np *)np->np_context;
	struct isert_conn *isert_conn;
	int max_accept = 0, ret;

accept_wait:
	ret = down_interruptible(&isert_np->np_sem);
	/* Give up after 5 wakeups that found the accept list empty */
	if (ret || max_accept > 5)
		return -ENODEV;

	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
		spin_unlock_bh(&np->np_thread_lock);
		pr_debug("np_thread_state %d for isert_accept_np\n",
			 np->np_thread_state);
		/**
		 * No point in stalling here when np_thread
		 * is in state RESET/SHUTDOWN/EXIT - bail
		 **/
		return -ENODEV;
	}
	spin_unlock_bh(&np->np_thread_lock);

	mutex_lock(&isert_np->np_accept_mutex);
	if (list_empty(&isert_np->np_accept_list)) {
		mutex_unlock(&isert_np->np_accept_mutex);
		/* Spurious wakeup: count it and wait again */
		max_accept++;
		goto accept_wait;
	}
	isert_conn = list_first_entry(&isert_np->np_accept_list,
			struct isert_conn, conn_accept_node);
	list_del_init(&isert_conn->conn_accept_node);
	mutex_unlock(&isert_np->np_accept_mutex);

	/* Cross-link the iSCSI and iSER connection structures */
	conn->context = isert_conn;
	isert_conn->conn = conn;
	max_accept = 0;

	isert_set_conn_info(np, conn, isert_conn);

	pr_debug("Processing isert_conn: %p\n", isert_conn);

	return 0;
}
3352
3353static void
3354isert_free_np(struct iscsi_np *np)
3355{
3356 struct isert_np *isert_np = (struct isert_np *)np->np_context;
3357
Sagi Grimberg3b726ae2014-10-28 13:45:03 -07003358 if (isert_np->np_cm_id)
3359 rdma_destroy_id(isert_np->np_cm_id);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003360
3361 np->np_context = NULL;
3362 kfree(isert_np);
3363}
3364
/*
 * isert_release_work - deferred final teardown of an isert_conn.
 *
 * Runs on isert_release_wq after isert_wait_conn() has queued it.
 * Waits for conn_wait (completed by the disconnect path), marks the
 * connection DOWN under conn_mutex, and drops the final reference.
 */
static void isert_release_work(struct work_struct *work)
{
	struct isert_conn *isert_conn = container_of(work,
						     struct isert_conn,
						     release_work);

	pr_info("Starting release conn %p\n", isert_conn);

	/* Block until the disconnect path signals teardown is complete */
	wait_for_completion(&isert_conn->conn_wait);

	mutex_lock(&isert_conn->conn_mutex);
	isert_conn->state = ISER_CONN_DOWN;
	mutex_unlock(&isert_conn->conn_mutex);

	pr_info("Destroying conn %p\n", isert_conn);
	isert_put_conn(isert_conn);
}
3382
/*
 * isert_wait_conn - iscsit callback to quiesce an iSER connection.
 *
 * Terminates the RDMA connection (unless it never left INIT state),
 * waits for the completion-error path to drain outstanding work, then
 * hands final destruction off to isert_release_work on the unbound
 * release workqueue so this context is not blocked on conn_wait.
 */
static void isert_wait_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	pr_debug("isert_wait_conn: Starting \n");

	mutex_lock(&isert_conn->conn_mutex);
	/*
	 * Only wait for conn_wait_comp_err if the isert_conn made it
	 * into full feature phase..
	 */
	if (isert_conn->state == ISER_CONN_INIT) {
		mutex_unlock(&isert_conn->conn_mutex);
		return;
	}
	/* Moves the connection towards TERMINATING under conn_mutex */
	isert_conn_terminate(isert_conn);
	mutex_unlock(&isert_conn->conn_mutex);

	wait_for_completion(&isert_conn->conn_wait_comp_err);

	INIT_WORK(&isert_conn->release_work, isert_release_work);
	queue_work(isert_release_wq, &isert_conn->release_work);
}
3406
3407static void isert_free_conn(struct iscsi_conn *conn)
3408{
3409 struct isert_conn *isert_conn = conn->context;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003410
3411 isert_put_conn(isert_conn);
3412}
3413
/*
 * iscsit_transport ops table registering iSER over InfiniBand/RoCE with
 * the iSCSI target core.  priv_size reserves per-command space for
 * struct isert_cmd, retrieved via iscsit_priv_cmd().
 */
static struct iscsit_transport iser_target_transport = {
	.name = "IB/iSER",
	.transport_type = ISCSI_INFINIBAND,
	.priv_size = sizeof(struct isert_cmd),
	.owner = THIS_MODULE,
	.iscsit_setup_np = isert_setup_np,
	.iscsit_accept_np = isert_accept_np,
	.iscsit_free_np = isert_free_np,
	.iscsit_wait_conn = isert_wait_conn,
	.iscsit_free_conn = isert_free_conn,
	.iscsit_get_login_rx = isert_get_login_rx,
	.iscsit_put_login_tx = isert_put_login_tx,
	.iscsit_immediate_queue = isert_immediate_queue,
	.iscsit_response_queue = isert_response_queue,
	.iscsit_get_dataout = isert_get_dataout,
	.iscsit_queue_data_in = isert_put_datain,
	.iscsit_queue_status = isert_put_response,
	.iscsit_aborted_task = isert_aborted_task,
	.iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
};
3434
3435static int __init isert_init(void)
3436{
3437 int ret;
3438
3439 isert_rx_wq = alloc_workqueue("isert_rx_wq", 0, 0);
3440 if (!isert_rx_wq) {
3441 pr_err("Unable to allocate isert_rx_wq\n");
3442 return -ENOMEM;
3443 }
3444
3445 isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
3446 if (!isert_comp_wq) {
3447 pr_err("Unable to allocate isert_comp_wq\n");
3448 ret = -ENOMEM;
3449 goto destroy_rx_wq;
3450 }
3451
Sagi Grimbergb02efbf2014-12-02 16:57:29 +02003452 isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
3453 WQ_UNBOUND_MAX_ACTIVE);
3454 if (!isert_release_wq) {
3455 pr_err("Unable to allocate isert_release_wq\n");
3456 ret = -ENOMEM;
3457 goto destroy_comp_wq;
3458 }
3459
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003460 iscsit_register_transport(&iser_target_transport);
Sagi Grimbergb02efbf2014-12-02 16:57:29 +02003461 pr_info("iSER_TARGET[0] - Loaded iser_target_transport\n");
3462
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003463 return 0;
3464
Sagi Grimbergb02efbf2014-12-02 16:57:29 +02003465destroy_comp_wq:
3466 destroy_workqueue(isert_comp_wq);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003467destroy_rx_wq:
3468 destroy_workqueue(isert_rx_wq);
3469 return ret;
3470}
3471
/*
 * isert_exit - module exit: flush any scheduled work, destroy the
 * workqueues in reverse creation order, and unregister the transport.
 */
static void __exit isert_exit(void)
{
	/* Ensure no login/teardown work is still in flight */
	flush_scheduled_work();
	destroy_workqueue(isert_release_wq);
	destroy_workqueue(isert_comp_wq);
	destroy_workqueue(isert_rx_wq);
	iscsit_unregister_transport(&iser_target_transport);
	pr_debug("iSER_TARGET[0] - Released iser_target_transport\n");
}
3481
/* Standard module metadata and init/exit registration. */
MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
MODULE_VERSION("0.1");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(isert_init);
module_exit(isert_exit);