blob: d4a208381af86b78b8f2935cd32575fce1bd379f [file] [log] [blame]
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001/*******************************************************************************
 * This file contains iSCSI extensions for RDMA (iSER) Verbs
3 *
Nicholas Bellinger4c762512013-09-05 15:29:12 -07004 * (c) Copyright 2013 Datera, Inc.
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08005 *
6 * Nicholas A. Bellinger <nab@linux-iscsi.org>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 ****************************************************************************/
18
19#include <linux/string.h>
20#include <linux/module.h>
21#include <linux/scatterlist.h>
22#include <linux/socket.h>
23#include <linux/in.h>
24#include <linux/in6.h>
25#include <rdma/ib_verbs.h>
26#include <rdma/rdma_cm.h>
27#include <target/target_core_base.h>
28#include <target/target_core_fabric.h>
29#include <target/iscsi/iscsi_transport.h>
Sagi Grimberg531b7bf2014-04-29 13:13:45 +030030#include <linux/semaphore.h>
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -080031
32#include "isert_proto.h"
33#include "ib_isert.h"
34
35#define ISERT_MAX_CONN 8
36#define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
37#define ISER_MAX_TX_CQ_LEN (ISERT_QP_MAX_REQ_DTOS * ISERT_MAX_CONN)
38
39static DEFINE_MUTEX(device_list_mutex);
40static LIST_HEAD(device_list);
41static struct workqueue_struct *isert_rx_wq;
42static struct workqueue_struct *isert_comp_wq;
Sagi Grimbergb02efbf2014-12-02 16:57:29 +020043static struct workqueue_struct *isert_release_wq;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -080044
45static void
Vu Phamd40945d2013-08-28 23:23:34 +030046isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
47static int
48isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
49 struct isert_rdma_wr *wr);
Vu Pham59464ef2013-08-28 23:23:35 +030050static void
Sagi Grimberga3a5a822014-01-09 18:40:50 +020051isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
Vu Pham59464ef2013-08-28 23:23:35 +030052static int
Sagi Grimberga3a5a822014-01-09 18:40:50 +020053isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
54 struct isert_rdma_wr *wr);
Sagi Grimbergf93f3a72014-02-19 17:50:24 +020055static int
56isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
Sagi Grimberg2371e5d2014-12-02 16:57:21 +020057static int
58isert_rdma_post_recvl(struct isert_conn *isert_conn);
59static int
60isert_rdma_accept(struct isert_conn *isert_conn);
Sagi Grimbergca6c1d82014-12-02 16:57:27 +020061struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
Vu Phamd40945d2013-08-28 23:23:34 +030062
Sagi Grimberg302cc7c2014-12-02 16:57:34 +020063static inline bool
64isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
65{
Sagi Grimberg23a548e2014-12-02 16:57:35 +020066 return (conn->pi_support &&
Sagi Grimberg302cc7c2014-12-02 16:57:34 +020067 cmd->prot_op != TARGET_PROT_NORMAL);
68}
69
70
Vu Phamd40945d2013-08-28 23:23:34 +030071static void
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -080072isert_qp_event_callback(struct ib_event *e, void *context)
73{
74 struct isert_conn *isert_conn = (struct isert_conn *)context;
75
76 pr_err("isert_qp_event_callback event: %d\n", e->event);
77 switch (e->event) {
78 case IB_EVENT_COMM_EST:
79 rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST);
80 break;
81 case IB_EVENT_QP_LAST_WQE_REACHED:
82 pr_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED:\n");
83 break;
84 default:
85 break;
86 }
87}
88
89static int
90isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
91{
92 int ret;
93
94 ret = ib_query_device(ib_dev, devattr);
95 if (ret) {
96 pr_err("ib_query_device() failed: %d\n", ret);
97 return ret;
98 }
99 pr_debug("devattr->max_sge: %d\n", devattr->max_sge);
100 pr_debug("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);
101
102 return 0;
103}
104
/*
 * isert_conn_setup_qp() - Create the RC queue pair for a new connection.
 *
 * Picks the least-loaded completion context (fewest attached QPs) under
 * device_list_mutex, charges it, then creates the QP via the RDMA CM.
 * On rdma_create_qp() failure the active_qps charge is rolled back.
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int
isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
{
	struct isert_device *device = isert_conn->conn_device;
	struct ib_qp_init_attr attr;
	struct isert_comp *comp;
	int ret, i, min = 0;

	/*
	 * device_list_mutex also protects per-comp active_qps counters,
	 * not just the device list itself.
	 */
	mutex_lock(&device_list_mutex);
	for (i = 0; i < device->comps_used; i++)
		if (device->comps[i].active_qps <
		    device->comps[min].active_qps)
			min = i;
	comp = &device->comps[min];
	comp->active_qps++;
	pr_info("conn %p, using comp %p min_index: %d\n",
		isert_conn, comp, min);
	mutex_unlock(&device_list_mutex);

	memset(&attr, 0, sizeof(struct ib_qp_init_attr));
	attr.event_handler = isert_qp_event_callback;
	attr.qp_context = isert_conn;
	attr.send_cq = comp->tx_cq;
	attr.recv_cq = comp->rx_cq;
	attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
	attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
	/*
	 * FIXME: Use devattr.max_sge - 2 for max_send_sge as
	 * work-around for RDMA_READs with ConnectX-2.
	 *
	 * Also, still make sure to have at least two SGEs for
	 * outgoing control PDU responses.
	 */
	attr.cap.max_send_sge = max(2, device->dev_attr.max_sge - 2);
	isert_conn->max_sge = attr.cap.max_send_sge;

	attr.cap.max_recv_sge = 1;
	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	attr.qp_type = IB_QPT_RC;
	/* Signature offload must be requested at QP-create time. */
	if (device->pi_capable)
		attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;

	pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
		 cma_id->device);
	pr_debug("isert_conn_setup_qp conn_pd->device: %p\n",
		 isert_conn->conn_pd->device);

	ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
	if (ret) {
		pr_err("rdma_create_qp failed for cma_id %d\n", ret);
		goto err;
	}
	isert_conn->conn_qp = cma_id->qp;
	pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");

	return 0;
err:
	/* Roll back the active_qps charge taken above. */
	mutex_lock(&device_list_mutex);
	comp->active_qps--;
	mutex_unlock(&device_list_mutex);

	return ret;
}
168
/* CQ asynchronous event handler; events are logged only, no recovery. */
static void
isert_cq_event_callback(struct ib_event *e, void *context)
{
	pr_debug("isert_cq_event_callback event: %d\n", e->event);
}
174
175static int
176isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
177{
178 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
179 struct iser_rx_desc *rx_desc;
180 struct ib_sge *rx_sg;
181 u64 dma_addr;
182 int i, j;
183
184 isert_conn->conn_rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
185 sizeof(struct iser_rx_desc), GFP_KERNEL);
186 if (!isert_conn->conn_rx_descs)
187 goto fail;
188
189 rx_desc = isert_conn->conn_rx_descs;
190
191 for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
192 dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
193 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
194 if (ib_dma_mapping_error(ib_dev, dma_addr))
195 goto dma_map_fail;
196
197 rx_desc->dma_addr = dma_addr;
198
199 rx_sg = &rx_desc->rx_sg;
200 rx_sg->addr = rx_desc->dma_addr;
201 rx_sg->length = ISER_RX_PAYLOAD_SIZE;
202 rx_sg->lkey = isert_conn->conn_mr->lkey;
203 }
204
205 isert_conn->conn_rx_desc_head = 0;
206 return 0;
207
208dma_map_fail:
209 rx_desc = isert_conn->conn_rx_descs;
210 for (j = 0; j < i; j++, rx_desc++) {
211 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
212 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
213 }
214 kfree(isert_conn->conn_rx_descs);
215 isert_conn->conn_rx_descs = NULL;
216fail:
217 return -ENOMEM;
218}
219
220static void
221isert_free_rx_descriptors(struct isert_conn *isert_conn)
222{
223 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
224 struct iser_rx_desc *rx_desc;
225 int i;
226
227 if (!isert_conn->conn_rx_descs)
228 return;
229
230 rx_desc = isert_conn->conn_rx_descs;
231 for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
232 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
233 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
234 }
235
236 kfree(isert_conn->conn_rx_descs);
237 isert_conn->conn_rx_descs = NULL;
238}
239
Nicholas Bellinger2853c2b2013-12-11 16:20:13 -0800240static void isert_cq_tx_work(struct work_struct *);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800241static void isert_cq_tx_callback(struct ib_cq *, void *);
Nicholas Bellinger2853c2b2013-12-11 16:20:13 -0800242static void isert_cq_rx_work(struct work_struct *);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800243static void isert_cq_rx_callback(struct ib_cq *, void *);
244
/*
 * isert_create_device_ib_res() - Set up per-HCA resources shared by all
 * connections on that device.
 *
 * Queries device attributes, selects the RDMA registration strategy
 * (fast-registration vs. plain DMA mapping) based on device caps, and
 * allocates one completion context (RX + TX CQ pair and work items) per
 * completion vector, capped by ISERT_MAX_CQ and online CPU count.
 *
 * On any CQ failure all previously created CQs (and their work items)
 * are torn down and the comps array is freed.
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int
isert_create_device_ib_res(struct isert_device *device)
{
	struct ib_device *ib_dev = device->ib_device;
	struct ib_device_attr *dev_attr;
	int ret = 0, i;
	int max_rx_cqe, max_tx_cqe;

	dev_attr = &device->dev_attr;
	ret = isert_query_device(ib_dev, dev_attr);
	if (ret)
		return ret;

	/* Clamp CQ depths to what the HCA actually supports. */
	max_rx_cqe = min(ISER_MAX_RX_CQ_LEN, dev_attr->max_cqe);
	max_tx_cqe = min(ISER_MAX_TX_CQ_LEN, dev_attr->max_cqe);

	/* assign function handlers */
	if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
	    dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) {
		device->use_fastreg = 1;
		device->reg_rdma_mem = isert_reg_rdma;
		device->unreg_rdma_mem = isert_unreg_rdma;
	} else {
		device->use_fastreg = 0;
		device->reg_rdma_mem = isert_map_rdma;
		device->unreg_rdma_mem = isert_unmap_cmd;
	}

	/* Check signature cap */
	device->pi_capable = dev_attr->device_cap_flags &
			     IB_DEVICE_SIGNATURE_HANDOVER ? true : false;

	/* One completion context per vector, bounded by CPUs and ISERT_MAX_CQ. */
	device->comps_used = min(ISERT_MAX_CQ, min_t(int, num_online_cpus(),
				 device->ib_device->num_comp_vectors));
	pr_info("Using %d CQs, %s supports %d vectors support "
		"Fast registration %d pi_capable %d\n",
		device->comps_used, device->ib_device->name,
		device->ib_device->num_comp_vectors, device->use_fastreg,
		device->pi_capable);

	device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp),
				GFP_KERNEL);
	if (!device->comps) {
		pr_err("Unable to allocate completion contexts\n");
		return -ENOMEM;
	}

	for (i = 0; i < device->comps_used; i++) {
		struct isert_comp *comp = &device->comps[i];

		comp->device = device;
		INIT_WORK(&comp->rx_work, isert_cq_rx_work);
		comp->rx_cq = ib_create_cq(device->ib_device,
					   isert_cq_rx_callback,
					   isert_cq_event_callback,
					   (void *)comp,
					   max_rx_cqe, i);
		if (IS_ERR(comp->rx_cq)) {
			ret = PTR_ERR(comp->rx_cq);
			/* NULL marks this CQ as not-created for the unwind loop. */
			comp->rx_cq = NULL;
			goto out_cq;
		}

		INIT_WORK(&comp->tx_work, isert_cq_tx_work);
		comp->tx_cq = ib_create_cq(device->ib_device,
					   isert_cq_tx_callback,
					   isert_cq_event_callback,
					   (void *)comp,
					   max_tx_cqe, i);
		if (IS_ERR(comp->tx_cq)) {
			ret = PTR_ERR(comp->tx_cq);
			comp->tx_cq = NULL;
			goto out_cq;
		}

		/* Arm both CQs for their first completion notification. */
		ret = ib_req_notify_cq(comp->rx_cq, IB_CQ_NEXT_COMP);
		if (ret)
			goto out_cq;

		ret = ib_req_notify_cq(comp->tx_cq, IB_CQ_NEXT_COMP);
		if (ret)
			goto out_cq;
	}

	return 0;

out_cq:
	for (i = 0; i < device->comps_used; i++) {
		struct isert_comp *comp = &device->comps[i];

		if (comp->rx_cq) {
			cancel_work_sync(&comp->rx_work);
			ib_destroy_cq(comp->rx_cq);
		}
		if (comp->tx_cq) {
			cancel_work_sync(&comp->tx_work);
			ib_destroy_cq(comp->tx_cq);
		}
	}
	kfree(device->comps);

	return ret;
}
348
349static void
350isert_free_device_ib_res(struct isert_device *device)
351{
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800352 int i;
353
Sagi Grimberg4a295ba2014-12-02 16:57:40 +0200354 pr_info("device %p\n", device);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800355
Sagi Grimberg4a295ba2014-12-02 16:57:40 +0200356 for (i = 0; i < device->comps_used; i++) {
357 struct isert_comp *comp = &device->comps[i];
358
359 cancel_work_sync(&comp->rx_work);
360 cancel_work_sync(&comp->tx_work);
361 ib_destroy_cq(comp->rx_cq);
362 ib_destroy_cq(comp->tx_cq);
363 comp->rx_cq = NULL;
364 comp->tx_cq = NULL;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800365 }
Sagi Grimberg4a295ba2014-12-02 16:57:40 +0200366 kfree(device->comps);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800367}
368
369static void
370isert_device_try_release(struct isert_device *device)
371{
372 mutex_lock(&device_list_mutex);
373 device->refcount--;
374 if (!device->refcount) {
375 isert_free_device_ib_res(device);
376 list_del(&device->dev_node);
377 kfree(device);
378 }
379 mutex_unlock(&device_list_mutex);
380}
381
/*
 * isert_device_find_by_ib_dev() - Look up or create the isert_device
 * backing @cma_id's IB device.
 *
 * Devices are matched by node GUID. An existing entry just gains a
 * reference; otherwise a new isert_device is allocated, its IB
 * resources created, and it is added to the global list. The whole
 * lookup-or-create runs under device_list_mutex so concurrent callers
 * cannot create duplicates.
 *
 * Returns the referenced device or an ERR_PTR on allocation/setup
 * failure. Callers release via isert_device_try_release().
 */
static struct isert_device *
isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
{
	struct isert_device *device;
	int ret;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(device, &device_list, dev_node) {
		if (device->ib_device->node_guid == cma_id->device->node_guid) {
			device->refcount++;
			mutex_unlock(&device_list_mutex);
			return device;
		}
	}

	device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
	if (!device) {
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&device->dev_node);

	device->ib_device = cma_id->device;
	ret = isert_create_device_ib_res(device);
	if (ret) {
		kfree(device);
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(ret);
	}

	device->refcount++;
	list_add_tail(&device->dev_node, &device_list);
	mutex_unlock(&device_list_mutex);

	return device;
}
419
Vu Pham59464ef2013-08-28 23:23:35 +0300420static void
Sagi Grimberga3a5a822014-01-09 18:40:50 +0200421isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
Vu Pham59464ef2013-08-28 23:23:35 +0300422{
423 struct fast_reg_descriptor *fr_desc, *tmp;
424 int i = 0;
425
Sagi Grimberga3a5a822014-01-09 18:40:50 +0200426 if (list_empty(&isert_conn->conn_fr_pool))
Vu Pham59464ef2013-08-28 23:23:35 +0300427 return;
428
Sagi Grimberga3a5a822014-01-09 18:40:50 +0200429 pr_debug("Freeing conn %p fastreg pool", isert_conn);
Vu Pham59464ef2013-08-28 23:23:35 +0300430
431 list_for_each_entry_safe(fr_desc, tmp,
Sagi Grimberga3a5a822014-01-09 18:40:50 +0200432 &isert_conn->conn_fr_pool, list) {
Vu Pham59464ef2013-08-28 23:23:35 +0300433 list_del(&fr_desc->list);
434 ib_free_fast_reg_page_list(fr_desc->data_frpl);
435 ib_dereg_mr(fr_desc->data_mr);
Sagi Grimbergd3e125d2014-02-19 17:50:23 +0200436 if (fr_desc->pi_ctx) {
437 ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl);
438 ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
439 ib_destroy_mr(fr_desc->pi_ctx->sig_mr);
440 kfree(fr_desc->pi_ctx);
441 }
Vu Pham59464ef2013-08-28 23:23:35 +0300442 kfree(fr_desc);
443 ++i;
444 }
445
Sagi Grimberga3a5a822014-01-09 18:40:50 +0200446 if (i < isert_conn->conn_fr_pool_size)
Vu Pham59464ef2013-08-28 23:23:35 +0300447 pr_warn("Pool still has %d regions registered\n",
Sagi Grimberga3a5a822014-01-09 18:40:50 +0200448 isert_conn->conn_fr_pool_size - i);
Vu Pham59464ef2013-08-28 23:23:35 +0300449}
450
451static int
Sagi Grimberg570db172014-12-02 16:57:31 +0200452isert_create_pi_ctx(struct fast_reg_descriptor *desc,
453 struct ib_device *device,
454 struct ib_pd *pd)
455{
456 struct ib_mr_init_attr mr_init_attr;
457 struct pi_context *pi_ctx;
458 int ret;
459
460 pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
461 if (!pi_ctx) {
462 pr_err("Failed to allocate pi context\n");
463 return -ENOMEM;
464 }
465
466 pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(device,
467 ISCSI_ISER_SG_TABLESIZE);
468 if (IS_ERR(pi_ctx->prot_frpl)) {
469 pr_err("Failed to allocate prot frpl err=%ld\n",
470 PTR_ERR(pi_ctx->prot_frpl));
471 ret = PTR_ERR(pi_ctx->prot_frpl);
472 goto err_pi_ctx;
473 }
474
475 pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
476 if (IS_ERR(pi_ctx->prot_mr)) {
477 pr_err("Failed to allocate prot frmr err=%ld\n",
478 PTR_ERR(pi_ctx->prot_mr));
479 ret = PTR_ERR(pi_ctx->prot_mr);
480 goto err_prot_frpl;
481 }
482 desc->ind |= ISERT_PROT_KEY_VALID;
483
484 memset(&mr_init_attr, 0, sizeof(mr_init_attr));
485 mr_init_attr.max_reg_descriptors = 2;
486 mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
487 pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
488 if (IS_ERR(pi_ctx->sig_mr)) {
489 pr_err("Failed to allocate signature enabled mr err=%ld\n",
490 PTR_ERR(pi_ctx->sig_mr));
491 ret = PTR_ERR(pi_ctx->sig_mr);
492 goto err_prot_mr;
493 }
494
495 desc->pi_ctx = pi_ctx;
496 desc->ind |= ISERT_SIG_KEY_VALID;
497 desc->ind &= ~ISERT_PROTECTED;
498
499 return 0;
500
501err_prot_mr:
502 ib_dereg_mr(desc->pi_ctx->prot_mr);
503err_prot_frpl:
504 ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
505err_pi_ctx:
506 kfree(desc->pi_ctx);
507
508 return ret;
509}
510
511static int
Sagi Grimbergdc87a902014-01-09 18:40:51 +0200512isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
Sagi Grimberg570db172014-12-02 16:57:31 +0200513 struct fast_reg_descriptor *fr_desc)
Sagi Grimbergdc87a902014-01-09 18:40:51 +0200514{
Sagi Grimbergd3e125d2014-02-19 17:50:23 +0200515 int ret;
516
Sagi Grimbergdc87a902014-01-09 18:40:51 +0200517 fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
518 ISCSI_ISER_SG_TABLESIZE);
519 if (IS_ERR(fr_desc->data_frpl)) {
520 pr_err("Failed to allocate data frpl err=%ld\n",
521 PTR_ERR(fr_desc->data_frpl));
522 return PTR_ERR(fr_desc->data_frpl);
523 }
524
525 fr_desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
526 if (IS_ERR(fr_desc->data_mr)) {
527 pr_err("Failed to allocate data frmr err=%ld\n",
528 PTR_ERR(fr_desc->data_mr));
Sagi Grimbergd3e125d2014-02-19 17:50:23 +0200529 ret = PTR_ERR(fr_desc->data_mr);
530 goto err_data_frpl;
Sagi Grimbergdc87a902014-01-09 18:40:51 +0200531 }
Sagi Grimbergd3e125d2014-02-19 17:50:23 +0200532 fr_desc->ind |= ISERT_DATA_KEY_VALID;
Sagi Grimbergdc87a902014-01-09 18:40:51 +0200533
Sagi Grimberg570db172014-12-02 16:57:31 +0200534 pr_debug("Created fr_desc %p\n", fr_desc);
Sagi Grimbergdc87a902014-01-09 18:40:51 +0200535
536 return 0;
Sagi Grimberg570db172014-12-02 16:57:31 +0200537
Sagi Grimbergd3e125d2014-02-19 17:50:23 +0200538err_data_frpl:
539 ib_free_fast_reg_page_list(fr_desc->data_frpl);
540
541 return ret;
Sagi Grimbergdc87a902014-01-09 18:40:51 +0200542}
543
/*
 * isert_conn_create_fastreg_pool() - Pre-allocate the connection's pool
 * of fast-registration descriptors.
 *
 * Pool size is derived from the session queue depth (doubled, plus
 * extra tags) so a descriptor is available for every in-flight command.
 * On any allocation failure the partially-built pool is torn down via
 * isert_conn_free_fastreg_pool().
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int
isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
{
	struct fast_reg_descriptor *fr_desc;
	struct isert_device *device = isert_conn->conn_device;
	struct se_session *se_sess = isert_conn->conn->sess->se_sess;
	struct se_node_acl *se_nacl = se_sess->se_node_acl;
	int i, ret, tag_num;
	/*
	 * Setup the number of FRMRs based upon the number of tags
	 * available to session in iscsi_target_locate_portal().
	 */
	tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth);
	tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;

	isert_conn->conn_fr_pool_size = 0;
	for (i = 0; i < tag_num; i++) {
		fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
		if (!fr_desc) {
			pr_err("Failed to allocate fast_reg descriptor\n");
			ret = -ENOMEM;
			goto err;
		}

		ret = isert_create_fr_desc(device->ib_device,
					   isert_conn->conn_pd, fr_desc);
		if (ret) {
			pr_err("Failed to create fastreg descriptor err=%d\n",
			       ret);
			kfree(fr_desc);
			goto err;
		}

		list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
		isert_conn->conn_fr_pool_size++;
	}

	pr_debug("Creating conn %p fastreg pool size=%d",
		 isert_conn, isert_conn->conn_fr_pool_size);

	return 0;

err:
	isert_conn_free_fastreg_pool(isert_conn);
	return ret;
}
590
/*
 * isert_connect_request() - RDMA CM CONNECT_REQUEST handler.
 *
 * Rejects the request outright if the iscsi_np is not enabled.
 * Otherwise allocates and initializes a new isert_conn, sets up the
 * login buffers and their DMA mappings, takes a reference on the
 * per-HCA isert_device, allocates PD/MR, creates the QP, posts the
 * initial login receive, and calls rdma_accept(). On success the
 * connection is queued on np_accept_list and the login thread is woken
 * via np_sem.
 *
 * Error handling is a classic goto ladder that unwinds resources in
 * reverse acquisition order, then rejects the CM request.
 *
 * NOTE(review): if isert_rdma_post_recvl()/isert_rdma_accept() fail
 * after isert_conn_setup_qp() succeeded, out_conn_dev does not destroy
 * the QP or drop the comp active_qps charge — looks like a leak on
 * this rare path; confirm against later upstream restructuring.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int
isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	struct isert_np *isert_np = cma_id->context;
	struct iscsi_np *np = isert_np->np;
	struct isert_conn *isert_conn;
	struct isert_device *device;
	struct ib_device *ib_dev = cma_id->device;
	int ret = 0;

	spin_lock_bh(&np->np_thread_lock);
	if (!np->enabled) {
		spin_unlock_bh(&np->np_thread_lock);
		pr_debug("iscsi_np is not enabled, reject connect request\n");
		return rdma_reject(cma_id, NULL, 0);
	}
	spin_unlock_bh(&np->np_thread_lock);

	pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
		 cma_id, cma_id->context);

	isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
	if (!isert_conn) {
		pr_err("Unable to allocate isert_conn\n");
		return -ENOMEM;
	}
	isert_conn->state = ISER_CONN_INIT;
	INIT_LIST_HEAD(&isert_conn->conn_accept_node);
	init_completion(&isert_conn->conn_login_comp);
	init_completion(&isert_conn->login_req_comp);
	init_completion(&isert_conn->conn_wait);
	init_completion(&isert_conn->conn_wait_comp_err);
	kref_init(&isert_conn->conn_kref);
	mutex_init(&isert_conn->conn_mutex);
	spin_lock_init(&isert_conn->conn_lock);
	INIT_LIST_HEAD(&isert_conn->conn_fr_pool);

	isert_conn->conn_cm_id = cma_id;

	/* Single buffer split into login request + response halves. */
	isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
					ISER_RX_LOGIN_SIZE, GFP_KERNEL);
	if (!isert_conn->login_buf) {
		pr_err("Unable to allocate isert_conn->login_buf\n");
		ret = -ENOMEM;
		goto out;
	}

	isert_conn->login_req_buf = isert_conn->login_buf;
	isert_conn->login_rsp_buf = isert_conn->login_buf +
		ISCSI_DEF_MAX_RECV_SEG_LEN;
	pr_debug("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
		 isert_conn->login_buf, isert_conn->login_req_buf,
		 isert_conn->login_rsp_buf);

	isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
				(void *)isert_conn->login_req_buf,
				ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);

	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
	if (ret) {
		pr_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
		       ret);
		isert_conn->login_req_dma = 0;
		goto out_login_buf;
	}

	isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
					(void *)isert_conn->login_rsp_buf,
					ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);

	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
	if (ret) {
		pr_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
		       ret);
		isert_conn->login_rsp_dma = 0;
		goto out_req_dma_map;
	}

	device = isert_device_find_by_ib_dev(cma_id);
	if (IS_ERR(device)) {
		ret = PTR_ERR(device);
		goto out_rsp_dma_map;
	}

	/* Set max inflight RDMA READ requests */
	isert_conn->initiator_depth = min_t(u8,
				event->param.conn.initiator_depth,
				device->dev_attr.max_qp_init_rd_atom);
	pr_debug("Using initiator_depth: %u\n", isert_conn->initiator_depth);

	isert_conn->conn_device = device;
	isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device);
	if (IS_ERR(isert_conn->conn_pd)) {
		ret = PTR_ERR(isert_conn->conn_pd);
		pr_err("ib_alloc_pd failed for conn %p: ret=%d\n",
		       isert_conn, ret);
		goto out_pd;
	}

	isert_conn->conn_mr = ib_get_dma_mr(isert_conn->conn_pd,
					    IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(isert_conn->conn_mr)) {
		ret = PTR_ERR(isert_conn->conn_mr);
		pr_err("ib_get_dma_mr failed for conn %p: ret=%d\n",
		       isert_conn, ret);
		goto out_mr;
	}

	ret = isert_conn_setup_qp(isert_conn, cma_id);
	if (ret)
		goto out_conn_dev;

	ret = isert_rdma_post_recvl(isert_conn);
	if (ret)
		goto out_conn_dev;

	ret = isert_rdma_accept(isert_conn);
	if (ret)
		goto out_conn_dev;

	mutex_lock(&isert_np->np_accept_mutex);
	list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
	mutex_unlock(&isert_np->np_accept_mutex);

	/* Wake the login thread waiting in isert_accept_np(). */
	pr_debug("isert_connect_request() up np_sem np: %p\n", np);
	up(&isert_np->np_sem);
	return 0;

out_conn_dev:
	ib_dereg_mr(isert_conn->conn_mr);
out_mr:
	ib_dealloc_pd(isert_conn->conn_pd);
out_pd:
	isert_device_try_release(device);
out_rsp_dma_map:
	ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
			    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
out_req_dma_map:
	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
			    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
out_login_buf:
	kfree(isert_conn->login_buf);
out:
	kfree(isert_conn);
	rdma_reject(cma_id, NULL, 0);
	return ret;
}
738
/*
 * isert_connect_release() - Final teardown of a connection, invoked
 * when the last kref is dropped.
 *
 * Releases resources in reverse order of isert_connect_request():
 * fastreg pool (if used), RX descriptors, CM id, QP (also dropping the
 * completion-context active_qps charge under device_list_mutex), the
 * DMA MR and PD, the login buffer and its mappings, the isert_conn
 * itself, and finally the per-HCA device reference.
 */
static void
isert_connect_release(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct isert_device *device = isert_conn->conn_device;

	pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");

	if (device && device->use_fastreg)
		isert_conn_free_fastreg_pool(isert_conn);

	isert_free_rx_descriptors(isert_conn);
	rdma_destroy_id(isert_conn->conn_cm_id);

	if (isert_conn->conn_qp) {
		struct isert_comp *comp = isert_conn->conn_qp->recv_cq->cq_context;

		pr_debug("dec completion context %p active_qps\n", comp);
		/* device_list_mutex protects the active_qps counters. */
		mutex_lock(&device_list_mutex);
		comp->active_qps--;
		mutex_unlock(&device_list_mutex);

		ib_destroy_qp(isert_conn->conn_qp);
	}

	ib_dereg_mr(isert_conn->conn_mr);
	ib_dealloc_pd(isert_conn->conn_pd);

	if (isert_conn->login_buf) {
		ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
				    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
		ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
				    ISCSI_DEF_MAX_RECV_SEG_LEN,
				    DMA_FROM_DEVICE);
		kfree(isert_conn->login_buf);
	}
	kfree(isert_conn);

	if (device)
		isert_device_try_release(device);

	pr_debug("Leaving isert_connect_release >>>>>>>>>>>>\n");
}
782
783static void
784isert_connected_handler(struct rdma_cm_id *cma_id)
785{
Sagi Grimberg19e20902014-12-02 16:57:26 +0200786 struct isert_conn *isert_conn = cma_id->qp->qp_context;
Sagi Grimbergc2f88b12014-07-02 16:19:24 +0300787
Sagi Grimberg128e9cc2014-12-02 16:57:20 +0200788 pr_info("conn %p\n", isert_conn);
789
Sagi Grimberg2371e5d2014-12-02 16:57:21 +0200790 if (!kref_get_unless_zero(&isert_conn->conn_kref)) {
791 pr_warn("conn %p connect_release is running\n", isert_conn);
792 return;
793 }
794
795 mutex_lock(&isert_conn->conn_mutex);
796 if (isert_conn->state != ISER_CONN_FULL_FEATURE)
797 isert_conn->state = ISER_CONN_UP;
798 mutex_unlock(&isert_conn->conn_mutex);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800799}
800
801static void
802isert_release_conn_kref(struct kref *kref)
803{
804 struct isert_conn *isert_conn = container_of(kref,
805 struct isert_conn, conn_kref);
806
807 pr_debug("Calling isert_connect_release for final kref %s/%d\n",
808 current->comm, current->pid);
809
810 isert_connect_release(isert_conn);
811}
812
/* Drop a connection reference; the last put invokes isert_connect_release(). */
static void
isert_put_conn(struct isert_conn *isert_conn)
{
	kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
}
818
Sagi Grimberg954f2372014-12-02 16:57:17 +0200819/**
820 * isert_conn_terminate() - Initiate connection termination
821 * @isert_conn: isert connection struct
822 *
823 * Notes:
Sagi Grimberg128e9cc2014-12-02 16:57:20 +0200824 * In case the connection state is FULL_FEATURE, move state
Sagi Grimberg954f2372014-12-02 16:57:17 +0200825 * to TEMINATING and start teardown sequence (rdma_disconnect).
Sagi Grimberg128e9cc2014-12-02 16:57:20 +0200826 * In case the connection state is UP, complete flush as well.
Sagi Grimberg954f2372014-12-02 16:57:17 +0200827 *
828 * This routine must be called with conn_mutex held. Thus it is
829 * safe to call multiple times.
830 */
831static void
832isert_conn_terminate(struct isert_conn *isert_conn)
833{
834 int err;
835
Sagi Grimberg128e9cc2014-12-02 16:57:20 +0200836 switch (isert_conn->state) {
837 case ISER_CONN_TERMINATING:
838 break;
839 case ISER_CONN_UP:
840 /*
841 * No flush completions will occur as we didn't
842 * get to ISER_CONN_FULL_FEATURE yet, complete
843 * to allow teardown progress.
844 */
845 complete(&isert_conn->conn_wait_comp_err);
846 case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
Sagi Grimberg954f2372014-12-02 16:57:17 +0200847 pr_info("Terminating conn %p state %d\n",
848 isert_conn, isert_conn->state);
Sagi Grimberg128e9cc2014-12-02 16:57:20 +0200849 isert_conn->state = ISER_CONN_TERMINATING;
Sagi Grimberg954f2372014-12-02 16:57:17 +0200850 err = rdma_disconnect(isert_conn->conn_cm_id);
851 if (err)
852 pr_warn("Failed rdma_disconnect isert_conn %p\n",
853 isert_conn);
Sagi Grimberg128e9cc2014-12-02 16:57:20 +0200854 break;
855 default:
856 pr_warn("conn %p teminating in state %d\n",
857 isert_conn, isert_conn->state);
Sagi Grimberg954f2372014-12-02 16:57:17 +0200858 }
859}
860
Sagi Grimberg3b726ae2014-10-28 13:45:03 -0700861static int
Sagi Grimbergca6c1d82014-12-02 16:57:27 +0200862isert_np_cma_handler(struct isert_np *isert_np,
863 enum rdma_cm_event_type event)
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800864{
Sagi Grimbergca6c1d82014-12-02 16:57:27 +0200865 pr_debug("isert np %p, handling event %d\n", isert_np, event);
866
867 switch (event) {
868 case RDMA_CM_EVENT_DEVICE_REMOVAL:
869 isert_np->np_cm_id = NULL;
870 break;
871 case RDMA_CM_EVENT_ADDR_CHANGE:
872 isert_np->np_cm_id = isert_setup_id(isert_np);
873 if (IS_ERR(isert_np->np_cm_id)) {
874 pr_err("isert np %p setup id failed: %ld\n",
875 isert_np, PTR_ERR(isert_np->np_cm_id));
876 isert_np->np_cm_id = NULL;
877 }
878 break;
879 default:
880 pr_err("isert np %p Unexpected event %d\n",
881 isert_np, event);
882 }
883
884 return -1;
885}
886
887static int
888isert_disconnected_handler(struct rdma_cm_id *cma_id,
889 enum rdma_cm_event_type event)
890{
891 struct isert_np *isert_np = cma_id->context;
Sagi Grimberg3b726ae2014-10-28 13:45:03 -0700892 struct isert_conn *isert_conn;
893
Sagi Grimbergca6c1d82014-12-02 16:57:27 +0200894 if (isert_np->np_cm_id == cma_id)
895 return isert_np_cma_handler(cma_id->context, event);
Sagi Grimberg3b726ae2014-10-28 13:45:03 -0700896
Sagi Grimberg19e20902014-12-02 16:57:26 +0200897 isert_conn = cma_id->qp->qp_context;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800898
Sagi Grimberg128e9cc2014-12-02 16:57:20 +0200899 mutex_lock(&isert_conn->conn_mutex);
900 isert_conn_terminate(isert_conn);
901 mutex_unlock(&isert_conn->conn_mutex);
902
903 pr_info("conn %p completing conn_wait\n", isert_conn);
904 complete(&isert_conn->conn_wait);
Sagi Grimberg3b726ae2014-10-28 13:45:03 -0700905
906 return 0;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800907}
908
Sagi Grimberg954f2372014-12-02 16:57:17 +0200909static void
910isert_connect_error(struct rdma_cm_id *cma_id)
911{
Sagi Grimberg19e20902014-12-02 16:57:26 +0200912 struct isert_conn *isert_conn = cma_id->qp->qp_context;
Sagi Grimberg954f2372014-12-02 16:57:17 +0200913
914 isert_put_conn(isert_conn);
915}
916
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800917static int
918isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
919{
920 int ret = 0;
921
922 pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
923 event->event, event->status, cma_id->context, cma_id);
924
925 switch (event->event) {
926 case RDMA_CM_EVENT_CONNECT_REQUEST:
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800927 ret = isert_connect_request(cma_id, event);
Sagi Grimberg3b726ae2014-10-28 13:45:03 -0700928 if (ret)
929 pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
930 event->event, ret);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800931 break;
932 case RDMA_CM_EVENT_ESTABLISHED:
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800933 isert_connected_handler(cma_id);
934 break;
Sagi Grimberg88c40152014-05-19 17:44:24 +0300935 case RDMA_CM_EVENT_ADDR_CHANGE: /* FALLTHRU */
936 case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */
937 case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
Sagi Grimberg88c40152014-05-19 17:44:24 +0300938 case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */
Sagi Grimbergca6c1d82014-12-02 16:57:27 +0200939 ret = isert_disconnected_handler(cma_id, event->event);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800940 break;
Sagi Grimberg954f2372014-12-02 16:57:17 +0200941 case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */
942 case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800943 case RDMA_CM_EVENT_CONNECT_ERROR:
Sagi Grimberg954f2372014-12-02 16:57:17 +0200944 isert_connect_error(cma_id);
945 break;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800946 default:
Sagi Grimberg88c40152014-05-19 17:44:24 +0300947 pr_err("Unhandled RDMA CMA event: %d\n", event->event);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800948 break;
949 }
950
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -0800951 return ret;
952}
953
/*
 * Post @count receive work requests, consuming descriptors from the
 * circular conn_rx_descs ring starting at conn_rx_desc_head.
 * Returns 0 on success or the ib_post_recv() error code.
 */
static int
isert_post_recv(struct isert_conn *isert_conn, u32 count)
{
	struct ib_recv_wr *rx_wr, *rx_wr_failed;
	int i, ret;
	unsigned int rx_head = isert_conn->conn_rx_desc_head;
	struct iser_rx_desc *rx_desc;

	/* Build a chain of WRs, one per rx descriptor. */
	for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) {
		rx_desc = &isert_conn->conn_rx_descs[rx_head];
		rx_wr->wr_id = (uintptr_t)rx_desc;
		rx_wr->sg_list = &rx_desc->rx_sg;
		rx_wr->num_sge = 1;
		rx_wr->next = rx_wr + 1;
		/* ISERT_QP_MAX_RECV_DTOS is a power of two; mask wraps the ring. */
		rx_head = (rx_head + 1) & (ISERT_QP_MAX_RECV_DTOS - 1);
	}

	rx_wr--;
	rx_wr->next = NULL; /* mark end of work requests list */

	/* Account before posting; roll back on failure. */
	isert_conn->post_recv_buf_count += count;
	ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr,
			   &rx_wr_failed);
	if (ret) {
		pr_err("ib_post_recv() failed with ret: %d\n", ret);
		isert_conn->post_recv_buf_count -= count;
	} else {
		pr_debug("isert_post_recv(): Posted %d RX buffers\n", count);
		isert_conn->conn_rx_desc_head = rx_head;
	}
	return ret;
}
986
/*
 * Post a single signaled SEND work request for @tx_desc.
 * post_send_buf_count is incremented before posting and rolled back
 * on failure so completion accounting stays balanced.
 */
static int
isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct ib_send_wr send_wr, *send_wr_failed;
	int ret;

	/* Flush the CPU-written headers to the device before posting. */
	ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
				      ISER_HEADERS_LEN, DMA_TO_DEVICE);

	send_wr.next = NULL;
	send_wr.wr_id = (uintptr_t)tx_desc;
	send_wr.sg_list = tx_desc->tx_sg;
	send_wr.num_sge = tx_desc->num_sge;
	send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	atomic_inc(&isert_conn->post_send_buf_count);

	ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed);
	if (ret) {
		pr_err("ib_post_send() failed, ret: %d\n", ret);
		atomic_dec(&isert_conn->post_send_buf_count);
	}

	return ret;
}
1014
1015static void
1016isert_create_send_desc(struct isert_conn *isert_conn,
1017 struct isert_cmd *isert_cmd,
1018 struct iser_tx_desc *tx_desc)
1019{
1020 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1021
1022 ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
1023 ISER_HEADERS_LEN, DMA_TO_DEVICE);
1024
1025 memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
1026 tx_desc->iser_header.flags = ISER_VER;
1027
1028 tx_desc->num_sge = 1;
1029 tx_desc->isert_cmd = isert_cmd;
1030
1031 if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) {
1032 tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
1033 pr_debug("tx_desc %p lkey mismatch, fixing\n", tx_desc);
1034 }
1035}
1036
1037static int
1038isert_init_tx_hdrs(struct isert_conn *isert_conn,
1039 struct iser_tx_desc *tx_desc)
1040{
1041 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1042 u64 dma_addr;
1043
1044 dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
1045 ISER_HEADERS_LEN, DMA_TO_DEVICE);
1046 if (ib_dma_mapping_error(ib_dev, dma_addr)) {
1047 pr_err("ib_dma_mapping_error() failed\n");
1048 return -ENOMEM;
1049 }
1050
1051 tx_desc->dma_addr = dma_addr;
1052 tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
1053 tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
1054 tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
1055
1056 pr_debug("isert_init_tx_hdrs: Setup tx_sg[0].addr: 0x%llx length: %u"
1057 " lkey: 0x%08x\n", tx_desc->tx_sg[0].addr,
1058 tx_desc->tx_sg[0].length, tx_desc->tx_sg[0].lkey);
1059
1060 return 0;
1061}
1062
1063static void
Nicholas Bellinger95b60f02013-11-05 13:16:12 -08001064isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
Sagi Grimberg68a86de2014-12-02 16:57:37 +02001065 struct ib_send_wr *send_wr)
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001066{
Nicholas Bellinger95b60f02013-11-05 13:16:12 -08001067 struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;
1068
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001069 isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
Sagi Grimbergb0a191e2014-12-02 16:57:39 +02001070 send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001071 send_wr->opcode = IB_WR_SEND;
Nicholas Bellinger95b60f02013-11-05 13:16:12 -08001072 send_wr->sg_list = &tx_desc->tx_sg[0];
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001073 send_wr->num_sge = isert_cmd->tx_desc.num_sge;
Nicholas Bellinger95b60f02013-11-05 13:16:12 -08001074 send_wr->send_flags = IB_SEND_SIGNALED;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001075}
1076
/*
 * Post the single dedicated receive buffer used for login request
 * PDUs (before the full rx descriptor ring is allocated).
 */
static int
isert_rdma_post_recvl(struct isert_conn *isert_conn)
{
	struct ib_recv_wr rx_wr, *rx_wr_fail;
	struct ib_sge sge;
	int ret;

	memset(&sge, 0, sizeof(struct ib_sge));
	sge.addr = isert_conn->login_req_dma;
	sge.length = ISER_RX_LOGIN_SIZE;
	sge.lkey = isert_conn->conn_mr->lkey;

	pr_debug("Setup sge: addr: %llx length: %d 0x%08x\n",
		sge.addr, sge.length, sge.lkey);

	memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
	rx_wr.wr_id = (uintptr_t)isert_conn->login_req_buf;
	rx_wr.sg_list = &sge;
	rx_wr.num_sge = 1;

	/* Account before posting; roll back if the post fails. */
	isert_conn->post_recv_buf_count++;
	ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail);
	if (ret) {
		pr_err("ib_post_recv() failed: %d\n", ret);
		isert_conn->post_recv_buf_count--;
	}

	pr_debug("ib_post_recv(): returned success >>>>>>>>>>>>>>>>>>>>>>>>\n");
	return ret;
}
1107
/*
 * Transmit a login response PDU (and optional text payload) to the
 * initiator. On the final response of a successful login, the
 * fastreg pool (for normal sessions on fastreg-capable devices) and
 * the rx descriptor ring are set up and the connection moves to
 * ISER_CONN_FULL_FEATURE before the response is posted.
 * Returns 0 on success or a negative errno.
 */
static int
isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
		   u32 length)
{
	struct isert_conn *isert_conn = conn->context;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iser_tx_desc *tx_desc = &isert_conn->conn_login_tx_desc;
	int ret;

	isert_create_send_desc(isert_conn, NULL, tx_desc);

	memcpy(&tx_desc->iscsi_header, &login->rsp[0],
	       sizeof(struct iscsi_hdr));

	isert_init_tx_hdrs(isert_conn, tx_desc);

	if (length > 0) {
		struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];

		/* Stage the response payload into the pre-mapped login buffer. */
		ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
					   length, DMA_TO_DEVICE);

		memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);

		ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
					      length, DMA_TO_DEVICE);

		tx_dsg->addr = isert_conn->login_rsp_dma;
		tx_dsg->length = length;
		tx_dsg->lkey = isert_conn->conn_mr->lkey;
		tx_desc->num_sge = 2;
	}
	if (!login->login_failed) {
		if (login->login_complete) {
			/* Normal (non-discovery) sessions may need a fastreg pool. */
			if (!conn->sess->sess_ops->SessionType &&
			    isert_conn->conn_device->use_fastreg) {
				ret = isert_conn_create_fastreg_pool(isert_conn);
				if (ret) {
					pr_err("Conn: %p failed to create"
					       " fastreg pool\n", isert_conn);
					return ret;
				}
			}

			ret = isert_alloc_rx_descriptors(isert_conn);
			if (ret)
				return ret;

			ret = isert_post_recv(isert_conn, ISERT_MIN_POSTED_RX);
			if (ret)
				return ret;

			/* Now we are in FULL_FEATURE phase */
			mutex_lock(&isert_conn->conn_mutex);
			isert_conn->state = ISER_CONN_FULL_FEATURE;
			mutex_unlock(&isert_conn->conn_mutex);
			goto post_send;
		}

		/* More login exchanges to come: re-post the login recv buffer. */
		ret = isert_rdma_post_recvl(isert_conn);
		if (ret)
			return ret;
	}
post_send:
	ret = isert_post_send(isert_conn, tx_desc);
	if (ret)
		return ret;

	return 0;
}
1178
/*
 * Stage a received login request PDU into the iscsi_login structure.
 * For the leading request the login state is initialized from the
 * PDU header and conn_login_comp is completed so the login thread
 * can proceed; subsequent requests are handed to the login workqueue.
 */
static void
isert_rx_login_req(struct isert_conn *isert_conn)
{
	struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf;
	int rx_buflen = isert_conn->login_req_len;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_login *login = conn->conn_login;
	int size;

	pr_info("conn %p\n", isert_conn);

	WARN_ON_ONCE(!login);

	if (login->first_request) {
		struct iscsi_login_req *login_req =
			(struct iscsi_login_req *)&rx_desc->iscsi_header;
		/*
		 * Setup the initial iscsi_login values from the leading
		 * login request PDU.
		 */
		login->leading_connection = (!login_req->tsih) ? 1 : 0;
		login->current_stage =
			(login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
			 >> 2;
		login->version_min = login_req->min_version;
		login->version_max = login_req->max_version;
		memcpy(login->isid, login_req->isid, 6);
		login->cmd_sn = be32_to_cpu(login_req->cmdsn);
		login->init_task_tag = login_req->itt;
		login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
		login->cid = be16_to_cpu(login_req->cid);
		login->tsih = be16_to_cpu(login_req->tsih);
	}

	memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);

	/* Clamp the copied key/value payload to the login buffer size. */
	size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
	pr_debug("Using login payload size: %d, rx_buflen: %d MAX_KEY_VALUE_PAIRS: %d\n",
		 size, rx_buflen, MAX_KEY_VALUE_PAIRS);
	memcpy(login->req_buf, &rx_desc->data[0], size);

	if (login->first_request) {
		complete(&isert_conn->conn_login_comp);
		return;
	}
	schedule_delayed_work(&conn->login_work, 0);
}
1226
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001227static struct iscsi_cmd
Nicholas Bellinger676687c2014-01-20 03:36:44 +00001228*isert_allocate_cmd(struct iscsi_conn *conn)
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001229{
1230 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1231 struct isert_cmd *isert_cmd;
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001232 struct iscsi_cmd *cmd;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001233
Nicholas Bellinger676687c2014-01-20 03:36:44 +00001234 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001235 if (!cmd) {
1236 pr_err("Unable to allocate iscsi_cmd + isert_cmd\n");
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001237 return NULL;
1238 }
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001239 isert_cmd = iscsit_priv_cmd(cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001240 isert_cmd->conn = isert_conn;
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001241 isert_cmd->iscsi_cmd = cmd;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001242
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001243 return cmd;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001244}
1245
/*
 * Set up and queue a received SCSI command PDU, copying any immediate
 * data from the rx descriptor into the command's scatterlist before
 * handing the command to the iSCSI CmdSN sequencer.
 * Returns 0 on success (including recoverable protocol errors) or a
 * negative errno from PDU setup.
 */
static int
isert_handle_scsi_cmd(struct isert_conn *isert_conn,
		      struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd,
		      struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
	struct scatterlist *sg;
	int imm_data, imm_data_len, unsol_data, sg_nents, rc;
	bool dump_payload = false;

	rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
	if (rc < 0)
		return rc;

	/* Snapshot PDU properties before processing may change the cmd. */
	imm_data = cmd->immediate_data;
	imm_data_len = cmd->first_burst_len;
	unsol_data = cmd->unsolicited_data;

	rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
	if (rc < 0) {
		return 0;
	} else if (rc > 0) {
		/* Command failed processing: dump payload but still sequence it. */
		dump_payload = true;
		goto sequence_cmd;
	}

	if (!imm_data)
		return 0;

	/* Copy immediate data that arrived inline with the PDU. */
	sg = &cmd->se_cmd.t_data_sg[0];
	sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));

	pr_debug("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
		 sg, sg_nents, &rx_desc->data[0], imm_data_len);

	sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len);

	cmd->write_data_done += imm_data_len;

	if (cmd->write_data_done == cmd->se_cmd.data_length) {
		spin_lock_bh(&cmd->istate_lock);
		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
		spin_unlock_bh(&cmd->istate_lock);
	}

sequence_cmd:
	rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);

	if (!rc && dump_payload == false && unsol_data)
		iscsit_set_unsoliticed_dataout(cmd);
	else if (dump_payload && imm_data)
		/* Dumped immediate data: drop the extra se_cmd reference. */
		target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);

	return 0;
}
1303
/*
 * Handle an unsolicited Data-Out PDU: locate the owning command and
 * copy the payload from the rx descriptor into the command's
 * scatterlist at the current write offset.
 * Returns 0 on success, negative on protocol error.
 */
static int
isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
			   struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct scatterlist *sg_start;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_cmd *cmd = NULL;
	struct iscsi_data *hdr = (struct iscsi_data *)buf;
	u32 unsol_data_len = ntoh24(hdr->dlength);
	int rc, sg_nents, sg_off, page_off;

	rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
	if (rc < 0)
		return rc;
	else if (!cmd)
		return 0;
	/*
	 * FIXME: Unexpected unsolicited_data out
	 */
	if (!cmd->unsolicited_data) {
		pr_err("Received unexpected solicited data payload\n");
		dump_stack();
		return -1;
	}

	pr_debug("Unsolicited DataOut unsol_data_len: %u, write_data_done: %u, data_length: %u\n",
		 unsol_data_len, cmd->write_data_done, cmd->se_cmd.data_length);

	/* Resume the copy at the scatterlist entry for write_data_done. */
	sg_off = cmd->write_data_done / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
	page_off = cmd->write_data_done % PAGE_SIZE;
	/*
	 * FIXME: Non page-aligned unsolicited_data out
	 */
	if (page_off) {
		pr_err("Received unexpected non-page aligned data payload\n");
		dump_stack();
		return -1;
	}
	pr_debug("Copying DataOut: sg_start: %p, sg_off: %u sg_nents: %u from %p %u\n",
		 sg_start, sg_off, sg_nents, &rx_desc->data[0], unsol_data_len);

	sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
			    unsol_data_len);

	rc = iscsit_check_dataout_payload(cmd, hdr, false);
	if (rc < 0)
		return rc;

	return 0;
}
1356
1357static int
Nicholas Bellinger778de362013-06-14 16:07:47 -07001358isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001359 struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
1360 unsigned char *buf)
Nicholas Bellinger778de362013-06-14 16:07:47 -07001361{
Nicholas Bellinger778de362013-06-14 16:07:47 -07001362 struct iscsi_conn *conn = isert_conn->conn;
1363 struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
1364 int rc;
1365
1366 rc = iscsit_setup_nop_out(conn, cmd, hdr);
1367 if (rc < 0)
1368 return rc;
1369 /*
1370 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
1371 */
1372
1373 return iscsit_process_nop_out(conn, cmd, hdr);
1374}
1375
1376static int
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07001377isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001378 struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
1379 struct iscsi_text *hdr)
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07001380{
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07001381 struct iscsi_conn *conn = isert_conn->conn;
1382 u32 payload_length = ntoh24(hdr->dlength);
1383 int rc;
1384 unsigned char *text_in;
1385
1386 rc = iscsit_setup_text_cmd(conn, cmd, hdr);
1387 if (rc < 0)
1388 return rc;
1389
1390 text_in = kzalloc(payload_length, GFP_KERNEL);
1391 if (!text_in) {
1392 pr_err("Unable to allocate text_in of payload_length: %u\n",
1393 payload_length);
1394 return -ENOMEM;
1395 }
1396 cmd->text_in_ptr = text_in;
1397
1398 memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);
1399
1400 return iscsit_process_text_cmd(conn, cmd, hdr);
1401}
1402
1403static int
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001404isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
1405 uint32_t read_stag, uint64_t read_va,
1406 uint32_t write_stag, uint64_t write_va)
1407{
1408 struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
1409 struct iscsi_conn *conn = isert_conn->conn;
Nicholas Bellingerca40d242013-07-07 17:45:08 -07001410 struct iscsi_session *sess = conn->sess;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001411 struct iscsi_cmd *cmd;
1412 struct isert_cmd *isert_cmd;
1413 int ret = -EINVAL;
1414 u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);
1415
Nicholas Bellingerca40d242013-07-07 17:45:08 -07001416 if (sess->sess_ops->SessionType &&
1417 (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) {
1418 pr_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
1419 " ignoring\n", opcode);
1420 return 0;
1421 }
1422
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001423 switch (opcode) {
1424 case ISCSI_OP_SCSI_CMD:
Nicholas Bellinger676687c2014-01-20 03:36:44 +00001425 cmd = isert_allocate_cmd(conn);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001426 if (!cmd)
1427 break;
1428
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001429 isert_cmd = iscsit_priv_cmd(cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001430 isert_cmd->read_stag = read_stag;
1431 isert_cmd->read_va = read_va;
1432 isert_cmd->write_stag = write_stag;
1433 isert_cmd->write_va = write_va;
1434
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001435 ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001436 rx_desc, (unsigned char *)hdr);
1437 break;
1438 case ISCSI_OP_NOOP_OUT:
Nicholas Bellinger676687c2014-01-20 03:36:44 +00001439 cmd = isert_allocate_cmd(conn);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001440 if (!cmd)
1441 break;
1442
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001443 isert_cmd = iscsit_priv_cmd(cmd);
1444 ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
Nicholas Bellinger778de362013-06-14 16:07:47 -07001445 rx_desc, (unsigned char *)hdr);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001446 break;
1447 case ISCSI_OP_SCSI_DATA_OUT:
1448 ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
1449 (unsigned char *)hdr);
1450 break;
1451 case ISCSI_OP_SCSI_TMFUNC:
Nicholas Bellinger676687c2014-01-20 03:36:44 +00001452 cmd = isert_allocate_cmd(conn);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001453 if (!cmd)
1454 break;
1455
1456 ret = iscsit_handle_task_mgt_cmd(conn, cmd,
1457 (unsigned char *)hdr);
1458 break;
1459 case ISCSI_OP_LOGOUT:
Nicholas Bellinger676687c2014-01-20 03:36:44 +00001460 cmd = isert_allocate_cmd(conn);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001461 if (!cmd)
1462 break;
1463
1464 ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
1465 if (ret > 0)
1466 wait_for_completion_timeout(&conn->conn_logout_comp,
1467 SECONDS_FOR_LOGOUT_COMP *
1468 HZ);
1469 break;
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07001470 case ISCSI_OP_TEXT:
Nicholas Bellinger676687c2014-01-20 03:36:44 +00001471 cmd = isert_allocate_cmd(conn);
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07001472 if (!cmd)
1473 break;
1474
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001475 isert_cmd = iscsit_priv_cmd(cmd);
1476 ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07001477 rx_desc, (struct iscsi_text *)hdr);
1478 break;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001479 default:
1480 pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
1481 dump_stack();
1482 break;
1483 }
1484
1485 return ret;
1486}
1487
1488static void
1489isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
1490{
1491 struct iser_hdr *iser_hdr = &rx_desc->iser_header;
1492 uint64_t read_va = 0, write_va = 0;
1493 uint32_t read_stag = 0, write_stag = 0;
1494 int rc;
1495
1496 switch (iser_hdr->flags & 0xF0) {
1497 case ISCSI_CTRL:
1498 if (iser_hdr->flags & ISER_RSV) {
1499 read_stag = be32_to_cpu(iser_hdr->read_stag);
1500 read_va = be64_to_cpu(iser_hdr->read_va);
1501 pr_debug("ISER_RSV: read_stag: 0x%08x read_va: 0x%16llx\n",
1502 read_stag, (unsigned long long)read_va);
1503 }
1504 if (iser_hdr->flags & ISER_WSV) {
1505 write_stag = be32_to_cpu(iser_hdr->write_stag);
1506 write_va = be64_to_cpu(iser_hdr->write_va);
1507 pr_debug("ISER_WSV: write__stag: 0x%08x write_va: 0x%16llx\n",
1508 write_stag, (unsigned long long)write_va);
1509 }
1510
1511 pr_debug("ISER ISCSI_CTRL PDU\n");
1512 break;
1513 case ISER_HELLO:
1514 pr_err("iSER Hello message\n");
1515 break;
1516 default:
1517 pr_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
1518 break;
1519 }
1520
1521 rc = isert_rx_opcode(isert_conn, rx_desc,
1522 read_stag, read_va, write_stag, write_va);
1523}
1524
/*
 * Receive completion handler: distinguishes the dedicated login
 * buffer from ring descriptors, dispatches the PDU, and replenishes
 * the receive queue when the count of outstanding buffers drops
 * below the ISERT_MIN_POSTED_RX watermark.
 */
static void
isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
		    u32 xfer_len)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iscsi_hdr *hdr;
	u64 rx_dma;
	int rx_buflen, outstanding;

	if ((char *)desc == isert_conn->login_req_buf) {
		rx_dma = isert_conn->login_req_dma;
		rx_buflen = ISER_RX_LOGIN_SIZE;
		pr_debug("ISER login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			 rx_dma, rx_buflen);
	} else {
		rx_dma = desc->dma_addr;
		rx_buflen = ISER_RX_PAYLOAD_SIZE;
		pr_debug("ISER req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			 rx_dma, rx_buflen);
	}

	ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);

	hdr = &desc->iscsi_header;
	pr_debug("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
		 hdr->opcode, hdr->itt, hdr->flags,
		 (int)(xfer_len - ISER_HEADERS_LEN));

	if ((char *)desc == isert_conn->login_req_buf) {
		/* Login request: stage it and wake anyone waiting on it. */
		isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN;
		if (isert_conn->conn) {
			struct iscsi_login *login = isert_conn->conn->conn_login;

			if (login && !login->first_request)
				isert_rx_login_req(isert_conn);
		}
		mutex_lock(&isert_conn->conn_mutex);
		complete(&isert_conn->login_req_comp);
		mutex_unlock(&isert_conn->conn_mutex);
	} else {
		isert_rx_do_work(desc, isert_conn);
	}

	ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
				      DMA_FROM_DEVICE);

	isert_conn->post_recv_buf_count--;
	pr_debug("iSERT: Decremented post_recv_buf_count: %d\n",
		 isert_conn->post_recv_buf_count);

	/* The login buffer is re-posted separately via isert_rdma_post_recvl(). */
	if ((char *)desc == isert_conn->login_req_buf)
		return;

	outstanding = isert_conn->post_recv_buf_count;
	if (outstanding + ISERT_MIN_POSTED_RX <= ISERT_QP_MAX_RECV_DTOS) {
		int err, count = min(ISERT_QP_MAX_RECV_DTOS - outstanding,
				ISERT_MIN_POSTED_RX);
		err = isert_post_recv(isert_conn, count);
		if (err) {
			pr_err("isert_post_recv() count: %d failed, %d\n",
			       count, err);
		}
	}
}
1589
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02001590static int
1591isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1592 struct scatterlist *sg, u32 nents, u32 length, u32 offset,
1593 enum iser_ib_op_code op, struct isert_data_buf *data)
1594{
1595 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1596
1597 data->dma_dir = op == ISER_IB_RDMA_WRITE ?
1598 DMA_TO_DEVICE : DMA_FROM_DEVICE;
1599
1600 data->len = length - offset;
1601 data->offset = offset;
1602 data->sg_off = data->offset / PAGE_SIZE;
1603
1604 data->sg = &sg[data->sg_off];
1605 data->nents = min_t(unsigned int, nents - data->sg_off,
1606 ISCSI_ISER_SG_TABLESIZE);
1607 data->len = min_t(unsigned int, data->len, ISCSI_ISER_SG_TABLESIZE *
1608 PAGE_SIZE);
1609
1610 data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents,
1611 data->dma_dir);
1612 if (unlikely(!data->dma_nents)) {
1613 pr_err("Cmd: unable to dma map SGs %p\n", sg);
1614 return -EINVAL;
1615 }
1616
1617 pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
1618 isert_cmd, data->dma_nents, data->sg, data->nents, data->len);
1619
1620 return 0;
1621}
1622
1623static void
1624isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data)
1625{
1626 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1627
1628 ib_dma_unmap_sg(ib_dev, data->sg, data->nents, data->dma_dir);
1629 memset(data, 0, sizeof(*data));
1630}
1631
1632
1633
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001634static void
1635isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
1636{
1637 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001638
Vu Pham90ecc6e2013-08-28 23:23:33 +03001639 pr_debug("isert_unmap_cmd: %p\n", isert_cmd);
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02001640
1641 if (wr->data.sg) {
Vu Pham90ecc6e2013-08-28 23:23:33 +03001642 pr_debug("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd);
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02001643 isert_unmap_data_buf(isert_conn, &wr->data);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001644 }
1645
Vu Pham90ecc6e2013-08-28 23:23:33 +03001646 if (wr->send_wr) {
1647 pr_debug("isert_unmap_cmd: %p free send_wr\n", isert_cmd);
1648 kfree(wr->send_wr);
1649 wr->send_wr = NULL;
1650 }
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001651
Vu Pham90ecc6e2013-08-28 23:23:33 +03001652 if (wr->ib_sge) {
1653 pr_debug("isert_unmap_cmd: %p free ib_sge\n", isert_cmd);
1654 kfree(wr->ib_sge);
1655 wr->ib_sge = NULL;
1656 }
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001657}
1658
1659static void
Sagi Grimberga3a5a822014-01-09 18:40:50 +02001660isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
Vu Pham59464ef2013-08-28 23:23:35 +03001661{
1662 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
Vu Pham59464ef2013-08-28 23:23:35 +03001663 LIST_HEAD(unmap_list);
1664
Sagi Grimberga3a5a822014-01-09 18:40:50 +02001665 pr_debug("unreg_fastreg_cmd: %p\n", isert_cmd);
Vu Pham59464ef2013-08-28 23:23:35 +03001666
1667 if (wr->fr_desc) {
Sagi Grimberga3a5a822014-01-09 18:40:50 +02001668 pr_debug("unreg_fastreg_cmd: %p free fr_desc %p\n",
Vu Pham59464ef2013-08-28 23:23:35 +03001669 isert_cmd, wr->fr_desc);
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02001670 if (wr->fr_desc->ind & ISERT_PROTECTED) {
1671 isert_unmap_data_buf(isert_conn, &wr->prot);
1672 wr->fr_desc->ind &= ~ISERT_PROTECTED;
1673 }
Vu Pham59464ef2013-08-28 23:23:35 +03001674 spin_lock_bh(&isert_conn->conn_lock);
Sagi Grimberga3a5a822014-01-09 18:40:50 +02001675 list_add_tail(&wr->fr_desc->list, &isert_conn->conn_fr_pool);
Vu Pham59464ef2013-08-28 23:23:35 +03001676 spin_unlock_bh(&isert_conn->conn_lock);
1677 wr->fr_desc = NULL;
1678 }
1679
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02001680 if (wr->data.sg) {
Sagi Grimberga3a5a822014-01-09 18:40:50 +02001681 pr_debug("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd);
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02001682 isert_unmap_data_buf(isert_conn, &wr->data);
Vu Pham59464ef2013-08-28 23:23:35 +03001683 }
1684
1685 wr->ib_sge = NULL;
1686 wr->send_wr = NULL;
1687}
1688
/*
 * Final teardown of a completed (or errored-out) iSER command.
 *
 * Unlinks the command from the connection's command list, releases RDMA
 * resources through the device-specific unreg_rdma_mem() callback where
 * applicable, and hands the command back to the target core.
 *
 * @comp_err: true when invoked from the completion-error path; used to
 * drop the extra se_cmd reference a WRITE_PENDING command still holds.
 */
static void
isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct iscsi_conn *conn = isert_conn->conn;
	struct isert_device *device = isert_conn->conn_device;

	pr_debug("Entering isert_put_cmd: %p\n", isert_cmd);

	switch (cmd->iscsi_opcode) {
	case ISCSI_OP_SCSI_CMD:
		/* Unlink under cmd_lock; list_empty() guards double removal */
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		if (cmd->data_direction == DMA_TO_DEVICE) {
			iscsit_stop_dataout_timer(cmd);
			/*
			 * Check for special case during comp_err where
			 * WRITE_PENDING has been handed off from core,
			 * but requires an extra target_put_sess_cmd()
			 * before transport_generic_free_cmd() below.
			 */
			if (comp_err &&
			    cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
				struct se_cmd *se_cmd = &cmd->se_cmd;

				target_put_sess_cmd(se_cmd->se_sess, se_cmd);
			}
		}

		device->unreg_rdma_mem(isert_cmd, isert_conn);
		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_REJECT:
	case ISCSI_OP_NOOP_OUT:
	case ISCSI_OP_TEXT:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		/*
		 * Handle special case for REJECT when iscsi_add_reject*() has
		 * overwritten the original iscsi_opcode assignment, and the
		 * associated cmd->se_cmd needs to be released.
		 */
		if (cmd->se_cmd.se_tfo != NULL) {
			pr_debug("Calling transport_generic_free_cmd from"
				 " isert_put_cmd for 0x%02x\n",
				 cmd->iscsi_opcode);
			transport_generic_free_cmd(&cmd->se_cmd, 0);
			break;
		}
		/*
		 * Fall-through
		 */
	default:
		/* No se_cmd attached: release the raw iscsi_cmd only */
		iscsit_release_cmd(cmd);
		break;
	}
}
1761
1762static void
1763isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
1764{
1765 if (tx_desc->dma_addr != 0) {
1766 pr_debug("Calling ib_dma_unmap_single for tx_desc->dma_addr\n");
1767 ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
1768 ISER_HEADERS_LEN, DMA_TO_DEVICE);
1769 tx_desc->dma_addr = 0;
1770 }
1771}
1772
1773static void
1774isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
Nicholas Bellinger03e78482014-03-30 15:50:03 -07001775 struct ib_device *ib_dev, bool comp_err)
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001776{
Nicholas Bellingerdbbc5d12013-07-03 19:39:37 -07001777 if (isert_cmd->pdu_buf_dma != 0) {
1778 pr_debug("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n");
1779 ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
1780 isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
1781 isert_cmd->pdu_buf_dma = 0;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001782 }
1783
1784 isert_unmap_tx_desc(tx_desc, ib_dev);
Nicholas Bellinger03e78482014-03-30 15:50:03 -07001785 isert_put_cmd(isert_cmd, comp_err);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001786}
1787
Sagi Grimberg96b79732014-03-17 12:52:18 +02001788static int
1789isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
1790{
1791 struct ib_mr_status mr_status;
1792 int ret;
1793
1794 ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
1795 if (ret) {
1796 pr_err("ib_check_mr_status failed, ret %d\n", ret);
1797 goto fail_mr_status;
1798 }
1799
1800 if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
1801 u64 sec_offset_err;
1802 u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8;
1803
1804 switch (mr_status.sig_err.err_type) {
1805 case IB_SIG_BAD_GUARD:
1806 se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
1807 break;
1808 case IB_SIG_BAD_REFTAG:
1809 se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
1810 break;
1811 case IB_SIG_BAD_APPTAG:
1812 se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
1813 break;
1814 }
1815 sec_offset_err = mr_status.sig_err.sig_err_offset;
1816 do_div(sec_offset_err, block_size);
1817 se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba;
1818
1819 pr_err("isert: PI error found type %d at sector 0x%llx "
1820 "expected 0x%x vs actual 0x%x\n",
1821 mr_status.sig_err.err_type,
1822 (unsigned long long)se_cmd->bad_sector,
1823 mr_status.sig_err.expected,
1824 mr_status.sig_err.actual);
1825 ret = 1;
1826 }
1827
1828fail_mr_status:
1829 return ret;
1830}
1831
/*
 * Completion handler for an RDMA_WRITE pushing READ data to the
 * initiator.
 *
 * If the transfer was T10-PI protected, the signature MR is checked
 * first.  RDMA resources are then released and either the normal SCSI
 * response is queued or, on a PI error, a CHECK_CONDITION carrying the
 * sense code selected by isert_check_pi_status().
 */
static void
isert_completion_rdma_write(struct iser_tx_desc *tx_desc,
			    struct isert_cmd *isert_cmd)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct isert_device *device = isert_conn->conn_device;
	int ret = 0;

	if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
		ret = isert_check_pi_status(se_cmd,
					    wr->fr_desc->pi_ctx->sig_mr);
		wr->fr_desc->ind &= ~ISERT_PROTECTED;
	}

	device->unreg_rdma_mem(isert_cmd, isert_conn);
	/* RDMA WRs already accounted by the caller; clear the count */
	wr->send_wr_num = 0;
	if (ret)
		transport_send_check_condition_and_sense(se_cmd,
							 se_cmd->pi_err, 0);
	else
		isert_put_response(isert_conn->conn, cmd);
}
1857
/*
 * Completion handler for an RDMA_READ pulling WRITE data from the
 * initiator.
 *
 * Optionally verifies T10-PI, tears down the RDMA mapping, records the
 * amount of write data received, and then hands the command to the
 * target core for execution -- or returns a CHECK_CONDITION if PI
 * verification failed.
 */
static void
isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
			   struct isert_cmd *isert_cmd)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct isert_device *device = isert_conn->conn_device;
	int ret = 0;

	if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
		ret = isert_check_pi_status(se_cmd,
					    wr->fr_desc->pi_ctx->sig_mr);
		wr->fr_desc->ind &= ~ISERT_PROTECTED;
	}

	iscsit_stop_dataout_timer(cmd);
	device->unreg_rdma_mem(isert_cmd, isert_conn);
	cmd->write_data_done = wr->data.len;
	/* RDMA WRs already accounted by the caller; clear the count */
	wr->send_wr_num = 0;

	pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
	spin_lock_bh(&cmd->istate_lock);
	cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
	cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
	spin_unlock_bh(&cmd->istate_lock);

	if (ret)
		transport_send_check_condition_and_sense(se_cmd,
							 se_cmd->pi_err, 0);
	else
		target_execute_cmd(se_cmd);
}
1892
/*
 * Deferred completion of control PDU sends.
 *
 * Runs from isert_comp_wq via isert_cmd->comp_work so that the i_state
 * specific post-handlers execute in process context.  Each state drops
 * the TX accounting and, except for logout (whose post-handler owns
 * further teardown), releases the command through isert_completion_put()
 * after marking it ISTATE_SENT_STATUS.
 */
static void
isert_do_control_comp(struct work_struct *work)
{
	struct isert_cmd *isert_cmd = container_of(work,
			struct isert_cmd, comp_work);
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;

	switch (cmd->i_state) {
	case ISTATE_SEND_TASKMGTRSP:
		pr_debug("Calling iscsit_tmr_post_handler >>>>>>>>>>>>>>>>>\n");

		atomic_dec(&isert_conn->post_send_buf_count);
		iscsit_tmr_post_handler(cmd, cmd->conn);

		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
		break;
	case ISTATE_SEND_REJECT:
		pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n");
		atomic_dec(&isert_conn->post_send_buf_count);

		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
		break;
	case ISTATE_SEND_LOGOUTRSP:
		pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");

		atomic_dec(&isert_conn->post_send_buf_count);
		iscsit_logout_post_handler(cmd, cmd->conn);
		break;
	case ISTATE_SEND_TEXTRSP:
		atomic_dec(&isert_conn->post_send_buf_count);
		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
		break;
	default:
		pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state);
		dump_stack();
		break;
	}
}
1936
/*
 * Completion of a response send WR.
 *
 * Control-type responses (TMR, logout, reject, text) are handed to
 * isert_do_control_comp() on the completion workqueue; all other
 * responses are finished inline once the post_send_buf_count
 * accounting has been adjusted.
 */
static void
isert_response_completion(struct iser_tx_desc *tx_desc,
			  struct isert_cmd *isert_cmd,
			  struct isert_conn *isert_conn,
			  struct ib_device *ib_dev)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;

	if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
	    cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
	    cmd->i_state == ISTATE_SEND_REJECT ||
	    cmd->i_state == ISTATE_SEND_TEXTRSP) {
		isert_unmap_tx_desc(tx_desc, ib_dev);

		INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
		queue_work(isert_comp_wq, &isert_cmd->comp_work);
		return;
	}

	/**
	 * If send_wr_num is 0 this means that we got
	 * RDMA completion and we cleared it and we should
	 * simply decrement the response post. else the
	 * response is incorporated in send_wr_num, just
	 * sub it.
	 **/
	if (wr->send_wr_num)
		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
	else
		atomic_dec(&isert_conn->post_send_buf_count);

	cmd->i_state = ISTATE_SENT_STATUS;
	isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
}
1972
/*
 * Dispatch a successful TX work completion by the command's iser_ib_op.
 *
 * A tx_desc without an attached isert_cmd is a raw descriptor and is
 * simply unmapped and accounted.  RDMA_WRITE/READ completions subtract
 * their outstanding WR count before running the op-specific handler;
 * ISER_IB_RECV on the send path indicates a driver bug.
 */
static void
isert_send_completion(struct iser_tx_desc *tx_desc,
		      struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
	struct isert_rdma_wr *wr;

	if (!isert_cmd) {
		atomic_dec(&isert_conn->post_send_buf_count);
		isert_unmap_tx_desc(tx_desc, ib_dev);
		return;
	}
	wr = &isert_cmd->rdma_wr;

	switch (wr->iser_ib_op) {
	case ISER_IB_RECV:
		pr_err("isert_send_completion: Got ISER_IB_RECV\n");
		dump_stack();
		break;
	case ISER_IB_SEND:
		pr_debug("isert_send_completion: Got ISER_IB_SEND\n");
		isert_response_completion(tx_desc, isert_cmd,
					  isert_conn, ib_dev);
		break;
	case ISER_IB_RDMA_WRITE:
		pr_debug("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
		isert_completion_rdma_write(tx_desc, isert_cmd);
		break;
	case ISER_IB_RDMA_READ:
		pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");

		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
		isert_completion_rdma_read(tx_desc, isert_cmd);
		break;
	default:
		pr_err("Unknown wr->iser_ib_op: 0x%02x\n", wr->iser_ib_op);
		dump_stack();
		break;
	}
}
2015
/*
 * Handle a failed work completion; @tx selects TX vs RX bookkeeping.
 *
 * TX errors release the descriptor (and its command, with comp_err set);
 * RX errors only drop the posted-receive count.  Once both counters
 * reach zero the connection is torn down: outstanding se_cmds are
 * drained, the RDMA connection is terminated under conn_mutex, iSCSI
 * connection reinstatement is kicked, and conn_wait_comp_err is
 * completed for the waiter.
 */
static void
isert_cq_comp_err(void *desc, struct isert_conn *isert_conn, bool tx)
{
	if (tx) {
		struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
		struct isert_cmd *isert_cmd;

		isert_cmd = ((struct iser_tx_desc *)desc)->isert_cmd;
		if (!isert_cmd)
			isert_unmap_tx_desc(desc, ib_dev);
		else
			isert_completion_put(desc, isert_cmd, ib_dev, true);
		atomic_dec(&isert_conn->post_send_buf_count);
	} else {
		isert_conn->post_recv_buf_count--;
	}

	if (isert_conn->post_recv_buf_count == 0 &&
	    atomic_read(&isert_conn->post_send_buf_count) == 0) {
		struct iscsi_conn *conn = isert_conn->conn;

		if (conn->sess) {
			target_sess_cmd_list_set_waiting(conn->sess->se_sess);
			target_wait_for_sess_cmds(conn->sess->se_sess);
		}

		mutex_lock(&isert_conn->conn_mutex);
		isert_conn_terminate(isert_conn);
		mutex_unlock(&isert_conn->conn_mutex);

		iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
		complete(&isert_conn->conn_wait_comp_err);
	}
}
2050
/*
 * Workqueue handler draining the TX CQ of one completion context.
 *
 * Polls one WC at a time; successes are dispatched to
 * isert_send_completion(), failures take the error path unless the WR
 * is a fast-reg local-invalidate (ISER_FASTREG_LI_WRID), which carries
 * no descriptor.  Re-arms CQ notification before returning.
 */
static void
isert_cq_tx_work(struct work_struct *work)
{
	struct isert_comp *comp = container_of(work, struct isert_comp,
					       tx_work);
	struct ib_cq *cq = comp->tx_cq;
	struct isert_conn *isert_conn;
	struct iser_tx_desc *tx_desc;
	struct ib_wc wc;

	while (ib_poll_cq(cq, 1, &wc) == 1) {
		isert_conn = wc.qp->qp_context;
		tx_desc = (struct iser_tx_desc *)(uintptr_t)wc.wr_id;

		if (wc.status == IB_WC_SUCCESS) {
			isert_send_completion(tx_desc, isert_conn);
		} else {
			pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
			pr_debug("TX wc.status: 0x%08x\n", wc.status);
			pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err);

			if (wc.wr_id != ISER_FASTREG_LI_WRID)
				isert_cq_comp_err(tx_desc, isert_conn, true);
		}
	}

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
}
2079
/*
 * TX CQ event callback: defer all polling to isert_cq_tx_work() on the
 * completion workqueue.
 */
static void
isert_cq_tx_callback(struct ib_cq *cq, void *context)
{
	struct isert_comp *comp = context;

	queue_work(isert_comp_wq, &comp->tx_work);
}
2087
/*
 * Workqueue handler draining the RX CQ of one completion context.
 *
 * Successful receives are passed to isert_rx_completion() along with
 * the received byte count; failures go through isert_cq_comp_err()
 * (flush errors are not logged in detail, as they are expected during
 * teardown).  Re-arms CQ notification before returning.
 */
static void
isert_cq_rx_work(struct work_struct *work)
{
	struct isert_comp *comp = container_of(work, struct isert_comp,
					       rx_work);
	struct ib_cq *cq = comp->rx_cq;
	struct isert_conn *isert_conn;
	struct iser_rx_desc *rx_desc;
	struct ib_wc wc;
	u32 xfer_len;

	while (ib_poll_cq(cq, 1, &wc) == 1) {
		isert_conn = wc.qp->qp_context;
		rx_desc = (struct iser_rx_desc *)(uintptr_t)wc.wr_id;

		if (wc.status == IB_WC_SUCCESS) {
			xfer_len = wc.byte_len;
			isert_rx_completion(rx_desc, isert_conn, xfer_len);
		} else {
			pr_debug("RX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
			if (wc.status != IB_WC_WR_FLUSH_ERR) {
				pr_debug("RX wc.status: 0x%08x\n", wc.status);
				pr_debug("RX wc.vendor_err: 0x%08x\n",
					 wc.vendor_err);
			}
			isert_cq_comp_err(rx_desc, isert_conn, false);
		}
	}

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
}
2119
/*
 * RX CQ event callback: defer all polling to isert_cq_rx_work() on the
 * dedicated RX workqueue.
 */
static void
isert_cq_rx_callback(struct ib_cq *cq, void *context)
{
	struct isert_comp *comp = context;

	queue_work(isert_rx_wq, &comp->rx_work);
}
2127
2128static int
2129isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
2130{
2131 struct ib_send_wr *wr_failed;
2132 int ret;
2133
2134 atomic_inc(&isert_conn->post_send_buf_count);
2135
2136 ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr,
2137 &wr_failed);
2138 if (ret) {
2139 pr_err("ib_post_send failed with %d\n", ret);
2140 atomic_dec(&isert_conn->post_send_buf_count);
2141 return ret;
2142 }
2143 return ret;
2144}
2145
/*
 * iscsit transport callback: build and post a SCSI Response PDU.
 *
 * When sense data must be carried, it is DMA-mapped and attached as a
 * second SGE on the TX descriptor, with the sense length written into
 * the first two bytes of the buffer and the total padded to a 4-byte
 * boundary.
 *
 * Returns the result of posting the send WR.
 */
static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
				&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_rsp_pdu(cmd, conn, true, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	/*
	 * Attach SENSE DATA payload to iSCSI Response PDU
	 */
	if (cmd->se_cmd.sense_buffer &&
	    ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	     (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
		struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		u32 padding, pdu_len;

		/* Prefix the payload with the big-endian sense length */
		put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
				   cmd->sense_buffer);
		cmd->se_cmd.scsi_sense_length += sizeof(__be16);

		padding = -(cmd->se_cmd.scsi_sense_length) & 3;
		hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
		pdu_len = cmd->se_cmd.scsi_sense_length + padding;

		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
				(void *)cmd->sense_buffer, pdu_len,
				DMA_TO_DEVICE);

		isert_cmd->pdu_buf_len = pdu_len;
		tx_dsg->addr = isert_cmd->pdu_buf_dma;
		tx_dsg->length = pdu_len;
		tx_dsg->lkey = isert_conn->conn_mr->lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}

	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}
2193
Nicholas Bellinger131e6ab2014-03-22 14:55:56 -07002194static void
2195isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2196{
2197 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2198 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2199 struct isert_device *device = isert_conn->conn_device;
2200
2201 spin_lock_bh(&conn->cmd_lock);
2202 if (!list_empty(&cmd->i_conn_node))
2203 list_del_init(&cmd->i_conn_node);
2204 spin_unlock_bh(&conn->cmd_lock);
2205
2206 if (cmd->data_direction == DMA_TO_DEVICE)
2207 iscsit_stop_dataout_timer(cmd);
2208
2209 device->unreg_rdma_mem(isert_cmd, isert_conn);
2210}
2211
Nicholas Bellingere70beee2014-04-02 12:52:38 -07002212static enum target_prot_op
2213isert_get_sup_prot_ops(struct iscsi_conn *conn)
2214{
2215 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2216 struct isert_device *device = isert_conn->conn_device;
2217
Sagi Grimberg23a548e2014-12-02 16:57:35 +02002218 if (conn->tpg->tpg_attrib.t10_pi) {
2219 if (device->pi_capable) {
2220 pr_info("conn %p PI offload enabled\n", isert_conn);
2221 isert_conn->pi_support = true;
2222 return TARGET_PROT_ALL;
2223 }
2224 }
2225
2226 pr_info("conn %p PI offload disabled\n", isert_conn);
2227 isert_conn->pi_support = false;
Nicholas Bellingere70beee2014-04-02 12:52:38 -07002228
2229 return TARGET_PROT_NORMAL;
2230}
2231
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002232static int
2233isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
2234 bool nopout_response)
2235{
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07002236 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002237 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2238 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2239
2240 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2241 iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
2242 &isert_cmd->tx_desc.iscsi_header,
2243 nopout_response);
2244 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
Sagi Grimberg68a86de2014-12-02 16:57:37 +02002245 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002246
Masanari Iida8b513d02013-05-21 23:13:12 +09002247 pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002248
2249 return isert_post_response(isert_conn, isert_cmd);
2250}
2251
/*
 * iscsit transport callback: build and post a Logout Response PDU.
 * The logout post-handler runs later from isert_do_control_comp().
 *
 * Returns the result of posting the send WR.
 */
static int
isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
				&isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}
2269
/*
 * iscsit transport callback: build and post a Task Management Response
 * PDU; completion is finished via isert_do_control_comp().
 *
 * Returns the result of posting the send WR.
 */
static int
isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
				  &isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}
2287
/*
 * iscsit transport callback: build and post a Reject PDU.
 *
 * ISCSI_HDR_LEN bytes of the rejected PDU's header (cmd->buf_ptr) are
 * DMA-mapped and attached as a second SGE forming the Reject payload.
 *
 * Returns the result of posting the send WR.
 */
static int
isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
	struct iscsi_reject *hdr =
		(struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_reject(cmd, conn, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	hton24(hdr->dlength, ISCSI_HDR_LEN);
	isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
			(void *)cmd->buf_ptr, ISCSI_HDR_LEN,
			DMA_TO_DEVICE);
	isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
	tx_dsg->addr = isert_cmd->pdu_buf_dma;
	tx_dsg->length = ISCSI_HDR_LEN;
	tx_dsg->lkey = isert_conn->conn_mr->lkey;
	isert_cmd->tx_desc.num_sge = 2;

	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}
2319
2320static int
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07002321isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2322{
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07002323 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07002324 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2325 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2326 struct iscsi_text_rsp *hdr =
2327 (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
2328 u32 txt_rsp_len;
2329 int rc;
2330
2331 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
Sagi Grimberg22c7aaa2014-06-10 18:27:59 +03002332 rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND);
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07002333 if (rc < 0)
2334 return rc;
2335
2336 txt_rsp_len = rc;
2337 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2338
2339 if (txt_rsp_len) {
2340 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2341 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
2342 void *txt_rsp_buf = cmd->buf_ptr;
2343
2344 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
2345 txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);
2346
2347 isert_cmd->pdu_buf_len = txt_rsp_len;
2348 tx_dsg->addr = isert_cmd->pdu_buf_dma;
2349 tx_dsg->length = txt_rsp_len;
2350 tx_dsg->lkey = isert_conn->conn_mr->lkey;
2351 isert_cmd->tx_desc.num_sge = 2;
2352 }
Sagi Grimberg68a86de2014-12-02 16:57:37 +02002353 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07002354
2355 pr_debug("Posting Text Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
2356
2357 return isert_post_response(isert_conn, isert_cmd);
2358}
2359
/*
 * isert_build_rdma_wr - populate one RDMA work request from the TCM SGL
 * @isert_conn:	connection the WR will be posted on
 * @isert_cmd:	command owning the tx descriptor used as wr_id
 * @ib_sge:	array of SGEs to fill (caller-allocated, at least sg_nents)
 * @send_wr:	work request whose sg_list/num_sge/wr_id are set here
 * @data_left:	bytes remaining in the overall transfer
 * @offset:	byte offset into the command's t_data_sg to start from
 *
 * Converts up to max_sge scatterlist entries (starting at @offset) into
 * ib_sge entries using the connection's local DMA lkey.  Only the first
 * SGE can start mid-page (page_off is zeroed after the first iteration).
 *
 * Returns the number of scatterlist entries consumed (== num_sge).
 */
static int
isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		    struct ib_sge *ib_sge, struct ib_send_wr *send_wr,
		    u32 data_left, u32 offset)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct scatterlist *sg_start, *tmp_sg;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	u32 sg_off, page_off;
	int i = 0, sg_nents;

	/* Translate the byte offset into an SGL index plus intra-page offset */
	sg_off = offset / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
	page_off = offset % PAGE_SIZE;

	send_wr->sg_list = ib_sge;
	send_wr->num_sge = sg_nents;
	send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
	/*
	 * Perform mapping of TCM scatterlist memory ib_sge dma_addr.
	 */
	for_each_sg(sg_start, tmp_sg, sg_nents, i) {
		pr_debug("ISER RDMA from SGL dma_addr: 0x%16llx dma_len: %u, page_off: %u\n",
			 (unsigned long long)tmp_sg->dma_address,
			 tmp_sg->length, page_off);

		ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
		ib_sge->length = min_t(u32, data_left,
				       ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
		ib_sge->lkey = isert_conn->conn_mr->lkey;

		pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n",
			 ib_sge->addr, ib_sge->length, ib_sge->lkey);
		/* only the very first SGE may start inside a page */
		page_off = 0;
		data_left -= ib_sge->length;
		ib_sge++;
		pr_debug("Incrementing ib_sge pointer to %p\n", ib_sge);
	}

	pr_debug("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
		 send_wr->sg_list, send_wr->num_sge);

	return sg_nents;
}
2405
2406static int
Vu Pham90ecc6e2013-08-28 23:23:33 +03002407isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2408 struct isert_rdma_wr *wr)
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002409{
2410 struct se_cmd *se_cmd = &cmd->se_cmd;
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07002411 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002412 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002413 struct isert_data_buf *data = &wr->data;
Vu Pham90ecc6e2013-08-28 23:23:33 +03002414 struct ib_send_wr *send_wr;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002415 struct ib_sge *ib_sge;
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002416 u32 offset, data_len, data_left, rdma_write_max, va_offset = 0;
2417 int ret = 0, i, ib_sge_cnt;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002418
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002419 isert_cmd->tx_desc.isert_cmd = isert_cmd;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002420
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002421 offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
2422 ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
2423 se_cmd->t_data_nents, se_cmd->data_length,
2424 offset, wr->iser_ib_op, &wr->data);
2425 if (ret)
2426 return ret;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002427
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002428 data_left = data->len;
2429 offset = data->offset;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002430
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002431 ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002432 if (!ib_sge) {
Vu Pham90ecc6e2013-08-28 23:23:33 +03002433 pr_warn("Unable to allocate ib_sge\n");
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002434 ret = -ENOMEM;
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002435 goto unmap_cmd;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002436 }
Vu Pham90ecc6e2013-08-28 23:23:33 +03002437 wr->ib_sge = ib_sge;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002438
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002439 wr->send_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002440 wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
2441 GFP_KERNEL);
2442 if (!wr->send_wr) {
Vu Pham90ecc6e2013-08-28 23:23:33 +03002443 pr_debug("Unable to allocate wr->send_wr\n");
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002444 ret = -ENOMEM;
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002445 goto unmap_cmd;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002446 }
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002447
2448 wr->isert_cmd = isert_cmd;
2449 rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002450
2451 for (i = 0; i < wr->send_wr_num; i++) {
2452 send_wr = &isert_cmd->rdma_wr.send_wr[i];
2453 data_len = min(data_left, rdma_write_max);
2454
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002455 send_wr->send_flags = 0;
Vu Pham90ecc6e2013-08-28 23:23:33 +03002456 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
2457 send_wr->opcode = IB_WR_RDMA_WRITE;
2458 send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset;
2459 send_wr->wr.rdma.rkey = isert_cmd->read_stag;
2460 if (i + 1 == wr->send_wr_num)
2461 send_wr->next = &isert_cmd->tx_desc.send_wr;
2462 else
2463 send_wr->next = &wr->send_wr[i + 1];
2464 } else {
2465 send_wr->opcode = IB_WR_RDMA_READ;
2466 send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset;
2467 send_wr->wr.rdma.rkey = isert_cmd->write_stag;
2468 if (i + 1 == wr->send_wr_num)
2469 send_wr->send_flags = IB_SEND_SIGNALED;
2470 else
2471 send_wr->next = &wr->send_wr[i + 1];
2472 }
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002473
2474 ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
2475 send_wr, data_len, offset);
2476 ib_sge += ib_sge_cnt;
2477
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002478 offset += data_len;
Vu Pham90ecc6e2013-08-28 23:23:33 +03002479 va_offset += data_len;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002480 data_left -= data_len;
2481 }
Vu Pham90ecc6e2013-08-28 23:23:33 +03002482
2483 return 0;
Sagi Grimberge3d7e4c2014-02-19 17:50:22 +02002484unmap_cmd:
2485 isert_unmap_data_buf(isert_conn, data);
2486
Vu Pham90ecc6e2013-08-28 23:23:33 +03002487 return ret;
2488}
2489
/*
 * isert_map_fr_pagelist - flatten a DMA-mapped SGL into a fastreg page list
 * @ib_dev:	device whose DMA mapping helpers are used
 * @sg_start:	first scatterlist entry
 * @sg_nents:	number of entries to walk
 * @fr_pl:	output array of page-aligned DMA addresses
 *
 * Coalesces physically contiguous runs of SGL entries into "chunks": a
 * chunk is extended while the current entry ends mid-page and is not the
 * last entry; when a chunk closes, every page it covers is appended to
 * @fr_pl for use with IB_WR_FAST_REG_MR.
 *
 * Returns the number of pages written to @fr_pl.
 */
static int
isert_map_fr_pagelist(struct ib_device *ib_dev,
		      struct scatterlist *sg_start, int sg_nents, u64 *fr_pl)
{
	u64 start_addr, end_addr, page, chunk_start = 0;
	struct scatterlist *tmp_sg;
	int i = 0, new_chunk, last_ent, n_pages;

	n_pages = 0;
	new_chunk = 1;
	last_ent = sg_nents - 1;
	for_each_sg(sg_start, tmp_sg, sg_nents, i) {
		start_addr = ib_sg_dma_address(ib_dev, tmp_sg);
		if (new_chunk)
			chunk_start = start_addr;
		end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg);

		pr_debug("SGL[%d] dma_addr: 0x%16llx len: %u\n",
			 i, (unsigned long long)tmp_sg->dma_address,
			 tmp_sg->length);

		/* keep accumulating while the chunk ends mid-page */
		if ((end_addr & ~PAGE_MASK) && i < last_ent) {
			new_chunk = 0;
			continue;
		}
		new_chunk = 1;

		/* emit every page of the just-closed chunk */
		page = chunk_start & PAGE_MASK;
		do {
			fr_pl[n_pages++] = page;
			pr_debug("Mapped page_list[%d] page_addr: 0x%16llx\n",
				 n_pages - 1, page);
			page += PAGE_SIZE;
		} while (page < end_addr);
	}

	return n_pages;
}
2528
/*
 * isert_fast_reg_mr - register a mapped buffer via IB_WR_FAST_REG_MR
 * @isert_conn:	connection whose QP/PD are used
 * @fr_desc:	fastreg descriptor supplying the MR and page list
 * @mem:	DMA-mapped buffer (data or protection) to register
 * @ind:	which key is being registered (data vs. protection)
 * @sge:	output SGE describing the registered region
 *
 * Single-SGE buffers are described directly with the connection's local
 * DMA MR and no registration is performed.  Otherwise the buffer's pages
 * are loaded into the descriptor's fastreg page list and a (possibly
 * LOCAL_INV-prefixed) FAST_REG_MR WR is posted.
 *
 * Returns 0 on success, negative errno from ib_post_send() on failure.
 */
static int
isert_fast_reg_mr(struct isert_conn *isert_conn,
		  struct fast_reg_descriptor *fr_desc,
		  struct isert_data_buf *mem,
		  enum isert_indicator ind,
		  struct ib_sge *sge)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct ib_mr *mr;
	struct ib_fast_reg_page_list *frpl;
	struct ib_send_wr fr_wr, inv_wr;
	struct ib_send_wr *bad_wr, *wr = NULL;
	int ret, pagelist_len;
	u32 page_off;
	u8 key;

	/* Fast path: one DMA segment needs no registration at all */
	if (mem->dma_nents == 1) {
		sge->lkey = isert_conn->conn_mr->lkey;
		sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
		sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
		pr_debug("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n",
			 __func__, __LINE__, sge->addr, sge->length,
			 sge->lkey);
		return 0;
	}

	if (ind == ISERT_DATA_KEY_VALID) {
		/* Registering data buffer */
		mr = fr_desc->data_mr;
		frpl = fr_desc->data_frpl;
	} else {
		/* Registering protection buffer */
		mr = fr_desc->pi_ctx->prot_mr;
		frpl = fr_desc->pi_ctx->prot_frpl;
	}

	page_off = mem->offset % PAGE_SIZE;

	pr_debug("Use fr_desc %p sg_nents %d offset %u\n",
		 fr_desc, mem->nents, mem->offset);

	pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents,
					     &frpl->page_list[0]);

	/*
	 * NOTE(review): this gate tests ISERT_DATA_KEY_VALID even when
	 * registering the protection MR (@ind == ISERT_PROT_KEY_VALID);
	 * verify whether it should test @ind instead, matching the
	 * "fr_desc->ind &= ~ind" clear on the success path below.
	 */
	if (!(fr_desc->ind & ISERT_DATA_KEY_VALID)) {
		memset(&inv_wr, 0, sizeof(inv_wr));
		inv_wr.wr_id = ISER_FASTREG_LI_WRID;
		inv_wr.opcode = IB_WR_LOCAL_INV;
		inv_wr.ex.invalidate_rkey = mr->rkey;
		wr = &inv_wr;
		/* Bump the key */
		key = (u8)(mr->rkey & 0x000000FF);
		ib_update_fast_reg_key(mr, ++key);
	}

	/* Prepare FASTREG WR */
	memset(&fr_wr, 0, sizeof(fr_wr));
	fr_wr.wr_id = ISER_FASTREG_LI_WRID;
	fr_wr.opcode = IB_WR_FAST_REG_MR;
	fr_wr.wr.fast_reg.iova_start = frpl->page_list[0] + page_off;
	fr_wr.wr.fast_reg.page_list = frpl;
	fr_wr.wr.fast_reg.page_list_len = pagelist_len;
	fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
	fr_wr.wr.fast_reg.length = mem->len;
	fr_wr.wr.fast_reg.rkey = mr->rkey;
	fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;

	/* chain LOCAL_INV -> FAST_REG_MR when invalidation is needed */
	if (!wr)
		wr = &fr_wr;
	else
		wr->next = &fr_wr;

	ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
	if (ret) {
		pr_err("fast registration failed, ret:%d\n", ret);
		return ret;
	}
	fr_desc->ind &= ~ind;

	sge->lkey = mr->lkey;
	sge->addr = frpl->page_list[0] + page_off;
	sge->length = mem->len;

	pr_debug("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n",
		 __func__, __LINE__, sge->addr, sge->length,
		 sge->lkey);

	return ret;
}
2618
Sagi Grimberg3d73cf12014-08-13 19:54:34 +03002619static inline void
2620isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs,
2621 struct ib_sig_domain *domain)
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002622{
Sagi Grimberg78eda2b2014-08-13 19:54:35 +03002623 domain->sig_type = IB_SIG_TYPE_T10_DIF;
Sagi Grimberg3d73cf12014-08-13 19:54:34 +03002624 domain->sig.dif.bg_type = IB_T10DIF_CRC;
2625 domain->sig.dif.pi_interval = se_cmd->se_dev->dev_attrib.block_size;
2626 domain->sig.dif.ref_tag = se_cmd->reftag_seed;
Sagi Grimberg78eda2b2014-08-13 19:54:35 +03002627 /*
2628 * At the moment we hard code those, but if in the future
2629 * the target core would like to use it, we will take it
2630 * from se_cmd.
2631 */
2632 domain->sig.dif.apptag_check_mask = 0xffff;
2633 domain->sig.dif.app_escape = true;
2634 domain->sig.dif.ref_escape = true;
2635 if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT ||
2636 se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)
2637 domain->sig.dif.ref_remap = true;
Sagi Grimberg3d73cf12014-08-13 19:54:34 +03002638};
2639
2640static int
2641isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
2642{
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002643 switch (se_cmd->prot_op) {
2644 case TARGET_PROT_DIN_INSERT:
2645 case TARGET_PROT_DOUT_STRIP:
Sagi Grimberg78eda2b2014-08-13 19:54:35 +03002646 sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
Sagi Grimberg3d73cf12014-08-13 19:54:34 +03002647 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002648 break;
2649 case TARGET_PROT_DOUT_INSERT:
2650 case TARGET_PROT_DIN_STRIP:
Sagi Grimberg78eda2b2014-08-13 19:54:35 +03002651 sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
Sagi Grimberg3d73cf12014-08-13 19:54:34 +03002652 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002653 break;
2654 case TARGET_PROT_DIN_PASS:
2655 case TARGET_PROT_DOUT_PASS:
Sagi Grimberg3d73cf12014-08-13 19:54:34 +03002656 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
2657 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
Sagi Grimberg9e961ae2014-02-19 17:50:25 +02002658 break;
2659 default:
2660 pr_err("Unsupported PI operation %d\n", se_cmd->prot_op);
2661 return -EINVAL;
2662 }
2663
2664 return 0;
2665}
2666
/*
 * isert_set_prot_checks - translate target-core DIF check flags into an
 * ib_sig_attrs check_mask (nibble groups: 0xc0 guard, 0x30 app tag,
 * 0x0f ref tag).
 *
 * NOTE(review): both the 0x30 and 0x0f groups are gated on
 * TARGET_DIF_CHECK_REFTAG, and TARGET_DIF_CHECK_APPTAG is never
 * consulted -- confirm whether tying the app-tag check bits to the
 * ref-tag flag is intentional or a copy-paste slip.
 */
static inline u8
isert_set_prot_checks(u8 prot_checks)
{
	return (prot_checks & TARGET_DIF_CHECK_GUARD ? 0xc0 : 0) |
	       (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
	       (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
}
2674
/*
 * isert_reg_sig_mr - register the signature (PI) memory region
 * @isert_conn:	connection whose QP is used
 * @se_cmd:	command supplying protection op/length/checks
 * @rdma_wr:	per-command RDMA descriptor holding the DATA/PROT/SIG SGEs
 * @fr_desc:	fastreg descriptor whose pi_ctx holds the signature MR
 *
 * Posts an IB_WR_REG_SIG_MR (optionally prefixed by LOCAL_INV to bump the
 * key) binding the already-registered data SGE -- and protection SGE when
 * present -- under a single signature MR, then fills rdma_wr->ib_sg[SIG]
 * to describe the combined region.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int
isert_reg_sig_mr(struct isert_conn *isert_conn,
		 struct se_cmd *se_cmd,
		 struct isert_rdma_wr *rdma_wr,
		 struct fast_reg_descriptor *fr_desc)
{
	struct ib_send_wr sig_wr, inv_wr;
	struct ib_send_wr *bad_wr, *wr = NULL;
	struct pi_context *pi_ctx = fr_desc->pi_ctx;
	struct ib_sig_attrs sig_attrs;
	int ret;
	u32 key;

	memset(&sig_attrs, 0, sizeof(sig_attrs));
	ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
	if (ret)
		goto err;

	sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks);

	/* Invalidate a previously used signature key before re-registering */
	if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) {
		memset(&inv_wr, 0, sizeof(inv_wr));
		inv_wr.opcode = IB_WR_LOCAL_INV;
		inv_wr.wr_id = ISER_FASTREG_LI_WRID;
		inv_wr.ex.invalidate_rkey = pi_ctx->sig_mr->rkey;
		wr = &inv_wr;
		/* Bump the key */
		key = (u8)(pi_ctx->sig_mr->rkey & 0x000000FF);
		ib_update_fast_reg_key(pi_ctx->sig_mr, ++key);
	}

	memset(&sig_wr, 0, sizeof(sig_wr));
	sig_wr.opcode = IB_WR_REG_SIG_MR;
	sig_wr.wr_id = ISER_FASTREG_LI_WRID;
	sig_wr.sg_list = &rdma_wr->ib_sg[DATA];
	sig_wr.num_sge = 1;
	sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE;
	sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
	sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
	if (se_cmd->t_prot_sg)
		sig_wr.wr.sig_handover.prot = &rdma_wr->ib_sg[PROT];

	/* chain LOCAL_INV -> REG_SIG_MR when invalidation is needed */
	if (!wr)
		wr = &sig_wr;
	else
		wr->next = &sig_wr;

	ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
	if (ret) {
		pr_err("fast registration failed, ret:%d\n", ret);
		goto err;
	}
	fr_desc->ind &= ~ISERT_SIG_KEY_VALID;

	rdma_wr->ib_sg[SIG].lkey = pi_ctx->sig_mr->lkey;
	rdma_wr->ib_sg[SIG].addr = 0;
	rdma_wr->ib_sg[SIG].length = se_cmd->data_length;
	if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
	    se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
		/*
		 * We have protection guards on the wire
		 * so we need to set a larger transfer
		 */
		rdma_wr->ib_sg[SIG].length += se_cmd->prot_length;

	pr_debug("sig_sge: addr: 0x%llx length: %u lkey: %x\n",
		 rdma_wr->ib_sg[SIG].addr, rdma_wr->ib_sg[SIG].length,
		 rdma_wr->ib_sg[SIG].lkey);
err:
	return ret;
}
2746
/*
 * isert_handle_prot_cmd - set up protection information for a PI command
 * @isert_conn:	connection providing device and PD
 * @isert_cmd:	command being prepared for RDMA
 * @wr:		per-command RDMA descriptor (fr_desc must be set)
 *
 * Lazily allocates the descriptor's pi_ctx, maps and fast-registers the
 * protection scatterlist when the command carries one, then registers
 * the signature MR covering data (and protection).  On success the
 * descriptor is marked ISERT_PROTECTED so teardown knows to unregister.
 *
 * Returns 0 on success or negative errno; the protection buffer is
 * unmapped on failure.
 */
static int
isert_handle_prot_cmd(struct isert_conn *isert_conn,
		      struct isert_cmd *isert_cmd,
		      struct isert_rdma_wr *wr)
{
	struct isert_device *device = isert_conn->conn_device;
	struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
	int ret;

	/* pi_ctx is allocated on first use per fastreg descriptor */
	if (!wr->fr_desc->pi_ctx) {
		ret = isert_create_pi_ctx(wr->fr_desc,
					  device->ib_device,
					  isert_conn->conn_pd);
		if (ret) {
			pr_err("conn %p failed to allocate pi_ctx\n",
			       isert_conn);
			return ret;
		}
	}

	if (se_cmd->t_prot_sg) {
		ret = isert_map_data_buf(isert_conn, isert_cmd,
					 se_cmd->t_prot_sg,
					 se_cmd->t_prot_nents,
					 se_cmd->prot_length,
					 0, wr->iser_ib_op, &wr->prot);
		if (ret) {
			pr_err("conn %p failed to map protection buffer\n",
			       isert_conn);
			return ret;
		}

		memset(&wr->ib_sg[PROT], 0, sizeof(wr->ib_sg[PROT]));
		ret = isert_fast_reg_mr(isert_conn, wr->fr_desc, &wr->prot,
					ISERT_PROT_KEY_VALID, &wr->ib_sg[PROT]);
		if (ret) {
			pr_err("conn %p failed to fast reg mr\n",
			       isert_conn);
			goto unmap_prot_cmd;
		}
	}

	ret = isert_reg_sig_mr(isert_conn, se_cmd, wr, wr->fr_desc);
	if (ret) {
		pr_err("conn %p failed to fast reg mr\n",
		       isert_conn);
		goto unmap_prot_cmd;
	}
	wr->fr_desc->ind |= ISERT_PROTECTED;

	return 0;

unmap_prot_cmd:
	if (se_cmd->t_prot_sg)
		isert_unmap_data_buf(isert_conn, &wr->prot);

	return ret;
}
2805
/*
 * isert_reg_rdma - fastreg RDMA path: register buffers and build one WR
 * @conn:	iscsi connection
 * @cmd:	command whose data (and optionally PI) is to be transferred
 * @wr:		per-command RDMA descriptor to fill
 *
 * Maps the data scatterlist, fast-registers it when it spans more than
 * one DMA segment or when PI is in effect (taking a descriptor from the
 * connection's fastreg pool under conn_lock), and builds a single
 * RDMA_WRITE/RDMA_READ work request over the resulting SGE (the SIG SGE
 * for PI commands, the DATA SGE otherwise).
 *
 * Returns 0 on success or negative errno; on failure the descriptor is
 * returned to the pool and the data buffer unmapped.
 */
static int
isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct fast_reg_descriptor *fr_desc = NULL;
	struct ib_send_wr *send_wr;
	struct ib_sge *ib_sg;
	u32 offset;
	int ret = 0;
	unsigned long flags;

	isert_cmd->tx_desc.isert_cmd = isert_cmd;

	/* RDMA_READ (Data-Out) may resume after partial write_data_done */
	offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
	ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
				 se_cmd->t_data_nents, se_cmd->data_length,
				 offset, wr->iser_ib_op, &wr->data);
	if (ret)
		return ret;

	/*
	 * A descriptor is only needed when actual registration will occur:
	 * multi-segment data, or any protection-enabled command.
	 */
	if (wr->data.dma_nents != 1 || isert_prot_cmd(isert_conn, se_cmd)) {
		spin_lock_irqsave(&isert_conn->conn_lock, flags);
		fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
					   struct fast_reg_descriptor, list);
		list_del(&fr_desc->list);
		spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
		wr->fr_desc = fr_desc;
	}

	/* fr_desc may be NULL here; the single-segment fast path inside
	 * isert_fast_reg_mr() never dereferences it.
	 */
	ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data,
				ISERT_DATA_KEY_VALID, &wr->ib_sg[DATA]);
	if (ret)
		goto unmap_cmd;

	if (isert_prot_cmd(isert_conn, se_cmd)) {
		ret = isert_handle_prot_cmd(isert_conn, isert_cmd, wr);
		if (ret)
			goto unmap_cmd;

		ib_sg = &wr->ib_sg[SIG];
	} else {
		ib_sg = &wr->ib_sg[DATA];
	}

	memcpy(&wr->s_ib_sge, ib_sg, sizeof(*ib_sg));
	wr->ib_sge = &wr->s_ib_sge;
	wr->send_wr_num = 1;
	memset(&wr->s_send_wr, 0, sizeof(*send_wr));
	wr->send_wr = &wr->s_send_wr;
	wr->isert_cmd = isert_cmd;

	send_wr = &isert_cmd->rdma_wr.s_send_wr;
	send_wr->sg_list = &wr->s_ib_sge;
	send_wr->num_sge = 1;
	send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
		send_wr->opcode = IB_WR_RDMA_WRITE;
		send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
		send_wr->wr.rdma.rkey = isert_cmd->read_stag;
		/* PI commands complete via the signature check, not the send */
		send_wr->send_flags = !isert_prot_cmd(isert_conn, se_cmd) ?
				      0 : IB_SEND_SIGNALED;
	} else {
		send_wr->opcode = IB_WR_RDMA_READ;
		send_wr->wr.rdma.remote_addr = isert_cmd->write_va;
		send_wr->wr.rdma.rkey = isert_cmd->write_stag;
		send_wr->send_flags = IB_SEND_SIGNALED;
	}

	return 0;

unmap_cmd:
	if (fr_desc) {
		spin_lock_irqsave(&isert_conn->conn_lock, flags);
		list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
		spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
	}
	isert_unmap_data_buf(isert_conn, &wr->data);

	return ret;
}
2889
/*
 * isert_put_datain - send SCSI Data-In to the initiator via RDMA_WRITE
 * @conn:	iscsi connection
 * @cmd:	command whose read payload is transferred
 *
 * Prepares the RDMA resources via the device's reg_rdma_mem backend
 * (isert_map_rdma or isert_reg_rdma).  For non-PI commands the SCSI
 * response PDU is chained after the final RDMA_WRITE so data + status
 * post as one WR chain; PI commands send the response separately after
 * signature verification.
 *
 * Returns 1 on successful post (response in flight), negative on error.
 */
static int
isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct isert_device *device = isert_conn->conn_device;
	struct ib_send_wr *wr_failed;
	int rc;

	pr_debug("Cmd: %p RDMA_WRITE data_length: %u\n",
		 isert_cmd, se_cmd->data_length);
	wr->iser_ib_op = ISER_IB_RDMA_WRITE;
	rc = device->reg_rdma_mem(conn, cmd, wr);
	if (rc) {
		pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
		return rc;
	}

	if (!isert_prot_cmd(isert_conn, se_cmd)) {
		/*
		 * Build isert_conn->tx_desc for iSCSI response PDU and attach
		 */
		isert_create_send_desc(isert_conn, isert_cmd,
				       &isert_cmd->tx_desc);
		iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
				     &isert_cmd->tx_desc.iscsi_header);
		isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
		isert_init_send_wr(isert_conn, isert_cmd,
				   &isert_cmd->tx_desc.send_wr);
		isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr;
		wr->send_wr_num += 1;
	}

	/* account for in-flight WRs before posting; roll back on failure */
	atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);

	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
	if (rc) {
		pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
	}

	if (!isert_prot_cmd(isert_conn, se_cmd))
		pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
			 "READ\n", isert_cmd);
	else
		pr_debug("Cmd: %p posted RDMA_WRITE for iSER Data READ\n",
			 isert_cmd);

	return 1;
}
2942
/*
 * isert_get_dataout - fetch SCSI Data-Out from the initiator via RDMA_READ
 * @conn:	iscsi connection
 * @cmd:	command whose write payload is to be pulled
 * @recovery:	unused here; part of the iscsit_transport callback signature
 *
 * Prepares RDMA resources via the device's reg_rdma_mem backend and posts
 * the RDMA_READ chain; the transfer resumes at cmd->write_data_done.
 *
 * Returns 0 on successful post, negative errno on preparation failure.
 */
static int
isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct isert_device *device = isert_conn->conn_device;
	struct ib_send_wr *wr_failed;
	int rc;

	pr_debug("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
		 isert_cmd, se_cmd->data_length, cmd->write_data_done);
	wr->iser_ib_op = ISER_IB_RDMA_READ;
	rc = device->reg_rdma_mem(conn, cmd, wr);
	if (rc) {
		pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
		return rc;
	}

	/* account for in-flight WRs before posting; roll back on failure */
	atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);

	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
	if (rc) {
		pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
	}
	pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
		 isert_cmd);

	return 0;
}
2975
2976static int
2977isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
2978{
2979 int ret;
2980
2981 switch (state) {
2982 case ISTATE_SEND_NOPIN_WANT_RESPONSE:
2983 ret = isert_put_nopin(cmd, conn, false);
2984 break;
2985 default:
2986 pr_err("Unknown immediate state: 0x%02x\n", state);
2987 ret = -EINVAL;
2988 break;
2989 }
2990
2991 return ret;
2992}
2993
2994static int
2995isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
2996{
2997 int ret;
2998
2999 switch (state) {
3000 case ISTATE_SEND_LOGOUTRSP:
3001 ret = isert_put_logout_rsp(cmd, conn);
3002 if (!ret) {
3003 pr_debug("Returning iSER Logout -EAGAIN\n");
3004 ret = -EAGAIN;
3005 }
3006 break;
3007 case ISTATE_SEND_NOPIN:
3008 ret = isert_put_nopin(cmd, conn, true);
3009 break;
3010 case ISTATE_SEND_TASKMGTRSP:
3011 ret = isert_put_tm_rsp(cmd, conn);
3012 break;
3013 case ISTATE_SEND_REJECT:
3014 ret = isert_put_reject(cmd, conn);
3015 break;
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07003016 case ISTATE_SEND_TEXTRSP:
3017 ret = isert_put_text_rsp(cmd, conn);
3018 break;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003019 case ISTATE_SEND_STATUS:
3020 /*
3021 * Special case for sending non GOOD SCSI status from TX thread
3022 * context during pre se_cmd excecution failure.
3023 */
3024 ret = isert_put_response(conn, cmd);
3025 break;
3026 default:
3027 pr_err("Unknown response state: 0x%02x\n", state);
3028 ret = -EINVAL;
3029 break;
3030 }
3031
3032 return ret;
3033}
3034
Sagi Grimbergca6c1d82014-12-02 16:57:27 +02003035struct rdma_cm_id *
3036isert_setup_id(struct isert_np *isert_np)
3037{
3038 struct iscsi_np *np = isert_np->np;
3039 struct rdma_cm_id *id;
3040 struct sockaddr *sa;
3041 int ret;
3042
3043 sa = (struct sockaddr *)&np->np_sockaddr;
3044 pr_debug("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);
3045
3046 id = rdma_create_id(isert_cma_handler, isert_np,
3047 RDMA_PS_TCP, IB_QPT_RC);
3048 if (IS_ERR(id)) {
3049 pr_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
3050 ret = PTR_ERR(id);
3051 goto out;
3052 }
3053 pr_debug("id %p context %p\n", id, id->context);
3054
3055 ret = rdma_bind_addr(id, sa);
3056 if (ret) {
3057 pr_err("rdma_bind_addr() failed: %d\n", ret);
3058 goto out_id;
3059 }
3060
3061 ret = rdma_listen(id, ISERT_RDMA_LISTEN_BACKLOG);
3062 if (ret) {
3063 pr_err("rdma_listen() failed: %d\n", ret);
3064 goto out_id;
3065 }
3066
3067 return id;
3068out_id:
3069 rdma_destroy_id(id);
3070out:
3071 return ERR_PTR(ret);
3072}
3073
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003074static int
3075isert_setup_np(struct iscsi_np *np,
3076 struct __kernel_sockaddr_storage *ksockaddr)
3077{
3078 struct isert_np *isert_np;
3079 struct rdma_cm_id *isert_lid;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003080 int ret;
3081
3082 isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
3083 if (!isert_np) {
3084 pr_err("Unable to allocate struct isert_np\n");
3085 return -ENOMEM;
3086 }
Sagi Grimberg531b7bf2014-04-29 13:13:45 +03003087 sema_init(&isert_np->np_sem, 0);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003088 mutex_init(&isert_np->np_accept_mutex);
3089 INIT_LIST_HEAD(&isert_np->np_accept_list);
3090 init_completion(&isert_np->np_login_comp);
Sagi Grimbergca6c1d82014-12-02 16:57:27 +02003091 isert_np->np = np;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003092
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003093 /*
3094 * Setup the np->np_sockaddr from the passed sockaddr setup
3095 * in iscsi_target_configfs.c code..
3096 */
3097 memcpy(&np->np_sockaddr, ksockaddr,
3098 sizeof(struct __kernel_sockaddr_storage));
3099
Sagi Grimbergca6c1d82014-12-02 16:57:27 +02003100 isert_lid = isert_setup_id(isert_np);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003101 if (IS_ERR(isert_lid)) {
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003102 ret = PTR_ERR(isert_lid);
3103 goto out;
3104 }
3105
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003106 isert_np->np_cm_id = isert_lid;
3107 np->np_context = isert_np;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003108
3109 return 0;
3110
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003111out:
3112 kfree(isert_np);
Sagi Grimbergca6c1d82014-12-02 16:57:27 +02003113
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003114 return ret;
3115}
3116
3117static int
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003118isert_rdma_accept(struct isert_conn *isert_conn)
3119{
3120 struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
3121 struct rdma_conn_param cp;
3122 int ret;
3123
3124 memset(&cp, 0, sizeof(struct rdma_conn_param));
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003125 cp.initiator_depth = isert_conn->initiator_depth;
3126 cp.retry_count = 7;
3127 cp.rnr_retry_count = 7;
3128
3129 pr_debug("Before rdma_accept >>>>>>>>>>>>>>>>>>>>.\n");
3130
3131 ret = rdma_accept(cm_id, &cp);
3132 if (ret) {
3133 pr_err("rdma_accept() failed with: %d\n", ret);
3134 return ret;
3135 }
3136
3137 pr_debug("After rdma_accept >>>>>>>>>>>>>>>>>>>>>.\n");
3138
3139 return 0;
3140}
3141
/*
 * isert_get_login_rx - wait for the next iSCSI Login Request PDU
 * @conn:  iscsi connection performing login
 * @login: login state, including first_request flag and login->req
 *
 * Called from iscsi_target_do_login_rx() context.  Blocks until the iSER
 * RX path signals login_req_comp, then for the first request only, also
 * processes the PDU via isert_rx_login_req() and waits for login setup to
 * complete.  Returns 0 on success or the -ERESTARTSYS style error from an
 * interrupted wait.
 */
static int
isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
{
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	int ret;

	pr_info("before login_req comp conn: %p\n", isert_conn);
	/* Wait until the RX path has received a login request PDU. */
	ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
	if (ret) {
		pr_err("isert_conn %p interrupted before got login req\n",
		       isert_conn);
		return ret;
	}
	/* Re-arm so a subsequent login PDU can signal us again. */
	reinit_completion(&isert_conn->login_req_comp);

	/*
	 * For login requests after the first PDU, isert_rx_login_req() will
	 * kick schedule_delayed_work(&conn->login_work) as the packet is
	 * received, which turns this callback from iscsi_target_do_login_rx()
	 * into a NOP.
	 */
	if (!login->first_request)
		return 0;

	isert_rx_login_req(isert_conn);

	pr_info("before conn_login_comp conn: %p\n", conn);
	/* First request: wait for login resources to be fully set up. */
	ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
	if (ret)
		return ret;

	pr_info("processing login->req: %p\n", login->req);

	return 0;
}
3177
3178static void
3179isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
3180 struct isert_conn *isert_conn)
3181{
3182 struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
3183 struct rdma_route *cm_route = &cm_id->route;
3184 struct sockaddr_in *sock_in;
3185 struct sockaddr_in6 *sock_in6;
3186
3187 conn->login_family = np->np_sockaddr.ss_family;
3188
3189 if (np->np_sockaddr.ss_family == AF_INET6) {
3190 sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.dst_addr;
3191 snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
3192 &sock_in6->sin6_addr.in6_u);
3193 conn->login_port = ntohs(sock_in6->sin6_port);
3194
3195 sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.src_addr;
3196 snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
3197 &sock_in6->sin6_addr.in6_u);
3198 conn->local_port = ntohs(sock_in6->sin6_port);
3199 } else {
3200 sock_in = (struct sockaddr_in *)&cm_route->addr.dst_addr;
3201 sprintf(conn->login_ip, "%pI4",
3202 &sock_in->sin_addr.s_addr);
3203 conn->login_port = ntohs(sock_in->sin_port);
3204
3205 sock_in = (struct sockaddr_in *)&cm_route->addr.src_addr;
3206 sprintf(conn->local_ip, "%pI4",
3207 &sock_in->sin_addr.s_addr);
3208 conn->local_port = ntohs(sock_in->sin_port);
3209 }
3210}
3211
/*
 * isert_accept_np - iscsit_transport accept callback for iSER
 * @np:   the listening network portal
 * @conn: freshly allocated iscsi connection to bind to an isert_conn
 *
 * Sleeps on np_sem until the CM event path queues a new connect request
 * onto np_accept_list, then binds the first queued isert_conn to @conn
 * and fills in its address info.  Returns 0 on success, -ENODEV when
 * interrupted, when the np thread is resetting/shutting down, or after
 * too many spurious wakeups.
 */
static int
isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
	struct isert_np *isert_np = (struct isert_np *)np->np_context;
	struct isert_conn *isert_conn;
	int max_accept = 0, ret;

accept_wait:
	/* np_sem is raised once per queued connect request. */
	ret = down_interruptible(&isert_np->np_sem);
	if (ret || max_accept > 5)
		return -ENODEV;

	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
		spin_unlock_bh(&np->np_thread_lock);
		pr_debug("np_thread_state %d for isert_accept_np\n",
			 np->np_thread_state);
		/**
		 * No point in stalling here when np_thread
		 * is in state RESET/SHUTDOWN/EXIT - bail
		 **/
		return -ENODEV;
	}
	spin_unlock_bh(&np->np_thread_lock);

	mutex_lock(&isert_np->np_accept_mutex);
	if (list_empty(&isert_np->np_accept_list)) {
		mutex_unlock(&isert_np->np_accept_mutex);
		/* Woke with nothing queued; bounded retry via max_accept. */
		max_accept++;
		goto accept_wait;
	}
	isert_conn = list_first_entry(&isert_np->np_accept_list,
			struct isert_conn, conn_accept_node);
	list_del_init(&isert_conn->conn_accept_node);
	mutex_unlock(&isert_np->np_accept_mutex);

	/* Cross-link the iscsi and isert connection objects. */
	conn->context = isert_conn;
	isert_conn->conn = conn;
	max_accept = 0;

	isert_set_conn_info(np, conn, isert_conn);

	pr_debug("Processing isert_conn: %p\n", isert_conn);

	return 0;
}
3258
/*
 * isert_free_np - iscsit_transport callback to tear down a network portal
 * @np: the portal being freed
 *
 * Destroys the RDMA CM listener first so no new connect requests can be
 * queued, then releases any connections still parked on np_accept_list
 * that were never picked up by isert_accept_np(), and finally frees the
 * isert_np wrapper.
 */
static void
isert_free_np(struct iscsi_np *np)
{
	struct isert_np *isert_np = (struct isert_np *)np->np_context;
	struct isert_conn *isert_conn, *n;

	/* Stop the listener before walking the pending-accept list. */
	if (isert_np->np_cm_id)
		rdma_destroy_id(isert_np->np_cm_id);

	/*
	 * FIXME: At this point we don't have a good way to insure
	 * that at this point we don't have hanging connections that
	 * completed RDMA establishment but didn't start iscsi login
	 * process. So work-around this by cleaning up what ever piled
	 * up in np_accept_list.
	 */
	mutex_lock(&isert_np->np_accept_mutex);
	if (!list_empty(&isert_np->np_accept_list)) {
		pr_info("Still have isert connections, cleaning up...\n");
		list_for_each_entry_safe(isert_conn, n,
					 &isert_np->np_accept_list,
					 conn_accept_node) {
			pr_info("cleaning isert_conn %p state (%d)\n",
				isert_conn, isert_conn->state);
			isert_connect_release(isert_conn);
		}
	}
	mutex_unlock(&isert_np->np_accept_mutex);

	np->np_context = NULL;
	kfree(isert_np);
}
3291
Sagi Grimbergb02efbf2014-12-02 16:57:29 +02003292static void isert_release_work(struct work_struct *work)
3293{
3294 struct isert_conn *isert_conn = container_of(work,
3295 struct isert_conn,
3296 release_work);
3297
3298 pr_info("Starting release conn %p\n", isert_conn);
3299
3300 wait_for_completion(&isert_conn->conn_wait);
3301
3302 mutex_lock(&isert_conn->conn_mutex);
3303 isert_conn->state = ISER_CONN_DOWN;
3304 mutex_unlock(&isert_conn->conn_mutex);
3305
3306 pr_info("Destroying conn %p\n", isert_conn);
3307 isert_put_conn(isert_conn);
3308}
3309
/*
 * isert_wait_conn - iscsit_transport callback invoked at connection
 * shutdown.  Terminates the RDMA connection (unless login never made it
 * past INIT), waits for the completion-error path to finish, and then
 * queues the final release onto isert_release_wq.
 */
static void isert_wait_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	pr_debug("isert_wait_conn: Starting \n");

	mutex_lock(&isert_conn->conn_mutex);
	/*
	 * Only wait for conn_wait_comp_err if the isert_conn made it
	 * into full feature phase..
	 */
	if (isert_conn->state == ISER_CONN_INIT) {
		mutex_unlock(&isert_conn->conn_mutex);
		return;
	}
	/* Initiate teardown while still holding conn_mutex. */
	isert_conn_terminate(isert_conn);
	mutex_unlock(&isert_conn->conn_mutex);

	wait_for_completion(&isert_conn->conn_wait_comp_err);

	/* Defer the blocking release to the unbound release workqueue. */
	INIT_WORK(&isert_conn->release_work, isert_release_work);
	queue_work(isert_release_wq, &isert_conn->release_work);
}
3333
3334static void isert_free_conn(struct iscsi_conn *conn)
3335{
3336 struct isert_conn *isert_conn = conn->context;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08003337
3338 isert_put_conn(isert_conn);
3339}
3340
/*
 * iSER transport operations table, registered with the iSCSI target core
 * via iscsit_register_transport() in isert_init().
 */
static struct iscsit_transport iser_target_transport = {
	.name = "IB/iSER",
	.transport_type = ISCSI_INFINIBAND,
	/* Per-command private area allocated alongside each iscsi_cmd. */
	.priv_size = sizeof(struct isert_cmd),
	.owner = THIS_MODULE,
	/* Network portal (listener) lifecycle. */
	.iscsit_setup_np = isert_setup_np,
	.iscsit_accept_np = isert_accept_np,
	.iscsit_free_np = isert_free_np,
	/* Connection shutdown and release. */
	.iscsit_wait_conn = isert_wait_conn,
	.iscsit_free_conn = isert_free_conn,
	/* Login phase RX/TX. */
	.iscsit_get_login_rx = isert_get_login_rx,
	.iscsit_put_login_tx = isert_put_login_tx,
	/* Full feature phase queues and data movement. */
	.iscsit_immediate_queue = isert_immediate_queue,
	.iscsit_response_queue = isert_response_queue,
	.iscsit_get_dataout = isert_get_dataout,
	.iscsit_queue_data_in = isert_put_datain,
	.iscsit_queue_status = isert_put_response,
	.iscsit_aborted_task = isert_aborted_task,
	.iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
};
3361
/*
 * isert_init - module init: allocate the RX, completion, and release
 * workqueues, then register the iSER transport with the iSCSI target
 * core.  Returns 0 on success or -ENOMEM, unwinding any workqueues
 * already allocated.
 */
static int __init isert_init(void)
{
	int ret;

	isert_rx_wq = alloc_workqueue("isert_rx_wq", 0, 0);
	if (!isert_rx_wq) {
		pr_err("Unable to allocate isert_rx_wq\n");
		return -ENOMEM;
	}

	isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
	if (!isert_comp_wq) {
		pr_err("Unable to allocate isert_comp_wq\n");
		ret = -ENOMEM;
		goto destroy_rx_wq;
	}

	/*
	 * Unbound queue for connection release: isert_release_work()
	 * blocks in wait_for_completion().
	 */
	isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
					   WQ_UNBOUND_MAX_ACTIVE);
	if (!isert_release_wq) {
		pr_err("Unable to allocate isert_release_wq\n");
		ret = -ENOMEM;
		goto destroy_comp_wq;
	}

	iscsit_register_transport(&iser_target_transport);
	pr_info("iSER_TARGET[0] - Loaded iser_target_transport\n");

	return 0;

destroy_comp_wq:
	destroy_workqueue(isert_comp_wq);
destroy_rx_wq:
	destroy_workqueue(isert_rx_wq);
	return ret;
}
3398
/*
 * isert_exit - module exit: drain outstanding work, destroy the module
 * workqueues, and unregister the transport from the iSCSI target core.
 */
static void __exit isert_exit(void)
{
	/*
	 * Flush the system workqueue (e.g. delayed login work scheduled
	 * by the RX path) before tearing down module state.
	 */
	flush_scheduled_work();
	destroy_workqueue(isert_release_wq);
	destroy_workqueue(isert_comp_wq);
	destroy_workqueue(isert_rx_wq);
	iscsit_unregister_transport(&iser_target_transport);
	pr_debug("iSER_TARGET[0] - Released iser_target_transport\n");
}
3408
3409MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
3410MODULE_VERSION("0.1");
3411MODULE_AUTHOR("nab@Linux-iSCSI.org");
3412MODULE_LICENSE("GPL");
3413
3414module_init(isert_init);
3415module_exit(isert_exit);