/*******************************************************************************
 * This file contains iSCSI extensions for RDMA (iSER) Verbs
 *
 * (c) Copyright 2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 ****************************************************************************/

#include <linux/string.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/llist.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>

#include "isert_proto.h"
#include "ib_isert.h"

#define	ISERT_MAX_CONN		8
#define ISER_MAX_RX_CQ_LEN	(ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
#define ISER_MAX_TX_CQ_LEN	(ISERT_QP_MAX_REQ_DTOS  * ISERT_MAX_CONN)

static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(device_list);
static struct workqueue_struct *isert_rx_wq;
static struct workqueue_struct *isert_comp_wq;

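/*
 * RDMA memory registration is pluggable per device: HCAs with memory
 * management extensions take the fast registration (fastreg) path,
 * everything else falls back to plain ib_dma_map_sg().  The handler
 * pairs declared below are wired up in isert_create_device_ib_res().
 */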
static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr);
static void
isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr);

static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
	struct isert_conn *isert_conn = (struct isert_conn *)context;

	pr_err("isert_qp_event_callback event: %d\n", e->event);
	switch (e->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST);
		break;
	case IB_EVENT_QP_LAST_WQE_REACHED:
		pr_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED:\n");
		break;
	default:
		break;
	}
}

static int
isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
{
	int ret;

	ret = ib_query_device(ib_dev, devattr);
	if (ret) {
		pr_err("ib_query_device() failed: %d\n", ret);
		return ret;
	}
	pr_debug("devattr->max_sge: %d\n", devattr->max_sge);
	pr_debug("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);

	return 0;
}

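/*
 * Spread QPs across the device's CQs: under device_list_mutex, pick the
 * completion vector currently serving the fewest active QPs, so TX/RX
 * completion load stays roughly balanced across vectors.
 */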
static int
isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
{
	struct isert_device *device = isert_conn->conn_device;
	struct ib_qp_init_attr attr;
	int ret, index, min_index = 0;

	mutex_lock(&device_list_mutex);
	for (index = 0; index < device->cqs_used; index++)
		if (device->cq_active_qps[index] <
		    device->cq_active_qps[min_index])
			min_index = index;
	device->cq_active_qps[min_index]++;
	pr_debug("isert_conn_setup_qp: Using min_index: %d\n", min_index);
	mutex_unlock(&device_list_mutex);

	memset(&attr, 0, sizeof(struct ib_qp_init_attr));
	attr.event_handler = isert_qp_event_callback;
	attr.qp_context = isert_conn;
	attr.send_cq = device->dev_tx_cq[min_index];
	attr.recv_cq = device->dev_rx_cq[min_index];
	attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
	attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
	/*
	 * FIXME: Use devattr.max_sge - 2 for max_send_sge as
	 * work-around for RDMA_READ..
	 */
	attr.cap.max_send_sge = device->dev_attr.max_sge - 2;
	isert_conn->max_sge = attr.cap.max_send_sge;

	attr.cap.max_recv_sge = 1;
	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	attr.qp_type = IB_QPT_RC;

	pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
		 cma_id->device);
	pr_debug("isert_conn_setup_qp conn_pd->device: %p\n",
		 isert_conn->conn_pd->device);

	ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
	if (ret) {
		pr_err("rdma_create_qp failed: %d\n", ret);
		return ret;
	}
	isert_conn->conn_qp = cma_id->qp;
	pr_debug("rdma_create_qp() returned success\n");

	return 0;
}

static void
isert_cq_event_callback(struct ib_event *e, void *context)
{
	pr_debug("isert_cq_event_callback event: %d\n", e->event);
}

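/*
 * The RX ring is a single array of ISERT_QP_MAX_RECV_DTOS descriptors,
 * each DMA-mapped once up front and described by a one-entry SGE, so
 * reposting receives never touches the DMA API in the fast path.
 */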
static int
isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iser_rx_desc *rx_desc;
	struct ib_sge *rx_sg;
	u64 dma_addr;
	int i, j;

	isert_conn->conn_rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
				sizeof(struct iser_rx_desc), GFP_KERNEL);
	if (!isert_conn->conn_rx_descs)
		goto fail;

	rx_desc = isert_conn->conn_rx_descs;

	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
					ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ib_dev, dma_addr))
			goto dma_map_fail;

		rx_desc->dma_addr = dma_addr;

		rx_sg = &rx_desc->rx_sg;
		rx_sg->addr = rx_desc->dma_addr;
		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
		rx_sg->lkey = isert_conn->conn_mr->lkey;
	}

	isert_conn->conn_rx_desc_head = 0;
	return 0;

dma_map_fail:
	rx_desc = isert_conn->conn_rx_descs;
	for (j = 0; j < i; j++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}
	kfree(isert_conn->conn_rx_descs);
	isert_conn->conn_rx_descs = NULL;
fail:
	return -ENOMEM;
}

static void
isert_free_rx_descriptors(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iser_rx_desc *rx_desc;
	int i;

	if (!isert_conn->conn_rx_descs)
		return;

	rx_desc = isert_conn->conn_rx_descs;
	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}

	kfree(isert_conn->conn_rx_descs);
	isert_conn->conn_rx_descs = NULL;
}

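/*
 * One RX/TX CQ pair is created per completion vector in use.  The CQ
 * callbacks themselves only queue work; the actual completion
 * processing happens in isert_cq_rx_work()/isert_cq_tx_work() off the
 * workqueues declared above.
 */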
static void isert_cq_tx_work(struct work_struct *);
static void isert_cq_tx_callback(struct ib_cq *, void *);
static void isert_cq_rx_work(struct work_struct *);
static void isert_cq_rx_callback(struct ib_cq *, void *);

static int
isert_create_device_ib_res(struct isert_device *device)
{
	struct ib_device *ib_dev = device->ib_device;
	struct isert_cq_desc *cq_desc;
	struct ib_device_attr *dev_attr;
	int ret = 0, i, j;

	dev_attr = &device->dev_attr;
	ret = isert_query_device(ib_dev, dev_attr);
	if (ret)
		return ret;

	/* assign function handlers */
	if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
		device->use_fastreg = 1;
		device->reg_rdma_mem = isert_reg_rdma;
		device->unreg_rdma_mem = isert_unreg_rdma;
	} else {
		device->use_fastreg = 0;
		device->reg_rdma_mem = isert_map_rdma;
		device->unreg_rdma_mem = isert_unmap_cmd;
	}

	device->cqs_used = min_t(int, num_online_cpus(),
				 device->ib_device->num_comp_vectors);
	device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
	pr_debug("Using %d CQs, device %s supports %d vectors, "
		 "fast registration: %d\n",
		 device->cqs_used, device->ib_device->name,
		 device->ib_device->num_comp_vectors, device->use_fastreg);
	device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
				  device->cqs_used, GFP_KERNEL);
	if (!device->cq_desc) {
		pr_err("Unable to allocate device->cq_desc\n");
		return -ENOMEM;
	}
	cq_desc = device->cq_desc;

	for (i = 0; i < device->cqs_used; i++) {
		cq_desc[i].device = device;
		cq_desc[i].cq_index = i;

		INIT_WORK(&cq_desc[i].cq_rx_work, isert_cq_rx_work);
		device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
						isert_cq_rx_callback,
						isert_cq_event_callback,
						(void *)&cq_desc[i],
						ISER_MAX_RX_CQ_LEN, i);
		if (IS_ERR(device->dev_rx_cq[i])) {
			ret = PTR_ERR(device->dev_rx_cq[i]);
			device->dev_rx_cq[i] = NULL;
			goto out_cq;
		}

		INIT_WORK(&cq_desc[i].cq_tx_work, isert_cq_tx_work);
		device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
						isert_cq_tx_callback,
						isert_cq_event_callback,
						(void *)&cq_desc[i],
						ISER_MAX_TX_CQ_LEN, i);
		if (IS_ERR(device->dev_tx_cq[i])) {
			ret = PTR_ERR(device->dev_tx_cq[i]);
			device->dev_tx_cq[i] = NULL;
			goto out_cq;
		}

		ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
		if (ret)
			goto out_cq;

		ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
		if (ret)
			goto out_cq;
	}

	return 0;

out_cq:
	for (j = 0; j <= i; j++) {
		cq_desc = &device->cq_desc[j];

		if (device->dev_rx_cq[j]) {
			cancel_work_sync(&cq_desc->cq_rx_work);
			ib_destroy_cq(device->dev_rx_cq[j]);
		}
		if (device->dev_tx_cq[j]) {
			cancel_work_sync(&cq_desc->cq_tx_work);
			ib_destroy_cq(device->dev_tx_cq[j]);
		}
	}
	kfree(device->cq_desc);

	return ret;
}

static void
isert_free_device_ib_res(struct isert_device *device)
{
	struct isert_cq_desc *cq_desc;
	int i;

	for (i = 0; i < device->cqs_used; i++) {
		cq_desc = &device->cq_desc[i];

		cancel_work_sync(&cq_desc->cq_rx_work);
		cancel_work_sync(&cq_desc->cq_tx_work);
		ib_destroy_cq(device->dev_rx_cq[i]);
		ib_destroy_cq(device->dev_tx_cq[i]);
		device->dev_rx_cq[i] = NULL;
		device->dev_tx_cq[i] = NULL;
	}

	kfree(device->cq_desc);
}

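/*
 * isert_device instances are cached on device_list and shared by all
 * connections arriving on the same HCA (matched by node_guid); each
 * connection takes a reference in isert_device_find_by_ib_dev() and
 * drops it via isert_device_try_release().
 */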
static void
isert_device_try_release(struct isert_device *device)
{
	mutex_lock(&device_list_mutex);
	device->refcount--;
	if (!device->refcount) {
		isert_free_device_ib_res(device);
		list_del(&device->dev_node);
		kfree(device);
	}
	mutex_unlock(&device_list_mutex);
}

static struct isert_device *
isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
{
	struct isert_device *device;
	int ret;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(device, &device_list, dev_node) {
		if (device->ib_device->node_guid == cma_id->device->node_guid) {
			device->refcount++;
			mutex_unlock(&device_list_mutex);
			return device;
		}
	}

	device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
	if (!device) {
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&device->dev_node);

	device->ib_device = cma_id->device;
	ret = isert_create_device_ib_res(device);
	if (ret) {
		kfree(device);
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(ret);
	}

	device->refcount++;
	list_add_tail(&device->dev_node, &device_list);
	mutex_unlock(&device_list_mutex);

	return device;
}

static void
isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
{
	struct fast_reg_descriptor *fr_desc, *tmp;
	int i = 0;

	if (list_empty(&isert_conn->conn_fr_pool))
		return;

	pr_debug("Freeing conn %p fastreg pool\n", isert_conn);

	list_for_each_entry_safe(fr_desc, tmp,
				 &isert_conn->conn_fr_pool, list) {
		list_del(&fr_desc->list);
		ib_free_fast_reg_page_list(fr_desc->data_frpl);
		ib_dereg_mr(fr_desc->data_mr);
		kfree(fr_desc);
		++i;
	}

	if (i < isert_conn->conn_fr_pool_size)
		pr_warn("Pool still has %d regions registered\n",
			isert_conn->conn_fr_pool_size - i);
}

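/*
 * A fast_reg_descriptor bundles an ib_fast_reg_page_list with a
 * fast-reg MR, both sized for ISCSI_ISER_SG_TABLESIZE pages; 'valid'
 * is set once the descriptor is ready for use.
 */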
static int
isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
		     struct fast_reg_descriptor *fr_desc)
{
	fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
							 ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(fr_desc->data_frpl)) {
		pr_err("Failed to allocate data frpl err=%ld\n",
		       PTR_ERR(fr_desc->data_frpl));
		return PTR_ERR(fr_desc->data_frpl);
	}

	fr_desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(fr_desc->data_mr)) {
		pr_err("Failed to allocate data frmr err=%ld\n",
		       PTR_ERR(fr_desc->data_mr));
		ib_free_fast_reg_page_list(fr_desc->data_frpl);
		return PTR_ERR(fr_desc->data_mr);
	}
	pr_debug("Create fr_desc %p page_list %p\n",
		 fr_desc, fr_desc->data_frpl->page_list);

	fr_desc->valid = true;

	return 0;
}

static int
isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
{
	struct fast_reg_descriptor *fr_desc;
	struct isert_device *device = isert_conn->conn_device;
	int i, ret;

	INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
	isert_conn->conn_fr_pool_size = 0;
	for (i = 0; i < ISCSI_DEF_XMIT_CMDS_MAX; i++) {
		fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
		if (!fr_desc) {
			pr_err("Failed to allocate fast_reg descriptor\n");
			ret = -ENOMEM;
			goto err;
		}

		ret = isert_create_fr_desc(device->ib_device,
					   isert_conn->conn_pd, fr_desc);
		if (ret) {
			pr_err("Failed to create fastreg descriptor err=%d\n",
			       ret);
			kfree(fr_desc);
			goto err;
		}

		list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
		isert_conn->conn_fr_pool_size++;
	}

	pr_debug("Creating conn %p fastreg pool size=%d\n",
		 isert_conn, isert_conn->conn_fr_pool_size);

	return 0;

err:
	isert_conn_free_fastreg_pool(isert_conn);
	return ret;
}

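/*
 * Connect-request path: allocate the connection, DMA-map the login
 * request/response buffers, find (or create) the per-HCA device, set up
 * PD + DMA MR, optionally build the fastreg pool, and finally create
 * the QP.  The error labels below unwind in reverse order.
 */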
static int
isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	struct iscsi_np *np = cma_id->context;
	struct isert_np *isert_np = np->np_context;
	struct isert_conn *isert_conn;
	struct isert_device *device;
	struct ib_device *ib_dev = cma_id->device;
	int ret = 0;

	pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
		 cma_id, cma_id->context);

	isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
	if (!isert_conn) {
		pr_err("Unable to allocate isert_conn\n");
		return -ENOMEM;
	}
	isert_conn->state = ISER_CONN_INIT;
	INIT_LIST_HEAD(&isert_conn->conn_accept_node);
	init_completion(&isert_conn->conn_login_comp);
	init_waitqueue_head(&isert_conn->conn_wait);
	init_waitqueue_head(&isert_conn->conn_wait_comp_err);
	kref_init(&isert_conn->conn_kref);
	kref_get(&isert_conn->conn_kref);
	mutex_init(&isert_conn->conn_mutex);
	mutex_init(&isert_conn->conn_comp_mutex);
	spin_lock_init(&isert_conn->conn_lock);

	cma_id->context = isert_conn;
	isert_conn->conn_cm_id = cma_id;
	isert_conn->responder_resources = event->param.conn.responder_resources;
	isert_conn->initiator_depth = event->param.conn.initiator_depth;
	pr_debug("Using responder_resources: %u initiator_depth: %u\n",
		 isert_conn->responder_resources, isert_conn->initiator_depth);

	isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
					ISER_RX_LOGIN_SIZE, GFP_KERNEL);
	if (!isert_conn->login_buf) {
		pr_err("Unable to allocate isert_conn->login_buf\n");
		ret = -ENOMEM;
		goto out;
	}

	isert_conn->login_req_buf = isert_conn->login_buf;
	isert_conn->login_rsp_buf = isert_conn->login_buf +
				    ISCSI_DEF_MAX_RECV_SEG_LEN;
	pr_debug("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
		 isert_conn->login_buf, isert_conn->login_req_buf,
		 isert_conn->login_rsp_buf);

	isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
				(void *)isert_conn->login_req_buf,
				ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);

	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
	if (ret) {
		pr_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
		       ret);
		isert_conn->login_req_dma = 0;
		goto out_login_buf;
	}

	isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
				(void *)isert_conn->login_rsp_buf,
				ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);

	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
	if (ret) {
		pr_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
		       ret);
		isert_conn->login_rsp_dma = 0;
		goto out_req_dma_map;
	}

	device = isert_device_find_by_ib_dev(cma_id);
	if (IS_ERR(device)) {
		ret = PTR_ERR(device);
		goto out_rsp_dma_map;
	}

	isert_conn->conn_device = device;
	isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device);
	if (IS_ERR(isert_conn->conn_pd)) {
		ret = PTR_ERR(isert_conn->conn_pd);
		pr_err("ib_alloc_pd failed for conn %p: ret=%d\n",
		       isert_conn, ret);
		goto out_pd;
	}

	isert_conn->conn_mr = ib_get_dma_mr(isert_conn->conn_pd,
					    IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(isert_conn->conn_mr)) {
		ret = PTR_ERR(isert_conn->conn_mr);
		pr_err("ib_get_dma_mr failed for conn %p: ret=%d\n",
		       isert_conn, ret);
		goto out_mr;
	}

	if (device->use_fastreg) {
		ret = isert_conn_create_fastreg_pool(isert_conn);
		if (ret) {
			pr_err("Conn: %p failed to create fastreg pool\n",
			       isert_conn);
			goto out_fastreg;
		}
	}

	ret = isert_conn_setup_qp(isert_conn, cma_id);
	if (ret)
		goto out_conn_dev;

	mutex_lock(&isert_np->np_accept_mutex);
	list_add_tail(&isert_np->np_accept_list, &isert_conn->conn_accept_node);
	mutex_unlock(&isert_np->np_accept_mutex);

	pr_debug("isert_connect_request() waking up np_accept_wq: %p\n", np);
	wake_up(&isert_np->np_accept_wq);
	return 0;

out_conn_dev:
	if (device->use_fastreg)
		isert_conn_free_fastreg_pool(isert_conn);
out_fastreg:
	ib_dereg_mr(isert_conn->conn_mr);
out_mr:
	ib_dealloc_pd(isert_conn->conn_pd);
out_pd:
	isert_device_try_release(device);
out_rsp_dma_map:
	ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
			    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
out_req_dma_map:
	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
			    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
out_login_buf:
	kfree(isert_conn->login_buf);
out:
	kfree(isert_conn);
	return ret;
}

static void
isert_connect_release(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct isert_device *device = isert_conn->conn_device;
	int cq_index;

	pr_debug("Entering isert_connect_release()\n");

	if (device && device->use_fastreg)
		isert_conn_free_fastreg_pool(isert_conn);

	if (isert_conn->conn_qp) {
		cq_index = ((struct isert_cq_desc *)
			isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
		pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
		isert_conn->conn_device->cq_active_qps[cq_index]--;

		rdma_destroy_qp(isert_conn->conn_cm_id);
	}

	isert_free_rx_descriptors(isert_conn);
	rdma_destroy_id(isert_conn->conn_cm_id);

	ib_dereg_mr(isert_conn->conn_mr);
	ib_dealloc_pd(isert_conn->conn_pd);

	if (isert_conn->login_buf) {
		ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
				    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
		ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
				    ISCSI_DEF_MAX_RECV_SEG_LEN,
				    DMA_FROM_DEVICE);
		kfree(isert_conn->login_buf);
	}
	kfree(isert_conn);

	if (device)
		isert_device_try_release(device);

	pr_debug("Leaving isert_connect_release\n");
}

static void
isert_connected_handler(struct rdma_cm_id *cma_id)
{
	return;
}

static void
isert_release_conn_kref(struct kref *kref)
{
	struct isert_conn *isert_conn = container_of(kref,
				struct isert_conn, conn_kref);

	pr_debug("Calling isert_connect_release for final kref %s/%d\n",
		 current->comm, current->pid);

	isert_connect_release(isert_conn);
}

static void
isert_put_conn(struct isert_conn *isert_conn)
{
	kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
}

static void
isert_disconnect_work(struct work_struct *work)
{
	struct isert_conn *isert_conn = container_of(work,
				struct isert_conn, conn_logout_work);

	pr_debug("isert_disconnect_work()\n");
	mutex_lock(&isert_conn->conn_mutex);
	isert_conn->state = ISER_CONN_DOWN;

	if (isert_conn->post_recv_buf_count == 0 &&
	    atomic_read(&isert_conn->post_send_buf_count) == 0) {
		pr_debug("Calling wake_up(&isert_conn->conn_wait);\n");
		mutex_unlock(&isert_conn->conn_mutex);
		goto wake_up;
	}
	if (!isert_conn->conn_cm_id) {
		mutex_unlock(&isert_conn->conn_mutex);
		isert_put_conn(isert_conn);
		return;
	}
	if (!isert_conn->logout_posted) {
		pr_debug("Calling rdma_disconnect for !logout_posted from"
			 " isert_disconnect_work\n");
		rdma_disconnect(isert_conn->conn_cm_id);
		mutex_unlock(&isert_conn->conn_mutex);
		iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
		goto wake_up;
	}
	mutex_unlock(&isert_conn->conn_mutex);

wake_up:
	wake_up(&isert_conn->conn_wait);
	isert_put_conn(isert_conn);
}

static void
isert_disconnected_handler(struct rdma_cm_id *cma_id)
{
	struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context;

	INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
	schedule_work(&isert_conn->conn_logout_work);
}

static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	int ret = 0;

	pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
		 event->event, event->status, cma_id->context, cma_id);

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		pr_debug("RDMA_CM_EVENT_CONNECT_REQUEST\n");
		ret = isert_connect_request(cma_id, event);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		pr_debug("RDMA_CM_EVENT_ESTABLISHED\n");
		isert_connected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
		pr_debug("RDMA_CM_EVENT_DISCONNECTED\n");
		isert_disconnected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
	case RDMA_CM_EVENT_ADDR_CHANGE:
		break;
	case RDMA_CM_EVENT_CONNECT_ERROR:
	default:
		pr_err("Unknown RDMA CMA event: %d\n", event->event);
		break;
	}

	if (ret != 0) {
		pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
		       event->event, ret);
		dump_stack();
	}

	return ret;
}

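/*
 * Note that the RX ring head below wraps with a bitwise AND, which
 * assumes ISERT_QP_MAX_RECV_DTOS is a power of two.
 */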
static int
isert_post_recv(struct isert_conn *isert_conn, u32 count)
{
	struct ib_recv_wr *rx_wr, *rx_wr_failed;
	int i, ret;
	unsigned int rx_head = isert_conn->conn_rx_desc_head;
	struct iser_rx_desc *rx_desc;

	for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) {
		rx_desc = &isert_conn->conn_rx_descs[rx_head];
		rx_wr->wr_id = (unsigned long)rx_desc;
		rx_wr->sg_list = &rx_desc->rx_sg;
		rx_wr->num_sge = 1;
		rx_wr->next = rx_wr + 1;
		rx_head = (rx_head + 1) & (ISERT_QP_MAX_RECV_DTOS - 1);
	}

	rx_wr--;
	rx_wr->next = NULL; /* mark end of work requests list */

	isert_conn->post_recv_buf_count += count;
	ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr,
			   &rx_wr_failed);
	if (ret) {
		pr_err("ib_post_recv() failed with ret: %d\n", ret);
		isert_conn->post_recv_buf_count -= count;
	} else {
		pr_debug("isert_post_recv(): Posted %d RX buffers\n", count);
		isert_conn->conn_rx_desc_head = rx_head;
	}
	return ret;
}

static int
isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct ib_send_wr send_wr, *send_wr_failed;
	int ret;

	ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
				      ISER_HEADERS_LEN, DMA_TO_DEVICE);

	send_wr.next = NULL;
	send_wr.wr_id = (unsigned long)tx_desc;
	send_wr.sg_list = tx_desc->tx_sg;
	send_wr.num_sge = tx_desc->num_sge;
	send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	atomic_inc(&isert_conn->post_send_buf_count);

	ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed);
	if (ret) {
		pr_err("ib_post_send() failed, ret: %d\n", ret);
		atomic_dec(&isert_conn->post_send_buf_count);
	}

	return ret;
}

static void
isert_create_send_desc(struct isert_conn *isert_conn,
		       struct isert_cmd *isert_cmd,
		       struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;

	ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
				   ISER_HEADERS_LEN, DMA_TO_DEVICE);

	memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
	tx_desc->iser_header.flags = ISER_VER;

	tx_desc->num_sge = 1;
	tx_desc->isert_cmd = isert_cmd;

	if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) {
		tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
		pr_debug("tx_desc %p lkey mismatch, fixing\n", tx_desc);
	}
}

static int
isert_init_tx_hdrs(struct isert_conn *isert_conn,
		   struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	u64 dma_addr;

	dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
				     ISER_HEADERS_LEN, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ib_dev, dma_addr)) {
		pr_err("ib_dma_mapping_error() failed\n");
		return -ENOMEM;
	}

	tx_desc->dma_addr = dma_addr;
	tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
	tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
	tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;

	pr_debug("isert_init_tx_hdrs: Setup tx_sg[0].addr: 0x%llx length: %u"
		 " lkey: 0x%08x\n", tx_desc->tx_sg[0].addr,
		 tx_desc->tx_sg[0].length, tx_desc->tx_sg[0].lkey);

	return 0;
}

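/*
 * TX completion coalescing: when 'coalesce' is set, only every
 * ISERT_COMP_BATCH_COUNT-th send is posted IB_SEND_SIGNALED; the
 * unsignaled descriptors are parked on conn_comp_llist and reaped as a
 * batch when the next signaled completion fires.
 */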
static void
isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		   struct ib_send_wr *send_wr, bool coalesce)
{
	struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;

	isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
	send_wr->opcode = IB_WR_SEND;
	send_wr->sg_list = &tx_desc->tx_sg[0];
	send_wr->num_sge = isert_cmd->tx_desc.num_sge;
	/*
	 * Coalesce send completion interrupts by only setting IB_SEND_SIGNALED
	 * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls.
	 */
	mutex_lock(&isert_conn->conn_comp_mutex);
	if (coalesce &&
	    ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) {
		llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist);
		mutex_unlock(&isert_conn->conn_comp_mutex);
		return;
	}
	isert_conn->conn_comp_batch = 0;
	tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist);
	mutex_unlock(&isert_conn->conn_comp_mutex);

	send_wr->send_flags = IB_SEND_SIGNALED;
}

static int
isert_rdma_post_recvl(struct isert_conn *isert_conn)
{
	struct ib_recv_wr rx_wr, *rx_wr_fail;
	struct ib_sge sge;
	int ret;

	memset(&sge, 0, sizeof(struct ib_sge));
	sge.addr = isert_conn->login_req_dma;
	sge.length = ISER_RX_LOGIN_SIZE;
	sge.lkey = isert_conn->conn_mr->lkey;

	pr_debug("Setup sge: addr: %llx length: %d lkey: 0x%08x\n",
		 sge.addr, sge.length, sge.lkey);

	memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
	rx_wr.wr_id = (unsigned long)isert_conn->login_req_buf;
	rx_wr.sg_list = &sge;
	rx_wr.num_sge = 1;

	isert_conn->post_recv_buf_count++;
	ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail);
	if (ret) {
		pr_err("ib_post_recv() failed: %d\n", ret);
		isert_conn->post_recv_buf_count--;
	}

	pr_debug("ib_post_recv(): returned success\n");
	return ret;
}

static int
isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
		   u32 length)
{
	struct isert_conn *isert_conn = conn->context;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iser_tx_desc *tx_desc = &isert_conn->conn_login_tx_desc;
	int ret;

	isert_create_send_desc(isert_conn, NULL, tx_desc);

	memcpy(&tx_desc->iscsi_header, &login->rsp[0],
	       sizeof(struct iscsi_hdr));

	isert_init_tx_hdrs(isert_conn, tx_desc);

	if (length > 0) {
		struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];

		ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
					   length, DMA_TO_DEVICE);

		memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);

		ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
					      length, DMA_TO_DEVICE);

		tx_dsg->addr = isert_conn->login_rsp_dma;
		tx_dsg->length = length;
		tx_dsg->lkey = isert_conn->conn_mr->lkey;
		tx_desc->num_sge = 2;
	}
	if (!login->login_failed) {
		if (login->login_complete) {
			ret = isert_alloc_rx_descriptors(isert_conn);
			if (ret)
				return ret;

			ret = isert_post_recv(isert_conn, ISERT_MIN_POSTED_RX);
			if (ret)
				return ret;

			isert_conn->state = ISER_CONN_UP;
			goto post_send;
		}

		ret = isert_rdma_post_recvl(isert_conn);
		if (ret)
			return ret;
	}
post_send:
	ret = isert_post_send(isert_conn, tx_desc);
	if (ret)
		return ret;

	return 0;
}

static void
isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
		   struct isert_conn *isert_conn)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_login *login = conn->conn_login;
	int size;

	if (!login) {
		pr_err("conn->conn_login is NULL\n");
		dump_stack();
		return;
	}

	if (login->first_request) {
		struct iscsi_login_req *login_req =
			(struct iscsi_login_req *)&rx_desc->iscsi_header;
		/*
		 * Setup the initial iscsi_login values from the leading
		 * login request PDU.
		 */
		login->leading_connection = (!login_req->tsih) ? 1 : 0;
		login->current_stage =
			(login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
			 >> 2;
		login->version_min = login_req->min_version;
		login->version_max = login_req->max_version;
		memcpy(login->isid, login_req->isid, 6);
		login->cmd_sn = be32_to_cpu(login_req->cmdsn);
		login->init_task_tag = login_req->itt;
		login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
		login->cid = be16_to_cpu(login_req->cid);
		login->tsih = be16_to_cpu(login_req->tsih);
	}

	memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);

	size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
	pr_debug("Using login payload size: %d, rx_buflen: %d MAX_KEY_VALUE_PAIRS: %d\n",
		 size, rx_buflen, MAX_KEY_VALUE_PAIRS);
	memcpy(login->req_buf, &rx_desc->data[0], size);

	if (login->first_request) {
		complete(&isert_conn->conn_login_comp);
		return;
	}
	schedule_delayed_work(&conn->login_work, 0);
}

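/*
 * isert_cmd lives in the private area of the iscsi_cmd returned by
 * iscsit_allocate_cmd(), reached via iscsit_priv_cmd(), so a single
 * allocation backs both descriptors.
 */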
static struct iscsi_cmd
*isert_allocate_cmd(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct isert_cmd *isert_cmd;
	struct iscsi_cmd *cmd;

	cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
	if (!cmd) {
		pr_err("Unable to allocate iscsi_cmd + isert_cmd\n");
		return NULL;
	}
	isert_cmd = iscsit_priv_cmd(cmd);
	isert_cmd->conn = isert_conn;
	isert_cmd->iscsi_cmd = cmd;

	return cmd;
}

static int
isert_handle_scsi_cmd(struct isert_conn *isert_conn,
		      struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd,
		      struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
	struct scatterlist *sg;
	int imm_data, imm_data_len, unsol_data, sg_nents, rc;
	bool dump_payload = false;

	rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
	if (rc < 0)
		return rc;

	imm_data = cmd->immediate_data;
	imm_data_len = cmd->first_burst_len;
	unsol_data = cmd->unsolicited_data;

	rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
	if (rc < 0) {
		return 0;
	} else if (rc > 0) {
		dump_payload = true;
		goto sequence_cmd;
	}

	if (!imm_data)
		return 0;

	sg = &cmd->se_cmd.t_data_sg[0];
	sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));

	pr_debug("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
		 sg, sg_nents, &rx_desc->data[0], imm_data_len);

	sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len);

	cmd->write_data_done += imm_data_len;

	if (cmd->write_data_done == cmd->se_cmd.data_length) {
		spin_lock_bh(&cmd->istate_lock);
		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
		spin_unlock_bh(&cmd->istate_lock);
	}

sequence_cmd:
	rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);

	if (!rc && dump_payload == false && unsol_data)
		iscsit_set_unsoliticed_dataout(cmd);

	return 0;
}

static int
isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
			   struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct scatterlist *sg_start;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_cmd *cmd = NULL;
	struct iscsi_data *hdr = (struct iscsi_data *)buf;
	u32 unsol_data_len = ntoh24(hdr->dlength);
	int rc, sg_nents, sg_off, page_off;

	rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
	if (rc < 0)
		return rc;
	else if (!cmd)
		return 0;
	/*
	 * FIXME: Unexpected unsolicited_data out
	 */
	if (!cmd->unsolicited_data) {
		pr_err("Received unexpected solicited data payload\n");
		dump_stack();
		return -1;
	}

	pr_debug("Unsolicited DataOut unsol_data_len: %u, write_data_done: %u, data_length: %u\n",
		 unsol_data_len, cmd->write_data_done, cmd->se_cmd.data_length);

	sg_off = cmd->write_data_done / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
	page_off = cmd->write_data_done % PAGE_SIZE;
	/*
	 * FIXME: Non page-aligned unsolicited_data out
	 */
	if (page_off) {
		pr_err("Received unexpected non-page aligned data payload\n");
		dump_stack();
		return -1;
	}
	pr_debug("Copying DataOut: sg_start: %p, sg_off: %u sg_nents: %u from %p %u\n",
		 sg_start, sg_off, sg_nents, &rx_desc->data[0], unsol_data_len);

	sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
			    unsol_data_len);

	rc = iscsit_check_dataout_payload(cmd, hdr, false);
	if (rc < 0)
		return rc;

	return 0;
}

static int
isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		     struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
		     unsigned char *buf)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
	int rc;

	rc = iscsit_setup_nop_out(conn, cmd, hdr);
	if (rc < 0)
		return rc;
	/*
	 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
	 */

	return iscsit_process_nop_out(conn, cmd, hdr);
}

static int
isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		      struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
		      struct iscsi_text *hdr)
{
	struct iscsi_conn *conn = isert_conn->conn;
	u32 payload_length = ntoh24(hdr->dlength);
	int rc;
	unsigned char *text_in;

	rc = iscsit_setup_text_cmd(conn, cmd, hdr);
	if (rc < 0)
		return rc;

	text_in = kzalloc(payload_length, GFP_KERNEL);
	if (!text_in) {
		pr_err("Unable to allocate text_in of payload_length: %u\n",
		       payload_length);
		return -ENOMEM;
	}
	cmd->text_in_ptr = text_in;

	memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);

	return iscsit_process_text_cmd(conn, cmd, hdr);
}

static int
isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
		uint32_t read_stag, uint64_t read_va,
		uint32_t write_stag, uint64_t write_va)
{
	struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_session *sess = conn->sess;
	struct iscsi_cmd *cmd;
	struct isert_cmd *isert_cmd;
	int ret = -EINVAL;
	u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);

	if (sess->sess_ops->SessionType &&
	    (opcode != ISCSI_OP_TEXT && opcode != ISCSI_OP_LOGOUT)) {
		pr_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
		       " ignoring\n", opcode);
		return 0;
	}

	switch (opcode) {
	case ISCSI_OP_SCSI_CMD:
		cmd = isert_allocate_cmd(conn);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		isert_cmd->read_stag = read_stag;
		isert_cmd->read_va = read_va;
		isert_cmd->write_stag = write_stag;
		isert_cmd->write_va = write_va;

		ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
					    rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_NOOP_OUT:
		cmd = isert_allocate_cmd(conn);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
					   rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_DATA_OUT:
		ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
						 (unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		cmd = isert_allocate_cmd(conn);
		if (!cmd)
			break;

		ret = iscsit_handle_task_mgt_cmd(conn, cmd,
						 (unsigned char *)hdr);
		break;
	case ISCSI_OP_LOGOUT:
		cmd = isert_allocate_cmd(conn);
		if (!cmd)
			break;

		ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
		if (ret > 0)
			wait_for_completion_timeout(&conn->conn_logout_comp,
						    SECONDS_FOR_LOGOUT_COMP *
						    HZ);
		break;
	case ISCSI_OP_TEXT:
		cmd = isert_allocate_cmd(conn);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
					    rx_desc, (struct iscsi_text *)hdr);
		break;
	default:
		pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
		dump_stack();
		break;
	}

	return ret;
}

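/*
 * In iSER, the initiator may advertise remote buffer keys in either
 * direction: ISER_RSV carries the read stag/va the target uses for
 * RDMA_READ of data-out payloads, ISER_WSV the write stag/va it uses
 * for RDMA_WRITE of data-in payloads.
 */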
static void
isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
{
	struct iser_hdr *iser_hdr = &rx_desc->iser_header;
	uint64_t read_va = 0, write_va = 0;
	uint32_t read_stag = 0, write_stag = 0;
	int rc;

	switch (iser_hdr->flags & 0xF0) {
	case ISCSI_CTRL:
		if (iser_hdr->flags & ISER_RSV) {
			read_stag = be32_to_cpu(iser_hdr->read_stag);
			read_va = be64_to_cpu(iser_hdr->read_va);
			pr_debug("ISER_RSV: read_stag: 0x%08x read_va: 0x%16llx\n",
				 read_stag, (unsigned long long)read_va);
		}
		if (iser_hdr->flags & ISER_WSV) {
			write_stag = be32_to_cpu(iser_hdr->write_stag);
			write_va = be64_to_cpu(iser_hdr->write_va);
			pr_debug("ISER_WSV: write_stag: 0x%08x write_va: 0x%16llx\n",
				 write_stag, (unsigned long long)write_va);
		}

		pr_debug("ISER ISCSI_CTRL PDU\n");
		break;
	case ISER_HELLO:
		pr_err("iSER Hello message\n");
		break;
	default:
		pr_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
		break;
	}

	rc = isert_rx_opcode(isert_conn, rx_desc,
			     read_stag, read_va, write_stag, write_va);
}

static void
isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
		    unsigned long xfer_len)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iscsi_hdr *hdr;
	u64 rx_dma;
	int rx_buflen, outstanding;

	if ((char *)desc == isert_conn->login_req_buf) {
		rx_dma = isert_conn->login_req_dma;
		rx_buflen = ISER_RX_LOGIN_SIZE;
		pr_debug("ISER login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			 rx_dma, rx_buflen);
	} else {
		rx_dma = desc->dma_addr;
		rx_buflen = ISER_RX_PAYLOAD_SIZE;
		pr_debug("ISER req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			 rx_dma, rx_buflen);
	}

	ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);

	hdr = &desc->iscsi_header;
	pr_debug("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
		 hdr->opcode, hdr->itt, hdr->flags,
		 (int)(xfer_len - ISER_HEADERS_LEN));

	if ((char *)desc == isert_conn->login_req_buf)
		isert_rx_login_req(desc, xfer_len - ISER_HEADERS_LEN,
				   isert_conn);
	else
		isert_rx_do_work(desc, isert_conn);

	ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
				      DMA_FROM_DEVICE);

	isert_conn->post_recv_buf_count--;
	pr_debug("iSERT: Decremented post_recv_buf_count: %d\n",
		 isert_conn->post_recv_buf_count);

	if ((char *)desc == isert_conn->login_req_buf)
		return;

	outstanding = isert_conn->post_recv_buf_count;
	if (outstanding + ISERT_MIN_POSTED_RX <= ISERT_QP_MAX_RECV_DTOS) {
		int err, count = min(ISERT_QP_MAX_RECV_DTOS - outstanding,
				     ISERT_MIN_POSTED_RX);
		err = isert_post_recv(isert_conn, count);
		if (err) {
			pr_err("isert_post_recv() count: %d failed, %d\n",
			       count, err);
		}
	}
}

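/*
 * The two teardown paths below mirror the registration strategies:
 * isert_unmap_cmd() undoes plain DMA mappings, while isert_unreg_rdma()
 * additionally returns the fast registration descriptor to the
 * connection's pool.
 */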
static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;

	pr_debug("isert_unmap_cmd: %p\n", isert_cmd);
	if (wr->sge) {
		pr_debug("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd);
		ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
				(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
				DMA_TO_DEVICE : DMA_FROM_DEVICE);
		wr->sge = NULL;
	}

	if (wr->send_wr) {
		pr_debug("isert_unmap_cmd: %p free send_wr\n", isert_cmd);
		kfree(wr->send_wr);
		wr->send_wr = NULL;
	}

	if (wr->ib_sge) {
		pr_debug("isert_unmap_cmd: %p free ib_sge\n", isert_cmd);
		kfree(wr->ib_sge);
		wr->ib_sge = NULL;
	}
}

static void
isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	LIST_HEAD(unmap_list);

	pr_debug("unreg_fastreg_cmd: %p\n", isert_cmd);

	if (wr->fr_desc) {
		pr_debug("unreg_fastreg_cmd: %p free fr_desc %p\n",
			 isert_cmd, wr->fr_desc);
		spin_lock_bh(&isert_conn->conn_lock);
		list_add_tail(&wr->fr_desc->list, &isert_conn->conn_fr_pool);
		spin_unlock_bh(&isert_conn->conn_lock);
		wr->fr_desc = NULL;
	}

	if (wr->sge) {
		pr_debug("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd);
		ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
				(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
				DMA_TO_DEVICE : DMA_FROM_DEVICE);
		wr->sge = NULL;
	}

	wr->ib_sge = NULL;
	wr->send_wr = NULL;
}

static void
isert_put_cmd(struct isert_cmd *isert_cmd)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct iscsi_conn *conn = isert_conn->conn;
	struct isert_device *device = isert_conn->conn_device;

	pr_debug("Entering isert_put_cmd: %p\n", isert_cmd);

	switch (cmd->iscsi_opcode) {
	case ISCSI_OP_SCSI_CMD:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		if (cmd->data_direction == DMA_TO_DEVICE)
			iscsit_stop_dataout_timer(cmd);

		device->unreg_rdma_mem(isert_cmd, isert_conn);
		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_REJECT:
	case ISCSI_OP_NOOP_OUT:
	case ISCSI_OP_TEXT:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		/*
		 * Handle special case for REJECT when iscsi_add_reject*() has
		 * overwritten the original iscsi_opcode assignment, and the
		 * associated cmd->se_cmd needs to be released.
		 */
		if (cmd->se_cmd.se_tfo != NULL) {
			pr_debug("Calling transport_generic_free_cmd from"
				 " isert_put_cmd for 0x%02x\n",
				 cmd->iscsi_opcode);
			transport_generic_free_cmd(&cmd->se_cmd, 0);
			break;
		}
		/*
		 * Fall-through
		 */
	default:
		iscsit_release_cmd(cmd);
		break;
	}
}

static void
isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
{
	if (tx_desc->dma_addr != 0) {
		pr_debug("Calling ib_dma_unmap_single for tx_desc->dma_addr\n");
		ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
				    ISER_HEADERS_LEN, DMA_TO_DEVICE);
		tx_desc->dma_addr = 0;
	}
}

static void
isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
		     struct ib_device *ib_dev)
{
	if (isert_cmd->pdu_buf_dma != 0) {
		pr_debug("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n");
		ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
				    isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
		isert_cmd->pdu_buf_dma = 0;
	}

	isert_unmap_tx_desc(tx_desc, ib_dev);
	isert_put_cmd(isert_cmd);
}

static void
isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
			   struct isert_cmd *isert_cmd)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct isert_device *device = isert_conn->conn_device;

	iscsit_stop_dataout_timer(cmd);
	device->unreg_rdma_mem(isert_cmd, isert_conn);
	cmd->write_data_done = wr->cur_rdma_length;

	pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
	spin_lock_bh(&cmd->istate_lock);
	cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
	cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
	spin_unlock_bh(&cmd->istate_lock);

	target_execute_cmd(se_cmd);
}

static void
isert_do_control_comp(struct work_struct *work)
{
	struct isert_cmd *isert_cmd = container_of(work,
			struct isert_cmd, comp_work);
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;

	switch (cmd->i_state) {
	case ISTATE_SEND_TASKMGTRSP:
		pr_debug("Calling iscsit_tmr_post_handler\n");

		atomic_dec(&isert_conn->post_send_buf_count);
		iscsit_tmr_post_handler(cmd, cmd->conn);

		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
		break;
	case ISTATE_SEND_REJECT:
		pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT\n");
		atomic_dec(&isert_conn->post_send_buf_count);

		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
		break;
	case ISTATE_SEND_LOGOUTRSP:
		pr_debug("Calling iscsit_logout_post_handler\n");
		/*
		 * Call atomic_dec(&isert_conn->post_send_buf_count)
		 * from isert_free_conn()
		 */
		isert_conn->logout_posted = true;
		iscsit_logout_post_handler(cmd, cmd->conn);
		break;
	case ISTATE_SEND_TEXTRSP:
		atomic_dec(&isert_conn->post_send_buf_count);
		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
		break;
	default:
		pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state);
		dump_stack();
		break;
	}
}

static void
isert_response_completion(struct iser_tx_desc *tx_desc,
			  struct isert_cmd *isert_cmd,
			  struct isert_conn *isert_conn,
			  struct ib_device *ib_dev)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;

	if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
	    cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
	    cmd->i_state == ISTATE_SEND_REJECT ||
	    cmd->i_state == ISTATE_SEND_TEXTRSP) {
		isert_unmap_tx_desc(tx_desc, ib_dev);

		INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
		queue_work(isert_comp_wq, &isert_cmd->comp_work);
		return;
	}
	atomic_dec(&isert_conn->post_send_buf_count);

	cmd->i_state = ISTATE_SENT_STATUS;
1629 isert_completion_put(tx_desc, isert_cmd, ib_dev);
1630}
1631
1632static void
Nicholas Bellinger95b60f02013-11-05 13:16:12 -08001633__isert_send_completion(struct iser_tx_desc *tx_desc,
1634 struct isert_conn *isert_conn)
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001635{
1636 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1637 struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
1638 struct isert_rdma_wr *wr;
1639
1640 if (!isert_cmd) {
1641 atomic_dec(&isert_conn->post_send_buf_count);
1642 isert_unmap_tx_desc(tx_desc, ib_dev);
1643 return;
1644 }
1645 wr = &isert_cmd->rdma_wr;
1646
1647 switch (wr->iser_ib_op) {
1648 case ISER_IB_RECV:
1649 pr_err("isert_send_completion: Got ISER_IB_RECV\n");
1650 dump_stack();
1651 break;
1652 case ISER_IB_SEND:
1653 pr_debug("isert_send_completion: Got ISER_IB_SEND\n");
1654 isert_response_completion(tx_desc, isert_cmd,
1655 isert_conn, ib_dev);
1656 break;
1657 case ISER_IB_RDMA_WRITE:
1658 pr_err("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
1659 dump_stack();
1660 break;
1661 case ISER_IB_RDMA_READ:
1662 pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");
1663
1664 atomic_dec(&isert_conn->post_send_buf_count);
1665 isert_completion_rdma_read(tx_desc, isert_cmd);
1666 break;
1667 default:
1668 pr_err("Unknown wr->iser_ib_op: 0x%02x\n", wr->iser_ib_op);
1669 dump_stack();
1670 break;
1671 }
1672}
1673
1674static void
Nicholas Bellinger95b60f02013-11-05 13:16:12 -08001675isert_send_completion(struct iser_tx_desc *tx_desc,
1676 struct isert_conn *isert_conn)
1677{
1678 struct llist_node *llnode = tx_desc->comp_llnode_batch;
1679 struct iser_tx_desc *t;
1680 /*
1681 * Drain coalesced completion llist starting from comp_llnode_batch
1682 * setup in isert_init_send_wr(), and then complete trailing tx_desc.
1683 */
1684 while (llnode) {
1685 t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
1686 llnode = llist_next(llnode);
1687 __isert_send_completion(t, isert_conn);
1688 }
1689 __isert_send_completion(tx_desc, isert_conn);
1690}
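/*
 * Illustrative trace (not in the original source): if three sends were
 * coalesced, tx_desc->comp_llnode_batch points at the llist holding the
 * two earlier descriptors, so the loop above completes those two first
 * and the trailing call completes the descriptor whose signaled send
 * actually generated this completion.
 */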
1691
1692static void
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001693isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
1694{
1695 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1696
1697 if (tx_desc) {
1698 struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
1699
1700 if (!isert_cmd)
1701 isert_unmap_tx_desc(tx_desc, ib_dev);
1702 else
1703 isert_completion_put(tx_desc, isert_cmd, ib_dev);
1704 }
1705
1706 if (isert_conn->post_recv_buf_count == 0 &&
1707 atomic_read(&isert_conn->post_send_buf_count) == 0) {
1708 pr_debug("isert_cq_comp_err >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
1709 pr_debug("Calling wake_up from isert_cq_comp_err\n");
1710
Nicholas Bellingerb2cb9642013-07-03 03:05:37 -07001711 mutex_lock(&isert_conn->conn_mutex);
1712 if (isert_conn->state != ISER_CONN_DOWN)
1713 isert_conn->state = ISER_CONN_TERMINATING;
1714 mutex_unlock(&isert_conn->conn_mutex);
1715
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001716 wake_up(&isert_conn->conn_wait_comp_err);
1717 }
1718}
1719
1720static void
1721isert_cq_tx_work(struct work_struct *work)
1722{
1723 struct isert_cq_desc *cq_desc = container_of(work,
1724 struct isert_cq_desc, cq_tx_work);
1725 struct isert_device *device = cq_desc->device;
1726 int cq_index = cq_desc->cq_index;
1727 struct ib_cq *tx_cq = device->dev_tx_cq[cq_index];
1728 struct isert_conn *isert_conn;
1729 struct iser_tx_desc *tx_desc;
1730 struct ib_wc wc;
1731
1732 while (ib_poll_cq(tx_cq, 1, &wc) == 1) {
1733 tx_desc = (struct iser_tx_desc *)(unsigned long)wc.wr_id;
1734 isert_conn = wc.qp->qp_context;
1735
1736 if (wc.status == IB_WC_SUCCESS) {
1737 isert_send_completion(tx_desc, isert_conn);
1738 } else {
1739 pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
1740 pr_debug("TX wc.status: 0x%08x\n", wc.status);
Nicholas Bellingerc5a2adb2013-07-01 15:11:21 -07001741 pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001742 atomic_dec(&isert_conn->post_send_buf_count);
1743 isert_cq_comp_err(tx_desc, isert_conn);
1744 }
1745 }
1746
1747 ib_req_notify_cq(tx_cq, IB_CQ_NEXT_COMP);
1748}
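/*
 * Note on the polling pattern above: completions are drained one at a
 * time with ib_poll_cq(), and the CQ is only re-armed through
 * ib_req_notify_cq() once the queue is empty, so a single work item can
 * service a whole burst of TX completions per interrupt.
 */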
1749
1750static void
1751isert_cq_tx_callback(struct ib_cq *cq, void *context)
1752{
1753 struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
1754
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001755 queue_work(isert_comp_wq, &cq_desc->cq_tx_work);
1756}
1757
1758static void
1759isert_cq_rx_work(struct work_struct *work)
1760{
1761 struct isert_cq_desc *cq_desc = container_of(work,
1762 struct isert_cq_desc, cq_rx_work);
1763 struct isert_device *device = cq_desc->device;
1764 int cq_index = cq_desc->cq_index;
1765 struct ib_cq *rx_cq = device->dev_rx_cq[cq_index];
1766 struct isert_conn *isert_conn;
1767 struct iser_rx_desc *rx_desc;
1768 struct ib_wc wc;
1769 unsigned long xfer_len;
1770
1771 while (ib_poll_cq(rx_cq, 1, &wc) == 1) {
1772 rx_desc = (struct iser_rx_desc *)(unsigned long)wc.wr_id;
1773 isert_conn = wc.qp->qp_context;
1774
1775 if (wc.status == IB_WC_SUCCESS) {
1776 xfer_len = (unsigned long)wc.byte_len;
1777 isert_rx_completion(rx_desc, isert_conn, xfer_len);
1778 } else {
1779 pr_debug("RX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
Nicholas Bellingerc5a2adb2013-07-01 15:11:21 -07001780 if (wc.status != IB_WC_WR_FLUSH_ERR) {
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001781 pr_debug("RX wc.status: 0x%08x\n", wc.status);
Nicholas Bellingerc5a2adb2013-07-01 15:11:21 -07001782 pr_debug("RX wc.vendor_err: 0x%08x\n",
1783 wc.vendor_err);
1784 }
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001785 isert_conn->post_recv_buf_count--;
1786 isert_cq_comp_err(NULL, isert_conn);
1787 }
1788 }
1789
1790 ib_req_notify_cq(rx_cq, IB_CQ_NEXT_COMP);
1791}
1792
1793static void
1794isert_cq_rx_callback(struct ib_cq *cq, void *context)
1795{
1796 struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
1797
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001798 queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
1799}
1800
1801static int
1802isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
1803{
1804 struct ib_send_wr *wr_failed;
1805 int ret;
1806
1807 atomic_inc(&isert_conn->post_send_buf_count);
1808
1809 ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr,
1810 &wr_failed);
1811 if (ret) {
1812 pr_err("ib_post_send failed with %d\n", ret);
1813 atomic_dec(&isert_conn->post_send_buf_count);
1814 return ret;
1815 }
1816 return ret;
1817}
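/*
 * post_send_buf_count is incremented before ib_post_send() so that a
 * completion firing immediately after the post still sees an accounted
 * send; on a posting failure the increment is rolled back above before
 * the error is returned.
 */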
1818
1819static int
1820isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
1821{
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001822 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001823 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1824 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1825 struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
1826 &isert_cmd->tx_desc.iscsi_header;
1827
1828 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1829 iscsit_build_rsp_pdu(cmd, conn, true, hdr);
1830 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1831 /*
1832 * Attach SENSE DATA payload to iSCSI Response PDU
1833 */
1834 if (cmd->se_cmd.sense_buffer &&
1835 ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
1836 (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
1837 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1838 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
Nicholas Bellingerdbbc5d12013-07-03 19:39:37 -07001839 u32 padding, pdu_len;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001840
1841 put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
1842 cmd->sense_buffer);
1843 cmd->se_cmd.scsi_sense_length += sizeof(__be16);
1844
1845 padding = -(cmd->se_cmd.scsi_sense_length) & 3;
1846 hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
Nicholas Bellingerdbbc5d12013-07-03 19:39:37 -07001847 pdu_len = cmd->se_cmd.scsi_sense_length + padding;
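		/*
		 * Worked example (illustrative): an 18-byte sense buffer plus
		 * the 2-byte length prefix gives scsi_sense_length = 20, so
		 * -(20) & 3 == 0 and pdu_len = 20. A 21-byte total would take
		 * 3 bytes of padding instead, keeping pdu_len aligned at 24.
		 */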
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001848
Nicholas Bellingerdbbc5d12013-07-03 19:39:37 -07001849 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
1850 (void *)cmd->sense_buffer, pdu_len,
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001851 DMA_TO_DEVICE);
1852
Nicholas Bellingerdbbc5d12013-07-03 19:39:37 -07001853 isert_cmd->pdu_buf_len = pdu_len;
1854 tx_dsg->addr = isert_cmd->pdu_buf_dma;
1855 tx_dsg->length = pdu_len;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001856 tx_dsg->lkey = isert_conn->conn_mr->lkey;
1857 isert_cmd->tx_desc.num_sge = 2;
1858 }
1859
Nicholas Bellinger95b60f02013-11-05 13:16:12 -08001860 isert_init_send_wr(isert_conn, isert_cmd, send_wr, true);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001861
1862 pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1863
1864 return isert_post_response(isert_conn, isert_cmd);
1865}
1866
1867static int
1868isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
1869 bool nopout_response)
1870{
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001871 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001872 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1873 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1874
1875 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1876 iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
1877 &isert_cmd->tx_desc.iscsi_header,
1878 nopout_response);
1879 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
Nicholas Bellinger95b60f02013-11-05 13:16:12 -08001880 isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001881
Masanari Iida8b513d02013-05-21 23:13:12 +09001882 pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001883
1884 return isert_post_response(isert_conn, isert_cmd);
1885}
1886
1887static int
1888isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1889{
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001890 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001891 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1892 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1893
1894 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1895 iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
1896 &isert_cmd->tx_desc.iscsi_header);
1897 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
Nicholas Bellinger95b60f02013-11-05 13:16:12 -08001898 isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001899
1900 pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1901
1902 return isert_post_response(isert_conn, isert_cmd);
1903}
1904
1905static int
1906isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1907{
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001908 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001909 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1910 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1911
1912 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1913 iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
1914 &isert_cmd->tx_desc.iscsi_header);
1915 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
Nicholas Bellinger95b60f02013-11-05 13:16:12 -08001916 isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001917
1918 pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1919
1920 return isert_post_response(isert_conn, isert_cmd);
1921}
1922
1923static int
1924isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1925{
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001926 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001927 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1928 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
Nicholas Bellinger3df8f682013-06-26 02:31:42 -07001929 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1930 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
1931 struct iscsi_reject *hdr =
1932 (struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001933
1934 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
Nicholas Bellinger3df8f682013-06-26 02:31:42 -07001935 iscsit_build_reject(cmd, conn, hdr);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001936 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
Nicholas Bellinger3df8f682013-06-26 02:31:42 -07001937
1938 hton24(hdr->dlength, ISCSI_HDR_LEN);
Nicholas Bellingerdbbc5d12013-07-03 19:39:37 -07001939 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
Nicholas Bellinger3df8f682013-06-26 02:31:42 -07001940 (void *)cmd->buf_ptr, ISCSI_HDR_LEN,
1941 DMA_TO_DEVICE);
Nicholas Bellingerdbbc5d12013-07-03 19:39:37 -07001942 isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
1943 tx_dsg->addr = isert_cmd->pdu_buf_dma;
Nicholas Bellinger3df8f682013-06-26 02:31:42 -07001944 tx_dsg->length = ISCSI_HDR_LEN;
1945 tx_dsg->lkey = isert_conn->conn_mr->lkey;
1946 isert_cmd->tx_desc.num_sge = 2;
1947
Nicholas Bellinger95b60f02013-11-05 13:16:12 -08001948 isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001949
1950 pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1951
1952 return isert_post_response(isert_conn, isert_cmd);
1953}
1954
1955static int
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07001956isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1957{
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001958 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07001959 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1960 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1961 struct iscsi_text_rsp *hdr =
1962 (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
1963 u32 txt_rsp_len;
1964 int rc;
1965
1966 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1967 rc = iscsit_build_text_rsp(cmd, conn, hdr);
1968 if (rc < 0)
1969 return rc;
1970
1971 txt_rsp_len = rc;
1972 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1973
1974 if (txt_rsp_len) {
1975 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1976 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
1977 void *txt_rsp_buf = cmd->buf_ptr;
1978
1979 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
1980 txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);
1981
1982 isert_cmd->pdu_buf_len = txt_rsp_len;
1983 tx_dsg->addr = isert_cmd->pdu_buf_dma;
1984 tx_dsg->length = txt_rsp_len;
1985 tx_dsg->lkey = isert_conn->conn_mr->lkey;
1986 isert_cmd->tx_desc.num_sge = 2;
1987 }
Nicholas Bellinger95b60f02013-11-05 13:16:12 -08001988 isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07001989
1990 pr_debug("Posting Text Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1991
1992 return isert_post_response(isert_conn, isert_cmd);
1993}
1994
1995static int
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001996isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1997 struct ib_sge *ib_sge, struct ib_send_wr *send_wr,
1998 u32 data_left, u32 offset)
1999{
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07002000 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002001 struct scatterlist *sg_start, *tmp_sg;
2002 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2003 u32 sg_off, page_off;
2004 int i = 0, sg_nents;
2005
2006 sg_off = offset / PAGE_SIZE;
2007 sg_start = &cmd->se_cmd.t_data_sg[sg_off];
2008 sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
2009 page_off = offset % PAGE_SIZE;
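	/*
	 * Example (illustrative, assuming the page-sized scatterlist
	 * entries the arithmetic above implies): with 4K pages, an offset
	 * of 10240 gives sg_off = 2 and page_off = 2048, so the first
	 * ib_sge starts 2048 bytes into the third scatterlist entry.
	 */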
2010
2011 send_wr->sg_list = ib_sge;
2012 send_wr->num_sge = sg_nents;
2013 send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
2014 /*
2015 * Perform mapping of TCM scatterlist memory ib_sge dma_addr.
2016 */
2017 for_each_sg(sg_start, tmp_sg, sg_nents, i) {
2018 pr_debug("ISER RDMA from SGL dma_addr: 0x%16llx dma_len: %u, page_off: %u\n",
2019 (unsigned long long)tmp_sg->dma_address,
2020 tmp_sg->length, page_off);
2021
2022 ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
2023 ib_sge->length = min_t(u32, data_left,
2024 ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
2025 ib_sge->lkey = isert_conn->conn_mr->lkey;
2026
Vu Pham90ecc6e2013-08-28 23:23:33 +03002027 pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n",
2028 ib_sge->addr, ib_sge->length, ib_sge->lkey);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002029 page_off = 0;
2030 data_left -= ib_sge->length;
2031 ib_sge++;
2032 pr_debug("Incrementing ib_sge pointer to %p\n", ib_sge);
2033 }
2034
2035 pr_debug("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
2036 send_wr->sg_list, send_wr->num_sge);
2037
2038 return sg_nents;
2039}
2040
2041static int
Vu Pham90ecc6e2013-08-28 23:23:33 +03002042isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2043 struct isert_rdma_wr *wr)
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002044{
2045 struct se_cmd *se_cmd = &cmd->se_cmd;
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07002046 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002047 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002048 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
Vu Pham90ecc6e2013-08-28 23:23:33 +03002049 struct ib_send_wr *send_wr;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002050 struct ib_sge *ib_sge;
Vu Pham90ecc6e2013-08-28 23:23:33 +03002051 struct scatterlist *sg_start;
2052 u32 sg_off = 0, sg_nents;
2053 u32 offset = 0, data_len, data_left, rdma_write_max, va_offset = 0;
2054 int ret = 0, count, i, ib_sge_cnt;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002055
Vu Pham90ecc6e2013-08-28 23:23:33 +03002056 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
2057 data_left = se_cmd->data_length;
Vu Pham90ecc6e2013-08-28 23:23:33 +03002058 } else {
2059 sg_off = cmd->write_data_done / PAGE_SIZE;
2060 data_left = se_cmd->data_length - cmd->write_data_done;
2061 offset = cmd->write_data_done;
2062 isert_cmd->tx_desc.isert_cmd = isert_cmd;
2063 }
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002064
Vu Pham90ecc6e2013-08-28 23:23:33 +03002065 sg_start = &cmd->se_cmd.t_data_sg[sg_off];
2066 sg_nents = se_cmd->t_data_nents - sg_off;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002067
Vu Pham90ecc6e2013-08-28 23:23:33 +03002068 count = ib_dma_map_sg(ib_dev, sg_start, sg_nents,
2069 (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
2070 DMA_TO_DEVICE : DMA_FROM_DEVICE);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002071 if (unlikely(!count)) {
Vu Pham90ecc6e2013-08-28 23:23:33 +03002072		pr_err("Cmd: %p unable to map SGs\n", isert_cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002073 return -EINVAL;
2074 }
Vu Pham90ecc6e2013-08-28 23:23:33 +03002075 wr->sge = sg_start;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002076 wr->num_sge = sg_nents;
Vu Pham90ecc6e2013-08-28 23:23:33 +03002077 wr->cur_rdma_length = data_left;
2078 pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
2079 isert_cmd, count, sg_start, sg_nents, data_left);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002080
2081 ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL);
2082 if (!ib_sge) {
Vu Pham90ecc6e2013-08-28 23:23:33 +03002083 pr_warn("Unable to allocate ib_sge\n");
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002084 ret = -ENOMEM;
2085 goto unmap_sg;
2086 }
Vu Pham90ecc6e2013-08-28 23:23:33 +03002087 wr->ib_sge = ib_sge;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002088
2089 wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
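	/*
	 * Example (illustrative): sg_nents = 70 with max_sge = 32 needs
	 * DIV_ROUND_UP(70, 32) = 3 work requests to cover the list.
	 */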
2090 wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
2091 GFP_KERNEL);
2092 if (!wr->send_wr) {
Vu Pham90ecc6e2013-08-28 23:23:33 +03002093		pr_warn("Unable to allocate wr->send_wr\n");
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002094 ret = -ENOMEM;
2095 goto unmap_sg;
2096 }
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002097
2098 wr->isert_cmd = isert_cmd;
2099 rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002100
2101 for (i = 0; i < wr->send_wr_num; i++) {
2102 send_wr = &isert_cmd->rdma_wr.send_wr[i];
2103 data_len = min(data_left, rdma_write_max);
2104
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002105 send_wr->send_flags = 0;
Vu Pham90ecc6e2013-08-28 23:23:33 +03002106 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
2107 send_wr->opcode = IB_WR_RDMA_WRITE;
2108 send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset;
2109 send_wr->wr.rdma.rkey = isert_cmd->read_stag;
2110 if (i + 1 == wr->send_wr_num)
2111 send_wr->next = &isert_cmd->tx_desc.send_wr;
2112 else
2113 send_wr->next = &wr->send_wr[i + 1];
2114 } else {
2115 send_wr->opcode = IB_WR_RDMA_READ;
2116 send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset;
2117 send_wr->wr.rdma.rkey = isert_cmd->write_stag;
2118 if (i + 1 == wr->send_wr_num)
2119 send_wr->send_flags = IB_SEND_SIGNALED;
2120 else
2121 send_wr->next = &wr->send_wr[i + 1];
2122 }
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002123
2124 ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
2125 send_wr, data_len, offset);
2126 ib_sge += ib_sge_cnt;
2127
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002128 offset += data_len;
Vu Pham90ecc6e2013-08-28 23:23:33 +03002129 va_offset += data_len;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002130 data_left -= data_len;
2131 }
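	/*
	 * Resulting chain (illustrative, RDMA_WRITE case with three WRs):
	 * WRITE -> WRITE -> WRITE -> SEND(response), where the final link
	 * is tx_desc.send_wr carrying the SCSI response. In the RDMA_READ
	 * case the last WR is marked IB_SEND_SIGNALED instead, so its
	 * completion can drive isert_completion_rdma_read().
	 */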
Vu Pham90ecc6e2013-08-28 23:23:33 +03002132
2133 return 0;
2134unmap_sg:
2135 ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
2136 (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
2137 DMA_TO_DEVICE : DMA_FROM_DEVICE);
2138 return ret;
2139}
2140
2141static int
Vu Pham59464ef2013-08-28 23:23:35 +03002142isert_map_fr_pagelist(struct ib_device *ib_dev,
2143 struct scatterlist *sg_start, int sg_nents, u64 *fr_pl)
2144{
2145 u64 start_addr, end_addr, page, chunk_start = 0;
2146 struct scatterlist *tmp_sg;
2147 int i = 0, new_chunk, last_ent, n_pages;
2148
2149 n_pages = 0;
2150 new_chunk = 1;
2151 last_ent = sg_nents - 1;
2152 for_each_sg(sg_start, tmp_sg, sg_nents, i) {
2153 start_addr = ib_sg_dma_address(ib_dev, tmp_sg);
2154 if (new_chunk)
2155 chunk_start = start_addr;
2156 end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg);
2157
2158 pr_debug("SGL[%d] dma_addr: 0x%16llx len: %u\n",
2159 i, (unsigned long long)tmp_sg->dma_address,
2160 tmp_sg->length);
2161
2162 if ((end_addr & ~PAGE_MASK) && i < last_ent) {
2163 new_chunk = 0;
2164 continue;
2165 }
2166 new_chunk = 1;
2167
2168 page = chunk_start & PAGE_MASK;
2169 do {
2170 fr_pl[n_pages++] = page;
2171 pr_debug("Mapped page_list[%d] page_addr: 0x%16llx\n",
2172 n_pages - 1, page);
2173 page += PAGE_SIZE;
2174 } while (page < end_addr);
2175 }
2176
2177 return n_pages;
2178}
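/*
 * Worked example (illustrative): two DMA-mapped entries, 4K at 0x10000
 * and 8K at 0x11000. The first ends page aligned, so it closes a chunk
 * and emits page 0x10000; the second closes the final chunk and emits
 * 0x11000 and 0x12000, so isert_map_fr_pagelist() returns n_pages = 3.
 */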
2179
2180static int
2181isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
Sagi Grimberg9bd626e2014-01-09 18:40:54 +02002182 struct isert_conn *isert_conn, struct scatterlist *sg_start,
2183 struct ib_sge *ib_sge, u32 sg_nents, u32 offset,
2184 unsigned int data_len)
Vu Pham59464ef2013-08-28 23:23:35 +03002185{
Vu Pham59464ef2013-08-28 23:23:35 +03002186 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
Vu Pham59464ef2013-08-28 23:23:35 +03002187 struct ib_send_wr fr_wr, inv_wr;
2188 struct ib_send_wr *bad_wr, *wr = NULL;
Sagi Grimberg9bd626e2014-01-09 18:40:54 +02002189 int ret, pagelist_len;
2190 u32 page_off;
Vu Pham59464ef2013-08-28 23:23:35 +03002191 u8 key;
Vu Pham59464ef2013-08-28 23:23:35 +03002192
Sagi Grimberg9bd626e2014-01-09 18:40:54 +02002193 sg_nents = min_t(unsigned int, sg_nents, ISCSI_ISER_SG_TABLESIZE);
Vu Pham59464ef2013-08-28 23:23:35 +03002194 page_off = offset % PAGE_SIZE;
2195
Sagi Grimberg9bd626e2014-01-09 18:40:54 +02002196 pr_debug("Use fr_desc %p sg_nents %d offset %u\n",
2197 fr_desc, sg_nents, offset);
Vu Pham59464ef2013-08-28 23:23:35 +03002198
2199 pagelist_len = isert_map_fr_pagelist(ib_dev, sg_start, sg_nents,
2200 &fr_desc->data_frpl->page_list[0]);
2201
2202 if (!fr_desc->valid) {
2203 memset(&inv_wr, 0, sizeof(inv_wr));
2204 inv_wr.opcode = IB_WR_LOCAL_INV;
2205 inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey;
2206 wr = &inv_wr;
2207 /* Bump the key */
2208 key = (u8)(fr_desc->data_mr->rkey & 0x000000FF);
2209 ib_update_fast_reg_key(fr_desc->data_mr, ++key);
2210 }
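	/*
	 * Key bump example (illustrative): if data_mr->rkey was 0x12340007,
	 * key becomes 0x08 and ib_update_fast_reg_key() yields rkey
	 * 0x12340008, so any stale reference carrying the old key is
	 * rejected by the HCA.
	 */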
2211
2212 /* Prepare FASTREG WR */
2213 memset(&fr_wr, 0, sizeof(fr_wr));
2214 fr_wr.opcode = IB_WR_FAST_REG_MR;
2215 fr_wr.wr.fast_reg.iova_start =
2216 fr_desc->data_frpl->page_list[0] + page_off;
2217 fr_wr.wr.fast_reg.page_list = fr_desc->data_frpl;
2218 fr_wr.wr.fast_reg.page_list_len = pagelist_len;
2219 fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
2220 fr_wr.wr.fast_reg.length = data_len;
2221 fr_wr.wr.fast_reg.rkey = fr_desc->data_mr->rkey;
2222 fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;
2223
2224 if (!wr)
2225 wr = &fr_wr;
2226 else
2227 wr->next = &fr_wr;
2228
2229 ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
2230 if (ret) {
2231 pr_err("fast registration failed, ret:%d\n", ret);
2232 return ret;
2233 }
2234 fr_desc->valid = false;
2235
2236 ib_sge->lkey = fr_desc->data_mr->lkey;
2237 ib_sge->addr = fr_desc->data_frpl->page_list[0] + page_off;
2238 ib_sge->length = data_len;
2239
2240 pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n",
2241 ib_sge->addr, ib_sge->length, ib_sge->lkey);
2242
2243 return ret;
2244}
2245
2246static int
Sagi Grimberga3a5a822014-01-09 18:40:50 +02002247isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2248 struct isert_rdma_wr *wr)
Vu Pham59464ef2013-08-28 23:23:35 +03002249{
2250 struct se_cmd *se_cmd = &cmd->se_cmd;
2251 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2252 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2253 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2254 struct ib_send_wr *send_wr;
2255 struct ib_sge *ib_sge;
2256 struct scatterlist *sg_start;
2257 struct fast_reg_descriptor *fr_desc;
2258 u32 sg_off = 0, sg_nents;
2259 u32 offset = 0, data_len, data_left, rdma_write_max;
2260 int ret = 0, count;
2261 unsigned long flags;
2262
2263 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
2264 data_left = se_cmd->data_length;
Vu Pham59464ef2013-08-28 23:23:35 +03002265 } else {
Vu Pham59464ef2013-08-28 23:23:35 +03002266 offset = cmd->write_data_done;
Sagi Grimberg9bd626e2014-01-09 18:40:54 +02002267 sg_off = offset / PAGE_SIZE;
2268 data_left = se_cmd->data_length - cmd->write_data_done;
Vu Pham59464ef2013-08-28 23:23:35 +03002269 isert_cmd->tx_desc.isert_cmd = isert_cmd;
2270 }
2271
2272 sg_start = &cmd->se_cmd.t_data_sg[sg_off];
2273 sg_nents = se_cmd->t_data_nents - sg_off;
2274
2275 count = ib_dma_map_sg(ib_dev, sg_start, sg_nents,
2276 (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
2277 DMA_TO_DEVICE : DMA_FROM_DEVICE);
2278 if (unlikely(!count)) {
2279		pr_err("Cmd: %p unable to map SGs\n", isert_cmd);
2280 return -EINVAL;
2281 }
2282 wr->sge = sg_start;
2283 wr->num_sge = sg_nents;
2284 pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
2285 isert_cmd, count, sg_start, sg_nents, data_left);
2286
2287 memset(&wr->s_ib_sge, 0, sizeof(*ib_sge));
2288 ib_sge = &wr->s_ib_sge;
2289 wr->ib_sge = ib_sge;
2290
2291 wr->send_wr_num = 1;
2292 memset(&wr->s_send_wr, 0, sizeof(*send_wr));
2293 wr->send_wr = &wr->s_send_wr;
2294
2295 wr->isert_cmd = isert_cmd;
2296 rdma_write_max = ISCSI_ISER_SG_TABLESIZE * PAGE_SIZE;
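	/*
	 * Example (illustrative, assuming ISCSI_ISER_SG_TABLESIZE = 256 and
	 * 4K pages): one fast registration covers at most 256 * 4096 = 1 MB,
	 * so data_len below is clipped to that bound per registration.
	 */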
2297
2298 send_wr = &isert_cmd->rdma_wr.s_send_wr;
2299 send_wr->sg_list = ib_sge;
2300 send_wr->num_sge = 1;
2301 send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
2302 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
2303 send_wr->opcode = IB_WR_RDMA_WRITE;
2304 send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
2305 send_wr->wr.rdma.rkey = isert_cmd->read_stag;
2306 send_wr->send_flags = 0;
2307 send_wr->next = &isert_cmd->tx_desc.send_wr;
2308 } else {
2309 send_wr->opcode = IB_WR_RDMA_READ;
2310 send_wr->wr.rdma.remote_addr = isert_cmd->write_va;
2311 send_wr->wr.rdma.rkey = isert_cmd->write_stag;
2312 send_wr->send_flags = IB_SEND_SIGNALED;
2313 }
2314
2315 data_len = min(data_left, rdma_write_max);
2316 wr->cur_rdma_length = data_len;
2317
Vu Phamf01b9f72013-11-11 19:04:29 +02002318 /* if there is a single dma entry, dma mr is sufficient */
2319 if (count == 1) {
2320 ib_sge->addr = ib_sg_dma_address(ib_dev, &sg_start[0]);
2321 ib_sge->length = ib_sg_dma_len(ib_dev, &sg_start[0]);
2322 ib_sge->lkey = isert_conn->conn_mr->lkey;
2323 wr->fr_desc = NULL;
2324 } else {
2325 spin_lock_irqsave(&isert_conn->conn_lock, flags);
Sagi Grimberga3a5a822014-01-09 18:40:50 +02002326 fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
Vu Phamf01b9f72013-11-11 19:04:29 +02002327 struct fast_reg_descriptor, list);
2328 list_del(&fr_desc->list);
2329 spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
2330 wr->fr_desc = fr_desc;
Vu Pham59464ef2013-08-28 23:23:35 +03002331
Sagi Grimberg9bd626e2014-01-09 18:40:54 +02002332 ret = isert_fast_reg_mr(fr_desc, isert_conn, sg_start,
2333 ib_sge, sg_nents, offset, data_len);
Vu Phamf01b9f72013-11-11 19:04:29 +02002334 if (ret) {
Sagi Grimberga3a5a822014-01-09 18:40:50 +02002335 list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
Vu Phamf01b9f72013-11-11 19:04:29 +02002336 goto unmap_sg;
2337 }
Vu Pham59464ef2013-08-28 23:23:35 +03002338 }
2339
2340 return 0;
2341
2342unmap_sg:
2343 ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
2344 (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
2345 DMA_TO_DEVICE : DMA_FROM_DEVICE);
2346 return ret;
2347}
2348
2349static int
Vu Pham90ecc6e2013-08-28 23:23:33 +03002350isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2351{
2352 struct se_cmd *se_cmd = &cmd->se_cmd;
Vu Pham59464ef2013-08-28 23:23:35 +03002353 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
Vu Pham90ecc6e2013-08-28 23:23:33 +03002354 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
2355 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
Vu Phamd40945d2013-08-28 23:23:34 +03002356 struct isert_device *device = isert_conn->conn_device;
Vu Pham90ecc6e2013-08-28 23:23:33 +03002357 struct ib_send_wr *wr_failed;
2358 int rc;
2359
2360 pr_debug("Cmd: %p RDMA_WRITE data_length: %u\n",
2361 isert_cmd, se_cmd->data_length);
2362 wr->iser_ib_op = ISER_IB_RDMA_WRITE;
Vu Phamd40945d2013-08-28 23:23:34 +03002363 rc = device->reg_rdma_mem(conn, cmd, wr);
Vu Pham90ecc6e2013-08-28 23:23:33 +03002364 if (rc) {
2365 pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
2366 return rc;
2367 }
2368
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002369 /*
2370 * Build isert_conn->tx_desc for iSCSI response PDU and attach
2371 */
2372 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
Nicholas Bellinger04d9cd12013-11-12 18:05:07 -08002373 iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002374 &isert_cmd->tx_desc.iscsi_header);
2375 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
Nicholas Bellinger95b60f02013-11-05 13:16:12 -08002376 isert_init_send_wr(isert_conn, isert_cmd,
2377 &isert_cmd->tx_desc.send_wr, true);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002378
2379 atomic_inc(&isert_conn->post_send_buf_count);
2380
2381 rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
2382 if (rc) {
2383 pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
2384 atomic_dec(&isert_conn->post_send_buf_count);
2385 }
Vu Pham90ecc6e2013-08-28 23:23:33 +03002386 pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n",
2387 isert_cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002388
Vu Pham90ecc6e2013-08-28 23:23:33 +03002389 return 1;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002390}
2391
2392static int
2393isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
2394{
2395 struct se_cmd *se_cmd = &cmd->se_cmd;
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07002396 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002397 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
2398 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
Vu Phamd40945d2013-08-28 23:23:34 +03002399 struct isert_device *device = isert_conn->conn_device;
Vu Pham90ecc6e2013-08-28 23:23:33 +03002400 struct ib_send_wr *wr_failed;
2401 int rc;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002402
Vu Pham90ecc6e2013-08-28 23:23:33 +03002403 pr_debug("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
2404 isert_cmd, se_cmd->data_length, cmd->write_data_done);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002405 wr->iser_ib_op = ISER_IB_RDMA_READ;
Vu Phamd40945d2013-08-28 23:23:34 +03002406 rc = device->reg_rdma_mem(conn, cmd, wr);
Vu Pham90ecc6e2013-08-28 23:23:33 +03002407 if (rc) {
2408 pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
2409 return rc;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002410 }
2411
2412 atomic_inc(&isert_conn->post_send_buf_count);
2413
2414 rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
2415 if (rc) {
2416 pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
2417 atomic_dec(&isert_conn->post_send_buf_count);
2418 }
Vu Pham90ecc6e2013-08-28 23:23:33 +03002419 pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
2420 isert_cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002421
Vu Pham90ecc6e2013-08-28 23:23:33 +03002422 return 0;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002423}
2424
2425static int
2426isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
2427{
2428 int ret;
2429
2430 switch (state) {
2431 case ISTATE_SEND_NOPIN_WANT_RESPONSE:
2432 ret = isert_put_nopin(cmd, conn, false);
2433 break;
2434 default:
2435 pr_err("Unknown immediate state: 0x%02x\n", state);
2436 ret = -EINVAL;
2437 break;
2438 }
2439
2440 return ret;
2441}
2442
2443static int
2444isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
2445{
2446 int ret;
2447
2448 switch (state) {
2449 case ISTATE_SEND_LOGOUTRSP:
2450 ret = isert_put_logout_rsp(cmd, conn);
2451 if (!ret) {
2452 pr_debug("Returning iSER Logout -EAGAIN\n");
2453 ret = -EAGAIN;
2454 }
2455 break;
2456 case ISTATE_SEND_NOPIN:
2457 ret = isert_put_nopin(cmd, conn, true);
2458 break;
2459 case ISTATE_SEND_TASKMGTRSP:
2460 ret = isert_put_tm_rsp(cmd, conn);
2461 break;
2462 case ISTATE_SEND_REJECT:
2463 ret = isert_put_reject(cmd, conn);
2464 break;
Nicholas Bellingeradb54c22013-06-14 16:47:15 -07002465 case ISTATE_SEND_TEXTRSP:
2466 ret = isert_put_text_rsp(cmd, conn);
2467 break;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002468 case ISTATE_SEND_STATUS:
2469 /*
2470		 * Special case for sending non-GOOD SCSI status from TX thread
2471		 * context during pre se_cmd execution failure.
2472 */
2473 ret = isert_put_response(conn, cmd);
2474 break;
2475 default:
2476 pr_err("Unknown response state: 0x%02x\n", state);
2477 ret = -EINVAL;
2478 break;
2479 }
2480
2481 return ret;
2482}
2483
2484static int
2485isert_setup_np(struct iscsi_np *np,
2486 struct __kernel_sockaddr_storage *ksockaddr)
2487{
2488 struct isert_np *isert_np;
2489 struct rdma_cm_id *isert_lid;
2490 struct sockaddr *sa;
2491 int ret;
2492
2493 isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
2494 if (!isert_np) {
2495 pr_err("Unable to allocate struct isert_np\n");
2496 return -ENOMEM;
2497 }
2498 init_waitqueue_head(&isert_np->np_accept_wq);
2499 mutex_init(&isert_np->np_accept_mutex);
2500 INIT_LIST_HEAD(&isert_np->np_accept_list);
2501 init_completion(&isert_np->np_login_comp);
2502
2503 sa = (struct sockaddr *)ksockaddr;
2504 pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr, sa);
2505 /*
2506	 * Set up np->np_sockaddr from the passed sockaddr configured
2507	 * in the iscsi_target_configfs.c code.
2508 */
2509 memcpy(&np->np_sockaddr, ksockaddr,
2510 sizeof(struct __kernel_sockaddr_storage));
2511
2512 isert_lid = rdma_create_id(isert_cma_handler, np, RDMA_PS_TCP,
2513 IB_QPT_RC);
2514 if (IS_ERR(isert_lid)) {
2515 pr_err("rdma_create_id() for isert_listen_handler failed: %ld\n",
2516 PTR_ERR(isert_lid));
2517 ret = PTR_ERR(isert_lid);
2518 goto out;
2519 }
2520
2521 ret = rdma_bind_addr(isert_lid, sa);
2522 if (ret) {
2523 pr_err("rdma_bind_addr() for isert_lid failed: %d\n", ret);
2524 goto out_lid;
2525 }
2526
2527 ret = rdma_listen(isert_lid, ISERT_RDMA_LISTEN_BACKLOG);
2528 if (ret) {
2529 pr_err("rdma_listen() for isert_lid failed: %d\n", ret);
2530 goto out_lid;
2531 }
2532
2533 isert_np->np_cm_id = isert_lid;
2534 np->np_context = isert_np;
2535 pr_debug("Setup isert_lid->context: %p\n", isert_lid->context);
2536
2537 return 0;
2538
2539out_lid:
2540 rdma_destroy_id(isert_lid);
2541out:
2542 kfree(isert_np);
2543 return ret;
2544}
2545
2546static int
2547isert_check_accept_queue(struct isert_np *isert_np)
2548{
2549 int empty;
2550
2551 mutex_lock(&isert_np->np_accept_mutex);
2552 empty = list_empty(&isert_np->np_accept_list);
2553 mutex_unlock(&isert_np->np_accept_mutex);
2554
2555 return empty;
2556}
2557
2558static int
2559isert_rdma_accept(struct isert_conn *isert_conn)
2560{
2561 struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
2562 struct rdma_conn_param cp;
2563 int ret;
2564
2565 memset(&cp, 0, sizeof(struct rdma_conn_param));
2566 cp.responder_resources = isert_conn->responder_resources;
2567 cp.initiator_depth = isert_conn->initiator_depth;
2568 cp.retry_count = 7;
2569 cp.rnr_retry_count = 7;
2570
2571 pr_debug("Before rdma_accept >>>>>>>>>>>>>>>>>>>>.\n");
2572
2573 ret = rdma_accept(cm_id, &cp);
2574 if (ret) {
2575 pr_err("rdma_accept() failed with: %d\n", ret);
2576 return ret;
2577 }
2578
2579 pr_debug("After rdma_accept >>>>>>>>>>>>>>>>>>>>>.\n");
2580
2581 return 0;
2582}
2583
2584static int
2585isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
2586{
2587 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2588 int ret;
2589
2590 pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn);
Nicholas Bellinger6faaa852013-08-18 16:35:46 -07002591 /*
2592 * For login requests after the first PDU, isert_rx_login_req() will
2593 * kick schedule_delayed_work(&conn->login_work) as the packet is
2594 * received, which turns this callback from iscsi_target_do_login_rx()
2595 * into a NOP.
2596 */
2597 if (!login->first_request)
2598 return 0;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002599
2600 ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
2601 if (ret)
2602 return ret;
2603
2604 pr_debug("isert_get_login_rx processing login->req: %p\n", login->req);
2605 return 0;
2606}
2607
2608static void
2609isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
2610 struct isert_conn *isert_conn)
2611{
2612 struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
2613 struct rdma_route *cm_route = &cm_id->route;
2614 struct sockaddr_in *sock_in;
2615 struct sockaddr_in6 *sock_in6;
2616
2617 conn->login_family = np->np_sockaddr.ss_family;
2618
2619 if (np->np_sockaddr.ss_family == AF_INET6) {
2620 sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.dst_addr;
2621 snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
2622 &sock_in6->sin6_addr.in6_u);
2623 conn->login_port = ntohs(sock_in6->sin6_port);
2624
2625 sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.src_addr;
2626 snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
2627 &sock_in6->sin6_addr.in6_u);
2628 conn->local_port = ntohs(sock_in6->sin6_port);
2629 } else {
2630 sock_in = (struct sockaddr_in *)&cm_route->addr.dst_addr;
2631		snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI4",
2632			 &sock_in->sin_addr.s_addr);
2633 conn->login_port = ntohs(sock_in->sin_port);
2634
2635 sock_in = (struct sockaddr_in *)&cm_route->addr.src_addr;
2636		snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI4",
2637			 &sock_in->sin_addr.s_addr);
2638 conn->local_port = ntohs(sock_in->sin_port);
2639 }
2640}
2641
2642static int
2643isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
2644{
2645 struct isert_np *isert_np = (struct isert_np *)np->np_context;
2646 struct isert_conn *isert_conn;
2647 int max_accept = 0, ret;
2648
2649accept_wait:
2650 ret = wait_event_interruptible(isert_np->np_accept_wq,
2651 !isert_check_accept_queue(isert_np) ||
2652 np->np_thread_state == ISCSI_NP_THREAD_RESET);
2653 if (max_accept > 5)
2654 return -ENODEV;
2655
2656 spin_lock_bh(&np->np_thread_lock);
2657 if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
2658 spin_unlock_bh(&np->np_thread_lock);
2659 pr_err("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
2660 return -ENODEV;
2661 }
2662 spin_unlock_bh(&np->np_thread_lock);
2663
2664 mutex_lock(&isert_np->np_accept_mutex);
2665 if (list_empty(&isert_np->np_accept_list)) {
2666 mutex_unlock(&isert_np->np_accept_mutex);
2667 max_accept++;
2668 goto accept_wait;
2669 }
2670 isert_conn = list_first_entry(&isert_np->np_accept_list,
2671 struct isert_conn, conn_accept_node);
2672 list_del_init(&isert_conn->conn_accept_node);
2673 mutex_unlock(&isert_np->np_accept_mutex);
2674
2675 conn->context = isert_conn;
2676 isert_conn->conn = conn;
2677 max_accept = 0;
2678
2679 ret = isert_rdma_post_recvl(isert_conn);
2680 if (ret)
2681 return ret;
2682
2683 ret = isert_rdma_accept(isert_conn);
2684 if (ret)
2685 return ret;
2686
2687 isert_set_conn_info(np, conn, isert_conn);
2688
2689 pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn);
2690 return 0;
2691}
2692
2693static void
2694isert_free_np(struct iscsi_np *np)
2695{
2696 struct isert_np *isert_np = (struct isert_np *)np->np_context;
2697
2698 rdma_destroy_id(isert_np->np_cm_id);
2699
2700 np->np_context = NULL;
2701 kfree(isert_np);
2702}
2703
Nicholas Bellingerb2cb9642013-07-03 03:05:37 -07002704static int isert_check_state(struct isert_conn *isert_conn, int state)
2705{
2706 int ret;
2707
2708 mutex_lock(&isert_conn->conn_mutex);
2709 ret = (isert_conn->state == state);
2710 mutex_unlock(&isert_conn->conn_mutex);
2711
2712 return ret;
2713}
2714
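/*
 * Teardown sketch (summarizing the waits below): a connection that
 * reached full feature phase moves ISER_CONN_UP -> ISER_CONN_TERMINATING
 * (set from isert_cq_comp_err) -> ISER_CONN_DOWN, and isert_free_conn()
 * blocks on conn_wait_comp_err and then conn_wait for those transitions;
 * a connection still in ISER_CONN_INIT is released immediately.
 */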
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002715static void isert_free_conn(struct iscsi_conn *conn)
2716{
2717 struct isert_conn *isert_conn = conn->context;
2718
2719	pr_debug("isert_free_conn: Starting\n");
2720 /*
2721 * Decrement post_send_buf_count for special case when called
2722 * from isert_do_control_comp() -> iscsit_logout_post_handler()
2723 */
Nicholas Bellingerb2cb9642013-07-03 03:05:37 -07002724 mutex_lock(&isert_conn->conn_mutex);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002725 if (isert_conn->logout_posted)
2726 atomic_dec(&isert_conn->post_send_buf_count);
2727
Nicholas Bellingerb2cb9642013-07-03 03:05:37 -07002728 if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) {
2729 pr_debug("Calling rdma_disconnect from isert_free_conn\n");
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002730 rdma_disconnect(isert_conn->conn_cm_id);
Nicholas Bellingerb2cb9642013-07-03 03:05:37 -07002731 }
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002732 /*
2733 * Only wait for conn_wait_comp_err if the isert_conn made it
2734 * into full feature phase..
2735 */
Nicholas Bellingerb2cb9642013-07-03 03:05:37 -07002736 if (isert_conn->state == ISER_CONN_UP) {
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002737 pr_debug("isert_free_conn: Before wait_event comp_err %d\n",
2738 isert_conn->state);
Nicholas Bellingerb2cb9642013-07-03 03:05:37 -07002739 mutex_unlock(&isert_conn->conn_mutex);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002740
Nicholas Bellingerb2cb9642013-07-03 03:05:37 -07002741 wait_event(isert_conn->conn_wait_comp_err,
2742 (isert_check_state(isert_conn, ISER_CONN_TERMINATING)));
2743
2744 wait_event(isert_conn->conn_wait,
2745 (isert_check_state(isert_conn, ISER_CONN_DOWN)));
2746
2747 isert_put_conn(isert_conn);
2748 return;
2749 }
2750 if (isert_conn->state == ISER_CONN_INIT) {
2751 mutex_unlock(&isert_conn->conn_mutex);
2752 isert_put_conn(isert_conn);
2753 return;
2754 }
2755 pr_debug("isert_free_conn: wait_event conn_wait %d\n",
2756 isert_conn->state);
2757 mutex_unlock(&isert_conn->conn_mutex);
2758
2759 wait_event(isert_conn->conn_wait,
2760 (isert_check_state(isert_conn, ISER_CONN_DOWN)));
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002761
2762 isert_put_conn(isert_conn);
2763}
2764
2765static struct iscsit_transport iser_target_transport = {
2766 .name = "IB/iSER",
2767 .transport_type = ISCSI_INFINIBAND,
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07002768 .priv_size = sizeof(struct isert_cmd),
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002769 .owner = THIS_MODULE,
2770 .iscsit_setup_np = isert_setup_np,
2771 .iscsit_accept_np = isert_accept_np,
2772 .iscsit_free_np = isert_free_np,
2773 .iscsit_free_conn = isert_free_conn,
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002774 .iscsit_get_login_rx = isert_get_login_rx,
2775 .iscsit_put_login_tx = isert_put_login_tx,
2776 .iscsit_immediate_queue = isert_immediate_queue,
2777 .iscsit_response_queue = isert_response_queue,
2778 .iscsit_get_dataout = isert_get_dataout,
2779 .iscsit_queue_data_in = isert_put_datain,
2780 .iscsit_queue_status = isert_put_response,
2781};
2782
2783static int __init isert_init(void)
2784{
2785 int ret;
2786
2787 isert_rx_wq = alloc_workqueue("isert_rx_wq", 0, 0);
2788 if (!isert_rx_wq) {
2789 pr_err("Unable to allocate isert_rx_wq\n");
2790 return -ENOMEM;
2791 }
2792
2793 isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
2794 if (!isert_comp_wq) {
2795 pr_err("Unable to allocate isert_comp_wq\n");
2796 ret = -ENOMEM;
2797 goto destroy_rx_wq;
2798 }
2799
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002800 iscsit_register_transport(&iser_target_transport);
2801 pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n");
2802 return 0;
2803
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002804destroy_rx_wq:
2805 destroy_workqueue(isert_rx_wq);
2806 return ret;
2807}
2808
2809static void __exit isert_exit(void)
2810{
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08002811 destroy_workqueue(isert_comp_wq);
2812 destroy_workqueue(isert_rx_wq);
2813 iscsit_unregister_transport(&iser_target_transport);
2814 pr_debug("iSER_TARGET[0] - Released iser_target_transport\n");
2815}
2816
2817MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
2818MODULE_VERSION("0.1");
2819MODULE_AUTHOR("nab@Linux-iSCSI.org");
2820MODULE_LICENSE("GPL");
2821
2822module_init(isert_init);
2823module_exit(isert_exit);