#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

/* Backlog for incoming RDMA_CM connection requests on the listening ID */
#define ISERT_RDMA_LISTEN_BACKLOG	10
#define ISCSI_ISER_SG_TABLESIZE		256

/* Type of outbound PDU carried by an iser_tx_desc */
enum isert_desc_type {
	ISCSI_TX_CONTROL,
	ISCSI_TX_DATAIN
};

/* Work request opcodes tracked for completion handling */
enum iser_ib_op_code {
	ISER_IB_RECV,
	ISER_IB_SEND,
	ISER_IB_RDMA_WRITE,
	ISER_IB_RDMA_READ,
};

/* State machine for an iSER target connection */
enum iser_conn_state {
	ISER_CONN_INIT,
	ISER_CONN_UP,
	ISER_CONN_TERMINATING,
	ISER_CONN_DOWN,
};

/* Receive descriptor posted to the RX queue: iSER + iSCSI headers plus
 * immediate data, with padding to round out the receive buffer.
 */
struct iser_rx_desc {
	struct iser_hdr iser_header;
	struct iscsi_hdr iscsi_header;
	char data[ISER_RECV_DATA_SEG_LEN];
	u64 dma_addr;
	struct ib_sge rx_sg;
	char pad[ISER_RX_PAD_SIZE];
} __packed;

/* Transmit descriptor carrying the iSER and iSCSI headers for an outgoing PDU */
struct iser_tx_desc {
	struct iser_hdr iser_header;
	struct iscsi_hdr iscsi_header;
	enum isert_desc_type type;
	u64 dma_addr;
	struct ib_sge tx_sg[2];
	int num_sge;
	struct isert_cmd *isert_cmd;
	struct llist_node *comp_llnode_batch;
	struct llist_node comp_llnode;
	struct ib_send_wr send_wr;
} __packed;

/* Pooled fast memory registration resources (one MR plus its page list) */
struct fast_reg_descriptor {
	struct list_head list;
	struct ib_mr *data_mr;
	struct ib_fast_reg_page_list *data_frpl;
	bool valid;
};

/* Per-command RDMA context for the READ/WRITE work requests that move data */
struct isert_rdma_wr {
	struct list_head wr_list;
	struct isert_cmd *isert_cmd;
	enum iser_ib_op_code iser_ib_op;
	struct ib_sge *ib_sge;
	struct ib_sge s_ib_sge;
	int num_sge;
	struct scatterlist *sge;
	int send_wr_num;
	struct ib_send_wr *send_wr;
	struct ib_send_wr s_send_wr;
	u32 cur_rdma_length;
	struct fast_reg_descriptor *fr_desc;
};

/* iSER-specific per-command state wrapping the core iscsi_cmd */
struct isert_cmd {
	uint32_t read_stag;
	uint32_t write_stag;
	uint64_t read_va;
	uint64_t write_va;
	u64 pdu_buf_dma;
	u32 pdu_buf_len;
	u32 read_va_off;
	u32 write_va_off;
	u32 rdma_wr_num;
	struct isert_conn *conn;
	struct iscsi_cmd *iscsi_cmd;
	struct iser_tx_desc tx_desc;
	struct isert_rdma_wr rdma_wr;
	struct work_struct comp_work;
};

struct isert_device;

/* Per-connection state for an iSER target connection */
struct isert_conn {
	enum iser_conn_state state;
	bool logout_posted;
	int post_recv_buf_count;
	atomic_t post_send_buf_count;
	u32 responder_resources;
	u32 initiator_depth;
	u32 max_sge;
	char *login_buf;
	char *login_req_buf;
	char *login_rsp_buf;
	u64 login_req_dma;
	u64 login_rsp_dma;
	unsigned int conn_rx_desc_head;
	struct iser_rx_desc *conn_rx_descs;
	struct ib_recv_wr conn_rx_wr[ISERT_MIN_POSTED_RX];
	struct iscsi_conn *conn;
	struct list_head conn_accept_node;
	struct completion conn_login_comp;
	struct iser_tx_desc conn_login_tx_desc;
	struct rdma_cm_id *conn_cm_id;
	struct ib_pd *conn_pd;
	struct ib_mr *conn_mr;
	struct ib_qp *conn_qp;
	struct isert_device *conn_device;
	struct work_struct conn_logout_work;
	struct mutex conn_mutex;
	wait_queue_head_t conn_wait;
	wait_queue_head_t conn_wait_comp_err;
	struct kref conn_kref;
	struct list_head conn_fr_pool;
	int conn_fr_pool_size;
	/* lock to protect fastreg pool */
	spinlock_t conn_lock;
#define ISERT_COMP_BATCH_COUNT	8
	int conn_comp_batch;
	struct llist_head conn_comp_llist;
	struct mutex conn_comp_mutex;
};

/* Upper bound on RX/TX completion queue pairs tracked per device */
#define ISERT_MAX_CQ 64

/* Per-CQ work context used to run RX and TX completion handling */
struct isert_cq_desc {
	struct isert_device *device;
	int cq_index;
	struct work_struct cq_rx_work;
	struct work_struct cq_tx_work;
};

/* Per-IB-device state shared by the connections on that device; the
 * reg_rdma_mem/unreg_rdma_mem hooks select the memory registration
 * strategy (fast registration vs. plain mapping) for the device.
 */
struct isert_device {
	int use_fastreg;
	int cqs_used;
	int refcount;
	int cq_active_qps[ISERT_MAX_CQ];
	struct ib_device *ib_device;
	struct ib_cq *dev_rx_cq[ISERT_MAX_CQ];
	struct ib_cq *dev_tx_cq[ISERT_MAX_CQ];
	struct isert_cq_desc *cq_desc;
	struct list_head dev_node;
	struct ib_device_attr dev_attr;
	int (*reg_rdma_mem)(struct iscsi_conn *conn,
			    struct iscsi_cmd *cmd,
			    struct isert_rdma_wr *wr);
	void (*unreg_rdma_mem)(struct isert_cmd *isert_cmd,
			       struct isert_conn *isert_conn);
};

/* Network portal: listening RDMA_CM ID and pending-accept bookkeeping */
struct isert_np {
	wait_queue_head_t np_accept_wq;
	struct rdma_cm_id *np_cm_id;
	struct mutex np_accept_mutex;
	struct list_head np_accept_list;
	struct completion np_login_comp;
};