IB/iser: Rename ib_conn -> iser_conn
Two reasons why we chose to do this:
1. There is no point today in calling struct iser_conn by another
   name, ib_conn.
2. In the next patches we will restructure the iser control plane
   representation (a rough sketch follows):
   - struct iser_conn: connection logical representation
   - struct ib_conn: connection RDMA layout representation
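As a minimal sketch of where this layering is headed (the field
names below are illustrative assumptions, not the final layout;
the concrete split lands only in the follow-up patches):

	/* RDMA layout representation: pure verbs/CM state (sketch) */
	struct ib_conn {
		struct rdma_cm_id	*cma_id;	/* connection manager ID */
		struct ib_qp		*qp;		/* queue pair */
		struct iser_device	*device;	/* device context */
	};

	/* logical representation: iSCSI-facing state (sketch) */
	struct iser_conn {
		struct ib_conn		ib_conn;	/* embedded RDMA state */
		struct iscsi_conn	*iscsi_conn;	/* bound iSCSI connection */
		enum iser_conn_state	state;		/* logical state machine */
	};

Embedding ib_conn inside iser_conn (rather than pointing to it) would
keep a single allocation per connection and let container_of() recover
the logical connection from the RDMA context.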
This patch does not change any functionality.
Signed-off-by: Ariel Nahum <arieln@mellanox.com>
Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>

diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 3bfec4b..778c166 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -213,19 +213,19 @@
*
* returns 0 on success, or errno code on failure
*/
-int iser_create_fmr_pool(struct iser_conn *ib_conn, unsigned cmds_max)
+int iser_create_fmr_pool(struct iser_conn *iser_conn, unsigned cmds_max)
{
- struct iser_device *device = ib_conn->device;
+ struct iser_device *device = iser_conn->device;
struct ib_fmr_pool_param params;
int ret = -ENOMEM;
- ib_conn->fmr.page_vec = kmalloc(sizeof(*ib_conn->fmr.page_vec) +
+ iser_conn->fmr.page_vec = kmalloc(sizeof(*iser_conn->fmr.page_vec) +
(sizeof(u64)*(ISCSI_ISER_SG_TABLESIZE + 1)),
GFP_KERNEL);
- if (!ib_conn->fmr.page_vec)
+ if (!iser_conn->fmr.page_vec)
return ret;
- ib_conn->fmr.page_vec->pages = (u64 *)(ib_conn->fmr.page_vec + 1);
+ iser_conn->fmr.page_vec->pages = (u64 *)(iser_conn->fmr.page_vec + 1);
params.page_shift = SHIFT_4K;
/* when the first/last SG element are not start/end *
@@ -241,16 +241,16 @@
IB_ACCESS_REMOTE_WRITE |
IB_ACCESS_REMOTE_READ);
- ib_conn->fmr.pool = ib_create_fmr_pool(device->pd, &params);
- if (!IS_ERR(ib_conn->fmr.pool))
+ iser_conn->fmr.pool = ib_create_fmr_pool(device->pd, &params);
+ if (!IS_ERR(iser_conn->fmr.pool))
return 0;
/* no FMR => no need for page_vec */
- kfree(ib_conn->fmr.page_vec);
- ib_conn->fmr.page_vec = NULL;
+ kfree(iser_conn->fmr.page_vec);
+ iser_conn->fmr.page_vec = NULL;
- ret = PTR_ERR(ib_conn->fmr.pool);
- ib_conn->fmr.pool = NULL;
+ ret = PTR_ERR(iser_conn->fmr.pool);
+ iser_conn->fmr.pool = NULL;
if (ret != -ENOSYS) {
iser_err("FMR allocation failed, err %d\n", ret);
return ret;
@@ -263,18 +263,18 @@
/**
* iser_free_fmr_pool - releases the FMR pool and page vec
*/
-void iser_free_fmr_pool(struct iser_conn *ib_conn)
+void iser_free_fmr_pool(struct iser_conn *iser_conn)
{
iser_info("freeing conn %p fmr pool %p\n",
- ib_conn, ib_conn->fmr.pool);
+ iser_conn, iser_conn->fmr.pool);
- if (ib_conn->fmr.pool != NULL)
- ib_destroy_fmr_pool(ib_conn->fmr.pool);
+ if (iser_conn->fmr.pool != NULL)
+ ib_destroy_fmr_pool(iser_conn->fmr.pool);
- ib_conn->fmr.pool = NULL;
+ iser_conn->fmr.pool = NULL;
- kfree(ib_conn->fmr.page_vec);
- ib_conn->fmr.page_vec = NULL;
+ kfree(iser_conn->fmr.page_vec);
+ iser_conn->fmr.page_vec = NULL;
}
static int
@@ -367,14 +367,14 @@
* for fast registration work requests.
* returns 0 on success, or errno code on failure
*/
-int iser_create_fastreg_pool(struct iser_conn *ib_conn, unsigned cmds_max)
+int iser_create_fastreg_pool(struct iser_conn *iser_conn, unsigned cmds_max)
{
- struct iser_device *device = ib_conn->device;
+ struct iser_device *device = iser_conn->device;
struct fast_reg_descriptor *desc;
int i, ret;
- INIT_LIST_HEAD(&ib_conn->fastreg.pool);
- ib_conn->fastreg.pool_size = 0;
+ INIT_LIST_HEAD(&iser_conn->fastreg.pool);
+ iser_conn->fastreg.pool_size = 0;
for (i = 0; i < cmds_max; i++) {
desc = kzalloc(sizeof(*desc), GFP_KERNEL);
if (!desc) {
@@ -384,7 +384,7 @@
}
ret = iser_create_fastreg_desc(device->ib_device, device->pd,
- ib_conn->pi_support, desc);
+ iser_conn->pi_support, desc);
if (ret) {
iser_err("Failed to create fastreg descriptor err=%d\n",
ret);
@@ -392,31 +392,31 @@
goto err;
}
- list_add_tail(&desc->list, &ib_conn->fastreg.pool);
- ib_conn->fastreg.pool_size++;
+ list_add_tail(&desc->list, &iser_conn->fastreg.pool);
+ iser_conn->fastreg.pool_size++;
}
return 0;
err:
- iser_free_fastreg_pool(ib_conn);
+ iser_free_fastreg_pool(iser_conn);
return ret;
}
/**
* iser_free_fastreg_pool - releases the pool of fast_reg descriptors
*/
-void iser_free_fastreg_pool(struct iser_conn *ib_conn)
+void iser_free_fastreg_pool(struct iser_conn *iser_conn)
{
struct fast_reg_descriptor *desc, *tmp;
int i = 0;
- if (list_empty(&ib_conn->fastreg.pool))
+ if (list_empty(&iser_conn->fastreg.pool))
return;
- iser_info("freeing conn %p fr pool\n", ib_conn);
+ iser_info("freeing conn %p fr pool\n", iser_conn);
- list_for_each_entry_safe(desc, tmp, &ib_conn->fastreg.pool, list) {
+ list_for_each_entry_safe(desc, tmp, &iser_conn->fastreg.pool, list) {
list_del(&desc->list);
ib_free_fast_reg_page_list(desc->data_frpl);
ib_dereg_mr(desc->data_mr);
@@ -430,9 +430,9 @@
++i;
}
- if (i < ib_conn->fastreg.pool_size)
+ if (i < iser_conn->fastreg.pool_size)
iser_warn("pool still has %d regions registered\n",
- ib_conn->fastreg.pool_size - i);
+ iser_conn->fastreg.pool_size - i);
}
/**
@@ -440,16 +440,16 @@
*
* returns 0 on success, -1 on failure
*/
-static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
+static int iser_create_ib_conn_res(struct iser_conn *iser_conn)
{
struct iser_device *device;
struct ib_qp_init_attr init_attr;
int ret = -ENOMEM;
int index, min_index = 0;
- BUG_ON(ib_conn->device == NULL);
+ BUG_ON(iser_conn->device == NULL);
- device = ib_conn->device;
+ device = iser_conn->device;
memset(&init_attr, 0, sizeof init_attr);
@@ -461,10 +461,10 @@
min_index = index;
device->cq_active_qps[min_index]++;
mutex_unlock(&ig.connlist_mutex);
- iser_info("cq index %d used for ib_conn %p\n", min_index, ib_conn);
+ iser_info("cq index %d used for iser_conn %p\n", min_index, iser_conn);
init_attr.event_handler = iser_qp_event_callback;
- init_attr.qp_context = (void *)ib_conn;
+ init_attr.qp_context = (void *)iser_conn;
init_attr.send_cq = device->tx_cq[min_index];
init_attr.recv_cq = device->rx_cq[min_index];
init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS;
@@ -472,21 +472,21 @@
init_attr.cap.max_recv_sge = 1;
init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
init_attr.qp_type = IB_QPT_RC;
- if (ib_conn->pi_support) {
+ if (iser_conn->pi_support) {
init_attr.cap.max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS;
init_attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
} else {
init_attr.cap.max_send_wr = ISER_QP_MAX_REQ_DTOS;
}
- ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
+ ret = rdma_create_qp(iser_conn->cma_id, device->pd, &init_attr);
if (ret)
goto out_err;
- ib_conn->qp = ib_conn->cma_id->qp;
+ iser_conn->qp = iser_conn->cma_id->qp;
iser_info("setting conn %p cma_id %p qp %p\n",
- ib_conn, ib_conn->cma_id,
- ib_conn->cma_id->qp);
+ iser_conn, iser_conn->cma_id,
+ iser_conn->cma_id->qp);
return ret;
out_err:
@@ -497,25 +497,25 @@
/**
* releases the QP object
*/
-static void iser_free_ib_conn_res(struct iser_conn *ib_conn)
+static void iser_free_ib_conn_res(struct iser_conn *iser_conn)
{
int cq_index;
- BUG_ON(ib_conn == NULL);
+ BUG_ON(iser_conn == NULL);
iser_info("freeing conn %p cma_id %p qp %p\n",
- ib_conn, ib_conn->cma_id,
- ib_conn->qp);
+ iser_conn, iser_conn->cma_id,
+ iser_conn->qp);
/* qp is created only once both addr & route are resolved */
- if (ib_conn->qp != NULL) {
- cq_index = ((struct iser_cq_desc *)ib_conn->qp->recv_cq->cq_context)->cq_index;
- ib_conn->device->cq_active_qps[cq_index]--;
+ if (iser_conn->qp != NULL) {
+ cq_index = ((struct iser_cq_desc *)iser_conn->qp->recv_cq->cq_context)->cq_index;
+ iser_conn->device->cq_active_qps[cq_index]--;
- rdma_destroy_qp(ib_conn->cma_id);
+ rdma_destroy_qp(iser_conn->cma_id);
}
- ib_conn->qp = NULL;
+ iser_conn->qp = NULL;
}
/**
@@ -572,75 +572,77 @@
/**
* Called with state mutex held
**/
-static int iser_conn_state_comp_exch(struct iser_conn *ib_conn,
- enum iser_ib_conn_state comp,
- enum iser_ib_conn_state exch)
+static int iser_conn_state_comp_exch(struct iser_conn *iser_conn,
+ enum iser_conn_state comp,
+ enum iser_conn_state exch)
{
int ret;
- if ((ret = (ib_conn->state == comp)))
- ib_conn->state = exch;
+ ret = (iser_conn->state == comp);
+ if (ret)
+ iser_conn->state = exch;
+
return ret;
}
void iser_release_work(struct work_struct *work)
{
- struct iser_conn *ib_conn;
+ struct iser_conn *iser_conn;
int rc;
- ib_conn = container_of(work, struct iser_conn, release_work);
+ iser_conn = container_of(work, struct iser_conn, release_work);
/* wait for .conn_stop callback */
- rc = wait_for_completion_timeout(&ib_conn->stop_completion, 30 * HZ);
+ rc = wait_for_completion_timeout(&iser_conn->stop_completion, 30 * HZ);
WARN_ON(rc == 0);
/* wait for the qp`s post send and post receive buffers to empty */
- rc = wait_for_completion_timeout(&ib_conn->flush_completion, 30 * HZ);
+ rc = wait_for_completion_timeout(&iser_conn->flush_completion, 30 * HZ);
WARN_ON(rc == 0);
- ib_conn->state = ISER_CONN_DOWN;
+ iser_conn->state = ISER_CONN_DOWN;
- mutex_lock(&ib_conn->state_mutex);
- ib_conn->state = ISER_CONN_DOWN;
- mutex_unlock(&ib_conn->state_mutex);
+ mutex_lock(&iser_conn->state_mutex);
+ iser_conn->state = ISER_CONN_DOWN;
+ mutex_unlock(&iser_conn->state_mutex);
- iser_conn_release(ib_conn);
+ iser_conn_release(iser_conn);
}
/**
* Frees all conn objects and deallocs conn descriptor
*/
-void iser_conn_release(struct iser_conn *ib_conn)
+void iser_conn_release(struct iser_conn *iser_conn)
{
- struct iser_device *device = ib_conn->device;
+ struct iser_device *device = iser_conn->device;
mutex_lock(&ig.connlist_mutex);
- list_del(&ib_conn->conn_list);
+ list_del(&iser_conn->conn_list);
mutex_unlock(&ig.connlist_mutex);
- mutex_lock(&ib_conn->state_mutex);
- BUG_ON(ib_conn->state != ISER_CONN_DOWN);
+ mutex_lock(&iser_conn->state_mutex);
+ BUG_ON(iser_conn->state != ISER_CONN_DOWN);
- iser_free_rx_descriptors(ib_conn);
- iser_free_ib_conn_res(ib_conn);
- ib_conn->device = NULL;
+ iser_free_rx_descriptors(iser_conn);
+ iser_free_ib_conn_res(iser_conn);
+ iser_conn->device = NULL;
/* on EVENT_ADDR_ERROR there's no device yet for this conn */
if (device != NULL)
iser_device_try_release(device);
- mutex_unlock(&ib_conn->state_mutex);
+ mutex_unlock(&iser_conn->state_mutex);
/* if cma handler context, the caller actually destroy the id */
- if (ib_conn->cma_id != NULL) {
- rdma_destroy_id(ib_conn->cma_id);
- ib_conn->cma_id = NULL;
+ if (iser_conn->cma_id != NULL) {
+ rdma_destroy_id(iser_conn->cma_id);
+ iser_conn->cma_id = NULL;
}
- kfree(ib_conn);
+ kfree(iser_conn);
}
/**
* triggers start of the disconnect procedures and wait for them to be done
*/
-void iser_conn_terminate(struct iser_conn *ib_conn)
+void iser_conn_terminate(struct iser_conn *iser_conn)
{
int err = 0;
@@ -649,11 +651,11 @@
* the QP state to ERROR
*/
- iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP, ISER_CONN_TERMINATING);
- err = rdma_disconnect(ib_conn->cma_id);
+ iser_conn_state_comp_exch(iser_conn, ISER_CONN_UP, ISER_CONN_TERMINATING);
+ err = rdma_disconnect(iser_conn->cma_id);
if (err)
iser_err("Failed to disconnect, conn: 0x%p err %d\n",
- ib_conn,err);
+ iser_conn, err);
}
/**
@@ -661,10 +663,10 @@
**/
static void iser_connect_error(struct rdma_cm_id *cma_id)
{
- struct iser_conn *ib_conn;
+ struct iser_conn *iser_conn;
- ib_conn = (struct iser_conn *)cma_id->context;
- ib_conn->state = ISER_CONN_DOWN;
+ iser_conn = (struct iser_conn *)cma_id->context;
+ iser_conn->state = ISER_CONN_DOWN;
}
/**
@@ -673,11 +675,11 @@
static void iser_addr_handler(struct rdma_cm_id *cma_id)
{
struct iser_device *device;
- struct iser_conn *ib_conn;
+ struct iser_conn *iser_conn;
int ret;
- ib_conn = (struct iser_conn *)cma_id->context;
- if (ib_conn->state != ISER_CONN_PENDING)
+ iser_conn = (struct iser_conn *)cma_id->context;
+ if (iser_conn->state != ISER_CONN_PENDING)
/* bailout */
return;
@@ -688,7 +690,7 @@
return;
}
- ib_conn->device = device;
+ iser_conn->device = device;
/* connection T10-PI support */
if (iser_pi_enable) {
@@ -696,10 +698,10 @@
IB_DEVICE_SIGNATURE_HANDOVER)) {
iser_warn("T10-PI requested but not supported on %s, "
"continue without T10-PI\n",
- ib_conn->device->ib_device->name);
- ib_conn->pi_support = false;
+ iser_conn->device->ib_device->name);
+ iser_conn->pi_support = false;
} else {
- ib_conn->pi_support = true;
+ iser_conn->pi_support = true;
}
}
@@ -719,10 +721,10 @@
struct rdma_conn_param conn_param;
int ret;
struct iser_cm_hdr req_hdr;
- struct iser_conn *ib_conn = (struct iser_conn *)cma_id->context;
- struct iser_device *device = ib_conn->device;
+ struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
+ struct iser_device *device = iser_conn->device;
- if (ib_conn->state != ISER_CONN_PENDING)
+ if (iser_conn->state != ISER_CONN_PENDING)
/* bailout */
return;
@@ -755,34 +757,34 @@
static void iser_connected_handler(struct rdma_cm_id *cma_id)
{
- struct iser_conn *ib_conn;
+ struct iser_conn *iser_conn;
struct ib_qp_attr attr;
struct ib_qp_init_attr init_attr;
- ib_conn = (struct iser_conn *)cma_id->context;
- if (ib_conn->state != ISER_CONN_PENDING)
+ iser_conn = (struct iser_conn *)cma_id->context;
+ if (iser_conn->state != ISER_CONN_PENDING)
/* bailout */
return;
(void)ib_query_qp(cma_id->qp, &attr, ~0, &init_attr);
iser_info("remote qpn:%x my qpn:%x\n", attr.dest_qp_num, cma_id->qp->qp_num);
- ib_conn->state = ISER_CONN_UP;
- complete(&ib_conn->up_completion);
+ iser_conn->state = ISER_CONN_UP;
+ complete(&iser_conn->up_completion);
}
static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
{
- struct iser_conn *ib_conn;
+ struct iser_conn *iser_conn;
- ib_conn = (struct iser_conn *)cma_id->context;
+ iser_conn = (struct iser_conn *)cma_id->context;
/* getting here when the state is UP means that the conn is being *
* terminated asynchronously from the iSCSI layer's perspective. */
- if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP,
- ISER_CONN_TERMINATING)){
- if (ib_conn->iscsi_conn)
- iscsi_conn_failure(ib_conn->iscsi_conn, ISCSI_ERR_CONN_FAILED);
+ if (iser_conn_state_comp_exch(iser_conn, ISER_CONN_UP,
+ ISER_CONN_TERMINATING)){
+ if (iser_conn->iscsi_conn)
+ iscsi_conn_failure(iser_conn->iscsi_conn, ISCSI_ERR_CONN_FAILED);
else
iser_err("iscsi_iser connection isn't bound\n");
}
@@ -791,21 +793,21 @@
* block also exists in iser_handle_comp_error(), but it is needed here
* for cases of no flushes at all, e.g. discovery over rdma.
*/
- if (ib_conn->post_recv_buf_count == 0 &&
- (atomic_read(&ib_conn->post_send_buf_count) == 0)) {
- complete(&ib_conn->flush_completion);
+ if (iser_conn->post_recv_buf_count == 0 &&
+ (atomic_read(&iser_conn->post_send_buf_count) == 0)) {
+ complete(&iser_conn->flush_completion);
}
}
static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
- struct iser_conn *ib_conn;
+ struct iser_conn *iser_conn;
- ib_conn = (struct iser_conn *)cma_id->context;
+ iser_conn = (struct iser_conn *)cma_id->context;
iser_info("event %d status %d conn %p id %p\n",
event->event, event->status, cma_id->context, cma_id);
- mutex_lock(&ib_conn->state_mutex);
+ mutex_lock(&iser_conn->state_mutex);
switch (event->event) {
case RDMA_CM_EVENT_ADDR_RESOLVED:
iser_addr_handler(cma_id);
@@ -833,82 +835,82 @@
iser_err("Unexpected RDMA CM event (%d)\n", event->event);
break;
}
- mutex_unlock(&ib_conn->state_mutex);
+ mutex_unlock(&iser_conn->state_mutex);
return 0;
}
-void iser_conn_init(struct iser_conn *ib_conn)
+void iser_conn_init(struct iser_conn *iser_conn)
{
- ib_conn->state = ISER_CONN_INIT;
- ib_conn->post_recv_buf_count = 0;
- atomic_set(&ib_conn->post_send_buf_count, 0);
- init_completion(&ib_conn->stop_completion);
- init_completion(&ib_conn->flush_completion);
- init_completion(&ib_conn->up_completion);
- INIT_LIST_HEAD(&ib_conn->conn_list);
- spin_lock_init(&ib_conn->lock);
- mutex_init(&ib_conn->state_mutex);
+ iser_conn->state = ISER_CONN_INIT;
+ iser_conn->post_recv_buf_count = 0;
+ atomic_set(&iser_conn->post_send_buf_count, 0);
+ init_completion(&iser_conn->stop_completion);
+ init_completion(&iser_conn->flush_completion);
+ init_completion(&iser_conn->up_completion);
+ INIT_LIST_HEAD(&iser_conn->conn_list);
+ spin_lock_init(&iser_conn->lock);
+ mutex_init(&iser_conn->state_mutex);
}
/**
* starts the process of connecting to the target
* sleeps until the connection is established or rejected
*/
-int iser_connect(struct iser_conn *ib_conn,
+int iser_connect(struct iser_conn *iser_conn,
struct sockaddr *src_addr,
struct sockaddr *dst_addr,
int non_blocking)
{
int err = 0;
- mutex_lock(&ib_conn->state_mutex);
+ mutex_lock(&iser_conn->state_mutex);
- sprintf(ib_conn->name, "%pISp", dst_addr);
+ sprintf(iser_conn->name, "%pISp", dst_addr);
- iser_info("connecting to: %s\n", ib_conn->name);
+ iser_info("connecting to: %s\n", iser_conn->name);
/* the device is known only --after-- address resolution */
- ib_conn->device = NULL;
+ iser_conn->device = NULL;
- ib_conn->state = ISER_CONN_PENDING;
+ iser_conn->state = ISER_CONN_PENDING;
- ib_conn->cma_id = rdma_create_id(iser_cma_handler,
- (void *)ib_conn,
+ iser_conn->cma_id = rdma_create_id(iser_cma_handler,
+ (void *)iser_conn,
RDMA_PS_TCP, IB_QPT_RC);
- if (IS_ERR(ib_conn->cma_id)) {
- err = PTR_ERR(ib_conn->cma_id);
+ if (IS_ERR(iser_conn->cma_id)) {
+ err = PTR_ERR(iser_conn->cma_id);
iser_err("rdma_create_id failed: %d\n", err);
goto id_failure;
}
- err = rdma_resolve_addr(ib_conn->cma_id, src_addr, dst_addr, 1000);
+ err = rdma_resolve_addr(iser_conn->cma_id, src_addr, dst_addr, 1000);
if (err) {
iser_err("rdma_resolve_addr failed: %d\n", err);
goto addr_failure;
}
if (!non_blocking) {
- wait_for_completion_interruptible(&ib_conn->up_completion);
+ wait_for_completion_interruptible(&iser_conn->up_completion);
- if (ib_conn->state != ISER_CONN_UP) {
+ if (iser_conn->state != ISER_CONN_UP) {
err = -EIO;
goto connect_failure;
}
}
- mutex_unlock(&ib_conn->state_mutex);
+ mutex_unlock(&iser_conn->state_mutex);
mutex_lock(&ig.connlist_mutex);
- list_add(&ib_conn->conn_list, &ig.connlist);
+ list_add(&iser_conn->conn_list, &ig.connlist);
mutex_unlock(&ig.connlist_mutex);
return 0;
id_failure:
- ib_conn->cma_id = NULL;
+ iser_conn->cma_id = NULL;
addr_failure:
- ib_conn->state = ISER_CONN_DOWN;
+ iser_conn->state = ISER_CONN_DOWN;
connect_failure:
- mutex_unlock(&ib_conn->state_mutex);
- iser_conn_release(ib_conn);
+ mutex_unlock(&iser_conn->state_mutex);
+ iser_conn_release(iser_conn);
return err;
}
@@ -917,7 +919,7 @@
*
* returns: 0 on success, errno code on failure
*/
-int iser_reg_page_vec(struct iser_conn *ib_conn,
+int iser_reg_page_vec(struct iser_conn *iser_conn,
struct iser_page_vec *page_vec,
struct iser_mem_reg *mem_reg)
{
@@ -929,7 +931,7 @@
page_list = page_vec->pages;
io_addr = page_list[0];
- mem = ib_fmr_pool_map_phys(ib_conn->fmr.pool,
+ mem = ib_fmr_pool_map_phys(iser_conn->fmr.pool,
page_list,
page_vec->length,
io_addr);
@@ -987,7 +989,7 @@
enum iser_data_dir cmd_dir)
{
struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
- struct iser_conn *ib_conn = iser_task->ib_conn;
+ struct iser_conn *iser_conn = iser_task->iser_conn;
struct fast_reg_descriptor *desc = reg->mem_h;
if (!reg->is_mr)
@@ -995,61 +997,61 @@
reg->mem_h = NULL;
reg->is_mr = 0;
- spin_lock_bh(&ib_conn->lock);
- list_add_tail(&desc->list, &ib_conn->fastreg.pool);
- spin_unlock_bh(&ib_conn->lock);
+ spin_lock_bh(&iser_conn->lock);
+ list_add_tail(&desc->list, &iser_conn->fastreg.pool);
+ spin_unlock_bh(&iser_conn->lock);
}
-int iser_post_recvl(struct iser_conn *ib_conn)
+int iser_post_recvl(struct iser_conn *iser_conn)
{
struct ib_recv_wr rx_wr, *rx_wr_failed;
struct ib_sge sge;
int ib_ret;
- sge.addr = ib_conn->login_resp_dma;
+ sge.addr = iser_conn->login_resp_dma;
sge.length = ISER_RX_LOGIN_SIZE;
- sge.lkey = ib_conn->device->mr->lkey;
+ sge.lkey = iser_conn->device->mr->lkey;
- rx_wr.wr_id = (unsigned long)ib_conn->login_resp_buf;
+ rx_wr.wr_id = (unsigned long)iser_conn->login_resp_buf;
rx_wr.sg_list = &sge;
rx_wr.num_sge = 1;
rx_wr.next = NULL;
- ib_conn->post_recv_buf_count++;
- ib_ret = ib_post_recv(ib_conn->qp, &rx_wr, &rx_wr_failed);
+ iser_conn->post_recv_buf_count++;
+ ib_ret = ib_post_recv(iser_conn->qp, &rx_wr, &rx_wr_failed);
if (ib_ret) {
iser_err("ib_post_recv failed ret=%d\n", ib_ret);
- ib_conn->post_recv_buf_count--;
+ iser_conn->post_recv_buf_count--;
}
return ib_ret;
}
-int iser_post_recvm(struct iser_conn *ib_conn, int count)
+int iser_post_recvm(struct iser_conn *iser_conn, int count)
{
struct ib_recv_wr *rx_wr, *rx_wr_failed;
int i, ib_ret;
- unsigned int my_rx_head = ib_conn->rx_desc_head;
+ unsigned int my_rx_head = iser_conn->rx_desc_head;
struct iser_rx_desc *rx_desc;
- for (rx_wr = ib_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
- rx_desc = &ib_conn->rx_descs[my_rx_head];
+ for (rx_wr = iser_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
+ rx_desc = &iser_conn->rx_descs[my_rx_head];
rx_wr->wr_id = (unsigned long)rx_desc;
rx_wr->sg_list = &rx_desc->rx_sg;
rx_wr->num_sge = 1;
rx_wr->next = rx_wr + 1;
- my_rx_head = (my_rx_head + 1) & ib_conn->qp_max_recv_dtos_mask;
+ my_rx_head = (my_rx_head + 1) & iser_conn->qp_max_recv_dtos_mask;
}
rx_wr--;
rx_wr->next = NULL; /* mark end of work requests list */
- ib_conn->post_recv_buf_count += count;
- ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, &rx_wr_failed);
+ iser_conn->post_recv_buf_count += count;
+ ib_ret = ib_post_recv(iser_conn->qp, iser_conn->rx_wr, &rx_wr_failed);
if (ib_ret) {
iser_err("ib_post_recv failed ret=%d\n", ib_ret);
- ib_conn->post_recv_buf_count -= count;
+ iser_conn->post_recv_buf_count -= count;
} else
- ib_conn->rx_desc_head = my_rx_head;
+ iser_conn->rx_desc_head = my_rx_head;
return ib_ret;
}
@@ -1059,13 +1061,14 @@
*
* returns 0 on success, -1 on failure
*/
-int iser_post_send(struct iser_conn *ib_conn, struct iser_tx_desc *tx_desc)
+int iser_post_send(struct iser_conn *iser_conn, struct iser_tx_desc *tx_desc)
{
int ib_ret;
struct ib_send_wr send_wr, *send_wr_failed;
- ib_dma_sync_single_for_device(ib_conn->device->ib_device,
- tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);
+ ib_dma_sync_single_for_device(iser_conn->device->ib_device,
+ tx_desc->dma_addr, ISER_HEADERS_LEN,
+ DMA_TO_DEVICE);
send_wr.next = NULL;
send_wr.wr_id = (unsigned long)tx_desc;
@@ -1074,37 +1077,37 @@
send_wr.opcode = IB_WR_SEND;
send_wr.send_flags = IB_SEND_SIGNALED;
- atomic_inc(&ib_conn->post_send_buf_count);
+ atomic_inc(&iser_conn->post_send_buf_count);
- ib_ret = ib_post_send(ib_conn->qp, &send_wr, &send_wr_failed);
+ ib_ret = ib_post_send(iser_conn->qp, &send_wr, &send_wr_failed);
if (ib_ret) {
iser_err("ib_post_send failed, ret:%d\n", ib_ret);
- atomic_dec(&ib_conn->post_send_buf_count);
+ atomic_dec(&iser_conn->post_send_buf_count);
}
return ib_ret;
}
static void iser_handle_comp_error(struct iser_tx_desc *desc,
- struct iser_conn *ib_conn)
+ struct iser_conn *iser_conn)
{
if (desc && desc->type == ISCSI_TX_DATAOUT)
kmem_cache_free(ig.desc_cache, desc);
- if (ib_conn->post_recv_buf_count == 0 &&
- atomic_read(&ib_conn->post_send_buf_count) == 0) {
+ if (iser_conn->post_recv_buf_count == 0 &&
+ atomic_read(&iser_conn->post_send_buf_count) == 0) {
/**
* getting here when the state is UP means that the conn is
* being terminated asynchronously from the iSCSI layer's
* perspective. It is safe to peek at the connection state
* since iscsi_conn_failure is allowed to be called twice.
**/
- if (ib_conn->state == ISER_CONN_UP)
- iscsi_conn_failure(ib_conn->iscsi_conn,
+ if (iser_conn->state == ISER_CONN_UP)
+ iscsi_conn_failure(iser_conn->iscsi_conn,
ISCSI_ERR_CONN_FAILED);
/* no more non completed posts to the QP, complete the
* termination process w.o worrying on disconnect event */
- complete(&ib_conn->flush_completion);
+ complete(&iser_conn->flush_completion);
}
}
@@ -1113,15 +1116,15 @@
struct ib_cq *cq = device->tx_cq[cq_index];
struct ib_wc wc;
struct iser_tx_desc *tx_desc;
- struct iser_conn *ib_conn;
+ struct iser_conn *iser_conn;
int completed_tx = 0;
while (ib_poll_cq(cq, 1, &wc) == 1) {
tx_desc = (struct iser_tx_desc *) (unsigned long) wc.wr_id;
- ib_conn = wc.qp->qp_context;
+ iser_conn = wc.qp->qp_context;
if (wc.status == IB_WC_SUCCESS) {
if (wc.opcode == IB_WC_SEND)
- iser_snd_completion(tx_desc, ib_conn);
+ iser_snd_completion(tx_desc, iser_conn);
else
iser_err("expected opcode %d got %d\n",
IB_WC_SEND, wc.opcode);
@@ -1129,8 +1132,8 @@
iser_err("tx id %llx status %d vend_err %x\n",
wc.wr_id, wc.status, wc.vendor_err);
if (wc.wr_id != ISER_FASTREG_LI_WRID) {
- atomic_dec(&ib_conn->post_send_buf_count);
- iser_handle_comp_error(tx_desc, ib_conn);
+ atomic_dec(&iser_conn->post_send_buf_count);
+ iser_handle_comp_error(tx_desc, iser_conn);
}
}
completed_tx++;
@@ -1148,7 +1151,7 @@
struct ib_wc wc;
struct iser_rx_desc *desc;
unsigned long xfer_len;
- struct iser_conn *ib_conn;
+ struct iser_conn *iser_conn;
int completed_tx, completed_rx = 0;
/* First do tx drain, so in a case where we have rx flushes and a successful
@@ -1159,11 +1162,11 @@
while (ib_poll_cq(cq, 1, &wc) == 1) {
desc = (struct iser_rx_desc *) (unsigned long) wc.wr_id;
BUG_ON(desc == NULL);
- ib_conn = wc.qp->qp_context;
+ iser_conn = wc.qp->qp_context;
if (wc.status == IB_WC_SUCCESS) {
if (wc.opcode == IB_WC_RECV) {
xfer_len = (unsigned long)wc.byte_len;
- iser_rcv_completion(desc, xfer_len, ib_conn);
+ iser_rcv_completion(desc, xfer_len, iser_conn);
} else
iser_err("expected opcode %d got %d\n",
IB_WC_RECV, wc.opcode);
@@ -1171,8 +1174,8 @@
if (wc.status != IB_WC_WR_FLUSH_ERR)
iser_err("rx id %llx status %d vend_err %x\n",
wc.wr_id, wc.status, wc.vendor_err);
- ib_conn->post_recv_buf_count--;
- iser_handle_comp_error(NULL, ib_conn);
+ iser_conn->post_recv_buf_count--;
+ iser_handle_comp_error(NULL, iser_conn);
}
completed_rx++;
if (!(completed_rx & 63))