drbd: move the drbd_work_queue from drbd_socket to drbd_connection
Cherry-picked and adapted from the drbd 9 devel branch.
In 8.4, we don't distinguish between "resource work" and "connection
work" yet; we have one worker for both, as we still have only one connection.

We only ever used "data.work", so there is no need to keep "meta.work" around.
Move tconn->data.work to tconn->sender_work.
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
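---
A minimal sketch (illustration only, not applied by this patch) of the
resulting layout, with unrelated fields elided and assuming the 8.4 struct
names (struct drbd_socket, struct drbd_tconn): the work queue no longer
lives in each drbd_socket; there is a single queue per connection, next to
the worker thread that drains it.

    struct drbd_socket {
    	struct mutex mutex;
    	struct socket *socket;
    	/* ... no per-socket work queue anymore ... */
    };

    struct drbd_tconn {
    	/* ... */
    	struct drbd_thread worker;		/* drains sender_work */
    	struct drbd_thread asender;
    	cpumask_var_t cpu_mask;
    	struct drbd_work_queue sender_work;	/* was data.work; meta.work dropped */
    };

Callers that used to queue onto &tconn->data.work now queue onto
&tconn->sender_work, e.g.:

    drbd_queue_work(&mdev->tconn->sender_work, &req->w);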
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 83d48d2..f500dc5 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -485,7 +485,7 @@
init_completion(&al_work.event);
al_work.w.cb = w_al_write_transaction;
al_work.w.mdev = mdev;
- drbd_queue_work_front(&mdev->tconn->data.work, &al_work.w);
+ drbd_queue_work_front(&mdev->tconn->sender_work, &al_work.w);
wait_for_completion(&al_work.event);
return al_work.err;
@@ -645,7 +645,7 @@
udw->enr = ext->lce.lc_number;
udw->w.cb = w_update_odbm;
udw->w.mdev = mdev;
- drbd_queue_work_front(&mdev->tconn->data.work, &udw->w);
+ drbd_queue_work_front(&mdev->tconn->sender_work, &udw->w);
} else {
dev_warn(DEV, "Could not kmalloc an udw\n");
}
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index e84c7b6..c0d0de5 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -740,7 +740,6 @@
};
struct drbd_socket {
- struct drbd_work_queue work;
struct mutex mutex;
struct socket *socket;
/* this way we get our
@@ -871,6 +870,7 @@
struct drbd_thread worker;
struct drbd_thread asender;
cpumask_var_t cpu_mask;
+ struct drbd_work_queue sender_work;
};
struct drbd_conf {
@@ -2228,7 +2228,7 @@
wake_up(&mdev->misc_wait);
if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) {
if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
- drbd_queue_work(&mdev->tconn->data.work, &mdev->bm_io_work.w);
+ drbd_queue_work(&mdev->tconn->sender_work, &mdev->bm_io_work.w);
}
}
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index f379d33..7e37149 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -379,7 +379,7 @@
set_bit(CREATE_BARRIER, &tconn->flags);
}
- drbd_queue_work(&tconn->data.work, &b->w);
+ drbd_queue_work(&tconn->sender_work, &b->w);
}
pn = &b->next;
} else {
@@ -2173,8 +2173,7 @@
D_ASSERT(list_empty(&mdev->read_ee));
D_ASSERT(list_empty(&mdev->net_ee));
D_ASSERT(list_empty(&mdev->resync_reads));
- D_ASSERT(list_empty(&mdev->tconn->data.work.q));
- D_ASSERT(list_empty(&mdev->tconn->meta.work.q));
+ D_ASSERT(list_empty(&mdev->tconn->sender_work.q));
D_ASSERT(list_empty(&mdev->resync_work.list));
D_ASSERT(list_empty(&mdev->unplug_work.list));
D_ASSERT(list_empty(&mdev->go_diskless.list));
@@ -2349,7 +2348,6 @@
/* paranoia asserts */
D_ASSERT(mdev->open_cnt == 0);
- D_ASSERT(list_empty(&mdev->tconn->data.work.q));
/* end paranoia asserts */
/* cleanup stuff that may have been allocated during
@@ -2700,10 +2698,8 @@
init_waitqueue_head(&tconn->ping_wait);
idr_init(&tconn->volumes);
- drbd_init_workqueue(&tconn->data.work);
+ drbd_init_workqueue(&tconn->sender_work);
mutex_init(&tconn->data.mutex);
-
- drbd_init_workqueue(&tconn->meta.work);
mutex_init(&tconn->meta.mutex);
drbd_thread_init(tconn, &tconn->receiver, drbdd_init, "receiver");
@@ -3356,7 +3352,7 @@
{
D_ASSERT(mdev->state.disk == D_FAILED);
if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
- drbd_queue_work(&mdev->tconn->data.work, &mdev->go_diskless);
+ drbd_queue_work(&mdev->tconn->sender_work, &mdev->go_diskless);
}
/**
@@ -3394,7 +3390,7 @@
set_bit(BITMAP_IO, &mdev->flags);
if (atomic_read(&mdev->ap_bio_cnt) == 0) {
if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
- drbd_queue_work(&mdev->tconn->data.work, &mdev->bm_io_work.w);
+ drbd_queue_work(&mdev->tconn->sender_work, &mdev->bm_io_work.w);
}
spin_unlock_irq(&mdev->tconn->req_lock);
}
@@ -3452,7 +3448,7 @@
{
struct drbd_conf *mdev = (struct drbd_conf *) data;
- drbd_queue_work_front(&mdev->tconn->data.work, &mdev->md_sync_work);
+ drbd_queue_work_front(&mdev->tconn->sender_work, &mdev->md_sync_work);
}
static int w_md_sync(struct drbd_work *w, int unused)
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 9aac1c4..34fc33b 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -4413,7 +4413,7 @@
barr.w.cb = w_prev_work_done;
barr.w.tconn = tconn;
init_completion(&barr.done);
- drbd_queue_work(&tconn->data.work, &barr.w);
+ drbd_queue_work(&tconn->sender_work, &barr.w);
wait_for_completion(&barr.done);
}
@@ -5147,7 +5147,7 @@
if (w) {
w->cb = w_ov_finished;
w->mdev = mdev;
- drbd_queue_work_front(&mdev->tconn->data.work, w);
+ drbd_queue_work(&mdev->tconn->sender_work, w);
} else {
dev_err(DEV, "kmalloc(w) failed.");
ov_out_of_sync_print(mdev);
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index a131174..e609557 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -170,7 +170,7 @@
* dec_ap_pending will be done in got_BarrierAck
* or (on connection loss) in tl_clear. */
inc_ap_pending(mdev);
- drbd_queue_work(&tconn->data.work, &b->w);
+ drbd_queue_work(&tconn->sender_work, &b->w);
set_bit(CREATE_BARRIER, &tconn->flags);
}
@@ -483,7 +483,7 @@
D_ASSERT((req->rq_state & RQ_LOCAL_MASK) == 0);
req->rq_state |= RQ_NET_QUEUED;
req->w.cb = w_send_read_req;
- drbd_queue_work(&mdev->tconn->data.work, &req->w);
+ drbd_queue_work(&mdev->tconn->sender_work, &req->w);
break;
case QUEUE_FOR_NET_WRITE:
@@ -527,7 +527,7 @@
D_ASSERT(req->rq_state & RQ_NET_PENDING);
req->rq_state |= RQ_NET_QUEUED;
req->w.cb = w_send_dblock;
- drbd_queue_work(&mdev->tconn->data.work, &req->w);
+ drbd_queue_work(&mdev->tconn->sender_work, &req->w);
/* close the epoch, in case it outgrew the limit */
rcu_read_lock();
@@ -542,7 +542,7 @@
case QUEUE_FOR_SEND_OOS:
req->rq_state |= RQ_NET_QUEUED;
req->w.cb = w_send_out_of_sync;
- drbd_queue_work(&mdev->tconn->data.work, &req->w);
+ drbd_queue_work(&mdev->tconn->sender_work, &req->w);
break;
case READ_RETRY_REMOTE_CANCELED:
@@ -682,7 +682,7 @@
get_ldev(mdev);
req->w.cb = w_restart_disk_io;
- drbd_queue_work(&mdev->tconn->data.work, &req->w);
+ drbd_queue_work(&mdev->tconn->sender_work, &req->w);
break;
case RESEND:
@@ -692,7 +692,7 @@
During connection handshake, we ensure that the peer was not rebooted. */
if (!(req->rq_state & RQ_NET_OK)) {
if (req->w.cb) {
- drbd_queue_work(&mdev->tconn->data.work, &req->w);
+ drbd_queue_work(&mdev->tconn->sender_work, &req->w);
rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
}
break;
diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c
index dd618b53..84a5072 100644
--- a/drivers/block/drbd/drbd_state.c
+++ b/drivers/block/drbd/drbd_state.c
@@ -1090,7 +1090,7 @@
ascw->w.cb = w_after_state_ch;
ascw->w.mdev = mdev;
ascw->done = done;
- drbd_queue_work(&mdev->tconn->data.work, &ascw->w);
+ drbd_queue_work(&mdev->tconn->sender_work, &ascw->w);
} else {
dev_err(DEV, "Could not kmalloc an ascw\n");
}
@@ -1764,7 +1764,7 @@
acscw->w.cb = w_after_conn_state_ch;
kref_get(&tconn->kref);
acscw->w.tconn = tconn;
- drbd_queue_work(&tconn->data.work, &acscw->w);
+ drbd_queue_work(&tconn->sender_work, &acscw->w);
} else {
conn_err(tconn, "Could not kmalloc an acscw\n");
}
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index fb2e6c8..39ece3a 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -109,7 +109,7 @@
__drbd_chk_io_error(mdev, false);
spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
- drbd_queue_work(&mdev->tconn->data.work, &peer_req->w);
+ drbd_queue_work(&mdev->tconn->sender_work, &peer_req->w);
put_ldev(mdev);
}
@@ -401,7 +401,7 @@
struct drbd_conf *mdev = (struct drbd_conf *) data;
if (list_empty(&mdev->resync_work.list))
- drbd_queue_work(&mdev->tconn->data.work, &mdev->resync_work);
+ drbd_queue_work(&mdev->tconn->sender_work, &mdev->resync_work);
}
static void fifo_set(struct fifo_buffer *fb, int value)
@@ -783,7 +783,7 @@
if (w) {
w->cb = w_resync_finished;
w->mdev = mdev;
- drbd_queue_work(&mdev->tconn->data.work, w);
+ drbd_queue_work(&mdev->tconn->sender_work, w);
return 1;
}
dev_err(DEV, "Warn failed to drbd_rs_del_all() and to kmalloc(w).\n");
@@ -1484,7 +1484,7 @@
{
struct drbd_conf *mdev = (struct drbd_conf *) data;
- drbd_queue_work(&mdev->tconn->data.work, &mdev->start_resync_work);
+ drbd_queue_work(&mdev->tconn->sender_work, &mdev->start_resync_work);
}
int w_start_resync(struct drbd_work *w, int cancel)
@@ -1706,7 +1706,7 @@
/* as long as we use drbd_queue_work_front(),
* we may only dequeue single work items here, not batches. */
if (list_empty(&work_list))
- dequeue_work_item(&tconn->data.work, &work_list);
+ dequeue_work_item(&tconn->sender_work, &work_list);
/* Still nothing to do? Poke TCP, just in case,
* then wait for new work (or signal). */
@@ -1721,8 +1721,8 @@
drbd_tcp_uncork(tconn->data.socket);
mutex_unlock(&tconn->data.mutex);
- wait_event_interruptible(tconn->data.work.q_wait,
- dequeue_work_item(&tconn->data.work, &work_list));
+ wait_event_interruptible(tconn->sender_work.q_wait,
+ dequeue_work_item(&tconn->sender_work, &work_list));
mutex_lock(&tconn->data.mutex);
if (tconn->data.socket && cork)
@@ -1758,7 +1758,7 @@
list_del_init(&w->list);
w->cb(w, 1);
}
- dequeue_work_batch(&tconn->data.work, &work_list);
+ dequeue_work_batch(&tconn->sender_work, &work_list);
} while (!list_empty(&work_list));
rcu_read_lock();