block, cfq: move io_cq exit/release to blk-ioc.c
With kmem_cache managed by blk-ioc, io_cq exit/release can be moved to
blk-ioc too. The odd ->io_cq->exit/release() callbacks are replaced
with elevator_ops->elevator_exit_icq_fn() with unlinking from both ioc
and q, and freeing automatically handled by blk-ioc. The elevator
operation only needs to perform the exit operation specific to the
elevator - in cfq's case, exiting the cfqq's.
Also, clearing of io_cq's on q detach is moved to block core and
automatically performed on elevator switch and q release.
Because the q io_cq points to might be freed before RCU callback for
the io_cq runs, blk-ioc code should remember to which cache the io_cq
needs to be freed when the io_cq is released. New field
io_cq->__rcu_icq_cache is added for this purpose. As both the new
field and rcu_head are used only after io_cq is released and the
q/ioc_node fields aren't, they are put into unions.
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 06e59ab..f6d3155 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2674,26 +2674,6 @@
cfq_put_cfqg(cfqg);
}
-static void cfq_icq_free_rcu(struct rcu_head *head)
-{
- kmem_cache_free(cfq_icq_pool,
- icq_to_cic(container_of(head, struct io_cq, rcu_head)));
-}
-
-static void cfq_icq_free(struct io_cq *icq)
-{
- call_rcu(&icq->rcu_head, cfq_icq_free_rcu);
-}
-
-static void cfq_release_icq(struct io_cq *icq)
-{
- struct io_context *ioc = icq->ioc;
-
- radix_tree_delete(&ioc->icq_tree, icq->q->id);
- hlist_del(&icq->ioc_node);
- cfq_icq_free(icq);
-}
-
static void cfq_put_cooperator(struct cfq_queue *cfqq)
{
struct cfq_queue *__cfqq, *next;
@@ -2731,17 +2711,6 @@
{
struct cfq_io_cq *cic = icq_to_cic(icq);
struct cfq_data *cfqd = cic_to_cfqd(cic);
- struct io_context *ioc = icq->ioc;
-
- list_del_init(&icq->q_node);
-
- /*
- * Both setting lookup hint to and clearing it from @icq are done
- * under queue_lock. If it's not pointing to @icq now, it never
- * will. Hint assignment itself can race safely.
- */
- if (rcu_dereference_raw(ioc->icq_hint) == icq)
- rcu_assign_pointer(ioc->icq_hint, NULL);
if (cic->cfqq[BLK_RW_ASYNC]) {
cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
@@ -2764,8 +2733,6 @@
cic->ttime.last_end_request = jiffies;
INIT_LIST_HEAD(&cic->icq.q_node);
INIT_HLIST_NODE(&cic->icq.ioc_node);
- cic->icq.exit = cfq_exit_icq;
- cic->icq.release = cfq_release_icq;
}
return cic;
@@ -3034,7 +3001,7 @@
if (ret)
printk(KERN_ERR "cfq: icq link failed!\n");
if (icq)
- cfq_icq_free(icq);
+ kmem_cache_free(cfq_icq_pool, icq);
return ret;
}
@@ -3774,17 +3741,6 @@
if (cfqd->active_queue)
__cfq_slice_expired(cfqd, cfqd->active_queue, 0);
- while (!list_empty(&q->icq_list)) {
- struct io_cq *icq = list_entry(q->icq_list.next,
- struct io_cq, q_node);
- struct io_context *ioc = icq->ioc;
-
- spin_lock(&ioc->lock);
- cfq_exit_icq(icq);
- cfq_release_icq(icq);
- spin_unlock(&ioc->lock);
- }
-
cfq_put_async_queues(cfqd);
cfq_release_cfq_groups(cfqd);
@@ -4019,6 +3975,7 @@
.elevator_completed_req_fn = cfq_completed_request,
.elevator_former_req_fn = elv_rb_former_request,
.elevator_latter_req_fn = elv_rb_latter_request,
+ .elevator_exit_icq_fn = cfq_exit_icq,
.elevator_set_req_fn = cfq_set_request,
.elevator_put_req_fn = cfq_put_request,
.elevator_may_queue_fn = cfq_may_queue,