/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include "blk-cgroup.h"
#include "blk.h"

#define MAX_KEY_LEN 100

static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkcg_root);

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup)
{
        return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
                            struct blkcg, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkcg);

static struct blkcg *task_blkcg(struct task_struct *tsk)
{
        return container_of(task_subsys_state(tsk, blkio_subsys_id),
                            struct blkcg, css);
}

struct blkcg *bio_blkcg(struct bio *bio)
{
        if (bio && bio->bi_css)
                return container_of(bio->bi_css, struct blkcg, css);
        return task_blkcg(current);
}
EXPORT_SYMBOL_GPL(bio_blkcg);

static bool blkcg_policy_enabled(struct request_queue *q,
                                 const struct blkcg_policy *pol)
{
        return pol && test_bit(pol->plid, q->blkcg_pols);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
        int i;

        if (!blkg)
                return;

        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];
                struct blkg_policy_data *pd = blkg->pd[i];

                if (!pd)
                        continue;

                if (pol && pol->pd_exit_fn)
                        pol->pd_exit_fn(blkg);

                kfree(pd);
        }

        kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q)
{
        struct blkcg_gq *blkg;
        int i;

        /* alloc and init base part */
        blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
        if (!blkg)
                return NULL;

        blkg->q = q;
        INIT_LIST_HEAD(&blkg->q_node);
        blkg->blkcg = blkcg;
        blkg->refcnt = 1;

        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];
                struct blkg_policy_data *pd;

                if (!blkcg_policy_enabled(q, pol))
                        continue;

                /* alloc per-policy data and attach it to blkg */
                pd = kzalloc_node(pol->pd_size, GFP_ATOMIC, q->node);
                if (!pd) {
                        blkg_free(blkg);
                        return NULL;
                }

                blkg->pd[i] = pd;
                pd->blkg = blkg;
        }

        /* invoke per-policy init */
        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];

                if (blkcg_policy_enabled(blkg->q, pol))
                        pol->pd_init_fn(blkg);
        }

        return blkg;
}

static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
                                      struct request_queue *q)
{
        struct blkcg_gq *blkg;

        blkg = rcu_dereference(blkcg->blkg_hint);
        if (blkg && blkg->q == q)
                return blkg;

        /*
         * Hint didn't match.  Look up from the radix tree.  Note that we
         * may not be holding queue_lock and thus are not sure whether
         * @blkg from blkg_tree has already been removed or not, so we
         * can't update hint to the lookup result.  Leave it to the caller.
         */
        blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
        if (blkg && blkg->q == q)
                return blkg;

        return NULL;
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
{
        WARN_ON_ONCE(!rcu_read_lock_held());

        if (unlikely(blk_queue_bypass(q)))
                return NULL;
        return __blkg_lookup(blkcg, q);
}
EXPORT_SYMBOL_GPL(blkg_lookup);
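
/*
 * Illustrative sketch (not part of the original file): as documented
 * above, blkg_lookup() must run under rcu_read_lock().  A hot-path
 * caller that already has a blkcg and a queue would look like:
 *
 *      rcu_read_lock();
 *      blkg = blkg_lookup(blkcg, q);
 *      if (blkg)
 *              ... access only RCU-safe, blkg-local fields ...
 *      rcu_read_unlock();
 */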

static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
                                             struct request_queue *q)
        __releases(q->queue_lock) __acquires(q->queue_lock)
{
        struct blkcg_gq *blkg;
        int ret;

        WARN_ON_ONCE(!rcu_read_lock_held());
        lockdep_assert_held(q->queue_lock);

        /* lookup and update hint on success, see __blkg_lookup() for details */
        blkg = __blkg_lookup(blkcg, q);
        if (blkg) {
                rcu_assign_pointer(blkcg->blkg_hint, blkg);
                return blkg;
        }

        /* blkg holds a reference to blkcg */
        if (!css_tryget(&blkcg->css))
                return ERR_PTR(-EINVAL);

        /* allocate */
        ret = -ENOMEM;
        blkg = blkg_alloc(blkcg, q);
        if (unlikely(!blkg))
                goto err_put;

        /* insert */
        ret = radix_tree_preload(GFP_ATOMIC);
        if (ret)
                goto err_free;

        spin_lock(&blkcg->lock);
        ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
        if (likely(!ret)) {
                hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
                list_add(&blkg->q_node, &q->blkg_list);
        }
        spin_unlock(&blkcg->lock);

        radix_tree_preload_end();

        if (!ret)
                return blkg;
err_free:
        blkg_free(blkg);
err_put:
        css_put(&blkcg->css);
        return ERR_PTR(ret);
}

struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
                                    struct request_queue *q)
{
        /*
         * This could be the first entry point of blkcg implementation and
         * we shouldn't allow anything to go through for a bypassing queue.
         */
        if (unlikely(blk_queue_bypass(q)))
                return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
        return __blkg_lookup_create(blkcg, q);
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);

static void blkg_destroy(struct blkcg_gq *blkg)
{
        struct request_queue *q = blkg->q;
        struct blkcg *blkcg = blkg->blkcg;

        lockdep_assert_held(q->queue_lock);
        lockdep_assert_held(&blkcg->lock);

        /* Something wrong if we are trying to remove same group twice */
        WARN_ON_ONCE(list_empty(&blkg->q_node));
        WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

        radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
        list_del_init(&blkg->q_node);
        hlist_del_init_rcu(&blkg->blkcg_node);

        /*
         * Both setting lookup hint to and clearing it from @blkg are done
         * under queue_lock.  If it's not pointing to @blkg now, it never
         * will.  Hint assignment itself can race safely.
         */
        if (rcu_dereference_raw(blkcg->blkg_hint) == blkg)
                rcu_assign_pointer(blkcg->blkg_hint, NULL);

        /*
         * Put the reference taken at the time of creation so that when all
         * queues are gone, group can be destroyed.
         */
        blkg_put(blkg);
}

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */
static void blkg_destroy_all(struct request_queue *q)
{
        struct blkcg_gq *blkg, *n;

        lockdep_assert_held(q->queue_lock);

        list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
                struct blkcg *blkcg = blkg->blkcg;

                spin_lock(&blkcg->lock);
                blkg_destroy(blkg);
                spin_unlock(&blkcg->lock);
        }
}

static void blkg_rcu_free(struct rcu_head *rcu_head)
{
        blkg_free(container_of(rcu_head, struct blkcg_gq, rcu_head));
}

void __blkg_release(struct blkcg_gq *blkg)
{
        /* release the extra blkcg reference this blkg has been holding */
        css_put(&blkg->blkcg->css);

        /*
         * A group is freed in an RCU manner.  But having an RCU lock does
         * not mean that one can access all the fields of blkg and assume
         * these are valid.  For example, don't try to follow throtl_data
         * and request queue links.
         *
         * Having a reference to blkg under an RCU read lock allows access
         * only to values local to the group, like group stats and group
         * rate limits.
         */
        call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);

static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
                             u64 val)
{
        struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
        struct blkcg_gq *blkg;
        struct hlist_node *n;
        int i;

        mutex_lock(&blkcg_pol_mutex);
        spin_lock_irq(&blkcg->lock);

        /*
         * Note that stat reset is racy - it doesn't synchronize against
         * stat updates.  This is a debug feature which shouldn't exist
         * anyway.  If you get hit by a race, retry.
         */
        hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
                for (i = 0; i < BLKCG_MAX_POLS; i++) {
                        struct blkcg_policy *pol = blkcg_policy[i];

                        if (blkcg_policy_enabled(blkg->q, pol) &&
                            pol->pd_reset_stats_fn)
                                pol->pd_reset_stats_fn(blkg);
                }
        }

        spin_unlock_irq(&blkcg->lock);
        mutex_unlock(&blkcg_pol_mutex);
        return 0;
}

static const char *blkg_dev_name(struct blkcg_gq *blkg)
{
        /* some drivers (floppy) instantiate a queue w/o disk registered */
        if (blkg->q->backing_dev_info.dev)
                return dev_name(blkg->q->backing_dev_info.dev);
        return NULL;
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: whether to print the sum of @prfill return values
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data.  If @show_total is %true, the sum of the return
 * values from @prfill is printed with "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
                       u64 (*prfill)(struct seq_file *,
                                     struct blkg_policy_data *, int),
                       const struct blkcg_policy *pol, int data,
                       bool show_total)
{
        struct blkcg_gq *blkg;
        struct hlist_node *n;
        u64 total = 0;

        spin_lock_irq(&blkcg->lock);
        hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
                if (blkcg_policy_enabled(blkg->q, pol))
                        total += prfill(sf, blkg->pd[pol->plid], data);
        spin_unlock_irq(&blkcg->lock);

        if (show_total)
                seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
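
/*
 * Illustrative sketch (hypothetical names, not part of the original
 * file): a policy typically wraps blkcg_print_blkgs() in its
 * cftype->read_seq_string method, passing a prfill callback such as
 * blkg_prfill_stat() below and the stat offset via cft->private:
 *
 *      static int example_print_stat(struct cgroup *cgrp, struct cftype *cft,
 *                                    struct seq_file *sf)
 *      {
 *              blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), blkg_prfill_stat,
 *                                &example_policy, cft->private, false);
 *              return 0;
 *      }
 */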

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
        const char *dname = blkg_dev_name(pd->blkg);

        if (!dname)
                return 0;

        seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
        return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
Tejun Heo | d3d32e6 | 2012-04-01 14:38:42 -0700 | [diff] [blame] | 417 | |
| 418 | /** |
| 419 | * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat |
| 420 | * @sf: seq_file to print to |
Tejun Heo | f95a04a | 2012-04-16 13:57:26 -0700 | [diff] [blame] | 421 | * @pd: policy private data of interest |
Tejun Heo | d3d32e6 | 2012-04-01 14:38:42 -0700 | [diff] [blame] | 422 | * @rwstat: rwstat to print |
| 423 | * |
Tejun Heo | f95a04a | 2012-04-16 13:57:26 -0700 | [diff] [blame] | 424 | * Print @rwstat to @sf for the device assocaited with @pd. |
Tejun Heo | d3d32e6 | 2012-04-01 14:38:42 -0700 | [diff] [blame] | 425 | */ |
Tejun Heo | f95a04a | 2012-04-16 13:57:26 -0700 | [diff] [blame] | 426 | u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd, |
Tejun Heo | 829fdb5 | 2012-04-01 14:38:43 -0700 | [diff] [blame] | 427 | const struct blkg_rwstat *rwstat) |
Tejun Heo | d3d32e6 | 2012-04-01 14:38:42 -0700 | [diff] [blame] | 428 | { |
| 429 | static const char *rwstr[] = { |
| 430 | [BLKG_RWSTAT_READ] = "Read", |
| 431 | [BLKG_RWSTAT_WRITE] = "Write", |
| 432 | [BLKG_RWSTAT_SYNC] = "Sync", |
| 433 | [BLKG_RWSTAT_ASYNC] = "Async", |
| 434 | }; |
Tejun Heo | f95a04a | 2012-04-16 13:57:26 -0700 | [diff] [blame] | 435 | const char *dname = blkg_dev_name(pd->blkg); |
Tejun Heo | d3d32e6 | 2012-04-01 14:38:42 -0700 | [diff] [blame] | 436 | u64 v; |
| 437 | int i; |
| 438 | |
| 439 | if (!dname) |
| 440 | return 0; |
| 441 | |
| 442 | for (i = 0; i < BLKG_RWSTAT_NR; i++) |
| 443 | seq_printf(sf, "%s %s %llu\n", dname, rwstr[i], |
| 444 | (unsigned long long)rwstat->cnt[i]); |
| 445 | |
| 446 | v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE]; |
| 447 | seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v); |
| 448 | return v; |
| 449 | } |

/**
 * blkg_prfill_stat - prfill callback for blkg_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * prfill callback for printing a blkg_stat.
 */
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
{
        return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
}
EXPORT_SYMBOL_GPL(blkg_prfill_stat);

/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * prfill callback for printing a blkg_rwstat.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
                       int off)
{
        struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);

        return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->v the new
 * value.  This function returns with RCU read lock and queue lock held and
 * must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
                   const char *input, struct blkg_conf_ctx *ctx)
        __acquires(rcu) __acquires(disk->queue->queue_lock)
{
        struct gendisk *disk;
        struct blkcg_gq *blkg;
        unsigned int major, minor;
        unsigned long long v;
        int part, ret;

        if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
                return -EINVAL;

        disk = get_gendisk(MKDEV(major, minor), &part);
        if (!disk || part)
                return -EINVAL;

        rcu_read_lock();
        spin_lock_irq(disk->queue->queue_lock);

        if (blkcg_policy_enabled(disk->queue, pol))
                blkg = blkg_lookup_create(blkcg, disk->queue);
        else
                blkg = ERR_PTR(-EINVAL);

        if (IS_ERR(blkg)) {
                ret = PTR_ERR(blkg);
                rcu_read_unlock();
                spin_unlock_irq(disk->queue->queue_lock);
                put_disk(disk);
                /*
                 * If queue was bypassing, we should retry.  Do so after a
                 * short msleep().  It isn't strictly necessary but queue
                 * can be bypassing for some time and it's always nice to
                 * avoid busy looping.
                 */
                if (ret == -EBUSY) {
                        msleep(10);
                        ret = restart_syscall();
                }
                return ret;
        }

        ctx->disk = disk;
        ctx->blkg = blkg;
        ctx->v = v;
        return 0;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
        __releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
        spin_unlock_irq(ctx->disk->queue->queue_lock);
        rcu_read_unlock();
        put_disk(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);
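
/*
 * Illustrative sketch (hypothetical names, not part of the original
 * file): a policy's cftype->write_string method brackets its config
 * update between the two helpers above.  RCU read lock and queue lock
 * are held between the calls:
 *
 *      static int example_set_limit(struct cgroup *cgrp, struct cftype *cft,
 *                                   const char *buf)
 *      {
 *              struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 *              struct blkg_conf_ctx ctx;
 *              int ret;
 *
 *              ret = blkg_conf_prep(blkcg, &example_policy, buf, &ctx);
 *              if (ret)
 *                      return ret;
 *
 *              ... update ctx.blkg's policy data using ctx.v ...
 *
 *              blkg_conf_finish(&ctx);
 *              return 0;
 *      }
 */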

struct cftype blkcg_files[] = {
        {
                .name = "reset_stats",
                .write_u64 = blkcg_reset_stats,
        },
        { }     /* terminate */
};

/**
 * blkcg_pre_destroy - cgroup pre_destroy callback
 * @cgroup: cgroup of interest
 *
 * This function is called when @cgroup is about to go away and is
 * responsible for shooting down all blkgs associated with @cgroup.
 * blkgs should be removed while holding both q and blkcg locks.  As
 * blkcg lock is nested inside q lock, this function performs reverse
 * double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static int blkcg_pre_destroy(struct cgroup *cgroup)
{
        struct blkcg *blkcg = cgroup_to_blkcg(cgroup);

        spin_lock_irq(&blkcg->lock);

        while (!hlist_empty(&blkcg->blkg_list)) {
                struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
                                                    struct blkcg_gq, blkcg_node);
                struct request_queue *q = blkg->q;

                if (spin_trylock(q->queue_lock)) {
                        blkg_destroy(blkg);
                        spin_unlock(q->queue_lock);
                } else {
                        spin_unlock_irq(&blkcg->lock);
                        cpu_relax();
                        spin_lock_irq(&blkcg->lock);
                }
        }

        spin_unlock_irq(&blkcg->lock);
        return 0;
}

static void blkcg_destroy(struct cgroup *cgroup)
{
        struct blkcg *blkcg = cgroup_to_blkcg(cgroup);

        if (blkcg != &blkcg_root)
                kfree(blkcg);
}

static struct cgroup_subsys_state *blkcg_create(struct cgroup *cgroup)
{
        static atomic64_t id_seq = ATOMIC64_INIT(0);
        struct blkcg *blkcg;
        struct cgroup *parent = cgroup->parent;

        if (!parent) {
                blkcg = &blkcg_root;
                goto done;
        }

        blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
        if (!blkcg)
                return ERR_PTR(-ENOMEM);

        blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
        blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
done:
        spin_lock_init(&blkcg->lock);
        INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
        INIT_HLIST_HEAD(&blkcg->blkg_list);

        return &blkcg->css;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node().  Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
        might_sleep();

        return blk_throtl_init(q);
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
        lockdep_assert_held(q->queue_lock);

        blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
        spin_lock_irq(q->queue_lock);
        blkg_destroy_all(q);
        spin_unlock_irq(q->queue_lock);

        blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkcg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
        struct task_struct *task;
        struct io_context *ioc;
        int ret = 0;

        /* task_lock() is needed to avoid races with exit_io_context() */
        cgroup_taskset_for_each(task, cgrp, tset) {
                task_lock(task);
                ioc = task->io_context;
                if (ioc && atomic_read(&ioc->nr_tasks) > 1)
                        ret = -EINVAL;
                task_unlock(task);
                if (ret)
                        break;
        }
        return ret;
}

struct cgroup_subsys blkio_subsys = {
        .name = "blkio",
        .create = blkcg_create,
        .can_attach = blkcg_can_attach,
        .pre_destroy = blkcg_pre_destroy,
        .destroy = blkcg_destroy,
        .subsys_id = blkio_subsys_id,
        .base_cftypes = blkcg_files,
        .module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 * from IO path.  Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registrations.  Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
                          const struct blkcg_policy *pol)
{
        LIST_HEAD(pds);
        struct blkcg_gq *blkg;
        struct blkg_policy_data *pd, *n;
        int cnt = 0, ret;

        if (blkcg_policy_enabled(q, pol))
                return 0;

        blk_queue_bypass_start(q);

        /* make sure the root blkg exists and count the existing blkgs */
        spin_lock_irq(q->queue_lock);

        rcu_read_lock();
        blkg = __blkg_lookup_create(&blkcg_root, q);
        rcu_read_unlock();

        if (IS_ERR(blkg)) {
                ret = PTR_ERR(blkg);
                goto out_unlock;
        }
        q->root_blkg = blkg;

        list_for_each_entry(blkg, &q->blkg_list, q_node)
                cnt++;

        spin_unlock_irq(q->queue_lock);

        /* allocate policy_data for all existing blkgs */
        while (cnt--) {
                pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
                if (!pd) {
                        ret = -ENOMEM;
                        goto out_free;
                }
                list_add_tail(&pd->alloc_node, &pds);
        }

        /*
         * Install the allocated pds.  With @q bypassing, no new blkg
         * should have been created while the queue lock was dropped.
         */
        spin_lock_irq(q->queue_lock);

        list_for_each_entry(blkg, &q->blkg_list, q_node) {
                if (WARN_ON(list_empty(&pds))) {
                        /* umm... this shouldn't happen, just abort */
                        ret = -ENOMEM;
                        goto out_unlock;
                }
                pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
                list_del_init(&pd->alloc_node);

                /* grab blkcg lock too while installing @pd on @blkg */
                spin_lock(&blkg->blkcg->lock);

                blkg->pd[pol->plid] = pd;
                pd->blkg = blkg;
                pol->pd_init_fn(blkg);

                spin_unlock(&blkg->blkcg->lock);
        }

        __set_bit(pol->plid, q->blkcg_pols);
        ret = 0;
out_unlock:
        spin_unlock_irq(q->queue_lock);
out_free:
        blk_queue_bypass_end(q);
        list_for_each_entry_safe(pd, n, &pds, alloc_node)
                kfree(pd);
        return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);
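
/*
 * Illustrative sketch (hypothetical policy name, not part of the
 * original file): a policy implementation enables itself on a queue
 * during setup and disables itself on teardown:
 *
 *      ret = blkcg_activate_policy(q, &example_policy);        (queue init)
 *      ...
 *      blkcg_deactivate_policy(q, &example_policy);            (queue exit)
 */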

/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @q.  Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct request_queue *q,
                             const struct blkcg_policy *pol)
{
        struct blkcg_gq *blkg;

        if (!blkcg_policy_enabled(q, pol))
                return;

        blk_queue_bypass_start(q);
        spin_lock_irq(q->queue_lock);

        __clear_bit(pol->plid, q->blkcg_pols);

        /* if no policy is left, no need for blkgs - shoot them down */
        if (bitmap_empty(q->blkcg_pols, BLKCG_MAX_POLS))
                blkg_destroy_all(q);

        list_for_each_entry(blkg, &q->blkg_list, q_node) {
                /* grab blkcg lock too while removing @pd from @blkg */
                spin_lock(&blkg->blkcg->lock);

                if (pol->pd_exit_fn)
                        pol->pd_exit_fn(blkg);

                kfree(blkg->pd[pol->plid]);
                blkg->pd[pol->plid] = NULL;

                spin_unlock(&blkg->blkcg->lock);
        }

        spin_unlock_irq(q->queue_lock);
        blk_queue_bypass_end(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);

/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
 * successful registration.  Returns 0 on success and -errno on failure.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
        int i, ret;

        if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data)))
                return -EINVAL;

        mutex_lock(&blkcg_pol_mutex);

        /* find an empty slot */
        ret = -ENOSPC;
        for (i = 0; i < BLKCG_MAX_POLS; i++)
                if (!blkcg_policy[i])
                        break;
        if (i >= BLKCG_MAX_POLS)
                goto out_unlock;

        /* register and update blkgs */
        pol->plid = i;
        blkcg_policy[i] = pol;

        /* everything is in place, add intf files for the new policy */
        if (pol->cftypes)
                WARN_ON(cgroup_add_cftypes(&blkio_subsys, pol->cftypes));
        ret = 0;
out_unlock:
        mutex_unlock(&blkcg_pol_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);
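
/*
 * Illustrative sketch (hypothetical names, not part of the original
 * file): a policy fills in a struct blkcg_policy and registers it from
 * its module init, pairing it with unregistration below on exit:
 *
 *      static struct blkcg_policy example_policy = {
 *              .pd_size        = sizeof(struct example_group),
 *              .cftypes        = example_files,
 *              .pd_init_fn     = example_pd_init,
 *              .pd_exit_fn     = example_pd_exit,
 *      };
 *
 *      static int __init example_init(void)
 *      {
 *              return blkcg_policy_register(&example_policy);
 *      }
 *
 *      static void __exit example_exit(void)
 *      {
 *              blkcg_policy_unregister(&example_policy);
 *      }
 */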

/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol).  Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
        mutex_lock(&blkcg_pol_mutex);

        if (WARN_ON(blkcg_policy[pol->plid] != pol))
                goto out_unlock;

        /* kill the intf files first */
        if (pol->cftypes)
                cgroup_rm_cftypes(&blkio_subsys, pol->cftypes);

        /* unregister and update blkgs */
        blkcg_policy[pol->plid] = NULL;
out_unlock:
        mutex_unlock(&blkcg_pol_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);