/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include "blk-cgroup.h"
#include "blk.h"

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);

/* List of groups pending per cpu stats allocation */
static DEFINE_SPINLOCK(alloc_list_lock);
static LIST_HEAD(alloc_list);

static void blkio_stat_alloc_fn(struct work_struct *);
static DECLARE_DELAYED_WORK(blkio_stat_alloc_work, blkio_stat_alloc_fn);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct blkio_policy_type *blkio_policy[BLKIO_NR_POLICIES];

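/* return the blkio_cgroup corresponding to @cgroup's subsystem state */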
struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

static struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkio_cgroup, css);
}

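/*
 * Determine the owning blkcg of @bio: use the explicit css association
 * (bi_css) if the bio carries one, else fall back to the blkcg of the
 * current task.
 */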
struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio)
{
	if (bio && bio->bi_css)
		return container_of(bio->bi_css, struct blkio_cgroup, css);
	return task_blkio_cgroup(current);
}
EXPORT_SYMBOL_GPL(bio_blkio_cgroup);

/*
 * Worker for allocating per cpu stat for blk groups. This is scheduled on
 * the system_nrt_wq once there are some groups on the alloc_list waiting
 * for allocation.
 */
static void blkio_stat_alloc_fn(struct work_struct *work)
{
	static void *pcpu_stats[BLKIO_NR_POLICIES];
	struct delayed_work *dwork = to_delayed_work(work);
	struct blkio_group *blkg;
	int i;
	bool empty = false;

alloc_stats:
	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		if (pcpu_stats[i] != NULL)
			continue;

		pcpu_stats[i] = alloc_percpu(struct blkio_group_stats_cpu);

		/* Allocation failed. Try again after some time. */
		if (pcpu_stats[i] == NULL) {
			queue_delayed_work(system_nrt_wq, dwork,
					   msecs_to_jiffies(10));
			return;
		}
	}

	spin_lock_irq(&blkio_list_lock);
	spin_lock(&alloc_list_lock);

	/* cgroup got deleted or queue exited. */
	if (!list_empty(&alloc_list)) {
		blkg = list_first_entry(&alloc_list, struct blkio_group,
					alloc_node);
		for (i = 0; i < BLKIO_NR_POLICIES; i++) {
			struct blkg_policy_data *pd = blkg->pd[i];

			if (blkio_policy[i] && pd && !pd->stats_cpu)
				swap(pd->stats_cpu, pcpu_stats[i]);
		}

		list_del_init(&blkg->alloc_node);
	}

	empty = list_empty(&alloc_list);

	spin_unlock(&alloc_list_lock);
	spin_unlock_irq(&blkio_list_lock);

	if (!empty)
		goto alloc_stats;
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkio_group *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];
		struct blkg_policy_data *pd = blkg->pd[i];

		if (!pd)
			continue;

		if (pol && pol->ops.blkio_exit_group_fn)
			pol->ops.blkio_exit_group_fn(blkg);

		free_percpu(pd->stats_cpu);
		kfree(pd);
	}

	kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
				      struct request_queue *q)
{
	struct blkio_group *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
	if (!blkg)
		return NULL;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	INIT_LIST_HEAD(&blkg->alloc_node);
	blkg->blkcg = blkcg;
	blkg->refcnt = 1;
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));

	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];
		struct blkg_policy_data *pd;

		if (!pol)
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = kzalloc_node(sizeof(*pd) + pol->pdata_size, GFP_ATOMIC,
				  q->node);
		if (!pd) {
			blkg_free(blkg);
			return NULL;
		}

		blkg->pd[i] = pd;
		pd->blkg = blkg;
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];

		if (pol)
			pol->ops.blkio_init_group_fn(blkg);
	}

	return blkg;
}

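/**
 * blkg_lookup_create - look up a blkg, creating it on demand
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @for_root: whether the lookup is on behalf of the root blkg
 *
 * Look up the blkg for the @blkcg - @q pair, allocating and inserting a
 * new one if it doesn't exist.  Must be called with the queue lock held
 * under rcu_read_lock().  Returns the blkg on success and an ERR_PTR()
 * on failure - in particular -EBUSY if @q is bypassing and @for_root is
 * %false.
 */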
struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
				       struct request_queue *q,
				       bool for_root)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct blkio_group *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 * The following can be removed if blkg lookup is guaranteed to
	 * fail on a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)) && !for_root)
		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);

	blkg = blkg_lookup(blkcg, q);
	if (blkg)
		return blkg;

	/* blkg holds a reference to blkcg */
	if (!css_tryget(&blkcg->css))
		return ERR_PTR(-EINVAL);

	/*
	 * Allocate and initialize.
	 */
	blkg = blkg_alloc(blkcg, q);

	/* did alloc fail? */
	if (unlikely(!blkg)) {
		blkg = ERR_PTR(-ENOMEM);
		goto out;
	}

	/* insert */
	spin_lock(&blkcg->lock);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	list_add(&blkg->q_node, &q->blkg_list);
	spin_unlock(&blkcg->lock);

	spin_lock(&alloc_list_lock);
	list_add(&blkg->alloc_node, &alloc_list);
	/* Queue per cpu stat allocation from worker thread. */
	queue_delayed_work(system_nrt_wq, &blkio_stat_alloc_work, 0);
	spin_unlock(&alloc_list_lock);
out:
	return blkg;
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);

/* called under rcu_read_lock(). */
struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
				struct request_queue *q)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkg->q == q)
			return blkg;
	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup);

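/*
 * Unlink @blkg from its request_queue and blkcg and put the reference
 * taken at creation time.  Both the queue lock and the blkcg lock must
 * be held; the actual freeing happens once the last reference is gone.
 */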
static void blkg_destroy(struct blkio_group *blkg)
{
	struct request_queue *q = blkg->q;
	struct blkio_cgroup *blkcg = blkg->blkcg;

	lockdep_assert_held(q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something wrong if we are trying to remove same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	spin_lock(&alloc_list_lock);
	list_del_init(&blkg->alloc_node);
	spin_unlock(&alloc_list_lock);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}

/*
 * XXX: This updates blkg policy data in-place for root blkg, which is
 * necessary across elevator switch and policy registration as root blkgs
 * aren't shot down.  This broken and racy implementation is temporary.
 * Eventually, blkg shoot down will be replaced by proper in-place update.
 */
void update_root_blkg_pd(struct request_queue *q, enum blkio_policy_id plid)
{
	struct blkio_policy_type *pol = blkio_policy[plid];
	struct blkio_group *blkg = blkg_lookup(&blkio_root_cgroup, q);
	struct blkg_policy_data *pd;

	if (!blkg)
		return;

	kfree(blkg->pd[plid]);
	blkg->pd[plid] = NULL;

	if (!pol)
		return;

	pd = kzalloc(sizeof(*pd) + pol->pdata_size, GFP_KERNEL);
	WARN_ON_ONCE(!pd);

	pd->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
	WARN_ON_ONCE(!pd->stats_cpu);

	blkg->pd[plid] = pd;
	pd->blkg = blkg;
	pol->ops.blkio_init_group_fn(blkg);
}
EXPORT_SYMBOL_GPL(update_root_blkg_pd);

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 * @destroy_root: whether to destroy root blkg or not
 *
 * Destroy blkgs associated with @q.  If @destroy_root is %true, all are
 * destroyed; otherwise, root blkg is left alone.
 */
void blkg_destroy_all(struct request_queue *q, bool destroy_root)
{
	struct blkio_group *blkg, *n;

	spin_lock_irq(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkio_cgroup *blkcg = blkg->blkcg;

		/* skip root? */
		if (!destroy_root && blkg->blkcg == &blkio_root_cgroup)
			continue;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}

	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blkg_destroy_all);

static void blkg_rcu_free(struct rcu_head *rcu_head)
{
	blkg_free(container_of(rcu_head, struct blkio_group, rcu_head));
}

void __blkg_release(struct blkio_group *blkg)
{
	/* release the extra blkcg reference this blkg has been holding */
	css_put(&blkg->blkcg->css);

	/*
	 * A group is freed in rcu manner. But having an rcu lock does not
	 * mean that one can access all the fields of blkg and assume these
	 * are valid. For example, don't try to follow throtl_data and
	 * request queue links.
	 *
	 * Having a reference to blkg under an rcu allows access only to
	 * values local to groups like group stats and group rate limits.
	 */
	call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);

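/* reset @blkg's per-cpu stats for policy @plid; no-op if not yet allocated */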
static void blkio_reset_stats_cpu(struct blkio_group *blkg, int plid)
{
	struct blkg_policy_data *pd = blkg->pd[plid];
	int cpu;

	if (pd->stats_cpu == NULL)
		return;

	for_each_possible_cpu(cpu) {
		struct blkio_group_stats_cpu *sc =
			per_cpu_ptr(pd->stats_cpu, cpu);

		blkg_rwstat_reset(&sc->service_bytes);
		blkg_rwstat_reset(&sc->serviced);
	}
}

static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	struct blkio_group *blkg;
	struct hlist_node *n;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		struct blkio_policy_type *pol;

		list_for_each_entry(pol, &blkio_list, list) {
			struct blkg_policy_data *pd = blkg->pd[pol->plid];
			struct blkio_group_stats *stats = &pd->stats;

			/* queued stats shouldn't be cleared */
			blkg_rwstat_reset(&stats->service_bytes);
			blkg_rwstat_reset(&stats->serviced);
			blkg_rwstat_reset(&stats->merged);
			blkg_rwstat_reset(&stats->service_time);
			blkg_rwstat_reset(&stats->wait_time);
			blkg_stat_reset(&stats->time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
			blkg_stat_reset(&stats->unaccounted_time);
			blkg_stat_reset(&stats->avg_queue_size_sum);
			blkg_stat_reset(&stats->avg_queue_size_samples);
			blkg_stat_reset(&stats->dequeue);
			blkg_stat_reset(&stats->group_wait_time);
			blkg_stat_reset(&stats->idle_time);
			blkg_stat_reset(&stats->empty_time);
#endif
			blkio_reset_stats_cpu(blkg, pol->plid);

			if (pol->ops.blkio_reset_group_stats_fn)
				pol->ops.blkio_reset_group_stats_fn(blkg);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}

static const char *blkg_dev_name(struct blkio_group *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: whether to print out the sum of prfill return values
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data.  If @show_total is %true, the sum of the return
 * values from @prfill is printed with "Total" label at the end.
 *
 * This is to be used to construct print functions for the
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkio_cgroup *blkcg,
		       u64 (*prfill)(struct seq_file *, struct blkg_policy_data *, int),
		       int pol, int data, bool show_total)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	u64 total = 0;

	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkg->pd[pol])
			total += prfill(sf, blkg->pd[pol], data);
	spin_unlock_irq(&blkcg->lock);

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat)
{
	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ]	= "Read",
		[BLKG_RWSTAT_WRITE]	= "Write",
		[BLKG_RWSTAT_SYNC]	= "Sync",
		[BLKG_RWSTAT_ASYNC]	= "Async",
	};
	const char *dname = blkg_dev_name(pd->blkg);
	u64 v;
	int i;

	if (!dname)
		return 0;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   (unsigned long long)rwstat->cnt[i]);

	v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
	return v;
}

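/*
 * prfill callbacks reading a blkg_stat/blkg_rwstat field located at @off
 * inside pd->stats.
 */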
static u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd,
			    int off)
{
	return __blkg_prfill_u64(sf, pd,
				 blkg_stat_read((void *)&pd->stats + off));
}

static u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			      int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)&pd->stats + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

/* print blkg_stat specified by BLKCG_STAT_PRIV() */
int blkcg_print_stat(struct cgroup *cgrp, struct cftype *cft,
		     struct seq_file *sf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);

	blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat,
			  BLKCG_STAT_POL(cft->private),
			  BLKCG_STAT_OFF(cft->private), false);
	return 0;
}
EXPORT_SYMBOL_GPL(blkcg_print_stat);

/* print blkg_rwstat specified by BLKCG_STAT_PRIV() */
int blkcg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
		       struct seq_file *sf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);

	blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat,
			  BLKCG_STAT_POL(cft->private),
			  BLKCG_STAT_OFF(cft->private), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkcg_print_rwstat);

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->v the new
 * value.  This function returns with RCU read locked and must be paired
 * with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkio_cgroup *blkcg, const char *input,
		   struct blkg_conf_ctx *ctx)
	__acquires(rcu)
{
	struct gendisk *disk;
	struct blkio_group *blkg;
	unsigned int major, minor;
	unsigned long long v;
	int part, ret;

	if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
		return -EINVAL;

	disk = get_gendisk(MKDEV(major, minor), &part);
	if (!disk || part)
		return -EINVAL;

	rcu_read_lock();

	spin_lock_irq(disk->queue->queue_lock);
	blkg = blkg_lookup_create(blkcg, disk->queue, false);
	spin_unlock_irq(disk->queue->queue_lock);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		rcu_read_unlock();
		put_disk(disk);
		/*
		 * If queue was bypassing, we should retry.  Do so after a
		 * short msleep().  It isn't strictly necessary but queue
		 * can be bypassing for some time and it's always nice to
		 * avoid busy looping.
		 */
		if (ret == -EBUSY) {
			msleep(10);
			ret = restart_syscall();
		}
		return ret;
	}

	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->v = v;
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(rcu)
{
	rcu_read_unlock();
	put_disk(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);
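
/*
 * A cftype write method pairs the two helpers above.  A minimal sketch
 * (blkcg and buf stand for whatever the write method receives; the
 * policy-specific update in the middle is up to the caller):
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	ret = blkg_conf_prep(blkcg, buf, &ctx);
 *	if (ret)
 *		return ret;
 *	... apply ctx.v to ctx.blkg's policy data ...
 *	blkg_conf_finish(&ctx);
 *	return 0;
 */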

struct cftype blkio_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
	{ }	/* terminate */
};

/**
 * blkiocg_pre_destroy - cgroup pre_destroy callback
 * @cgroup: cgroup of interest
 *
 * This function is called when @cgroup is about to go away and is
 * responsible for shooting down all blkgs associated with @cgroup.
 * blkgs should be removed while holding both q and blkcg locks.  As
 * blkcg lock is nested inside q lock, this function performs reverse
 * double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static int blkiocg_pre_destroy(struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkio_group *blkg = hlist_entry(blkcg->blkg_list.first,
						struct blkio_group, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}

static void blkiocg_destroy(struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}

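/*
 * cgroup "create" callback.  The root cgroup maps to the statically
 * allocated blkio_root_cgroup; any other cgroup gets a freshly allocated
 * blkcg carrying the default weight and a unique id.
 */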
static struct cgroup_subsys_state *blkiocg_create(struct cgroup *cgroup)
{
	static atomic64_t id_seq = ATOMIC64_INIT(0);
	struct blkio_cgroup *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
	blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node().  Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	int ret;

	might_sleep();

	ret = blk_throtl_init(q);
	if (ret)
		return ret;

	mutex_lock(&all_q_mutex);
	INIT_LIST_HEAD(&q->all_q_node);
	list_add_tail(&q->all_q_node, &all_q_list);
	mutex_unlock(&all_q_mutex);

	return 0;
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	mutex_lock(&all_q_mutex);
	list_del_init(&q->all_q_node);
	mutex_unlock(&all_q_mutex);

	blkg_destroy_all(q, true);

	blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

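/*
 * Bracket policy [un]registration: put every queue into bypass mode and
 * shoot down all non-root blkgs, so that no blkg with stale policy data
 * is in use while blkio_policy[] and the root blkgs are being updated.
 */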
static void blkcg_bypass_start(void)
	__acquires(&all_q_mutex)
{
	struct request_queue *q;

	mutex_lock(&all_q_mutex);

	list_for_each_entry(q, &all_q_list, all_q_node) {
		blk_queue_bypass_start(q);
		blkg_destroy_all(q, false);
	}
}

static void blkcg_bypass_end(void)
	__releases(&all_q_mutex)
{
	struct request_queue *q;

	list_for_each_entry(q, &all_q_list, all_q_node)
		blk_queue_bypass_end(q);

	mutex_unlock(&all_q_mutex);
}

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.pre_destroy = blkiocg_pre_destroy,
	.destroy = blkiocg_destroy,
	.subsys_id = blkio_subsys_id,
	.base_cftypes = blkio_files,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

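/**
 * blkio_policy_register - register a blkio policy
 * @blkiop: policy to register
 *
 * Install @blkiop in the policy table and list, refresh the root blkg
 * policy data of every queue, and add the policy's cftypes to the blkio
 * cgroup subsystem.  Runs with all queues in bypass mode.
 */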
void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	struct request_queue *q;

	blkcg_bypass_start();
	spin_lock(&blkio_list_lock);

	BUG_ON(blkio_policy[blkiop->plid]);
	blkio_policy[blkiop->plid] = blkiop;
	list_add_tail(&blkiop->list, &blkio_list);

	spin_unlock(&blkio_list_lock);
	list_for_each_entry(q, &all_q_list, all_q_node)
		update_root_blkg_pd(q, blkiop->plid);
	blkcg_bypass_end();

	if (blkiop->cftypes)
		WARN_ON(cgroup_add_cftypes(&blkio_subsys, blkiop->cftypes));
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	struct request_queue *q;

	if (blkiop->cftypes)
		cgroup_rm_cftypes(&blkio_subsys, blkiop->cftypes);

	blkcg_bypass_start();
	spin_lock(&blkio_list_lock);

	BUG_ON(blkio_policy[blkiop->plid] != blkiop);
	blkio_policy[blkiop->plid] = NULL;
	list_del_init(&blkiop->list);

	spin_unlock(&blkio_list_lock);
	list_for_each_entry(q, &all_q_list, all_q_node)
		update_root_blkg_pd(q, blkiop->plid);
	blkcg_bypass_end();
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);