/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include "blk-cgroup.h"
#include "blk.h"

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);

/* List of groups pending per cpu stats allocation */
static DEFINE_SPINLOCK(alloc_list_lock);
static LIST_HEAD(alloc_list);

static void blkio_stat_alloc_fn(struct work_struct *);
static DECLARE_DELAYED_WORK(blkio_stat_alloc_work, blkio_stat_alloc_fn);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct blkio_policy_type *blkio_policy[BLKIO_NR_POLICIES];

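/* Return the blkio_cgroup associated with a cgroup, via its subsys state. */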
struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

static struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkio_cgroup, css);
}

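/*
 * Determine the owning blkio_cgroup of a bio: the bio's own css if one has
 * been associated with it, otherwise the cgroup of the current task.
 */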
struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio)
{
	if (bio && bio->bi_css)
		return container_of(bio->bi_css, struct blkio_cgroup, css);
	return task_blkio_cgroup(current);
}
EXPORT_SYMBOL_GPL(bio_blkio_cgroup);

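/*
 * Notify the policy that owns @blkg of a weight change.  The bps and iops
 * helpers below follow the same pattern for the throttling limits.
 */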
static inline void blkio_update_group_weight(struct blkio_group *blkg,
					     int plid, unsigned int weight)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != plid)
			continue;
		if (blkiop->ops.blkio_update_group_weight_fn)
			blkiop->ops.blkio_update_group_weight_fn(blkg->q,
							blkg, weight);
	}
}

static inline void blkio_update_group_bps(struct blkio_group *blkg, int plid,
					  u64 bps, int rw)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {

		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != plid)
			continue;

		if (rw == READ && blkiop->ops.blkio_update_group_read_bps_fn)
			blkiop->ops.blkio_update_group_read_bps_fn(blkg->q,
								blkg, bps);

		if (rw == WRITE && blkiop->ops.blkio_update_group_write_bps_fn)
			blkiop->ops.blkio_update_group_write_bps_fn(blkg->q,
								blkg, bps);
	}
}

static inline void blkio_update_group_iops(struct blkio_group *blkg, int plid,
					   u64 iops, int rw)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {

		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != plid)
			continue;

		if (rw == READ && blkiop->ops.blkio_update_group_read_iops_fn)
			blkiop->ops.blkio_update_group_read_iops_fn(blkg->q,
								blkg, iops);

		if (rw == WRITE && blkiop->ops.blkio_update_group_write_iops_fn)
			blkiop->ops.blkio_update_group_write_iops_fn(blkg->q,
								blkg, iops);
	}
}

#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the queue_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					    struct blkio_policy_type *pol,
					    struct blkio_group *curr_blkg)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];

	if (blkio_blkg_waiting(&pd->stats))
		return;
	if (blkg == curr_blkg)
		return;
	pd->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&pd->stats);
}

/* This should be called with the queue_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		blkg_stat_add(&stats->group_wait_time,
			      now - stats->start_group_wait_time);
	blkio_clear_blkg_waiting(stats);
}

/* This should be called with the queue_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		blkg_stat_add(&stats->empty_time,
			      now - stats->start_empty_time);
	blkio_clear_blkg_empty(stats);
}

void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
					struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);
	BUG_ON(blkio_blkg_idling(stats));

	stats->start_idle_time = sched_clock();
	blkio_mark_blkg_idling(stats);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	if (blkio_blkg_idling(stats)) {
		unsigned long long now = sched_clock();

		if (time_after64(now, stats->start_idle_time))
			blkg_stat_add(&stats->idle_time,
				      now - stats->start_idle_time);
		blkio_clear_blkg_idling(stats);
	}
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);

void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
					 struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	blkg_stat_add(&stats->avg_queue_size_sum,
		      blkg_rwstat_sum(&stats->queued));
	blkg_stat_add(&stats->avg_queue_size_samples, 1);
	blkio_update_group_wait_time(stats);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);

void blkiocg_set_start_empty_time(struct blkio_group *blkg,
				  struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	if (blkg_rwstat_sum(&stats->queued))
		return;

	/*
	 * The group is already marked empty. This can happen if cfqq got a
	 * new request in the parent group and moved to this group while it
	 * was being added to the service tree. Just ignore the event and
	 * move on.
	 */
	if (blkio_blkg_empty(stats))
		return;

	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  struct blkio_policy_type *pol,
				  unsigned long dequeue)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];

	lockdep_assert_held(blkg->q->queue_lock);

	blkg_stat_add(&pd->stats.dequeue, dequeue);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_policy_type *pol,
					struct blkio_group *curr_blkg) { }
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) { }
#endif

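/*
 * Account a request being queued to @blkg: bump the queued rwstat, end any
 * empty-time accounting and start group wait tracking.
 */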
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
				 struct blkio_policy_type *pol,
				 struct blkio_group *curr_blkg, bool direction,
				 bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);

	lockdep_assert_held(blkg->q->queue_lock);

	blkg_rwstat_add(&stats->queued, rw, 1);
	blkio_end_empty_time(stats);
	blkio_set_start_group_wait_time(blkg, pol, curr_blkg);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);

void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);

	lockdep_assert_held(blkg->q->queue_lock);

	blkg_rwstat_add(&stats->queued, rw, -1);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);

void blkiocg_update_timeslice_used(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   unsigned long time,
				   unsigned long unaccounted_time)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	blkg_stat_add(&stats->time, time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_add(&stats->unaccounted_time, unaccounted_time);
#endif
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

/*
 * Should be called under rcu read lock or queue lock to make sure the blkg
 * pointer is valid.
 */
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   uint64_t bytes, bool direction, bool sync)
{
	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/* If per cpu stats are not allocated yet, don't do any accounting. */
	if (pd->stats_cpu == NULL)
		return;

	/*
	 * Disable interrupts to provide mutual exclusion between two
	 * writes on the same cpu. This probably is not needed for 64bit.
	 * Not optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(pd->stats_cpu);

	blkg_stat_add(&stats_cpu->sectors, bytes >> 9);
	blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
	blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);

void blkiocg_update_completion_stats(struct blkio_group *blkg,
				     struct blkio_policy_type *pol,
				     uint64_t start_time,
				     uint64_t io_start_time, bool direction,
				     bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
	unsigned long long now = sched_clock();
	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);

	lockdep_assert_held(blkg->q->queue_lock);

	if (time_after64(now, io_start_time))
		blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
	if (time_after64(io_start_time, start_time))
		blkg_rwstat_add(&stats->wait_time, rw,
				io_start_time - start_time);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);

/* Merged stats are updated under the queue_lock. */
void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);

	lockdep_assert_held(blkg->q->queue_lock);

	blkg_rwstat_add(&stats->merged, rw, 1);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);

/*
 * Worker for allocating per cpu stat for blk groups. This is scheduled on
 * the system_nrt_wq once there are some groups on the alloc_list waiting
 * for allocation.
 */
static void blkio_stat_alloc_fn(struct work_struct *work)
{
	static void *pcpu_stats[BLKIO_NR_POLICIES];
	struct delayed_work *dwork = to_delayed_work(work);
	struct blkio_group *blkg;
	int i;
	bool empty = false;

alloc_stats:
	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		if (pcpu_stats[i] != NULL)
			continue;

		pcpu_stats[i] = alloc_percpu(struct blkio_group_stats_cpu);

		/* Allocation failed. Try again after some time. */
		if (pcpu_stats[i] == NULL) {
			queue_delayed_work(system_nrt_wq, dwork,
					   msecs_to_jiffies(10));
			return;
		}
	}

	spin_lock_irq(&blkio_list_lock);
	spin_lock(&alloc_list_lock);

	/* cgroup got deleted or queue exited. */
	if (!list_empty(&alloc_list)) {
		blkg = list_first_entry(&alloc_list, struct blkio_group,
					alloc_node);
		for (i = 0; i < BLKIO_NR_POLICIES; i++) {
			struct blkg_policy_data *pd = blkg->pd[i];

			if (blkio_policy[i] && pd && !pd->stats_cpu)
				swap(pd->stats_cpu, pcpu_stats[i]);
		}

		list_del_init(&blkg->alloc_node);
	}

	empty = list_empty(&alloc_list);

	spin_unlock(&alloc_list_lock);
	spin_unlock_irq(&blkio_list_lock);

	if (!empty)
		goto alloc_stats;
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkio_group *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkg_policy_data *pd = blkg->pd[i];

		if (pd) {
			free_percpu(pd->stats_cpu);
			kfree(pd);
		}
	}

	kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
				      struct request_queue *q)
{
	struct blkio_group *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
	if (!blkg)
		return NULL;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	INIT_LIST_HEAD(&blkg->alloc_node);
	blkg->blkcg = blkcg;
	blkg->refcnt = 1;
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));

	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];
		struct blkg_policy_data *pd;

		if (!pol)
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = kzalloc_node(sizeof(*pd) + pol->pdata_size, GFP_ATOMIC,
				  q->node);
		if (!pd) {
			blkg_free(blkg);
			return NULL;
		}

		blkg->pd[i] = pd;
		pd->blkg = blkg;
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];

		if (pol)
			pol->ops.blkio_init_group_fn(blkg);
	}

	return blkg;
}

struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
				       struct request_queue *q,
				       bool for_root)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct blkio_group *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 * The following can be removed if blkg lookup is guaranteed to
	 * fail on a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)) && !for_root)
		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);

	blkg = blkg_lookup(blkcg, q);
	if (blkg)
		return blkg;

	/* blkg holds a reference to blkcg */
	if (!css_tryget(&blkcg->css))
		return ERR_PTR(-EINVAL);

	/*
	 * Allocate and initialize.
	 */
	blkg = blkg_alloc(blkcg, q);

	/* did alloc fail? */
	if (unlikely(!blkg)) {
		blkg = ERR_PTR(-ENOMEM);
		goto out;
	}

	/* insert */
	spin_lock(&blkcg->lock);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	list_add(&blkg->q_node, &q->blkg_list);
	spin_unlock(&blkcg->lock);

	spin_lock(&alloc_list_lock);
	list_add(&blkg->alloc_node, &alloc_list);
	/* Queue per cpu stat allocation from worker thread. */
	queue_delayed_work(system_nrt_wq, &blkio_stat_alloc_work, 0);
	spin_unlock(&alloc_list_lock);
out:
	return blkg;
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);

/* called under rcu_read_lock(). */
struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
				struct request_queue *q)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkg->q == q)
			return blkg;
	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup);

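/*
 * Unlink @blkg from its request_queue, blkcg and alloc lists and drop the
 * reference taken at creation time.  Both the queue_lock and the blkcg
 * lock must be held.
 */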
static void blkg_destroy(struct blkio_group *blkg)
{
	struct request_queue *q = blkg->q;
	struct blkio_cgroup *blkcg = blkg->blkcg;

	lockdep_assert_held(q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something wrong if we are trying to remove same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	spin_lock(&alloc_list_lock);
	list_del_init(&blkg->alloc_node);
	spin_unlock(&alloc_list_lock);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}

/*
 * XXX: This updates blkg policy data in-place for the root blkg, which is
 * necessary across elevator switch and policy registration as root blkgs
 * aren't shot down. This broken and racy implementation is temporary.
 * Eventually, blkg shoot down will be replaced by proper in-place update.
 */
void update_root_blkg_pd(struct request_queue *q, enum blkio_policy_id plid)
{
	struct blkio_policy_type *pol = blkio_policy[plid];
	struct blkio_group *blkg = blkg_lookup(&blkio_root_cgroup, q);
	struct blkg_policy_data *pd;

	if (!blkg)
		return;

	kfree(blkg->pd[plid]);
	blkg->pd[plid] = NULL;

	if (!pol)
		return;

	pd = kzalloc(sizeof(*pd) + pol->pdata_size, GFP_KERNEL);
	WARN_ON_ONCE(!pd);

	pd->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
	WARN_ON_ONCE(!pd->stats_cpu);

	blkg->pd[plid] = pd;
	pd->blkg = blkg;
	pol->ops.blkio_init_group_fn(blkg);
}
EXPORT_SYMBOL_GPL(update_root_blkg_pd);

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 * @destroy_root: whether to destroy root blkg or not
 *
 * Destroy blkgs associated with @q. If @destroy_root is %true, all are
 * destroyed; otherwise, the root blkg is left alone.
 */
void blkg_destroy_all(struct request_queue *q, bool destroy_root)
{
	struct blkio_group *blkg, *n;

	spin_lock_irq(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkio_cgroup *blkcg = blkg->blkcg;

		/* skip root? */
		if (!destroy_root && blkg->blkcg == &blkio_root_cgroup)
			continue;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}

	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blkg_destroy_all);

static void blkg_rcu_free(struct rcu_head *rcu_head)
{
	blkg_free(container_of(rcu_head, struct blkio_group, rcu_head));
}

void __blkg_release(struct blkio_group *blkg)
{
	/* release the extra blkcg reference this blkg has been holding */
	css_put(&blkg->blkcg->css);

	/*
	 * A group is freed in rcu manner. But having an rcu lock does not
	 * mean that one can access all the fields of blkg and assume these
	 * are valid. For example, don't try to follow throtl_data and
	 * request queue links.
	 *
	 * Having a reference to blkg under an rcu read lock allows access
	 * only to values local to the group, like group stats and group
	 * rate limits.
	 */
	call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);

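/* Reset the per cpu stats of @blkg for policy @plid, if already allocated. */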
static void blkio_reset_stats_cpu(struct blkio_group *blkg, int plid)
{
	struct blkg_policy_data *pd = blkg->pd[plid];
	int cpu;

	if (pd->stats_cpu == NULL)
		return;

	for_each_possible_cpu(cpu) {
		struct blkio_group_stats_cpu *sc =
			per_cpu_ptr(pd->stats_cpu, cpu);

		blkg_rwstat_reset(&sc->service_bytes);
		blkg_rwstat_reset(&sc->serviced);
		blkg_stat_reset(&sc->sectors);
	}
}

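/*
 * Handler for writes to the "reset_stats" cftype: clear the stats of every
 * blkg in the cgroup for every registered policy (queued stats excepted).
 */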
static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	struct blkio_group *blkg;
	struct hlist_node *n;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates. This is a debug feature which shouldn't exist
	 * anyway. If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		struct blkio_policy_type *pol;

		list_for_each_entry(pol, &blkio_list, list) {
			struct blkg_policy_data *pd = blkg->pd[pol->plid];
			struct blkio_group_stats *stats = &pd->stats;

			/* queued stats shouldn't be cleared */
			blkg_rwstat_reset(&stats->merged);
			blkg_rwstat_reset(&stats->service_time);
			blkg_rwstat_reset(&stats->wait_time);
			blkg_stat_reset(&stats->time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
			blkg_stat_reset(&stats->unaccounted_time);
			blkg_stat_reset(&stats->avg_queue_size_sum);
			blkg_stat_reset(&stats->avg_queue_size_samples);
			blkg_stat_reset(&stats->dequeue);
			blkg_stat_reset(&stats->group_wait_time);
			blkg_stat_reset(&stats->idle_time);
			blkg_stat_reset(&stats->empty_time);
#endif
			blkio_reset_stats_cpu(blkg, pol->plid);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}

static const char *blkg_dev_name(struct blkio_group *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists. @prfill is invoked with @sf, the
 * policy data and @data. If @show_total is %true, the sum of the return
 * values from @prfill is printed with "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkio_cgroup *blkcg,
		       u64 (*prfill)(struct seq_file *, struct blkg_policy_data *, int),
		       int pol, int data, bool show_total)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	u64 total = 0;

	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkg->pd[pol])
			total += prfill(sf, blkg->pd[pol], data);
	spin_unlock_irq(&blkcg->lock);

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat)
{
	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ]	= "Read",
		[BLKG_RWSTAT_WRITE]	= "Write",
		[BLKG_RWSTAT_SYNC]	= "Sync",
		[BLKG_RWSTAT_ASYNC]	= "Async",
	};
	const char *dname = blkg_dev_name(pd->blkg);
	u64 v;
	int i;

	if (!dname)
		return 0;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   (unsigned long long)rwstat->cnt[i]);

	v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
	return v;
}

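/* prfill callbacks reading a blkg_stat/blkg_rwstat @off bytes into pd->stats */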
static u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd,
			    int off)
{
	return __blkg_prfill_u64(sf, pd,
				 blkg_stat_read((void *)&pd->stats + off));
}

static u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			      int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)&pd->stats + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

/* print blkg_stat specified by BLKCG_STAT_PRIV() */
int blkcg_print_stat(struct cgroup *cgrp, struct cftype *cft,
		     struct seq_file *sf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);

	blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat,
			  BLKCG_STAT_POL(cft->private),
			  BLKCG_STAT_OFF(cft->private), false);
	return 0;
}
EXPORT_SYMBOL_GPL(blkcg_print_stat);

/* print blkg_rwstat specified by BLKCG_STAT_PRIV() */
int blkcg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
		       struct seq_file *sf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);

	blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat,
			  BLKCG_STAT_POL(cft->private),
			  BLKCG_STAT_OFF(cft->private), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkcg_print_rwstat);

static u64 blkg_prfill_cpu_stat(struct seq_file *sf,
				struct blkg_policy_data *pd, int off)
{
	u64 v = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct blkio_group_stats_cpu *sc =
			per_cpu_ptr(pd->stats_cpu, cpu);

		v += blkg_stat_read((void *)sc + off);
	}

	return __blkg_prfill_u64(sf, pd, v);
}

static u64 blkg_prfill_cpu_rwstat(struct seq_file *sf,
				  struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat rwstat = { }, tmp;
	int i, cpu;

	for_each_possible_cpu(cpu) {
		struct blkio_group_stats_cpu *sc =
			per_cpu_ptr(pd->stats_cpu, cpu);

		tmp = blkg_rwstat_read((void *)sc + off);
		for (i = 0; i < BLKG_RWSTAT_NR; i++)
			rwstat.cnt[i] += tmp.cnt[i];
	}

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

/* print per-cpu blkg_stat specified by BLKCG_STAT_PRIV() */
int blkcg_print_cpu_stat(struct cgroup *cgrp, struct cftype *cft,
			 struct seq_file *sf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);

	blkcg_print_blkgs(sf, blkcg, blkg_prfill_cpu_stat,
			  BLKCG_STAT_POL(cft->private),
			  BLKCG_STAT_OFF(cft->private), false);
	return 0;
}
EXPORT_SYMBOL_GPL(blkcg_print_cpu_stat);

/* print per-cpu blkg_rwstat specified by BLKCG_STAT_PRIV() */
int blkcg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
			   struct seq_file *sf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);

	blkcg_print_blkgs(sf, blkcg, blkg_prfill_cpu_rwstat,
			  BLKCG_STAT_POL(cft->private),
			  BLKCG_STAT_OFF(cft->private), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkcg_print_cpu_rwstat);

#ifdef CONFIG_DEBUG_BLK_CGROUP
static u64 blkg_prfill_avg_queue_size(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	u64 samples = blkg_stat_read(&pd->stats.avg_queue_size_samples);
	u64 v = 0;

	if (samples) {
		v = blkg_stat_read(&pd->stats.avg_queue_size_sum);
		do_div(v, samples);
	}
	__blkg_prfill_u64(sf, pd, v);
	return 0;
}

/* print avg_queue_size */
static int blkcg_print_avg_queue_size(struct cgroup *cgrp, struct cftype *cft,
				      struct seq_file *sf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);

	blkcg_print_blkgs(sf, blkcg, blkg_prfill_avg_queue_size,
			  BLKIO_POLICY_PROP, 0, false);
	return 0;
}
#endif	/* CONFIG_DEBUG_BLK_CGROUP */

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result. @ctx->blkg points to the blkg to be updated and @ctx->v the new
 * value. This function returns with RCU read locked and must be paired
 * with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkio_cgroup *blkcg, const char *input,
		   struct blkg_conf_ctx *ctx)
	__acquires(rcu)
{
	struct gendisk *disk;
	struct blkio_group *blkg;
	unsigned int major, minor;
	unsigned long long v;
	int part, ret;

	if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
		return -EINVAL;

	disk = get_gendisk(MKDEV(major, minor), &part);
	if (!disk || part)
		return -EINVAL;

	rcu_read_lock();

	spin_lock_irq(disk->queue->queue_lock);
	blkg = blkg_lookup_create(blkcg, disk->queue, false);
	spin_unlock_irq(disk->queue->queue_lock);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		rcu_read_unlock();
		put_disk(disk);
		/*
		 * If queue was bypassing, we should retry. Do so after a
		 * short msleep(). It isn't strictly necessary but queue
		 * can be bypassing for some time and it's always nice to
		 * avoid busy looping.
		 */
		if (ret == -EBUSY) {
			msleep(10);
			ret = restart_syscall();
		}
		return ret;
	}

	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->v = v;
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update. This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(rcu)
{
	rcu_read_unlock();
	put_disk(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);

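/*
 * Typical usage of the pair above in a cftype write handler, sketched for
 * illustration only (blkcg_set_foo and apply_foo_conf are hypothetical
 * placeholders, not part of this file):
 *
 *	static int blkcg_set_foo(struct cgroup *cgrp, struct cftype *cft,
 *				 const char *buf)
 *	{
 *		struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
 *		struct blkg_conf_ctx ctx;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, buf, &ctx);	// parses "maj:min v"
 *		if (ret)
 *			return ret;
 *		// ctx.blkg and ctx.v are valid here, under rcu_read_lock()
 *		ret = apply_foo_conf(ctx.blkg, ctx.v);
 *		blkg_conf_finish(&ctx);	// drops RCU and the disk reference
 *		return ret;
 *	}
 */
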
/* for propio conf */
static u64 blkg_prfill_weight_device(struct seq_file *sf,
				     struct blkg_policy_data *pd, int off)
{
	if (!pd->conf.weight)
		return 0;
	return __blkg_prfill_u64(sf, pd, pd->conf.weight);
}

static int blkcg_print_weight_device(struct cgroup *cgrp, struct cftype *cft,
				     struct seq_file *sf)
{
	blkcg_print_blkgs(sf, cgroup_to_blkio_cgroup(cgrp),
			  blkg_prfill_weight_device, BLKIO_POLICY_PROP, 0,
			  false);
	return 0;
}

static int blkcg_print_weight(struct cgroup *cgrp, struct cftype *cft,
			      struct seq_file *sf)
{
	seq_printf(sf, "%u\n", cgroup_to_blkio_cgroup(cgrp)->weight);
	return 0;
}

static int blkcg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
				   const char *buf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
	struct blkg_policy_data *pd;
	struct blkg_conf_ctx ctx;
	int ret;

	ret = blkg_conf_prep(blkcg, buf, &ctx);
	if (ret)
		return ret;

	ret = -EINVAL;
	pd = ctx.blkg->pd[BLKIO_POLICY_PROP];
	if (pd && (!ctx.v || (ctx.v >= BLKIO_WEIGHT_MIN &&
			      ctx.v <= BLKIO_WEIGHT_MAX))) {
		pd->conf.weight = ctx.v;
		blkio_update_group_weight(ctx.blkg, BLKIO_POLICY_PROP,
					  ctx.v ?: blkcg->weight);
		ret = 0;
	}

	blkg_conf_finish(&ctx);
	return ret;
}

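/*
 * Set the cgroup-wide default weight and propagate it to every blkg that
 * doesn't have a per-device weight configured.
 */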
static int blkcg_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
	struct blkio_group *blkg;
	struct hlist_node *n;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		struct blkg_policy_data *pd = blkg->pd[BLKIO_POLICY_PROP];

		if (pd && !pd->conf.weight)
			blkio_update_group_weight(blkg, BLKIO_POLICY_PROP,
						  blkcg->weight);
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}

/* for blk-throttle conf */
#ifdef CONFIG_BLK_DEV_THROTTLING
static u64 blkg_prfill_conf_u64(struct seq_file *sf,
				struct blkg_policy_data *pd, int off)
{
	u64 v = *(u64 *)((void *)&pd->conf + off);

	if (!v)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static int blkcg_print_conf_u64(struct cgroup *cgrp, struct cftype *cft,
				struct seq_file *sf)
{
	blkcg_print_blkgs(sf, cgroup_to_blkio_cgroup(cgrp),
			  blkg_prfill_conf_u64, BLKIO_POLICY_THROTL,
			  cft->private, false);
	return 0;
}

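/*
 * Generic setter for the throttle u64 limits: parse "major:minor value",
 * store the value @cft->private bytes into the group conf and notify
 * blk-throttle via @update.  A value of 0 clears the limit (passed as -1).
 */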
static int blkcg_set_conf_u64(struct cgroup *cgrp, struct cftype *cft,
			      const char *buf, int rw,
			      void (*update)(struct blkio_group *, int, u64, int))
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
	struct blkg_policy_data *pd;
	struct blkg_conf_ctx ctx;
	int ret;

	ret = blkg_conf_prep(blkcg, buf, &ctx);
	if (ret)
		return ret;

	ret = -EINVAL;
	pd = ctx.blkg->pd[BLKIO_POLICY_THROTL];
	if (pd) {
		*(u64 *)((void *)&pd->conf + cft->private) = ctx.v;
		update(ctx.blkg, BLKIO_POLICY_THROTL, ctx.v ?: -1, rw);
		ret = 0;
	}

	blkg_conf_finish(&ctx);
	return ret;
}

static int blkcg_set_conf_bps_r(struct cgroup *cgrp, struct cftype *cft,
				const char *buf)
{
	return blkcg_set_conf_u64(cgrp, cft, buf, READ, blkio_update_group_bps);
}

static int blkcg_set_conf_bps_w(struct cgroup *cgrp, struct cftype *cft,
				const char *buf)
{
	return blkcg_set_conf_u64(cgrp, cft, buf, WRITE, blkio_update_group_bps);
}

static int blkcg_set_conf_iops_r(struct cgroup *cgrp, struct cftype *cft,
				 const char *buf)
{
	return blkcg_set_conf_u64(cgrp, cft, buf, READ, blkio_update_group_iops);
}

static int blkcg_set_conf_iops_w(struct cgroup *cgrp, struct cftype *cft,
				 const char *buf)
{
	return blkcg_set_conf_u64(cgrp, cft, buf, WRITE, blkio_update_group_iops);
}
#endif

struct cftype blkio_files[] = {
	{
		.name = "weight_device",
		.read_seq_string = blkcg_print_weight_device,
		.write_string = blkcg_set_weight_device,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.read_seq_string = blkcg_print_weight,
		.write_u64 = blkcg_set_weight,
	},
	{
		.name = "time",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats, time)),
		.read_seq_string = blkcg_print_stat,
	},
	{
		.name = "sectors",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats_cpu, sectors)),
		.read_seq_string = blkcg_print_cpu_stat,
	},
	{
		.name = "io_service_bytes",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats_cpu, service_bytes)),
		.read_seq_string = blkcg_print_cpu_rwstat,
	},
	{
		.name = "io_serviced",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats_cpu, serviced)),
		.read_seq_string = blkcg_print_cpu_rwstat,
	},
	{
		.name = "io_service_time",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats, service_time)),
		.read_seq_string = blkcg_print_rwstat,
	},
	{
		.name = "io_wait_time",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats, wait_time)),
		.read_seq_string = blkcg_print_rwstat,
	},
	{
		.name = "io_merged",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats, merged)),
		.read_seq_string = blkcg_print_rwstat,
	},
	{
		.name = "io_queued",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats, queued)),
		.read_seq_string = blkcg_print_rwstat,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_BLK_DEV_THROTTLING
	{
		.name = "throttle.read_bps_device",
		.private = offsetof(struct blkio_group_conf, bps[READ]),
		.read_seq_string = blkcg_print_conf_u64,
		.write_string = blkcg_set_conf_bps_r,
		.max_write_len = 256,
	},

	{
		.name = "throttle.write_bps_device",
		.private = offsetof(struct blkio_group_conf, bps[WRITE]),
		.read_seq_string = blkcg_print_conf_u64,
		.write_string = blkcg_set_conf_bps_w,
		.max_write_len = 256,
	},

	{
		.name = "throttle.read_iops_device",
		.private = offsetof(struct blkio_group_conf, iops[READ]),
		.read_seq_string = blkcg_print_conf_u64,
		.write_string = blkcg_set_conf_iops_r,
		.max_write_len = 256,
	},

	{
		.name = "throttle.write_iops_device",
		.private = offsetof(struct blkio_group_conf, iops[WRITE]),
		.read_seq_string = blkcg_print_conf_u64,
		.write_string = blkcg_set_conf_iops_w,
		.max_write_len = 256,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_THROTL,
				offsetof(struct blkio_group_stats_cpu, service_bytes)),
		.read_seq_string = blkcg_print_cpu_rwstat,
	},
	{
		.name = "throttle.io_serviced",
Tejun Heod3d32e62012-04-01 14:38:42 -07001294 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_THROTL,
1295 offsetof(struct blkio_group_stats_cpu, serviced)),
1296 .read_seq_string = blkcg_print_cpu_rwstat,
Vivek Goyal4c9eefa2010-09-15 17:06:34 -04001297 },
Vivek Goyal13f98252010-10-01 14:49:41 +02001298#endif /* CONFIG_BLK_DEV_THROTTLING */
1299
Vivek Goyal22084192009-12-03 12:59:49 -05001300#ifdef CONFIG_DEBUG_BLK_CGROUP
Divyesh Shahcdc11842010-04-08 21:15:10 -07001301 {
1302 .name = "avg_queue_size",
Tejun Heod3d32e62012-04-01 14:38:42 -07001303 .read_seq_string = blkcg_print_avg_queue_size,
Divyesh Shahcdc11842010-04-08 21:15:10 -07001304 },
1305 {
Divyesh Shah812df482010-04-08 21:15:35 -07001306 .name = "group_wait_time",
Tejun Heod3d32e62012-04-01 14:38:42 -07001307 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
1308 offsetof(struct blkio_group_stats, group_wait_time)),
1309 .read_seq_string = blkcg_print_stat,
Divyesh Shah812df482010-04-08 21:15:35 -07001310 },
1311 {
1312 .name = "idle_time",
Tejun Heod3d32e62012-04-01 14:38:42 -07001313 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
1314 offsetof(struct blkio_group_stats, idle_time)),
1315 .read_seq_string = blkcg_print_stat,
Divyesh Shah812df482010-04-08 21:15:35 -07001316 },
1317 {
1318 .name = "empty_time",
Tejun Heod3d32e62012-04-01 14:38:42 -07001319 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
1320 offsetof(struct blkio_group_stats, empty_time)),
1321 .read_seq_string = blkcg_print_stat,
Divyesh Shah812df482010-04-08 21:15:35 -07001322 },
1323 {
Vivek Goyal22084192009-12-03 12:59:49 -05001324 .name = "dequeue",
Tejun Heod3d32e62012-04-01 14:38:42 -07001325 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
1326 offsetof(struct blkio_group_stats, dequeue)),
1327 .read_seq_string = blkcg_print_stat,
Divyesh Shahcdc11842010-04-08 21:15:10 -07001328 },
Justin TerAvest9026e522011-03-22 21:26:54 +01001329 {
1330 .name = "unaccounted_time",
Tejun Heod3d32e62012-04-01 14:38:42 -07001331 .private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
1332 offsetof(struct blkio_group_stats, unaccounted_time)),
1333 .read_seq_string = blkcg_print_stat,
Justin TerAvest9026e522011-03-22 21:26:54 +01001334 },
Vivek Goyal22084192009-12-03 12:59:49 -05001335#endif
Tejun Heo4baf6e32012-04-01 12:09:55 -07001336 { } /* terminate */
Vivek Goyal31e4c282009-12-03 12:59:42 -05001337};
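/*
 * Editor's note (sketch): each entry above becomes a "blkio."-prefixed
 * file in the cgroup filesystem, e.g. blkio.weight or
 * blkio.throttle.read_bps_device.  Per-device settings use the
 * "<major>:<minor> <value>" syntax parsed by blkg_conf_prep(); a
 * hypothetical session (mount point assumed):
 *
 *	# echo 500 > /cgroup/grp1/blkio.weight
 *	# echo "8:16 1048576" > /cgroup/grp1/blkio.throttle.read_bps_device
 *	# cat /cgroup/grp1/blkio.io_service_bytes
 */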
1338
Tejun Heo9f13ef62012-03-05 13:15:21 -08001339/**
1340 * blkiocg_pre_destroy - cgroup pre_destroy callback
Tejun Heo9f13ef62012-03-05 13:15:21 -08001341 * @cgroup: cgroup of interest
1342 *
1343 * This function is called when @cgroup is about to go away and is
1344 * responsible for shooting down all blkgs associated with @cgroup. blkgs
1345 * should be removed while holding both the q and blkcg locks. As the blkcg
1346 * lock is nested inside the q lock, this function performs reverse double-lock dancing.
1347 *
1348 * This is the blkcg counterpart of ioc_release_fn().
1349 */
Tejun Heo959d8512012-04-01 12:30:01 -07001350static int blkiocg_pre_destroy(struct cgroup *cgroup)
Vivek Goyal31e4c282009-12-03 12:59:42 -05001351{
1352 struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
1353
Tejun Heo9f13ef62012-03-05 13:15:21 -08001354 spin_lock_irq(&blkcg->lock);
Tejun Heo7ee9c562012-03-05 13:15:11 -08001355
Tejun Heo9f13ef62012-03-05 13:15:21 -08001356 while (!hlist_empty(&blkcg->blkg_list)) {
1357 struct blkio_group *blkg = hlist_entry(blkcg->blkg_list.first,
1358 struct blkio_group, blkcg_node);
Tejun Heoc875f4d2012-03-05 13:15:22 -08001359 struct request_queue *q = blkg->q;
Vivek Goyalb1c35762009-12-03 12:59:47 -05001360
Tejun Heo9f13ef62012-03-05 13:15:21 -08001361 if (spin_trylock(q->queue_lock)) {
1362 blkg_destroy(blkg);
1363 spin_unlock(q->queue_lock);
1364 } else {
1365 spin_unlock_irq(&blkcg->lock);
Tejun Heo9f13ef62012-03-05 13:15:21 -08001366 cpu_relax();
Dan Carpentera5567932012-03-29 20:57:08 +02001367 spin_lock_irq(&blkcg->lock);
Jens Axboe0f3942a2010-05-03 14:28:55 +02001368 }
Tejun Heo9f13ef62012-03-05 13:15:21 -08001369 }
Jens Axboe0f3942a2010-05-03 14:28:55 +02001370
Tejun Heo9f13ef62012-03-05 13:15:21 -08001371 spin_unlock_irq(&blkcg->lock);
Tejun Heo7ee9c562012-03-05 13:15:11 -08001372 return 0;
1373}
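/*
 * Editor's note (sketch): the trylock above is the classic workaround for
 * taking locks against their established nesting order.  The normal order
 * is
 *
 *	spin_lock_irq(q->queue_lock);	(outer)
 *	spin_lock(&blkcg->lock);	(inner)
 *
 * but pre_destroy must walk blkcg->blkg_list and so takes the inner lock
 * first.  Acquiring the outer lock with spin_trylock() and backing off
 * (unlock, cpu_relax(), retry) on failure avoids an AB-BA deadlock.
 */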
1374
Li Zefan761b3ef2012-01-31 13:47:36 +08001375static void blkiocg_destroy(struct cgroup *cgroup)
Tejun Heo7ee9c562012-03-05 13:15:11 -08001376{
1377 struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
1378
Ben Blum67523c42010-03-10 15:22:11 -08001379 if (blkcg != &blkio_root_cgroup)
1380 kfree(blkcg);
Vivek Goyal31e4c282009-12-03 12:59:42 -05001381}
1382
Li Zefan761b3ef2012-01-31 13:47:36 +08001383static struct cgroup_subsys_state *blkiocg_create(struct cgroup *cgroup)
Vivek Goyal31e4c282009-12-03 12:59:42 -05001384{
Tejun Heo9a9e8a22012-03-19 15:10:56 -07001385 static atomic64_t id_seq = ATOMIC64_INIT(0);
Li Zefan03415092010-05-07 08:57:00 +02001386 struct blkio_cgroup *blkcg;
1387 struct cgroup *parent = cgroup->parent;
Vivek Goyal31e4c282009-12-03 12:59:42 -05001388
Li Zefan03415092010-05-07 08:57:00 +02001389 if (!parent) {
Vivek Goyal31e4c282009-12-03 12:59:42 -05001390 blkcg = &blkio_root_cgroup;
1391 goto done;
1392 }
1393
Vivek Goyal31e4c282009-12-03 12:59:42 -05001394 blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
1395 if (!blkcg)
1396 return ERR_PTR(-ENOMEM);
1397
1398 blkcg->weight = BLKIO_WEIGHT_DEFAULT;
Tejun Heo9a9e8a22012-03-19 15:10:56 -07001399 blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
Vivek Goyal31e4c282009-12-03 12:59:42 -05001400done:
1401 spin_lock_init(&blkcg->lock);
1402 INIT_HLIST_HEAD(&blkcg->blkg_list);
1403
1404 return &blkcg->css;
1405}
1406
Tejun Heo5efd6112012-03-05 13:15:12 -08001407/**
1408 * blkcg_init_queue - initialize blkcg part of request queue
1409 * @q: request_queue to initialize
1410 *
1411 * Called from blk_alloc_queue_node(). Responsible for initializing the
1412 * blkcg part of the new request_queue @q.
1413 *
1414 * RETURNS:
1415 * 0 on success, -errno on failure.
1416 */
1417int blkcg_init_queue(struct request_queue *q)
1418{
Tejun Heo923adde2012-03-05 13:15:13 -08001419 int ret;
1420
Tejun Heo5efd6112012-03-05 13:15:12 -08001421 might_sleep();
1422
Tejun Heo923adde2012-03-05 13:15:13 -08001423 ret = blk_throtl_init(q);
1424 if (ret)
1425 return ret;
1426
1427 mutex_lock(&all_q_mutex);
1428 INIT_LIST_HEAD(&q->all_q_node);
1429 list_add_tail(&q->all_q_node, &all_q_list);
1430 mutex_unlock(&all_q_mutex);
1431
1432 return 0;
Tejun Heo5efd6112012-03-05 13:15:12 -08001433}
1434
1435/**
1436 * blkcg_drain_queue - drain blkcg part of request_queue
1437 * @q: request_queue to drain
1438 *
1439 * Called from blk_drain_queue(). Responsible for draining the blkcg part.
1440 */
1441void blkcg_drain_queue(struct request_queue *q)
1442{
1443 lockdep_assert_held(q->queue_lock);
1444
1445 blk_throtl_drain(q);
1446}
1447
1448/**
1449 * blkcg_exit_queue - exit and release blkcg part of request_queue
1450 * @q: request_queue being released
1451 *
1452 * Called from blk_release_queue(). Responsible for exiting the blkcg part.
1453 */
1454void blkcg_exit_queue(struct request_queue *q)
1455{
Tejun Heo923adde2012-03-05 13:15:13 -08001456 mutex_lock(&all_q_mutex);
1457 list_del_init(&q->all_q_node);
1458 mutex_unlock(&all_q_mutex);
1459
Tejun Heoe8989fa2012-03-05 13:15:20 -08001460 blkg_destroy_all(q, true);
1461
Tejun Heo5efd6112012-03-05 13:15:12 -08001462 blk_throtl_exit(q);
1463}
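/*
 * Editor's sketch: the three hooks above (init/drain/exit) bracket a
 * request_queue's lifetime.  Assuming the callers named in their
 * comments, the sequence is roughly:
 */
#if 0	/* illustrative only */
	q = blk_alloc_queue_node(GFP_KERNEL, node);	/* -> blkcg_init_queue(q) */
	/* ... queue in service; draining ends up in blkcg_drain_queue(q) ... */
	blk_cleanup_queue(q);	/* final release -> blkcg_exit_queue(q) */
#endif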
1464
Vivek Goyal31e4c282009-12-03 12:59:42 -05001465/*
1466 * We cannot support shared io contexts, as we have no means to support
1467 * two tasks with the same ioc in two different groups without major rework
1468 * of the main cic data structures. For now we allow a task to change
1469 * its cgroup only if it's the only owner of its ioc.
1470 */
Li Zefan761b3ef2012-01-31 13:47:36 +08001471static int blkiocg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
Vivek Goyal31e4c282009-12-03 12:59:42 -05001472{
Tejun Heobb9d97b2011-12-12 18:12:21 -08001473 struct task_struct *task;
Vivek Goyal31e4c282009-12-03 12:59:42 -05001474 struct io_context *ioc;
1475 int ret = 0;
1476
1477 /* task_lock() is needed to avoid races with exit_io_context() */
Tejun Heobb9d97b2011-12-12 18:12:21 -08001478 cgroup_taskset_for_each(task, cgrp, tset) {
1479 task_lock(task);
1480 ioc = task->io_context;
1481 if (ioc && atomic_read(&ioc->nr_tasks) > 1)
1482 ret = -EINVAL;
1483 task_unlock(task);
1484 if (ret)
1485 break;
1486 }
Vivek Goyal31e4c282009-12-03 12:59:42 -05001487 return ret;
1488}
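/*
 * Editor's note (sketch): tasks share one io_context when created with
 * clone(2)'s CLONE_IO flag, which makes ioc->nr_tasks > 1 and causes the
 * check above to refuse the migration.  A hypothetical userspace
 * reproducer:
 */
#if 0	/* illustrative only */
	/* child shares the parent's io_context via CLONE_IO */
	pid = clone(child_fn, stack_top, CLONE_IO | SIGCHLD, NULL);

	/* writing either pid to /cgroup/grp1/tasks now fails with -EINVAL */
#endif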
1489
Tejun Heo923adde2012-03-05 13:15:13 -08001490static void blkcg_bypass_start(void)
1491 __acquires(&all_q_mutex)
1492{
1493 struct request_queue *q;
1494
1495 mutex_lock(&all_q_mutex);
1496
1497 list_for_each_entry(q, &all_q_list, all_q_node) {
1498 blk_queue_bypass_start(q);
Tejun Heoe8989fa2012-03-05 13:15:20 -08001499 blkg_destroy_all(q, false);
Tejun Heo923adde2012-03-05 13:15:13 -08001500 }
1501}
1502
1503static void blkcg_bypass_end(void)
1504 __releases(&all_q_mutex)
1505{
1506 struct request_queue *q;
1507
1508 list_for_each_entry(q, &all_q_list, all_q_node)
1509 blk_queue_bypass_end(q);
1510
1511 mutex_unlock(&all_q_mutex);
1512}
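/*
 * Editor's note (sketch): these two functions are used as a bracket
 * around global policy updates -- every known queue is switched into
 * bypass mode and its non-root blkgs destroyed, the policy list is
 * mutated, and then bypass ends, e.g.:
 */
#if 0	/* illustrative only */
	blkcg_bypass_start();
	/* ... add or remove an entry in blkio_policy[] ... */
	blkcg_bypass_end();
#endif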
1513
Tejun Heo676f7c82012-04-01 12:09:55 -07001514struct cgroup_subsys blkio_subsys = {
1515 .name = "blkio",
1516 .create = blkiocg_create,
1517 .can_attach = blkiocg_can_attach,
Tejun Heo959d8512012-04-01 12:30:01 -07001518 .pre_destroy = blkiocg_pre_destroy,
Tejun Heo676f7c82012-04-01 12:09:55 -07001519 .destroy = blkiocg_destroy,
Tejun Heo676f7c82012-04-01 12:09:55 -07001520 .subsys_id = blkio_subsys_id,
Tejun Heo4baf6e32012-04-01 12:09:55 -07001521 .base_cftypes = blkio_files,
Tejun Heo676f7c82012-04-01 12:09:55 -07001522 .module = THIS_MODULE,
1523};
1524EXPORT_SYMBOL_GPL(blkio_subsys);
1525
Vivek Goyal3e252062009-12-04 10:36:42 -05001526void blkio_policy_register(struct blkio_policy_type *blkiop)
1527{
Tejun Heoe8989fa2012-03-05 13:15:20 -08001528 struct request_queue *q;
1529
Tejun Heo923adde2012-03-05 13:15:13 -08001530 blkcg_bypass_start();
Vivek Goyal3e252062009-12-04 10:36:42 -05001531 spin_lock(&blkio_list_lock);
Tejun Heo035d10b2012-03-05 13:15:04 -08001532
1533 BUG_ON(blkio_policy[blkiop->plid]);
1534 blkio_policy[blkiop->plid] = blkiop;
Vivek Goyal3e252062009-12-04 10:36:42 -05001535 list_add_tail(&blkiop->list, &blkio_list);
Tejun Heo035d10b2012-03-05 13:15:04 -08001536
Vivek Goyal3e252062009-12-04 10:36:42 -05001537 spin_unlock(&blkio_list_lock);
Tejun Heoe8989fa2012-03-05 13:15:20 -08001538 list_for_each_entry(q, &all_q_list, all_q_node)
1539 update_root_blkg_pd(q, blkiop->plid);
Tejun Heo923adde2012-03-05 13:15:13 -08001540 blkcg_bypass_end();
Tejun Heo44ea53d2012-04-01 14:38:43 -07001541
1542 if (blkiop->cftypes)
1543 WARN_ON(cgroup_add_cftypes(&blkio_subsys, blkiop->cftypes));
Vivek Goyal3e252062009-12-04 10:36:42 -05001544}
1545EXPORT_SYMBOL_GPL(blkio_policy_register);
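/*
 * Editor's sketch (loosely modeled on how a policy like blk-throttle
 * would wire itself up; all "example_*" names are hypothetical): a
 * policy fills in a blkio_policy_type -- at minimum its plid slot and
 * optional cftypes -- and registers it at module init.  Note the
 * BUG_ON() above: each plid slot can hold only one policy at a time.
 */
#if 0	/* illustrative only */
static struct blkio_policy_type blkio_policy_example = {
	.plid		= BLKIO_POLICY_THROTL,	/* must be a free slot */
	.cftypes	= example_files,	/* added via cgroup_add_cftypes() */
	/* per-group ops and pdata_size omitted in this sketch */
};

static int __init example_init(void)
{
	blkio_policy_register(&blkio_policy_example);
	return 0;
}
module_init(example_init);
#endif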
1546
1547void blkio_policy_unregister(struct blkio_policy_type *blkiop)
1548{
Tejun Heoe8989fa2012-03-05 13:15:20 -08001549 struct request_queue *q;
1550
Tejun Heo44ea53d2012-04-01 14:38:43 -07001551 if (blkiop->cftypes)
1552 cgroup_rm_cftypes(&blkio_subsys, blkiop->cftypes);
1553
Tejun Heo923adde2012-03-05 13:15:13 -08001554 blkcg_bypass_start();
Vivek Goyal3e252062009-12-04 10:36:42 -05001555 spin_lock(&blkio_list_lock);
Tejun Heo035d10b2012-03-05 13:15:04 -08001556
1557 BUG_ON(blkio_policy[blkiop->plid] != blkiop);
1558 blkio_policy[blkiop->plid] = NULL;
Vivek Goyal3e252062009-12-04 10:36:42 -05001559 list_del_init(&blkiop->list);
Tejun Heo035d10b2012-03-05 13:15:04 -08001560
Vivek Goyal3e252062009-12-04 10:36:42 -05001561 spin_unlock(&blkio_list_lock);
Tejun Heoe8989fa2012-03-05 13:15:20 -08001562 list_for_each_entry(q, &all_q_list, all_q_node)
1563 update_root_blkg_pd(q, blkiop->plid);
Tejun Heo923adde2012-03-05 13:15:13 -08001564 blkcg_bypass_end();
Vivek Goyal3e252062009-12-04 10:36:42 -05001565}
1566EXPORT_SYMBOL_GPL(blkio_policy_unregister);