#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>

enum blkio_policy_id {
	BLKIO_POLICY_PROP = 0,		/* Proportional Bandwidth division */
	BLKIO_POLICY_THROTL,		/* Throttling */

	BLKIO_NR_POLICIES,
};

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP

enum stat_type {
	/* Number of IOs merged */
	BLKIO_STAT_MERGED,
	/* Total time spent (in ns) between request dispatch to the driver and
	 * request completion for IOs done by this cgroup. This may not be
	 * accurate when NCQ is turned on. */
	BLKIO_STAT_SERVICE_TIME,
	/* Total time spent waiting in scheduler queue in ns */
	BLKIO_STAT_WAIT_TIME,
	/* Number of IOs queued up */
	BLKIO_STAT_QUEUED,

	/* All the single valued stats go below this */
	BLKIO_STAT_TIME,
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* Time not charged to this cgroup */
	BLKIO_STAT_UNACCOUNTED_TIME,
	BLKIO_STAT_AVG_QUEUE_SIZE,
	BLKIO_STAT_IDLE_TIME,
	BLKIO_STAT_EMPTY_TIME,
	BLKIO_STAT_GROUP_WAIT_TIME,
	BLKIO_STAT_DEQUEUE
#endif
};

/* Types lower than this live in stat_arr and have subtypes */
#define BLKIO_STAT_ARR_NR	(BLKIO_STAT_QUEUED + 1)

/* Per cpu stats */
enum stat_type_cpu {
	/* Total bytes transferred */
	BLKIO_STAT_CPU_SERVICE_BYTES,
	/* Total IOs serviced, post merge */
	BLKIO_STAT_CPU_SERVICED,

	/* All the single valued stats go below this */
	BLKIO_STAT_CPU_SECTORS,
};

#define BLKIO_STAT_CPU_ARR_NR	(BLKIO_STAT_CPU_SERVICED + 1)

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

/* blkg state flags */
enum blkg_state_flags {
	BLKG_waiting = 0,
	BLKG_idling,
	BLKG_empty,
};

/* cgroup files owned by proportional weight policy */
enum blkcg_file_name_prop {
	BLKIO_PROP_weight = 1,
	BLKIO_PROP_weight_device,
	BLKIO_PROP_io_service_bytes,
	BLKIO_PROP_io_serviced,
	BLKIO_PROP_time,
	BLKIO_PROP_sectors,
	BLKIO_PROP_unaccounted_time,
	BLKIO_PROP_io_service_time,
	BLKIO_PROP_io_wait_time,
	BLKIO_PROP_io_merged,
	BLKIO_PROP_io_queued,
	BLKIO_PROP_avg_queue_size,
	BLKIO_PROP_group_wait_time,
	BLKIO_PROP_idle_time,
	BLKIO_PROP_empty_time,
	BLKIO_PROP_dequeue,
};

/* cgroup files owned by throttle policy */
enum blkcg_file_name_throtl {
	BLKIO_THROTL_read_bps_device,
	BLKIO_THROTL_write_bps_device,
	BLKIO_THROTL_read_iops_device,
	BLKIO_THROTL_write_iops_device,
	BLKIO_THROTL_io_service_bytes,
	BLKIO_THROTL_io_serviced,
};

struct blkio_cgroup {
	struct cgroup_subsys_state css;
	unsigned int weight;
	spinlock_t lock;
	struct hlist_head blkg_list;

	/* for policies to test whether associated blkcg has changed */
	uint64_t id;
};

struct blkg_stat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt;
};

struct blkg_rwstat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt[BLKG_RWSTAT_NR];
};

struct blkio_group_stats {
	/* number of ios merged */
	struct blkg_rwstat		merged;
	/* total time spent on device in ns, may not be accurate w/ queueing */
	struct blkg_rwstat		service_time;
	/* total time spent waiting in scheduler queue in ns */
	struct blkg_rwstat		wait_time;
	/* number of IOs queued up */
	struct blkg_rwstat		queued;
	/* total disk time and nr sectors dispatched by this group */
	struct blkg_stat		time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* time not charged to this cgroup */
	struct blkg_stat		unaccounted_time;
	/* sum of number of ios queued across all samples */
	struct blkg_stat		avg_queue_size_sum;
	/* count of samples taken for average */
	struct blkg_stat		avg_queue_size_samples;
	/* how many times this group has been removed from service tree */
	struct blkg_stat		dequeue;
	/* total time spent waiting for it to be assigned a timeslice. */
	struct blkg_stat		group_wait_time;
	/* time spent idling for this blkio_group */
	struct blkg_stat		idle_time;
	/* total time with empty current active q with other requests queued */
	struct blkg_stat		empty_time;
	/* fields after this shouldn't be cleared on stat reset */
	uint64_t			start_group_wait_time;
	uint64_t			start_idle_time;
	uint64_t			start_empty_time;
	uint16_t			flags;
#endif
};

/* Per cpu blkio group stats */
struct blkio_group_stats_cpu {
	/* total bytes transferred */
	struct blkg_rwstat		service_bytes;
	/* total IOs serviced, post merge */
	struct blkg_rwstat		serviced;
	/* total sectors transferred */
	struct blkg_stat		sectors;
};

struct blkio_group_conf {
	unsigned int			weight;
	unsigned int			iops[2];
	u64				bps[2];
};

/* per-blkg per-policy data */
struct blkg_policy_data {
	/* the blkg this per-policy data belongs to */
	struct blkio_group		*blkg;

	/* Configuration */
	struct blkio_group_conf		conf;

	struct blkio_group_stats	stats;
	/* Per cpu stats pointer */
	struct blkio_group_stats_cpu	__percpu *stats_cpu;

	/* pol->pdata_size bytes of private data used by policy impl */
	char pdata[] __aligned(__alignof__(unsigned long long));
};
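
/*
 * Illustrative sketch (not part of this header): callers are expected to
 * allocate blkg_policy_data with pol->pdata_size bytes trailing the struct
 * so that @pdata can hold the policy's private object, e.g.:
 *
 *	pd = kzalloc_node(sizeof(*pd) + pol->pdata_size, GFP_KERNEL,
 *			  q->node);
 *	pd->blkg = blkg;
 *
 * The policy then uses pd->pdata as its own type (a cfq or throttle
 * group) for the blkg's lifetime.
 */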

struct blkio_group {
	/* Pointer to the associated request_queue */
	struct request_queue *q;
	struct list_head q_node;
	struct hlist_node blkcg_node;
	struct blkio_cgroup *blkcg;
	/* Store cgroup path */
	char path[128];
	/* reference count */
	int refcnt;

	struct blkg_policy_data *pd[BLKIO_NR_POLICIES];

	/* List of blkg waiting for per cpu stats memory to be allocated */
	struct list_head alloc_node;
	struct rcu_head rcu_head;
};

typedef void (blkio_init_group_fn)(struct blkio_group *blkg);
typedef void (blkio_update_group_weight_fn)(struct request_queue *q,
			struct blkio_group *blkg, unsigned int weight);
typedef void (blkio_update_group_read_bps_fn)(struct request_queue *q,
			struct blkio_group *blkg, u64 read_bps);
typedef void (blkio_update_group_write_bps_fn)(struct request_queue *q,
			struct blkio_group *blkg, u64 write_bps);
typedef void (blkio_update_group_read_iops_fn)(struct request_queue *q,
			struct blkio_group *blkg, unsigned int read_iops);
typedef void (blkio_update_group_write_iops_fn)(struct request_queue *q,
			struct blkio_group *blkg, unsigned int write_iops);

struct blkio_policy_ops {
	blkio_init_group_fn *blkio_init_group_fn;
	blkio_update_group_weight_fn *blkio_update_group_weight_fn;
	blkio_update_group_read_bps_fn *blkio_update_group_read_bps_fn;
	blkio_update_group_write_bps_fn *blkio_update_group_write_bps_fn;
	blkio_update_group_read_iops_fn *blkio_update_group_read_iops_fn;
	blkio_update_group_write_iops_fn *blkio_update_group_write_iops_fn;
};

struct blkio_policy_type {
	struct list_head list;
	struct blkio_policy_ops ops;
	enum blkio_policy_id plid;
	size_t pdata_size;		/* policy specific private data size */
};

extern int blkcg_init_queue(struct request_queue *q);
extern void blkcg_drain_queue(struct request_queue *q);
extern void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
extern void blkio_policy_register(struct blkio_policy_type *);
extern void blkio_policy_unregister(struct blkio_policy_type *);
extern void blkg_destroy_all(struct request_queue *q, bool destroy_root);
extern void update_root_blkg_pd(struct request_queue *q,
				enum blkio_policy_id plid);

/**
 * blkg_to_pdata - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline void *blkg_to_pdata(struct blkio_group *blkg,
				  struct blkio_policy_type *pol)
{
	return blkg ? blkg->pd[pol->plid]->pdata : NULL;
}
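
/*
 * Illustrative sketch: a policy would typically wrap blkg_to_pdata() in a
 * converter to its private type.  Assuming a hypothetical policy
 * "blkio_policy_foo" whose private data is "struct foo_grp":
 *
 *	static inline struct foo_grp *blkg_to_foo(struct blkio_group *blkg)
 *	{
 *		return blkg_to_pdata(blkg, &blkio_policy_foo);
 *	}
 */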

/**
 * pdata_to_blkg - get blkg associated with policy private data
 * @pdata: policy private data of interest
 *
 * @pdata is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkio_group *pdata_to_blkg(void *pdata)
{
	if (pdata) {
		struct blkg_policy_data *pd =
			container_of(pdata, struct blkg_policy_data, pdata);
		return pd->blkg;
	}
	return NULL;
}
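
/*
 * Illustrative sketch: pdata_to_blkg() is the inverse of blkg_to_pdata(),
 * so for any blkg with policy data attached:
 *
 *	void *pdata = blkg_to_pdata(blkg, pol);
 *	WARN_ON(pdata && pdata_to_blkg(pdata) != blkg);
 *
 * This lets policy code that only holds its private data recover the
 * owning blkg, e.g. to reach blkg->q or blkg->blkcg.
 */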

static inline char *blkg_path(struct blkio_group *blkg)
{
	return blkg->path;
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding queue_lock and an existing reference.
 */
static inline void blkg_get(struct blkio_group *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(!blkg->refcnt);
	blkg->refcnt++;
}

void __blkg_release(struct blkio_group *blkg);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 *
 * The caller should be holding queue_lock.
 */
static inline void blkg_put(struct blkio_group *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(blkg->refcnt <= 0);
	if (!--blkg->refcnt)
		__blkg_release(blkg);
}
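
/*
 * Illustrative sketch of the refcounting rules above, assuming a
 * hypothetical caller that wants to keep a blkg across a region where
 * queue_lock is dropped:
 *
 *	spin_lock_irq(q->queue_lock);
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg)
 *		blkg_get(blkg);		// piggybacks on the queue's ref
 *	spin_unlock_irq(q->queue_lock);
 *	...
 *	spin_lock_irq(q->queue_lock);
 *	blkg_put(blkg);			// may end up in __blkg_release()
 *	spin_unlock_irq(q->queue_lock);
 */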

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller is responsible for synchronizing calls to
 * this function.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	u64_stats_update_begin(&stat->syncp);
	stat->cnt += val;
	u64_stats_update_end(&stat->syncp);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 *
 * Read the current value of @stat.  This function can be called without
 * synchronization and takes care of u64 atomicity.
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	unsigned int start;
	uint64_t v;

	do {
		start = u64_stats_fetch_begin(&stat->syncp);
		v = stat->cnt;
	} while (u64_stats_fetch_retry(&stat->syncp, start));

	return v;
}
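
/*
 * Illustrative sketch: updates are serialized by the caller (e.g. under
 * queue_lock) while reads need no locking:
 *
 *	blkg_stat_add(&stats->time, used_ns);	// writer, caller-serialized
 *	v = blkg_stat_read(&stats->time);	// reader, lock-free
 *
 * On 32-bit SMP the syncp seqcount makes the 64-bit read atomic; on
 * 64-bit it reduces to a plain load.
 */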

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	stat->cnt = 0;
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   int rw, uint64_t val)
{
	u64_stats_update_begin(&rwstat->syncp);

	if (rw & REQ_WRITE)
		rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_READ] += val;
	if (rw & REQ_SYNC)
		rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;

	u64_stats_update_end(&rwstat->syncp);
}
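
/*
 * Illustrative sketch: accounting one request against the per-cpu stats,
 * assuming @rw carries the request's REQ_* flags:
 *
 *	blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
 *	blkg_rwstat_add(&stats_cpu->service_bytes, rw, nr_bytes);
 *
 * A REQ_WRITE | REQ_SYNC request bumps the WRITE and SYNC counters; a
 * plain read bumps READ and ASYNC.
 */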

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it as the return value.
 * This function can be called without synchronization and takes care of
 * u64 atomicity.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	unsigned int start;
	struct blkg_rwstat tmp;

	do {
		start = u64_stats_fetch_begin(&rwstat->syncp);
		tmp = *rwstat;
	} while (u64_stats_fetch_retry(&rwstat->syncp, start));

	return tmp;
}

/**
 * blkg_rwstat_sum - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_sum(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}
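
/*
 * Illustrative sketch: reporting per-direction and total counts from one
 * consistent snapshot:
 *
 *	struct blkg_rwstat tmp = blkg_rwstat_read(&stats->queued);
 *	uint64_t nr_reads  = tmp.cnt[BLKG_RWSTAT_READ];
 *	uint64_t nr_writes = tmp.cnt[BLKG_RWSTAT_WRITE];
 *	uint64_t nr_total  = blkg_rwstat_sum(&stats->queued);
 *
 * Note that BLKG_RWSTAT_TOTAL aliases BLKG_RWSTAT_NR: the total is a
 * derived value, not a stored counter.
 */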

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
}

#else

struct blkio_group {
};

struct blkio_policy_type {
};

static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline void blkio_policy_register(struct blkio_policy_type *blkiop) { }
static inline void blkio_policy_unregister(struct blkio_policy_type *blkiop) { }
static inline void blkg_destroy_all(struct request_queue *q,
				    bool destroy_root) { }
static inline void update_root_blkg_pd(struct request_queue *q,
				       enum blkio_policy_id plid) { }

static inline void *blkg_to_pdata(struct blkio_group *blkg,
				struct blkio_policy_type *pol) { return NULL; }
static inline struct blkio_group *pdata_to_blkg(void *pdata) { return NULL; }
static inline char *blkg_path(struct blkio_group *blkg) { return NULL; }
static inline void blkg_get(struct blkio_group *blkg) { }
static inline void blkg_put(struct blkio_group *blkg) { }

#endif

#define BLKIO_WEIGHT_MIN	10
#define BLKIO_WEIGHT_MAX	1000
#define BLKIO_WEIGHT_DEFAULT	500

#ifdef CONFIG_DEBUG_BLK_CGROUP
void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
					 struct blkio_policy_type *pol);
void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  struct blkio_policy_type *pol,
				  unsigned long dequeue);
void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
					struct blkio_policy_type *pol);
void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol);
void blkiocg_set_start_empty_time(struct blkio_group *blkg,
				  struct blkio_policy_type *pol);

#define BLKG_FLAG_FNS(name)						\
static inline void blkio_mark_blkg_##name(				\
		struct blkio_group_stats *stats)			\
{									\
	stats->flags |= (1 << BLKG_##name);				\
}									\
static inline void blkio_clear_blkg_##name(				\
		struct blkio_group_stats *stats)			\
{									\
	stats->flags &= ~(1 << BLKG_##name);				\
}									\
static inline int blkio_blkg_##name(struct blkio_group_stats *stats)	\
{									\
	return (stats->flags & (1 << BLKG_##name)) != 0;		\
}									\

BLKG_FLAG_FNS(waiting)
BLKG_FLAG_FNS(idling)
BLKG_FLAG_FNS(empty)
#undef BLKG_FLAG_FNS
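
/*
 * Illustrative sketch: each BLKG_FLAG_FNS(name) expansion above generates
 * three helpers, so for "waiting" the following exist:
 *
 *	blkio_mark_blkg_waiting(stats);		// set BLKG_waiting
 *	blkio_blkg_waiting(stats);		// test, returns 0 or 1
 *	blkio_clear_blkg_waiting(stats);	// clear BLKG_waiting
 *
 * and likewise for "idling" and "empty".
 */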
#else
static inline void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
		struct blkio_policy_type *pol) { }
static inline void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
		struct blkio_policy_type *pol, unsigned long dequeue) { }
static inline void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
		struct blkio_policy_type *pol) { }
static inline void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
		struct blkio_policy_type *pol) { }
static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg,
		struct blkio_policy_type *pol) { }
#endif

#ifdef CONFIG_BLK_CGROUP
extern struct blkio_cgroup blkio_root_cgroup;
extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup);
extern struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio);
extern struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
				       struct request_queue *q);
struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
				       struct request_queue *q,
				       bool for_root);
void blkiocg_update_timeslice_used(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   unsigned long time,
				   unsigned long unaccounted_time);
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   uint64_t bytes, bool direction, bool sync);
void blkiocg_update_completion_stats(struct blkio_group *blkg,
				     struct blkio_policy_type *pol,
				     uint64_t start_time,
				     uint64_t io_start_time, bool direction,
				     bool sync);
void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync);
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
				 struct blkio_policy_type *pol,
				 struct blkio_group *curr_blkg, bool direction,
				 bool sync);
void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync);
#else
struct cgroup;
static inline struct blkio_cgroup *
cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; }
static inline struct blkio_cgroup *
bio_blkio_cgroup(struct bio *bio) { return NULL; }

static inline struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
				struct request_queue *q) { return NULL; }
static inline void blkiocg_update_timeslice_used(struct blkio_group *blkg,
			struct blkio_policy_type *pol, unsigned long time,
			unsigned long unaccounted_time) { }
static inline void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol, uint64_t bytes,
			bool direction, bool sync) { }
static inline void blkiocg_update_completion_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol, uint64_t start_time,
			uint64_t io_start_time, bool direction, bool sync) { }
static inline void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol, bool direction,
			bool sync) { }
static inline void blkiocg_update_io_add_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol,
			struct blkio_group *curr_blkg, bool direction,
			bool sync) { }
static inline void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol, bool direction,
			bool sync) { }
#endif
#endif /* _BLK_CGROUP_H */