#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 * 	              Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>

#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)

#ifndef CONFIG_BLK_CGROUP
/* When blk-cgroup is a module, its subsys_id isn't a compile-time constant */
extern struct cgroup_subsys blkio_subsys;
#define blkio_subsys_id blkio_subsys.subsys_id
#endif

enum stat_type {
	/* Total time spent (in ns) between request dispatch to the driver and
	 * request completion for IOs done by this cgroup. This may not be
	 * accurate when NCQ is turned on. */
	BLKIO_STAT_SERVICE_TIME = 0,
	/* Total bytes transferred */
	BLKIO_STAT_SERVICE_BYTES,
	/* Total IOs serviced, post merge */
	BLKIO_STAT_SERVICED,
	/* Total time spent waiting in scheduler queue in ns */
	BLKIO_STAT_WAIT_TIME,
	/* Number of IOs merged */
	BLKIO_STAT_MERGED,
	/* Number of IOs queued up */
	BLKIO_STAT_QUEUED,
	/* All the single valued stats go below this */
	BLKIO_STAT_TIME,
	BLKIO_STAT_SECTORS,
#ifdef CONFIG_DEBUG_BLK_CGROUP
	BLKIO_STAT_AVG_QUEUE_SIZE,
	BLKIO_STAT_DEQUEUE
#endif
};

enum stat_sub_type {
	BLKIO_STAT_READ = 0,
	BLKIO_STAT_WRITE,
	BLKIO_STAT_SYNC,
	BLKIO_STAT_ASYNC,
	BLKIO_STAT_TOTAL
};

struct blkio_cgroup {
	struct cgroup_subsys_state css;
	unsigned int weight;
	spinlock_t lock;
	struct hlist_head blkg_list;
};

struct blkio_group_stats {
	/* total disk time and nr sectors dispatched by this group */
	uint64_t time;
	uint64_t sectors;
	uint64_t stat_arr[BLKIO_STAT_QUEUED + 1][BLKIO_STAT_TOTAL];
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* Sum of number of IOs queued across all samples */
	uint64_t avg_queue_size_sum;
	/* Count of samples taken for average */
	uint64_t avg_queue_size_samples;
	/* How many times this group has been removed from service tree */
	unsigned long dequeue;
#endif
};

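/*
 * Illustration only (not part of the kernel API): the per-direction stats
 * above live in stat_arr, indexed first by stat_type (only the entries up to
 * and including BLKIO_STAT_QUEUED) and then by stat_sub_type. A 4KB
 * synchronous write dispatched by the group might be accounted roughly
 * along these lines by the update helpers declared later in this header,
 * under stats_lock:
 *
 *	stats->stat_arr[BLKIO_STAT_SERVICE_BYTES][BLKIO_STAT_WRITE] += 4096;
 *	stats->stat_arr[BLKIO_STAT_SERVICE_BYTES][BLKIO_STAT_SYNC]  += 4096;
 *	stats->stat_arr[BLKIO_STAT_SERVICE_BYTES][BLKIO_STAT_TOTAL] += 4096;
 *	stats->stat_arr[BLKIO_STAT_SERVICED][BLKIO_STAT_WRITE]++;
 */
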
struct blkio_group {
	/* An rcu protected unique identifier for the group */
	void *key;
	struct hlist_node blkcg_node;
	unsigned short blkcg_id;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* Store cgroup path */
	char path[128];
#endif
	/* The device MKDEV(major, minor), this group has been created for */
	dev_t dev;

	/* Need to serialize the stats in the case of reset/update */
	spinlock_t stats_lock;
	struct blkio_group_stats stats;
};

typedef void (blkio_unlink_group_fn) (void *key, struct blkio_group *blkg);
typedef void (blkio_update_group_weight_fn) (struct blkio_group *blkg,
						unsigned int weight);

struct blkio_policy_ops {
	blkio_unlink_group_fn *blkio_unlink_group_fn;
	blkio_update_group_weight_fn *blkio_update_group_weight_fn;
};

struct blkio_policy_type {
	struct list_head list;
	struct blkio_policy_ops ops;
};

/* Blkio controller policy registration */
extern void blkio_policy_register(struct blkio_policy_type *);
extern void blkio_policy_unregister(struct blkio_policy_type *);

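/*
 * Hypothetical usage sketch (the names my_* are placeholders, not an
 * in-tree policy): an I/O scheduler wanting per-cgroup callbacks fills in
 * blkio_policy_ops with handlers matching the typedefs above and registers
 * itself, e.g.:
 *
 *	static struct blkio_policy_type my_blkio_policy = {
 *		.ops = {
 *			.blkio_unlink_group_fn		= my_unlink_group,
 *			.blkio_update_group_weight_fn	= my_update_weight,
 *		},
 *	};
 *
 *	blkio_policy_register(&my_blkio_policy);	(module init)
 *	blkio_policy_unregister(&my_blkio_policy);	(module exit)
 */
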
#else

struct blkio_group {
};

struct blkio_policy_type {
};

static inline void blkio_policy_register(struct blkio_policy_type *blkiop) { }
static inline void blkio_policy_unregister(struct blkio_policy_type *blkiop) { }

#endif

#define BLKIO_WEIGHT_MIN	100
#define BLKIO_WEIGHT_MAX	1000
#define BLKIO_WEIGHT_DEFAULT	500

#ifdef CONFIG_DEBUG_BLK_CGROUP
static inline char *blkg_path(struct blkio_group *blkg)
{
	return blkg->path;
}
void blkiocg_update_set_active_queue_stats(struct blkio_group *blkg);
void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				unsigned long dequeue);
#else
static inline char *blkg_path(struct blkio_group *blkg) { return NULL; }
static inline void blkiocg_update_set_active_queue_stats(
						struct blkio_group *blkg) {}
static inline void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
						unsigned long dequeue) {}
#endif

#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
extern struct blkio_cgroup blkio_root_cgroup;
extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup);
extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
			struct blkio_group *blkg, void *key, dev_t dev);
extern int blkiocg_del_blkio_group(struct blkio_group *blkg);
extern struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg,
						void *key);
void blkio_group_init(struct blkio_group *blkg);
void blkiocg_update_timeslice_used(struct blkio_group *blkg,
					unsigned long time);
void blkiocg_update_dispatch_stats(struct blkio_group *blkg, uint64_t bytes,
					bool direction, bool sync);
void blkiocg_update_completion_stats(struct blkio_group *blkg,
	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync);
void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
					bool sync);
void blkiocg_update_request_add_stats(struct blkio_group *blkg,
		struct blkio_group *curr_blkg, bool direction, bool sync);
void blkiocg_update_request_remove_stats(struct blkio_group *blkg,
					bool direction, bool sync);
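
/*
 * Hypothetical flow (illustration only; my_group and my_key stand in for a
 * policy's own per-group data and rcu-protected key): a policy typically
 * embeds a struct blkio_group in its per-group structure, initializes it
 * and links it to the owning cgroup, e.g.:
 *
 *	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
 *
 *	blkio_group_init(&my_group->blkg);
 *	blkiocg_add_blkio_group(blkcg, &my_group->blkg, my_key,
 *				MKDEV(major, minor));
 *
 * It can later find the group again with blkiocg_lookup_group(blkcg, my_key)
 * and feed the blkiocg_update_*_stats() helpers as requests are queued,
 * dispatched, merged and completed.
 */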
#else
struct cgroup;
static inline struct blkio_cgroup *
cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; }

static inline void blkio_group_init(struct blkio_group *blkg) {}
static inline void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
		struct blkio_group *blkg, void *key, dev_t dev) {}

static inline int
blkiocg_del_blkio_group(struct blkio_group *blkg) { return 0; }

static inline struct blkio_group *
blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key) { return NULL; }
static inline void blkiocg_update_timeslice_used(struct blkio_group *blkg,
						unsigned long time) {}
static inline void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				uint64_t bytes, bool direction, bool sync) {}
static inline void blkiocg_update_completion_stats(struct blkio_group *blkg,
		uint64_t start_time, uint64_t io_start_time, bool direction,
		bool sync) {}
static inline void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
					bool direction, bool sync) {}
static inline void blkiocg_update_request_add_stats(struct blkio_group *blkg,
		struct blkio_group *curr_blkg, bool direction, bool sync) {}
static inline void blkiocg_update_request_remove_stats(struct blkio_group *blkg,
					bool direction, bool sync) {}
#endif
#endif /* _BLK_CGROUP_H */