#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>

#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)

#ifndef CONFIG_BLK_CGROUP
/* When blk-cgroup is a module, its subsys_id isn't a compile-time constant */
extern struct cgroup_subsys blkio_subsys;
#define blkio_subsys_id blkio_subsys.subsys_id
#endif

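/*
 * Statistics kept per blkio_group.  Entries up to and including
 * BLKIO_STAT_WAIT_TIME are accounted per stat_sub_type in
 * blkio_group_stats.stat_arr[]; entries from BLKIO_STAT_TIME onwards are
 * single-valued.
 */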
enum stat_type {
	/* Total time spent (in ns) between request dispatch to the driver and
	 * request completion for IOs done by this cgroup. This may not be
	 * accurate when NCQ is turned on. */
	BLKIO_STAT_SERVICE_TIME = 0,
	/* Total bytes transferred */
	BLKIO_STAT_SERVICE_BYTES,
	/* Total IOs serviced, post merge */
	BLKIO_STAT_SERVICED,
	/* Total time spent waiting in scheduler queue in ns */
	BLKIO_STAT_WAIT_TIME,
	/* All the single-valued stats go below this */
	BLKIO_STAT_TIME,
	BLKIO_STAT_SECTORS,
#ifdef CONFIG_DEBUG_BLK_CGROUP
	BLKIO_STAT_DEQUEUE
#endif
};

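/*
 * Sub-buckets for the array-valued stats above: broken down by request
 * direction (read/write) and by sync vs async.  BLKIO_STAT_TOTAL is the
 * number of sub-buckets and sizes blkio_group_stats.stat_arr[].
 */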
enum stat_sub_type {
	BLKIO_STAT_READ = 0,
	BLKIO_STAT_WRITE,
	BLKIO_STAT_SYNC,
	BLKIO_STAT_ASYNC,
	BLKIO_STAT_TOTAL
};

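/*
 * Per-cgroup state for the block IO controller: the cgroup's IO weight and
 * the list of blkio_groups attached to it (typically one per device the
 * cgroup has issued IO against).  The lock protects blkg_list.
 */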
struct blkio_cgroup {
	struct cgroup_subsys_state css;
	unsigned int weight;
	spinlock_t lock;
	struct hlist_head blkg_list;
};

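/* Per-group statistics, updated under blkio_group->stats_lock. */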
struct blkio_group_stats {
	/* total disk time and nr sectors dispatched by this group */
	uint64_t time;
	uint64_t sectors;
	uint64_t stat_arr[BLKIO_STAT_WAIT_TIME + 1][BLKIO_STAT_TOTAL];
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* How many times this group has been removed from service tree */
	unsigned long dequeue;
#endif
};

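/*
 * A group as seen by a blkio policy: looked up by the opaque, RCU-protected
 * key and linked onto its owning blkio_cgroup via blkcg_node.
 */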
struct blkio_group {
	/* An RCU-protected unique identifier for the group */
	void *key;
	struct hlist_node blkcg_node;
	unsigned short blkcg_id;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* Store cgroup path */
	char path[128];
#endif
	/* The device MKDEV(major, minor) this group has been created for */
	dev_t dev;

	/* Need to serialize the stats in the case of reset/update */
	spinlock_t stats_lock;
	struct blkio_group_stats stats;
};

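/*
 * Callbacks a blkio policy (e.g. an IO scheduler) registers so that the
 * cgroup code can ask it to unlink a group and can propagate weight
 * changes to it.
 */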
typedef void (blkio_unlink_group_fn) (void *key, struct blkio_group *blkg);
typedef void (blkio_update_group_weight_fn) (struct blkio_group *blkg,
						unsigned int weight);

struct blkio_policy_ops {
	blkio_unlink_group_fn *blkio_unlink_group_fn;
	blkio_update_group_weight_fn *blkio_update_group_weight_fn;
};

struct blkio_policy_type {
	struct list_head list;
	struct blkio_policy_ops ops;
};

/* Blkio controller policy registration */
extern void blkio_policy_register(struct blkio_policy_type *);
extern void blkio_policy_unregister(struct blkio_policy_type *);

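/*
 * Illustrative registration sketch (not part of this interface; the
 * "example_*" names are hypothetical).  A policy fills in a
 * blkio_policy_type with its callbacks and registers it at init time:
 *
 *	static struct blkio_policy_type example_blkio_policy = {
 *		.ops = {
 *			.blkio_unlink_group_fn = example_unlink_blkio_group,
 *			.blkio_update_group_weight_fn =
 *						example_update_group_weight,
 *		},
 *	};
 *
 *	blkio_policy_register(&example_blkio_policy);
 */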
#else

struct blkio_group {
};

struct blkio_policy_type {
};

static inline void blkio_policy_register(struct blkio_policy_type *blkiop) { }
static inline void blkio_policy_unregister(struct blkio_policy_type *blkiop) { }

#endif

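/* Valid range and default value for a cgroup's IO weight */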
#define BLKIO_WEIGHT_MIN	100
#define BLKIO_WEIGHT_MAX	1000
#define BLKIO_WEIGHT_DEFAULT	500

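/*
 * Debug-only helpers: blkg_path() returns the cgroup path stored in the
 * group, and blkiocg_update_dequeue_stats() counts removals from the
 * service tree.  Both compile away when CONFIG_DEBUG_BLK_CGROUP is not set.
 */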
#ifdef CONFIG_DEBUG_BLK_CGROUP
static inline char *blkg_path(struct blkio_group *blkg)
{
	return blkg->path;
}
void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				unsigned long dequeue);
#else
static inline char *blkg_path(struct blkio_group *blkg) { return NULL; }
static inline void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
						unsigned long dequeue) {}
#endif

#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
extern struct blkio_cgroup blkio_root_cgroup;
extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup);
extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
	struct blkio_group *blkg, void *key, dev_t dev);
extern int blkiocg_del_blkio_group(struct blkio_group *blkg);
extern struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg,
						void *key);
void blkio_group_init(struct blkio_group *blkg);
void blkiocg_update_timeslice_used(struct blkio_group *blkg,
					unsigned long time);
void blkiocg_update_dispatch_stats(struct blkio_group *blkg, uint64_t bytes,
					bool direction, bool sync);
void blkiocg_update_completion_stats(struct blkio_group *blkg,
	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync);
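/*
 * Typical flow (illustrative; variable names are placeholders): a policy
 * initializes and adds a group, then feeds the stats helpers as requests
 * are dispatched and completed:
 *
 *	blkio_group_init(blkg);
 *	blkiocg_add_blkio_group(blkcg, blkg, key, dev);
 *	...
 *	blkiocg_update_dispatch_stats(blkg, nr_bytes, is_write, is_sync);
 *	blkiocg_update_completion_stats(blkg, start_time, io_start_time,
 *					is_write, is_sync);
 */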
#else
struct cgroup;
static inline struct blkio_cgroup *
cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; }

static inline void blkio_group_init(struct blkio_group *blkg) {}
static inline void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
		struct blkio_group *blkg, void *key, dev_t dev) {}

static inline int
blkiocg_del_blkio_group(struct blkio_group *blkg) { return 0; }

static inline struct blkio_group *
blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key) { return NULL; }
static inline void blkiocg_update_timeslice_used(struct blkio_group *blkg,
						unsigned long time) {}
static inline void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				uint64_t bytes, bool direction, bool sync) {}
static inline void blkiocg_update_completion_stats(struct blkio_group *blkg,
		uint64_t start_time, uint64_t io_start_time, bool direction,
		bool sync) {}
#endif
#endif	/* _BLK_CGROUP_H */