#ifndef __RES_COUNTER_H__
#define __RES_COUNTER_H__

/*
 * Resource Counters
 * Contain common data types and routines for resource accounting
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * See Documentation/cgroups/resource_counter.txt for more
 * info about what this counter is.
 */

#include <linux/cgroup.h>

/*
 * The core object. A cgroup that wishes to account for some
 * resource may include this counter in its structures and use
 * the helpers described below.
 */

struct res_counter {
	/*
	 * the current resource consumption level
	 */
	unsigned long long usage;
	/*
	 * the maximal value of the usage since the counter was created
	 */
	unsigned long long max_usage;
	/*
	 * the limit that usage cannot exceed
	 */
	unsigned long long limit;
	/*
	 * the limit that usage may exceed
	 */
	unsigned long long soft_limit;
	/*
	 * the number of unsuccessful attempts to consume the resource
	 */
	unsigned long long failcnt;
	/*
	 * the lock to protect all of the above.
	 * the routines below consider this to be IRQ-safe
	 */
	spinlock_t lock;
	/*
	 * Parent counter, used for hierarchical resource accounting
	 */
	struct res_counter *parent;
};
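
/*
 * For illustration (a hypothetical sketch, not part of this API): a
 * controller typically embeds the counter in its own per-group state,
 * e.g.
 *
 *	struct my_controller_state {		// hypothetical structure
 *		struct cgroup_subsys_state css;
 *		struct res_counter res;		// accounting for this group
 *	};
 */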

#define RESOURCE_MAX (unsigned long long)LLONG_MAX

/**
 * Helpers to interact with userspace:
 * res_counter_read_u64() - returns the value of the specified member.
 * res_counter_read/_write - put/get the specified fields from the
 * res_counter struct to/from the user
 *
 * @counter: the counter in question
 * @member: the field to work with (see RES_xxx below)
 * @buf: the buffer to operate on,...
 * @nbytes: its size...
 * @pos: and the offset.
 */

u64 res_counter_read_u64(struct res_counter *counter, int member);

ssize_t res_counter_read(struct res_counter *counter, int member,
		const char __user *buf, size_t nbytes, loff_t *pos,
		int (*read_strategy)(unsigned long long val, char *s));

typedef int (*write_strategy_fn)(const char *buf, unsigned long long *val);

int res_counter_memparse_write_strategy(const char *buf,
					unsigned long long *res);

int res_counter_write(struct res_counter *counter, int member,
		      const char *buffer, write_strategy_fn write_strategy);

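/*
 * For example (an illustrative sketch; "cnt" and "buffer" are
 * hypothetical), a controller can accept human-readable values such
 * as "4M" by using the provided memparse-based write strategy:
 *
 *	ret = res_counter_write(&cnt, RES_LIMIT, buffer,
 *				res_counter_memparse_write_strategy);
 */
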
/*
 * the field descriptors. one for each member of res_counter
 */

enum {
	RES_USAGE,
	RES_MAX_USAGE,
	RES_LIMIT,
	RES_FAILCNT,
	RES_SOFT_LIMIT,
};
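
/*
 * For example (an illustrative sketch), a cgroup read handler can
 * report the current usage with:
 *
 *	u64 usage = res_counter_read_u64(&cnt, RES_USAGE);
 */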

/*
 * helpers for accounting
 */

void res_counter_init(struct res_counter *counter, struct res_counter *parent);
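
/*
 * For example (a sketch; the counters named below are hypothetical), a
 * hierarchy is built by passing the parent's counter at init time, so
 * charges can propagate upwards:
 *
 *	res_counter_init(&parent_cnt, NULL);		// root of the hierarchy
 *	res_counter_init(&child_cnt, &parent_cnt);
 */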

/*
 * charge - try to consume more resource.
 *
 * @counter: the counter
 * @val: the amount of the resource. each controller defines its own
 *       units, e.g. numbers, bytes, Kbytes, etc
 *
 * returns 0 on success and <0 if the counter->usage would exceed the
 * counter->limit. The _locked call expects the counter->lock to be taken.
 *
 * charge_nofail works the same, except that it charges the resource
 * counter unconditionally, and returns < 0 if after the current
 * charge we are over limit.
 */

int __must_check res_counter_charge_locked(struct res_counter *counter,
		unsigned long val);
int __must_check res_counter_charge(struct res_counter *counter,
		unsigned long val, struct res_counter **limit_fail_at);
int __must_check res_counter_charge_nofail(struct res_counter *counter,
		unsigned long val, struct res_counter **limit_fail_at);

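/*
 * For example (an illustrative sketch; reclaim policy is up to the
 * controller), a charge records the ancestor at which it failed so the
 * caller can react to that specific counter:
 *
 *	struct res_counter *fail_at;
 *
 *	if (res_counter_charge(&cnt, PAGE_SIZE, &fail_at) < 0)
 *		return -ENOMEM;		// over limit at 'fail_at'
 */
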
/*
 * uncharge - tell that some portion of the resource is released
 *
 * @counter: the counter
 * @val: the amount of the resource
 *
 * these calls check for usage underflow and show a warning on the console.
 * The _locked call expects the counter->lock to be taken.
 */

void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val);
void res_counter_uncharge(struct res_counter *counter, unsigned long val);
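
/*
 * For example (a sketch mirroring the charge example above), every
 * successful charge is paired with an uncharge of the same amount when
 * the resource is released:
 *
 *	res_counter_uncharge(&cnt, PAGE_SIZE);
 */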

/**
 * res_counter_margin - calculate chargeable space of a counter
 * @cnt: the counter
 *
 * Returns the difference between the hard limit and the current usage
 * of resource counter @cnt.
 */
static inline unsigned long long res_counter_margin(struct res_counter *cnt)
{
	unsigned long long margin;
	unsigned long flags;

	spin_lock_irqsave(&cnt->lock, flags);
	if (cnt->limit > cnt->usage)
		margin = cnt->limit - cnt->usage;
	else
		margin = 0;
	spin_unlock_irqrestore(&cnt->lock, flags);
	return margin;
}
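
/*
 * For example (an illustrative sketch; 'bytes' is hypothetical), a
 * caller can test the remaining headroom before charging:
 *
 *	if (res_counter_margin(&cnt) < bytes)
 *		return -ENOMEM;		// no headroom; caller may reclaim first
 */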

/**
 * res_counter_soft_limit_excess - get the difference between the usage
 * and the soft limit
 * @cnt: The counter
 *
 * Returns 0 if the usage is less than or equal to the soft limit, and
 * the difference between the usage and the soft limit otherwise.
 */
static inline unsigned long long
res_counter_soft_limit_excess(struct res_counter *cnt)
{
	unsigned long long excess;
	unsigned long flags;

	spin_lock_irqsave(&cnt->lock, flags);
	if (cnt->usage <= cnt->soft_limit)
		excess = 0;
	else
		excess = cnt->usage - cnt->soft_limit;
	spin_unlock_irqrestore(&cnt->lock, flags);
	return excess;
}

static inline void res_counter_reset_max(struct res_counter *cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&cnt->lock, flags);
	cnt->max_usage = cnt->usage;
	spin_unlock_irqrestore(&cnt->lock, flags);
}

static inline void res_counter_reset_failcnt(struct res_counter *cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&cnt->lock, flags);
	cnt->failcnt = 0;
	spin_unlock_irqrestore(&cnt->lock, flags);
}

static inline int res_counter_set_limit(struct res_counter *cnt,
		unsigned long long limit)
{
	unsigned long flags;
	int ret = -EBUSY;

	spin_lock_irqsave(&cnt->lock, flags);
	if (cnt->usage <= limit) {
		cnt->limit = limit;
		ret = 0;
	}
	spin_unlock_irqrestore(&cnt->lock, flags);
	return ret;
}
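
/*
 * For example (an illustrative sketch; try_to_shrink() is a
 * hypothetical reclaim helper), lowering the limit below the current
 * usage fails with -EBUSY, so callers typically reclaim and retry:
 *
 *	while (res_counter_set_limit(&cnt, new_limit) == -EBUSY) {
 *		if (!try_to_shrink(&cnt))
 *			return -EBUSY;
 *	}
 */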

static inline int
res_counter_set_soft_limit(struct res_counter *cnt,
			   unsigned long long soft_limit)
{
	unsigned long flags;

	spin_lock_irqsave(&cnt->lock, flags);
	cnt->soft_limit = soft_limit;
	spin_unlock_irqrestore(&cnt->lock, flags);
	return 0;
}

#endif