blob: 7d7fbe2ef7822089c802c5654b4e0ec243f24a80 [file] [log] [blame]
Pavel Emelianove552b662008-02-07 00:13:49 -08001#ifndef __RES_COUNTER_H__
2#define __RES_COUNTER_H__
3
4/*
5 * Resource Counters
6 * Contain common data types and routines for resource accounting
7 *
8 * Copyright 2007 OpenVZ SWsoft Inc
9 *
10 * Author: Pavel Emelianov <xemul@openvz.org>
11 *
Li Zefan45ce80f2009-01-15 13:50:59 -080012 * See Documentation/cgroups/resource_counter.txt for more
Pavel Emelyanovfaebe9f2008-04-29 01:00:18 -070013 * info about what this counter is.
Pavel Emelianove552b662008-02-07 00:13:49 -080014 */
15
16#include <linux/cgroup.h>
17
18/*
19 * The core object. the cgroup that wishes to account for some
20 * resource may include this counter into its structures and use
21 * the helpers described beyond
22 */
23
struct res_counter {
	/*
	 * the current resource consumption level
	 */
	unsigned long long usage;
	/*
	 * the maximal value of the usage from the counter creation
	 * (i.e. the high-watermark; reset via res_counter_reset_max())
	 */
	unsigned long long max_usage;
	/*
	 * the hard limit that usage cannot exceed
	 */
	unsigned long long limit;
	/*
	 * the soft limit, which usage may exceed; the excess is
	 * reported by res_counter_soft_limit_excess()
	 */
	unsigned long long soft_limit;
	/*
	 * the number of unsuccessful attempts to consume the resource
	 * (reset via res_counter_reset_failcnt())
	 */
	unsigned long long failcnt;
	/*
	 * the lock to protect all of the above.
	 * the routines below consider this to be IRQ-safe
	 */
	spinlock_t lock;
	/*
	 * Parent counter, used for hierarchial resource accounting
	 * (NULL for a root counter)
	 */
	struct res_counter *parent;
};
55
Daisuke Nishimurac5b947b2009-06-17 16:27:20 -070056#define RESOURCE_MAX (unsigned long long)LLONG_MAX
57
Paul Menage2c7eabf2008-04-29 00:59:58 -070058/**
Pavel Emelianove552b662008-02-07 00:13:49 -080059 * Helpers to interact with userspace
Paul Menage2c7eabf2008-04-29 00:59:58 -070060 * res_counter_read_u64() - returns the value of the specified member.
Pavel Emelianove552b662008-02-07 00:13:49 -080061 * res_counter_read/_write - put/get the specified fields from the
62 * res_counter struct to/from the user
63 *
64 * @counter: the counter in question
65 * @member: the field to work with (see RES_xxx below)
66 * @buf: the buffer to opeate on,...
67 * @nbytes: its size...
68 * @pos: and the offset.
69 */
70
Paul Menage2c7eabf2008-04-29 00:59:58 -070071u64 res_counter_read_u64(struct res_counter *counter, int member);
72
Pavel Emelianove552b662008-02-07 00:13:49 -080073ssize_t res_counter_read(struct res_counter *counter, int member,
Balbir Singh0eea1032008-02-07 00:13:57 -080074 const char __user *buf, size_t nbytes, loff_t *pos,
75 int (*read_strategy)(unsigned long long val, char *s));
Paul Menage856c13a2008-07-25 01:47:04 -070076
77typedef int (*write_strategy_fn)(const char *buf, unsigned long long *val);
78
79int res_counter_memparse_write_strategy(const char *buf,
80 unsigned long long *res);
81
82int res_counter_write(struct res_counter *counter, int member,
83 const char *buffer, write_strategy_fn write_strategy);
Pavel Emelianove552b662008-02-07 00:13:49 -080084
/*
 * the field descriptors. one for each member of res_counter
 */

enum {
	RES_USAGE,	/* current usage */
	RES_MAX_USAGE,	/* usage high-watermark */
	RES_LIMIT,	/* hard limit */
	RES_FAILCNT,	/* number of failed charge attempts */
	RES_SOFT_LIMIT,	/* soft limit */
};
96
/*
 * helpers for accounting
 */

/*
 * Initialize @counter; @parent links it into a hierarchy for
 * hierarchical accounting (presumably NULL for a root counter --
 * confirm against callers).
 */
void res_counter_init(struct res_counter *counter, struct res_counter *parent);
/*
 * charge - try to consume more resource.
 *
 * @counter: the counter
 * @val: the amount of the resource. each controller defines its own
 * units, e.g. numbers, bytes, Kbytes, etc
 *
 * returns 0 on success and <0 if the counter->usage will exceed the
 * counter->limit _locked call expects the counter->lock to be taken
 *
 * charge_nofail works the same, except that it charges the resource
 * counter unconditionally, and returns < 0 if after the current
 * charge we are over limit.
 *
 * NOTE(review): *@limit_fail_at presumably receives the counter in the
 * hierarchy at which the charge failed -- confirm in res_counter.c.
 */

int __must_check res_counter_charge_locked(struct res_counter *counter,
		unsigned long val, bool force);
int __must_check res_counter_charge(struct res_counter *counter,
		unsigned long val, struct res_counter **limit_fail_at);
int res_counter_charge_nofail(struct res_counter *counter,
		unsigned long val, struct res_counter **limit_fail_at);
Pavel Emelianove552b662008-02-07 00:13:49 -0800124
/*
 * uncharge - tell that some portion of the resource is released
 *
 * @counter: the counter
 * @val: the amount of the resource
 *
 * these calls check for usage underflow and show a warning on the console
 * _locked call expects the counter->lock to be taken
 */

void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val);
void res_counter_uncharge(struct res_counter *counter, unsigned long val);

/*
 * Uncharge @val from @counter and its ancestors up to @top (whether @top
 * itself is uncharged is not visible here -- confirm in res_counter.c).
 */
void res_counter_uncharge_until(struct res_counter *counter,
				struct res_counter *top,
				unsigned long val);
Johannes Weiner9d11ea92011-03-23 16:42:21 -0700141/**
142 * res_counter_margin - calculate chargeable space of a counter
143 * @cnt: the counter
144 *
145 * Returns the difference between the hard limit and the current usage
146 * of resource counter @cnt.
147 */
148static inline unsigned long long res_counter_margin(struct res_counter *cnt)
Balbir Singh66e17072008-02-07 00:13:56 -0800149{
Johannes Weiner9d11ea92011-03-23 16:42:21 -0700150 unsigned long long margin;
151 unsigned long flags;
Balbir Singh66e17072008-02-07 00:13:56 -0800152
Johannes Weiner9d11ea92011-03-23 16:42:21 -0700153 spin_lock_irqsave(&cnt->lock, flags);
Glauber Costa8cfd14a2012-01-20 04:57:15 +0000154 if (cnt->limit > cnt->usage)
155 margin = cnt->limit - cnt->usage;
156 else
157 margin = 0;
Johannes Weiner9d11ea92011-03-23 16:42:21 -0700158 spin_unlock_irqrestore(&cnt->lock, flags);
159 return margin;
Balbir Singh296c81d2009-09-23 15:56:36 -0700160}
161
162/**
163 * Get the difference between the usage and the soft limit
164 * @cnt: The counter
165 *
166 * Returns 0 if usage is less than or equal to soft limit
167 * The difference between usage and soft limit, otherwise.
168 */
169static inline unsigned long long
170res_counter_soft_limit_excess(struct res_counter *cnt)
171{
172 unsigned long long excess;
173 unsigned long flags;
174
175 spin_lock_irqsave(&cnt->lock, flags);
176 if (cnt->usage <= cnt->soft_limit)
177 excess = 0;
178 else
179 excess = cnt->usage - cnt->soft_limit;
180 spin_unlock_irqrestore(&cnt->lock, flags);
181 return excess;
182}
183
Pavel Emelyanovc84872e2008-04-29 01:00:17 -0700184static inline void res_counter_reset_max(struct res_counter *cnt)
185{
186 unsigned long flags;
187
188 spin_lock_irqsave(&cnt->lock, flags);
189 cnt->max_usage = cnt->usage;
190 spin_unlock_irqrestore(&cnt->lock, flags);
191}
192
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -0700193static inline void res_counter_reset_failcnt(struct res_counter *cnt)
194{
195 unsigned long flags;
196
197 spin_lock_irqsave(&cnt->lock, flags);
198 cnt->failcnt = 0;
199 spin_unlock_irqrestore(&cnt->lock, flags);
200}
KAMEZAWA Hiroyuki12b98042008-07-25 01:47:19 -0700201
202static inline int res_counter_set_limit(struct res_counter *cnt,
203 unsigned long long limit)
204{
205 unsigned long flags;
206 int ret = -EBUSY;
207
208 spin_lock_irqsave(&cnt->lock, flags);
Li Zefan11d55d22008-09-05 14:00:18 -0700209 if (cnt->usage <= limit) {
KAMEZAWA Hiroyuki12b98042008-07-25 01:47:19 -0700210 cnt->limit = limit;
211 ret = 0;
212 }
213 spin_unlock_irqrestore(&cnt->lock, flags);
214 return ret;
215}
216
Balbir Singh296c81d2009-09-23 15:56:36 -0700217static inline int
218res_counter_set_soft_limit(struct res_counter *cnt,
219 unsigned long long soft_limit)
220{
221 unsigned long flags;
222
223 spin_lock_irqsave(&cnt->lock, flags);
224 cnt->soft_limit = soft_limit;
225 spin_unlock_irqrestore(&cnt->lock, flags);
226 return 0;
227}
228
Pavel Emelianove552b662008-02-07 00:13:49 -0800229#endif