blob: ad581aa2369a2ed8f925c395b2b4eadd9d8640f2 [file] [log] [blame]
Pavel Emelianove552b662008-02-07 00:13:49 -08001/*
2 * resource cgroups
3 *
4 * Copyright 2007 OpenVZ SWsoft Inc
5 *
6 * Author: Pavel Emelianov <xemul@openvz.org>
7 *
8 */
9
10#include <linux/types.h>
11#include <linux/parser.h>
12#include <linux/fs.h>
13#include <linux/res_counter.h>
14#include <linux/uaccess.h>
Paul Menage856c13a2008-07-25 01:47:04 -070015#include <linux/mm.h>
Pavel Emelianove552b662008-02-07 00:13:49 -080016
Balbir Singh28dbc4b2009-01-07 18:08:05 -080017void res_counter_init(struct res_counter *counter, struct res_counter *parent)
Pavel Emelianove552b662008-02-07 00:13:49 -080018{
19 spin_lock_init(&counter->lock);
Daisuke Nishimurac5b947b2009-06-17 16:27:20 -070020 counter->limit = RESOURCE_MAX;
Balbir Singh296c81d2009-09-23 15:56:36 -070021 counter->soft_limit = RESOURCE_MAX;
Balbir Singh28dbc4b2009-01-07 18:08:05 -080022 counter->parent = parent;
Pavel Emelianove552b662008-02-07 00:13:49 -080023}
24
Frederic Weisbecker4d8438f2012-04-25 01:11:35 +020025int res_counter_charge_locked(struct res_counter *counter, unsigned long val,
26 bool force)
Pavel Emelianove552b662008-02-07 00:13:49 -080027{
Frederic Weisbecker4d8438f2012-04-25 01:11:35 +020028 int ret = 0;
29
Pavel Emelianove552b662008-02-07 00:13:49 -080030 if (counter->usage + val > counter->limit) {
31 counter->failcnt++;
Frederic Weisbecker4d8438f2012-04-25 01:11:35 +020032 ret = -ENOMEM;
33 if (!force)
34 return ret;
Pavel Emelianove552b662008-02-07 00:13:49 -080035 }
36
37 counter->usage += val;
Frederic Weisbecker0d4dde12012-04-25 01:11:36 +020038 if (counter->usage > counter->max_usage)
Pavel Emelyanovc84872e2008-04-29 01:00:17 -070039 counter->max_usage = counter->usage;
Pavel Emelianove552b662008-02-07 00:13:49 -080040 return ret;
41}
42
/*
 * Charge @val against @counter and every ancestor up to the hierarchy root.
 *
 * IRQs are disabled for the whole walk and each counter's lock is taken in
 * turn (never more than one lock held at a time).  The first counter that
 * rejects the charge is reported through *limit_fail_at (NULL if all
 * charges succeeded).
 *
 * !force: stop at the first failure, roll back the charges already applied
 * to the descendants, and return -ENOMEM.
 * force:  charge the entire path unconditionally, but still return -ENOMEM
 * and the failing counter for the first level that was over its limit.
 */
static int __res_counter_charge(struct res_counter *counter, unsigned long val,
				struct res_counter **limit_fail_at, bool force)
{
	int ret, r;
	unsigned long flags;
	struct res_counter *c, *u;

	r = ret = 0;
	*limit_fail_at = NULL;
	local_irq_save(flags);
	for (c = counter; c != NULL; c = c->parent) {
		spin_lock(&c->lock);
		r = res_counter_charge_locked(c, val, force);
		spin_unlock(&c->lock);
		/* remember only the first failure (and where it happened) */
		if (r < 0 && !ret) {
			ret = r;
			*limit_fail_at = c;
			if (!force)
				break;
		}
	}

	if (ret < 0 && !force) {
		/* undo the partial charge; c is the counter that failed */
		for (u = counter; u != c; u = u->parent) {
			spin_lock(&u->lock);
			res_counter_uncharge_locked(u, val);
			spin_unlock(&u->lock);
		}
	}
	local_irq_restore(flags);

	return ret;
}
Frederic Weisbecker4d8438f2012-04-25 01:11:35 +020076
77int res_counter_charge(struct res_counter *counter, unsigned long val,
78 struct res_counter **limit_fail_at)
79{
80 return __res_counter_charge(counter, val, limit_fail_at, false);
81}
82
83int res_counter_charge_nofail(struct res_counter *counter, unsigned long val,
84 struct res_counter **limit_fail_at)
85{
86 return __res_counter_charge(counter, val, limit_fail_at, true);
87}
88
Pavel Emelianove552b662008-02-07 00:13:49 -080089void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
90{
91 if (WARN_ON(counter->usage < val))
92 val = counter->usage;
93
94 counter->usage -= val;
95}
96
Frederic Weisbecker2bb2ba92012-05-29 15:07:03 -070097void res_counter_uncharge_until(struct res_counter *counter,
98 struct res_counter *top,
99 unsigned long val)
Pavel Emelianove552b662008-02-07 00:13:49 -0800100{
101 unsigned long flags;
Balbir Singh28dbc4b2009-01-07 18:08:05 -0800102 struct res_counter *c;
Pavel Emelianove552b662008-02-07 00:13:49 -0800103
Balbir Singh28dbc4b2009-01-07 18:08:05 -0800104 local_irq_save(flags);
Frederic Weisbecker2bb2ba92012-05-29 15:07:03 -0700105 for (c = counter; c != top; c = c->parent) {
Balbir Singh28dbc4b2009-01-07 18:08:05 -0800106 spin_lock(&c->lock);
107 res_counter_uncharge_locked(c, val);
108 spin_unlock(&c->lock);
109 }
110 local_irq_restore(flags);
Pavel Emelianove552b662008-02-07 00:13:49 -0800111}
112
Frederic Weisbecker2bb2ba92012-05-29 15:07:03 -0700113void res_counter_uncharge(struct res_counter *counter, unsigned long val)
114{
115 res_counter_uncharge_until(counter, NULL, val);
116}
Pavel Emelianove552b662008-02-07 00:13:49 -0800117
Balbir Singh0eea1032008-02-07 00:13:57 -0800118static inline unsigned long long *
119res_counter_member(struct res_counter *counter, int member)
Pavel Emelianove552b662008-02-07 00:13:49 -0800120{
121 switch (member) {
122 case RES_USAGE:
123 return &counter->usage;
Pavel Emelyanovc84872e2008-04-29 01:00:17 -0700124 case RES_MAX_USAGE:
125 return &counter->max_usage;
Pavel Emelianove552b662008-02-07 00:13:49 -0800126 case RES_LIMIT:
127 return &counter->limit;
128 case RES_FAILCNT:
129 return &counter->failcnt;
Balbir Singh296c81d2009-09-23 15:56:36 -0700130 case RES_SOFT_LIMIT:
131 return &counter->soft_limit;
Pavel Emelianove552b662008-02-07 00:13:49 -0800132 };
133
134 BUG();
135 return NULL;
136}
137
138ssize_t res_counter_read(struct res_counter *counter, int member,
Balbir Singh0eea1032008-02-07 00:13:57 -0800139 const char __user *userbuf, size_t nbytes, loff_t *pos,
140 int (*read_strategy)(unsigned long long val, char *st_buf))
Pavel Emelianove552b662008-02-07 00:13:49 -0800141{
Balbir Singh0eea1032008-02-07 00:13:57 -0800142 unsigned long long *val;
Pavel Emelianove552b662008-02-07 00:13:49 -0800143 char buf[64], *s;
144
145 s = buf;
146 val = res_counter_member(counter, member);
Balbir Singh0eea1032008-02-07 00:13:57 -0800147 if (read_strategy)
148 s += read_strategy(*val, s);
149 else
150 s += sprintf(s, "%llu\n", *val);
Pavel Emelianove552b662008-02-07 00:13:49 -0800151 return simple_read_from_buffer((void __user *)userbuf, nbytes,
152 pos, buf, s - buf);
153}
154
/*
 * Read one counter member as a u64.
 *
 * On 32-bit kernels a 64-bit load is not a single atomic access, so the
 * counter lock is taken to avoid returning a torn value; on 64-bit
 * kernels the plain load suffices.
 */
#if BITS_PER_LONG == 32
u64 res_counter_read_u64(struct res_counter *counter, int member)
{
	unsigned long flags;
	u64 ret;

	spin_lock_irqsave(&counter->lock, flags);
	ret = *res_counter_member(counter, member);
	spin_unlock_irqrestore(&counter->lock, flags);

	return ret;
}
#else
u64 res_counter_read_u64(struct res_counter *counter, int member)
{
	return *res_counter_member(counter, member);
}
#endif
Paul Menage2c7eabf2008-04-29 00:59:58 -0700173
Paul Menage856c13a2008-07-25 01:47:04 -0700174int res_counter_memparse_write_strategy(const char *buf,
175 unsigned long long *res)
Pavel Emelianove552b662008-02-07 00:13:49 -0800176{
Paul Menage856c13a2008-07-25 01:47:04 -0700177 char *end;
Daisuke Nishimurac5b947b2009-06-17 16:27:20 -0700178
179 /* return RESOURCE_MAX(unlimited) if "-1" is specified */
180 if (*buf == '-') {
181 *res = simple_strtoull(buf + 1, &end, 10);
182 if (*res != 1 || *end != '\0')
183 return -EINVAL;
184 *res = RESOURCE_MAX;
185 return 0;
186 }
187
Davidlohr Bueso52dcf8a2011-12-05 22:13:41 +0100188 *res = memparse(buf, &end);
Paul Menage856c13a2008-07-25 01:47:04 -0700189 if (*end != '\0')
190 return -EINVAL;
191
192 *res = PAGE_ALIGN(*res);
193 return 0;
194}
195
196int res_counter_write(struct res_counter *counter, int member,
197 const char *buf, write_strategy_fn write_strategy)
198{
199 char *end;
Balbir Singh0eea1032008-02-07 00:13:57 -0800200 unsigned long flags;
201 unsigned long long tmp, *val;
Pavel Emelianove552b662008-02-07 00:13:49 -0800202
Balbir Singh0eea1032008-02-07 00:13:57 -0800203 if (write_strategy) {
Paul Menage856c13a2008-07-25 01:47:04 -0700204 if (write_strategy(buf, &tmp))
205 return -EINVAL;
Balbir Singh0eea1032008-02-07 00:13:57 -0800206 } else {
207 tmp = simple_strtoull(buf, &end, 10);
208 if (*end != '\0')
Paul Menage856c13a2008-07-25 01:47:04 -0700209 return -EINVAL;
Balbir Singh0eea1032008-02-07 00:13:57 -0800210 }
211 spin_lock_irqsave(&counter->lock, flags);
Pavel Emelianove552b662008-02-07 00:13:49 -0800212 val = res_counter_member(counter, member);
213 *val = tmp;
Balbir Singh0eea1032008-02-07 00:13:57 -0800214 spin_unlock_irqrestore(&counter->lock, flags);
Paul Menage856c13a2008-07-25 01:47:04 -0700215 return 0;
Pavel Emelianove552b662008-02-07 00:13:49 -0800216}