/*
 * resource cgroups
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 */

#include <linux/types.h>
#include <linux/parser.h>
#include <linux/fs.h>
#include <linux/res_counter.h>
#include <linux/uaccess.h>
#include <linux/mm.h>

Balbir Singh28dbc4b2009-01-07 18:08:05 -080017void res_counter_init(struct res_counter *counter, struct res_counter *parent)
Pavel Emelianove552b662008-02-07 00:13:49 -080018{
19 spin_lock_init(&counter->lock);
Sha Zhengju6de5a8b2013-09-12 15:13:47 -070020 counter->limit = RES_COUNTER_MAX;
21 counter->soft_limit = RES_COUNTER_MAX;
Balbir Singh28dbc4b2009-01-07 18:08:05 -080022 counter->parent = parent;
Pavel Emelianove552b662008-02-07 00:13:49 -080023}
24
David Rientjes539a13b2014-04-07 15:37:32 -070025static u64 res_counter_uncharge_locked(struct res_counter *counter,
26 unsigned long val)
27{
28 if (WARN_ON(counter->usage < val))
29 val = counter->usage;
30
31 counter->usage -= val;
32 return counter->usage;
33}
34
35static int res_counter_charge_locked(struct res_counter *counter,
36 unsigned long val, bool force)
Pavel Emelianove552b662008-02-07 00:13:49 -080037{
Frederic Weisbecker4d8438f2012-04-25 01:11:35 +020038 int ret = 0;
39
Pavel Emelianove552b662008-02-07 00:13:49 -080040 if (counter->usage + val > counter->limit) {
41 counter->failcnt++;
Frederic Weisbecker4d8438f2012-04-25 01:11:35 +020042 ret = -ENOMEM;
43 if (!force)
44 return ret;
Pavel Emelianove552b662008-02-07 00:13:49 -080045 }
46
47 counter->usage += val;
Frederic Weisbecker0d4dde12012-04-25 01:11:36 +020048 if (counter->usage > counter->max_usage)
Pavel Emelyanovc84872e2008-04-29 01:00:17 -070049 counter->max_usage = counter->usage;
Pavel Emelianove552b662008-02-07 00:13:49 -080050 return ret;
51}
52
Frederic Weisbecker4d8438f2012-04-25 01:11:35 +020053static int __res_counter_charge(struct res_counter *counter, unsigned long val,
54 struct res_counter **limit_fail_at, bool force)
Glauber Costa0e90b312012-01-20 04:57:16 +000055{
56 int ret, r;
57 unsigned long flags;
Frederic Weisbecker4d8438f2012-04-25 01:11:35 +020058 struct res_counter *c, *u;
Glauber Costa0e90b312012-01-20 04:57:16 +000059
60 r = ret = 0;
61 *limit_fail_at = NULL;
62 local_irq_save(flags);
63 for (c = counter; c != NULL; c = c->parent) {
64 spin_lock(&c->lock);
Frederic Weisbecker4d8438f2012-04-25 01:11:35 +020065 r = res_counter_charge_locked(c, val, force);
Glauber Costa0e90b312012-01-20 04:57:16 +000066 spin_unlock(&c->lock);
Frederic Weisbecker4d8438f2012-04-25 01:11:35 +020067 if (r < 0 && !ret) {
Glauber Costa0e90b312012-01-20 04:57:16 +000068 ret = r;
Frederic Weisbecker4d8438f2012-04-25 01:11:35 +020069 *limit_fail_at = c;
70 if (!force)
71 break;
72 }
73 }
74
75 if (ret < 0 && !force) {
76 for (u = counter; u != c; u = u->parent) {
77 spin_lock(&u->lock);
78 res_counter_uncharge_locked(u, val);
79 spin_unlock(&u->lock);
Glauber Costa0e90b312012-01-20 04:57:16 +000080 }
81 }
82 local_irq_restore(flags);
83
84 return ret;
85}
Frederic Weisbecker4d8438f2012-04-25 01:11:35 +020086
87int res_counter_charge(struct res_counter *counter, unsigned long val,
88 struct res_counter **limit_fail_at)
89{
90 return __res_counter_charge(counter, val, limit_fail_at, false);
91}
92
93int res_counter_charge_nofail(struct res_counter *counter, unsigned long val,
94 struct res_counter **limit_fail_at)
95{
96 return __res_counter_charge(counter, val, limit_fail_at, true);
97}
98
Glauber Costa50bdd432012-12-18 14:22:04 -080099u64 res_counter_uncharge_until(struct res_counter *counter,
100 struct res_counter *top,
101 unsigned long val)
Pavel Emelianove552b662008-02-07 00:13:49 -0800102{
103 unsigned long flags;
Balbir Singh28dbc4b2009-01-07 18:08:05 -0800104 struct res_counter *c;
Glauber Costa50bdd432012-12-18 14:22:04 -0800105 u64 ret = 0;
Pavel Emelianove552b662008-02-07 00:13:49 -0800106
Balbir Singh28dbc4b2009-01-07 18:08:05 -0800107 local_irq_save(flags);
Frederic Weisbecker2bb2ba92012-05-29 15:07:03 -0700108 for (c = counter; c != top; c = c->parent) {
Glauber Costa50bdd432012-12-18 14:22:04 -0800109 u64 r;
Balbir Singh28dbc4b2009-01-07 18:08:05 -0800110 spin_lock(&c->lock);
Glauber Costa50bdd432012-12-18 14:22:04 -0800111 r = res_counter_uncharge_locked(c, val);
112 if (c == counter)
113 ret = r;
Balbir Singh28dbc4b2009-01-07 18:08:05 -0800114 spin_unlock(&c->lock);
115 }
116 local_irq_restore(flags);
Glauber Costa50bdd432012-12-18 14:22:04 -0800117 return ret;
Pavel Emelianove552b662008-02-07 00:13:49 -0800118}
119
Glauber Costa50bdd432012-12-18 14:22:04 -0800120u64 res_counter_uncharge(struct res_counter *counter, unsigned long val)
Frederic Weisbecker2bb2ba92012-05-29 15:07:03 -0700121{
Glauber Costa50bdd432012-12-18 14:22:04 -0800122 return res_counter_uncharge_until(counter, NULL, val);
Frederic Weisbecker2bb2ba92012-05-29 15:07:03 -0700123}
Pavel Emelianove552b662008-02-07 00:13:49 -0800124
Balbir Singh0eea1032008-02-07 00:13:57 -0800125static inline unsigned long long *
126res_counter_member(struct res_counter *counter, int member)
Pavel Emelianove552b662008-02-07 00:13:49 -0800127{
128 switch (member) {
129 case RES_USAGE:
130 return &counter->usage;
Pavel Emelyanovc84872e2008-04-29 01:00:17 -0700131 case RES_MAX_USAGE:
132 return &counter->max_usage;
Pavel Emelianove552b662008-02-07 00:13:49 -0800133 case RES_LIMIT:
134 return &counter->limit;
135 case RES_FAILCNT:
136 return &counter->failcnt;
Balbir Singh296c81d2009-09-23 15:56:36 -0700137 case RES_SOFT_LIMIT:
138 return &counter->soft_limit;
Pavel Emelianove552b662008-02-07 00:13:49 -0800139 };
140
141 BUG();
142 return NULL;
143}
144
145ssize_t res_counter_read(struct res_counter *counter, int member,
Balbir Singh0eea1032008-02-07 00:13:57 -0800146 const char __user *userbuf, size_t nbytes, loff_t *pos,
147 int (*read_strategy)(unsigned long long val, char *st_buf))
Pavel Emelianove552b662008-02-07 00:13:49 -0800148{
Balbir Singh0eea1032008-02-07 00:13:57 -0800149 unsigned long long *val;
Pavel Emelianove552b662008-02-07 00:13:49 -0800150 char buf[64], *s;
151
152 s = buf;
153 val = res_counter_member(counter, member);
Balbir Singh0eea1032008-02-07 00:13:57 -0800154 if (read_strategy)
155 s += read_strategy(*val, s);
156 else
157 s += sprintf(s, "%llu\n", *val);
Pavel Emelianove552b662008-02-07 00:13:49 -0800158 return simple_read_from_buffer((void __user *)userbuf, nbytes,
159 pos, buf, s - buf);
160}
161
KAMEZAWA Hiroyuki6c191cd2011-03-23 16:42:18 -0700162#if BITS_PER_LONG == 32
163u64 res_counter_read_u64(struct res_counter *counter, int member)
164{
165 unsigned long flags;
166 u64 ret;
167
168 spin_lock_irqsave(&counter->lock, flags);
169 ret = *res_counter_member(counter, member);
170 spin_unlock_irqrestore(&counter->lock, flags);
171
172 return ret;
173}
174#else
Paul Menage2c7eabf2008-04-29 00:59:58 -0700175u64 res_counter_read_u64(struct res_counter *counter, int member)
176{
177 return *res_counter_member(counter, member);
178}
KAMEZAWA Hiroyuki6c191cd2011-03-23 16:42:18 -0700179#endif
Paul Menage2c7eabf2008-04-29 00:59:58 -0700180
Paul Menage856c13a2008-07-25 01:47:04 -0700181int res_counter_memparse_write_strategy(const char *buf,
Sha Zhengju1a36e592013-09-12 15:13:49 -0700182 unsigned long long *resp)
Pavel Emelianove552b662008-02-07 00:13:49 -0800183{
Paul Menage856c13a2008-07-25 01:47:04 -0700184 char *end;
Sha Zhengju1a36e592013-09-12 15:13:49 -0700185 unsigned long long res;
Daisuke Nishimurac5b947b2009-06-17 16:27:20 -0700186
Sha Zhengju6de5a8b2013-09-12 15:13:47 -0700187 /* return RES_COUNTER_MAX(unlimited) if "-1" is specified */
Daisuke Nishimurac5b947b2009-06-17 16:27:20 -0700188 if (*buf == '-') {
Sha Zhengju1a36e592013-09-12 15:13:49 -0700189 res = simple_strtoull(buf + 1, &end, 10);
190 if (res != 1 || *end != '\0')
Daisuke Nishimurac5b947b2009-06-17 16:27:20 -0700191 return -EINVAL;
Sha Zhengju1a36e592013-09-12 15:13:49 -0700192 *resp = RES_COUNTER_MAX;
Daisuke Nishimurac5b947b2009-06-17 16:27:20 -0700193 return 0;
194 }
195
Sha Zhengju1a36e592013-09-12 15:13:49 -0700196 res = memparse(buf, &end);
Paul Menage856c13a2008-07-25 01:47:04 -0700197 if (*end != '\0')
198 return -EINVAL;
199
Sha Zhengju1a36e592013-09-12 15:13:49 -0700200 if (PAGE_ALIGN(res) >= res)
201 res = PAGE_ALIGN(res);
Sha Zhengju3af33512013-09-12 15:13:48 -0700202 else
Sha Zhengju1a36e592013-09-12 15:13:49 -0700203 res = RES_COUNTER_MAX;
204
205 *resp = res;
Sha Zhengju3af33512013-09-12 15:13:48 -0700206
Paul Menage856c13a2008-07-25 01:47:04 -0700207 return 0;
208}