/*
 * resource cgroups
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 */

#include <linux/types.h>
#include <linux/parser.h>
#include <linux/fs.h>
#include <linux/res_counter.h>
#include <linux/uaccess.h>
#include <linux/mm.h>

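/*
 * Initialize a counter: limits start at RESOURCE_MAX (unlimited) and the
 * counter is linked to its parent so that charges propagate up the hierarchy.
 */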
void res_counter_init(struct res_counter *counter, struct res_counter *parent)
{
	spin_lock_init(&counter->lock);
	counter->limit = RESOURCE_MAX;
	counter->soft_limit = RESOURCE_MAX;
	counter->parent = parent;
}

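/*
 * Charge @val to @counter with the counter lock already held. Fails with
 * -ENOMEM (and bumps failcnt) if the charge would exceed the limit;
 * otherwise usage is updated along with the max_usage watermark.
 */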
int res_counter_charge_locked(struct res_counter *counter, unsigned long val)
{
	if (counter->usage + val > counter->limit) {
		counter->failcnt++;
		return -ENOMEM;
	}

	counter->usage += val;
	if (counter->usage > counter->max_usage)
		counter->max_usage = counter->usage;
	return 0;
}

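/*
 * Charge @val to @counter and to every ancestor up the hierarchy. If any
 * level hits its limit, the partial charges are rolled back and
 * *limit_fail_at points at the counter that failed.
 */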
int res_counter_charge(struct res_counter *counter, unsigned long val,
			struct res_counter **limit_fail_at)
{
	int ret;
	unsigned long flags;
	struct res_counter *c, *u;

	*limit_fail_at = NULL;
	local_irq_save(flags);
	for (c = counter; c != NULL; c = c->parent) {
		spin_lock(&c->lock);
		ret = res_counter_charge_locked(c, val);
		spin_unlock(&c->lock);
		if (ret < 0) {
			*limit_fail_at = c;
			goto undo;
		}
	}
	ret = 0;
	goto done;
undo:
	for (u = counter; u != c; u = u->parent) {
		spin_lock(&u->lock);
		res_counter_uncharge_locked(u, val);
		spin_unlock(&u->lock);
	}
done:
	local_irq_restore(flags);
	return ret;
}

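/*
 * Uncharge @val with the counter lock held. An uncharge larger than the
 * current usage indicates a bug; it is clamped so usage never underflows.
 */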
void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
{
	if (WARN_ON(counter->usage < val))
		val = counter->usage;

	counter->usage -= val;
}

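/*
 * Uncharge @val from @counter and from every ancestor, mirroring
 * res_counter_charge().
 */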
void res_counter_uncharge(struct res_counter *counter, unsigned long val)
{
	unsigned long flags;
	struct res_counter *c;

	local_irq_save(flags);
	for (c = counter; c != NULL; c = c->parent) {
		spin_lock(&c->lock);
		res_counter_uncharge_locked(c, val);
		spin_unlock(&c->lock);
	}
	local_irq_restore(flags);
}

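/*
 * Map a RES_* member id onto the corresponding field of the counter.
 */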
static inline unsigned long long *
res_counter_member(struct res_counter *counter, int member)
{
	switch (member) {
	case RES_USAGE:
		return &counter->usage;
	case RES_MAX_USAGE:
		return &counter->max_usage;
	case RES_LIMIT:
		return &counter->limit;
	case RES_FAILCNT:
		return &counter->failcnt;
	case RES_SOFT_LIMIT:
		return &counter->soft_limit;
	}

	BUG();
	return NULL;
}

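/*
 * Format a counter member into a user buffer, using @read_strategy when
 * the caller supplies one and plain decimal otherwise.
 */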
ssize_t res_counter_read(struct res_counter *counter, int member,
		const char __user *userbuf, size_t nbytes, loff_t *pos,
		int (*read_strategy)(unsigned long long val, char *st_buf))
{
	unsigned long long *val;
	char buf[64], *s;

	s = buf;
	val = res_counter_member(counter, member);
	if (read_strategy)
		s += read_strategy(*val, s);
	else
		s += sprintf(s, "%llu\n", *val);
	return simple_read_from_buffer((void __user *)userbuf, nbytes,
			pos, buf, s - buf);
}

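/*
 * On 32-bit, a 64-bit member cannot be read in a single atomic load, so
 * take the counter lock; on 64-bit the field can be read directly.
 */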
#if BITS_PER_LONG == 32
u64 res_counter_read_u64(struct res_counter *counter, int member)
{
	unsigned long flags;
	u64 ret;

	spin_lock_irqsave(&counter->lock, flags);
	ret = *res_counter_member(counter, member);
	spin_unlock_irqrestore(&counter->lock, flags);

	return ret;
}
#else
u64 res_counter_read_u64(struct res_counter *counter, int member)
{
	return *res_counter_member(counter, member);
}
#endif

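/*
 * Parse a limit written by userspace: "-1" means unlimited (RESOURCE_MAX),
 * anything else goes through memparse() (so K/M/G suffixes are accepted)
 * and is rounded up to a page boundary.
 */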
int res_counter_memparse_write_strategy(const char *buf,
					unsigned long long *res)
{
	char *end;

	/* return RESOURCE_MAX (unlimited) if "-1" is specified */
	if (*buf == '-') {
		*res = simple_strtoull(buf + 1, &end, 10);
		if (*res != 1 || *end != '\0')
			return -EINVAL;
		*res = RESOURCE_MAX;
		return 0;
	}

	*res = memparse(buf, &end);
	if (*end != '\0')
		return -EINVAL;

	*res = PAGE_ALIGN(*res);
	return 0;
}

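/*
 * Store a new value into a counter member, parsed either by the caller's
 * @write_strategy or as a plain decimal number.
 */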
int res_counter_write(struct res_counter *counter, int member,
		      const char *buf, write_strategy_fn write_strategy)
{
	char *end;
	unsigned long flags;
	unsigned long long tmp, *val;

	if (write_strategy) {
		if (write_strategy(buf, &tmp))
			return -EINVAL;
	} else {
		tmp = simple_strtoull(buf, &end, 10);
		if (*end != '\0')
			return -EINVAL;
	}
	spin_lock_irqsave(&counter->lock, flags);
	val = res_counter_member(counter, member);
	*val = tmp;
	spin_unlock_irqrestore(&counter->lock, flags);
	return 0;
}