/*
 * resource cgroups
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 */

#include <linux/types.h>
#include <linux/parser.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/res_counter.h>
#include <linux/uaccess.h>
#include <linux/mm.h>

18void res_counter_init(struct res_counter *counter)
19{
20 spin_lock_init(&counter->lock);
Balbir Singh0eea1032008-02-07 00:13:57 -080021 counter->limit = (unsigned long long)LLONG_MAX;
Pavel Emelianove552b662008-02-07 00:13:49 -080022}
23
24int res_counter_charge_locked(struct res_counter *counter, unsigned long val)
25{
26 if (counter->usage + val > counter->limit) {
27 counter->failcnt++;
28 return -ENOMEM;
29 }
30
31 counter->usage += val;
Pavel Emelyanovc84872e2008-04-29 01:00:17 -070032 if (counter->usage > counter->max_usage)
33 counter->max_usage = counter->usage;
Pavel Emelianove552b662008-02-07 00:13:49 -080034 return 0;
35}
36
37int res_counter_charge(struct res_counter *counter, unsigned long val)
38{
39 int ret;
40 unsigned long flags;
41
42 spin_lock_irqsave(&counter->lock, flags);
43 ret = res_counter_charge_locked(counter, val);
44 spin_unlock_irqrestore(&counter->lock, flags);
45 return ret;
46}
47
48void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
49{
50 if (WARN_ON(counter->usage < val))
51 val = counter->usage;
52
53 counter->usage -= val;
54}
55
56void res_counter_uncharge(struct res_counter *counter, unsigned long val)
57{
58 unsigned long flags;
59
60 spin_lock_irqsave(&counter->lock, flags);
61 res_counter_uncharge_locked(counter, val);
62 spin_unlock_irqrestore(&counter->lock, flags);
63}
64
65
Balbir Singh0eea1032008-02-07 00:13:57 -080066static inline unsigned long long *
67res_counter_member(struct res_counter *counter, int member)
Pavel Emelianove552b662008-02-07 00:13:49 -080068{
69 switch (member) {
70 case RES_USAGE:
71 return &counter->usage;
Pavel Emelyanovc84872e2008-04-29 01:00:17 -070072 case RES_MAX_USAGE:
73 return &counter->max_usage;
Pavel Emelianove552b662008-02-07 00:13:49 -080074 case RES_LIMIT:
75 return &counter->limit;
76 case RES_FAILCNT:
77 return &counter->failcnt;
78 };
79
80 BUG();
81 return NULL;
82}
83
84ssize_t res_counter_read(struct res_counter *counter, int member,
Balbir Singh0eea1032008-02-07 00:13:57 -080085 const char __user *userbuf, size_t nbytes, loff_t *pos,
86 int (*read_strategy)(unsigned long long val, char *st_buf))
Pavel Emelianove552b662008-02-07 00:13:49 -080087{
Balbir Singh0eea1032008-02-07 00:13:57 -080088 unsigned long long *val;
Pavel Emelianove552b662008-02-07 00:13:49 -080089 char buf[64], *s;
90
91 s = buf;
92 val = res_counter_member(counter, member);
Balbir Singh0eea1032008-02-07 00:13:57 -080093 if (read_strategy)
94 s += read_strategy(*val, s);
95 else
96 s += sprintf(s, "%llu\n", *val);
Pavel Emelianove552b662008-02-07 00:13:49 -080097 return simple_read_from_buffer((void __user *)userbuf, nbytes,
98 pos, buf, s - buf);
99}
100
Paul Menage2c7eabf2008-04-29 00:59:58 -0700101u64 res_counter_read_u64(struct res_counter *counter, int member)
102{
103 return *res_counter_member(counter, member);
104}
105
Paul Menage856c13a2008-07-25 01:47:04 -0700106int res_counter_memparse_write_strategy(const char *buf,
107 unsigned long long *res)
Pavel Emelianove552b662008-02-07 00:13:49 -0800108{
Paul Menage856c13a2008-07-25 01:47:04 -0700109 char *end;
110 /* FIXME - make memparse() take const char* args */
111 *res = memparse((char *)buf, &end);
112 if (*end != '\0')
113 return -EINVAL;
114
115 *res = PAGE_ALIGN(*res);
116 return 0;
117}
118
119int res_counter_write(struct res_counter *counter, int member,
120 const char *buf, write_strategy_fn write_strategy)
121{
122 char *end;
Balbir Singh0eea1032008-02-07 00:13:57 -0800123 unsigned long flags;
124 unsigned long long tmp, *val;
Pavel Emelianove552b662008-02-07 00:13:49 -0800125
Balbir Singh0eea1032008-02-07 00:13:57 -0800126 if (write_strategy) {
Paul Menage856c13a2008-07-25 01:47:04 -0700127 if (write_strategy(buf, &tmp))
128 return -EINVAL;
Balbir Singh0eea1032008-02-07 00:13:57 -0800129 } else {
130 tmp = simple_strtoull(buf, &end, 10);
131 if (*end != '\0')
Paul Menage856c13a2008-07-25 01:47:04 -0700132 return -EINVAL;
Balbir Singh0eea1032008-02-07 00:13:57 -0800133 }
134 spin_lock_irqsave(&counter->lock, flags);
Pavel Emelianove552b662008-02-07 00:13:49 -0800135 val = res_counter_member(counter, member);
136 *val = tmp;
Balbir Singh0eea1032008-02-07 00:13:57 -0800137 spin_unlock_irqrestore(&counter->lock, flags);
Paul Menage856c13a2008-07-25 01:47:04 -0700138 return 0;
Pavel Emelianove552b662008-02-07 00:13:49 -0800139}