/* memcontrol.c - Memory Controller
2 *
3 * Copyright IBM Corporation, 2007
4 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/err.h>
20
21struct cgroup_subsys mem_cgroup_subsys;
22
/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark.
 */
struct mem_cgroup {
	/* cgroup core's per-subsystem state; mem_cgroup_from_cont() recovers
	 * this struct from &css via container_of(), so keep the embedding. */
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for memory usage
	 */
	struct res_counter res;
};
39
/*
 * A page_cgroup page is associated with every page descriptor. The
 * page_cgroup helps us identify information about the cgroup
 */
struct page_cgroup {
	struct list_head lru;		/* per cgroup LRU list */
	struct page *page;		/* the page this descriptor tracks */
	struct mem_cgroup *mem_cgroup;	/* owning cgroup's memory controller */
};
49
50
51static inline
52struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
53{
54 return container_of(cgroup_subsys_state(cont,
55 mem_cgroup_subsys_id), struct mem_cgroup,
56 css);
57}
58
59static ssize_t mem_cgroup_read(struct cgroup *cont, struct cftype *cft,
60 struct file *file, char __user *userbuf, size_t nbytes,
61 loff_t *ppos)
62{
63 return res_counter_read(&mem_cgroup_from_cont(cont)->res,
64 cft->private, userbuf, nbytes, ppos);
65}
66
67static ssize_t mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
68 struct file *file, const char __user *userbuf,
69 size_t nbytes, loff_t *ppos)
70{
71 return res_counter_write(&mem_cgroup_from_cont(cont)->res,
72 cft->private, userbuf, nbytes, ppos);
73}
74
/*
 * Control files exposed in each memory cgroup's directory. The .private
 * field selects which res_counter member the shared read/write handlers
 * operate on.
 */
static struct cftype mem_cgroup_files[] = {
	{
		/* current memory usage (read-only) */
		.name = "usage",
		.private = RES_USAGE,
		.read = mem_cgroup_read,
	},
	{
		/* memory usage limit (read/write) */
		.name = "limit",
		.private = RES_LIMIT,
		.write = mem_cgroup_write,
		.read = mem_cgroup_read,
	},
	{
		/* number of times the limit was hit (read-only) */
		.name = "failcnt",
		.private = RES_FAILCNT,
		.read = mem_cgroup_read,
	},
};
93
94static struct cgroup_subsys_state *
95mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
96{
97 struct mem_cgroup *mem;
98
99 mem = kzalloc(sizeof(struct mem_cgroup), GFP_KERNEL);
100 if (!mem)
101 return -ENOMEM;
102
103 res_counter_init(&mem->res);
104 return &mem->css;
105}
106
/*
 * Release the memory controller state allocated in mem_cgroup_create().
 */
static void mem_cgroup_destroy(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);

	kfree(mem);
}
112
113static int mem_cgroup_populate(struct cgroup_subsys *ss,
114 struct cgroup *cont)
115{
116 return cgroup_add_files(cont, ss, mem_cgroup_files,
117 ARRAY_SIZE(mem_cgroup_files));
118}
119
/* Subsystem descriptor registered with the cgroup core. */
struct cgroup_subsys mem_cgroup_subsys = {
	.name = "memory",
	.subsys_id = mem_cgroup_subsys_id,
	.create = mem_cgroup_create,	/* allocate per-cgroup state */
	.destroy = mem_cgroup_destroy,	/* free per-cgroup state */
	.populate = mem_cgroup_populate, /* add control files */
	.early_init = 0,	/* no init needed before the allocators are up */
};