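/*
 * Allocation, initialization and lookup of the per-page "struct
 * page_cgroup" descriptors used by the memory resource controller.
 * Without CONFIG_SPARSEMEM the descriptors live in one array per
 * node; with CONFIG_SPARSEMEM each mem_section carries its own array.
 */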
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/bit_spinlock.h>
#include <linux/page_cgroup.h>
#include <linux/hash.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>

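/* Reset one page_cgroup descriptor to its initial, unowned state. */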
static void __meminit
__init_page_cgroup(struct page_cgroup *pc, unsigned long pfn)
{
	pc->flags = 0;
	pc->mem_cgroup = NULL;
	pc->page = pfn_to_page(pfn);
}
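
/* Running total, in bytes, of all page_cgroup arrays allocated so far. */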
static unsigned long total_usage;

#if !defined(CONFIG_SPARSEMEM)

void __init pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
	pgdat->node_page_cgroup = NULL;
}

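/*
 * FLATMEM/DISCONTIGMEM lookup: index the node's array by the pfn's
 * offset from the node's first pfn.
 */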
struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long offset;
	struct page_cgroup *base;

	base = NODE_DATA(page_to_nid(page))->node_page_cgroup;
	if (unlikely(!base))
		return NULL;

	offset = pfn - NODE_DATA(page_to_nid(page))->node_start_pfn;
	return base + offset;
}

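/*
 * Allocate and initialize the page_cgroup array covering every pfn
 * spanned by @nid. Runs at boot, so the table comes from bootmem.
 */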
static int __init alloc_node_page_cgroup(int nid)
{
	struct page_cgroup *base, *pc;
	unsigned long table_size;
	unsigned long start_pfn, nr_pages, index;

	start_pfn = NODE_DATA(nid)->node_start_pfn;
	nr_pages = NODE_DATA(nid)->node_spanned_pages;

	table_size = sizeof(struct page_cgroup) * nr_pages;

	base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	if (!base)
		return -ENOMEM;
	for (index = 0; index < nr_pages; index++) {
		pc = base + index;
		__init_page_cgroup(pc, start_pfn + index);
	}
	NODE_DATA(nid)->node_page_cgroup = base;
	total_usage += table_size;
	return 0;
}

void __init page_cgroup_init(void)
{
	int nid, fail;

	for_each_online_node(nid) {
		fail = alloc_node_page_cgroup(nid);
		if (fail)
			goto fail;
	}
	printk(KERN_INFO "allocated %lu bytes of page_cgroup\n", total_usage);
	printk(KERN_INFO "please try cgroup_disable=memory option if you"
		" don't want memory cgroups\n");
	return;
fail:
	printk(KERN_CRIT "allocation of page_cgroup failed.\n");
	printk(KERN_CRIT "please try cgroup_disable=memory boot option\n");
	panic("Out of memory");
}

#else /* CONFIG_SPARSEMEM */

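/*
 * SPARSEMEM lookup: each mem_section stores its page_cgroup pointer
 * biased by the section's start pfn (see init_section_page_cgroup()),
 * so adding the raw pfn yields the right entry.
 */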
struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	struct mem_section *section = __pfn_to_section(pfn);

	return section->page_cgroup + pfn;
}

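/*
 * Allocate and initialize the page_cgroup array for the section that
 * contains @pfn. kmalloc_node() is tried first, with vmalloc_node()
 * as a fallback for configurations where the table is too large.
 */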
int __meminit init_section_page_cgroup(unsigned long pfn)
{
	struct mem_section *section;
	struct page_cgroup *base, *pc;
	unsigned long table_size;
	int nid, index;

	section = __pfn_to_section(pfn);

	if (section->page_cgroup)
		return 0;

	nid = page_to_nid(pfn_to_page(pfn));

	table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
	base = kmalloc_node(table_size, GFP_KERNEL, nid);
	if (!base)
		base = vmalloc_node(table_size, nid);

	if (!base) {
		printk(KERN_ERR "page cgroup allocation failure\n");
		return -ENOMEM;
	}

	for (index = 0; index < PAGES_PER_SECTION; index++) {
		pc = base + index;
		__init_page_cgroup(pc, pfn + index);
	}

	/*
	 * Store the pointer biased by the section's start pfn so that
	 * lookup_page_cgroup() can index it with the raw pfn.
	 */
	section->page_cgroup = base - pfn;
	total_usage += table_size;
	return 0;
}
#ifdef CONFIG_MEMORY_HOTPLUG
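/* Free the page_cgroup array attached to the section holding @pfn. */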
void __free_page_cgroup(unsigned long pfn)
{
	struct mem_section *ms;
	struct page_cgroup *base;

	ms = __pfn_to_section(pfn);
	if (!ms || !ms->page_cgroup)
		return;
	base = ms->page_cgroup + pfn;
	ms->page_cgroup = NULL;
	if (is_vmalloc_addr(base))
		vfree(base);
	else
		kfree(base);
}

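/*
 * Memory going online: ensure every present section in the range has
 * a page_cgroup array, freeing the whole range again on failure.
 */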
int online_page_cgroup(unsigned long start_pfn,
			unsigned long nr_pages,
			int nid)
{
	unsigned long start, end, pfn;
	int fail = 0;

	/* Round the range out to whole sections. */
	start = start_pfn & ~(PAGES_PER_SECTION - 1);
	end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);

	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
		if (!pfn_present(pfn))
			continue;
		fail = init_section_page_cgroup(pfn);
	}
	if (!fail)
		return 0;

	/* rollback */
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_cgroup(pfn);

	return -ENOMEM;
}

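/* Memory going offline: release the page_cgroup arrays for the range. */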
int offline_page_cgroup(unsigned long start_pfn,
		unsigned long nr_pages, int nid)
{
	unsigned long start, end, pfn;

	/* Round the range out to whole sections, as above. */
	start = start_pfn & ~(PAGES_PER_SECTION - 1);
	end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);

	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_cgroup(pfn);
	return 0;
}

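/*
 * Hotplug notifier: allocate page_cgroup before a memory block comes
 * online, and free it on cancellation or offline.
 */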
static int page_cgroup_callback(struct notifier_block *self,
			       unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;
	int ret = 0;
	switch (action) {
	case MEM_GOING_ONLINE:
		ret = online_page_cgroup(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_CANCEL_ONLINE:
	case MEM_OFFLINE:
		offline_page_cgroup(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_GOING_OFFLINE:
		break;
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}
	ret = notifier_from_errno(ret);
	return ret;
}

#endif /* CONFIG_MEMORY_HOTPLUG */

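/*
 * Boot-time setup for SPARSEMEM: walk every present section up to
 * max_pfn, allocate its page_cgroup array, then register the memory
 * hotplug notifier.
 */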
void __init page_cgroup_init(void)
{
	unsigned long pfn;
	int fail = 0;

	for (pfn = 0; !fail && pfn < max_pfn; pfn += PAGES_PER_SECTION) {
		if (!pfn_present(pfn))
			continue;
		fail = init_section_page_cgroup(pfn);
	}
	if (fail) {
		printk(KERN_CRIT "try cgroup_disable=memory boot option\n");
		panic("Out of memory");
	} else {
		hotplug_memory_notifier(page_cgroup_callback, 0);
	}
	printk(KERN_INFO "allocated %lu bytes of page_cgroup\n", total_usage);
	printk(KERN_INFO "please try cgroup_disable=memory option if you"
		" don't want memory cgroups\n");
}

void __init pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
	return;
}

#endif /* !CONFIG_SPARSEMEM */