#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/bit_spinlock.h>
#include <linux/page_cgroup.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/cgroup.h>
#include <linux/swapops.h>
#include <linux/kmemleak.h>

static void __meminit init_page_cgroup(struct page_cgroup *pc, unsigned long id)
{
	pc->flags = 0;
	set_page_cgroup_array_id(pc, id);
	pc->mem_cgroup = NULL;
	INIT_LIST_HEAD(&pc->lru);
}

static unsigned long total_usage;

#if !defined(CONFIG_SPARSEMEM)

void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
	pgdat->node_page_cgroup = NULL;
}

struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long offset;
	struct page_cgroup *base;

	base = NODE_DATA(page_to_nid(page))->node_page_cgroup;
	if (unlikely(!base))
		return NULL;

	offset = pfn - NODE_DATA(page_to_nid(page))->node_start_pfn;
	return base + offset;
}

struct page *lookup_cgroup_page(struct page_cgroup *pc)
{
	unsigned long pfn;
	struct page *page;
	pg_data_t *pgdat;

	pgdat = NODE_DATA(page_cgroup_array_id(pc));
	pfn = pc - pgdat->node_page_cgroup + pgdat->node_start_pfn;
	page = pfn_to_page(pfn);
	VM_BUG_ON(pc != lookup_page_cgroup(page));
	return page;
}
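
/*
 * Example (illustrative sketch, assuming "page" is a valid page on a
 * node with an allocated table): the two helpers above invert each
 * other, so the round trip
 *
 *	struct page_cgroup *pc = lookup_page_cgroup(page);
 *	if (pc)
 *		VM_BUG_ON(lookup_cgroup_page(pc) != page);
 *
 * is expected to hold; the VM_BUG_ON() in lookup_cgroup_page() already
 * asserts the reverse direction.
 */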

static int __init alloc_node_page_cgroup(int nid)
{
	struct page_cgroup *base, *pc;
	unsigned long table_size;
	unsigned long start_pfn, nr_pages, index;

	start_pfn = NODE_DATA(nid)->node_start_pfn;
	nr_pages = NODE_DATA(nid)->node_spanned_pages;

	if (!nr_pages)
		return 0;

	table_size = sizeof(struct page_cgroup) * nr_pages;

	base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	if (!base)
		return -ENOMEM;
	for (index = 0; index < nr_pages; index++) {
		pc = base + index;
		init_page_cgroup(pc, nid);
	}
	NODE_DATA(nid)->node_page_cgroup = base;
	total_usage += table_size;
	return 0;
}

void __init page_cgroup_init_flatmem(void)
{
	int nid, fail;

	if (mem_cgroup_disabled())
		return;

	for_each_online_node(nid) {
		fail = alloc_node_page_cgroup(nid);
		if (fail)
			goto fail;
	}
	printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
	printk(KERN_INFO "please try 'cgroup_disable=memory' option if you"
		" don't want memory cgroups\n");
	return;
fail:
	printk(KERN_CRIT "allocation of page_cgroup failed.\n");
	printk(KERN_CRIT "please try 'cgroup_disable=memory' boot option\n");
	panic("Out of memory");
}

#else	/* CONFIG_SPARSEMEM */

struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	struct mem_section *section = __pfn_to_section(pfn);

	if (!section->page_cgroup)
		return NULL;
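	/*
	 * section->page_cgroup holds (base - pfn of the section start),
	 * see init_section_page_cgroup(), so indexing it with the
	 * absolute pfn yields this page's entry.
	 */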
	return section->page_cgroup + pfn;
}

struct page *lookup_cgroup_page(struct page_cgroup *pc)
{
	struct mem_section *section;
	struct page *page;
	unsigned long nr;

	nr = page_cgroup_array_id(pc);
	section = __nr_to_section(nr);
	page = pfn_to_page(pc - section->page_cgroup);
	VM_BUG_ON(pc != lookup_page_cgroup(page));
	return page;
}

static void *__meminit alloc_page_cgroup(size_t size, int nid)
{
	void *addr = NULL;

	addr = alloc_pages_exact_nid(nid, size, GFP_KERNEL | __GFP_NOWARN);
	if (addr)
		return addr;

	if (node_state(nid, N_HIGH_MEMORY))
		addr = vmalloc_node(size, nid);
	else
		addr = vmalloc(size);

	return addr;
}

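/*
 * Note on the fallback order in alloc_page_cgroup(): physically
 * contiguous pages from the page allocator are tried first because
 * they do not consume vmalloc address space, which is scarce on
 * 32-bit; vmalloc_node()/vmalloc() serves only as a fallback when the
 * node's free memory is too fragmented.
 */
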
#ifdef CONFIG_MEMORY_HOTPLUG
static void free_page_cgroup(void *addr)
{
	if (is_vmalloc_addr(addr)) {
		vfree(addr);
	} else {
		struct page *page = virt_to_page(addr);
		size_t table_size =
			sizeof(struct page_cgroup) * PAGES_PER_SECTION;

		BUG_ON(PageReserved(page));
		free_pages_exact(addr, table_size);
	}
}
#endif

static int __meminit init_section_page_cgroup(unsigned long pfn, int nid)
{
	struct page_cgroup *base, *pc;
	struct mem_section *section;
	unsigned long table_size;
	unsigned long nr;
	int index;

	nr = pfn_to_section_nr(pfn);
	section = __nr_to_section(nr);

	if (section->page_cgroup)
		return 0;

	table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
	base = alloc_page_cgroup(table_size, nid);

	/*
	 * The value stored in section->page_cgroup is (base - pfn)
	 * and it does not point to the memory block allocated above,
	 * causing kmemleak false positives.
	 */
	kmemleak_not_leak(base);

	if (!base) {
		printk(KERN_ERR "page cgroup allocation failure\n");
		return -ENOMEM;
	}

	for (index = 0; index < PAGES_PER_SECTION; index++) {
		pc = base + index;
		init_page_cgroup(pc, nr);
	}
	/*
	 * The passed "pfn" may not be section-aligned; mask it down to
	 * the section start so that (base - pfn) can later be indexed
	 * with an absolute pfn in lookup_page_cgroup().
	 */
	pfn &= PAGE_SECTION_MASK;
	section->page_cgroup = base - pfn;
	total_usage += table_size;
	return 0;
}
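
/*
 * Worked example (illustrative, assuming PAGES_PER_SECTION == 32768):
 * for the section starting at pfn 0x8000, section->page_cgroup is set
 * to (base - 0x8000), so lookup_page_cgroup() of pfn 0x8005 computes
 * (base - 0x8000) + 0x8005 == base + 5, the sixth entry of the table.
 */
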
#ifdef CONFIG_MEMORY_HOTPLUG
void __free_page_cgroup(unsigned long pfn)
{
	struct mem_section *ms;
	struct page_cgroup *base;

	ms = __pfn_to_section(pfn);
	if (!ms || !ms->page_cgroup)
		return;
	base = ms->page_cgroup + pfn;
	free_page_cgroup(base);
	ms->page_cgroup = NULL;
}

int __meminit online_page_cgroup(unsigned long start_pfn,
			unsigned long nr_pages,
			int nid)
{
	unsigned long start, end, pfn;
	int fail = 0;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	if (nid == -1) {
		/*
		 * In this case the node already exists and contains
		 * valid memory. "start_pfn" is the pfn that was passed
		 * to online_pages(), so it must exist.
		 */
		nid = pfn_to_nid(start_pfn);
		VM_BUG_ON(!node_state(nid, N_ONLINE));
	}

	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
		if (!pfn_present(pfn))
			continue;
		fail = init_section_page_cgroup(pfn, nid);
	}
	if (!fail)
		return 0;

	/* rollback */
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_cgroup(pfn);

	return -ENOMEM;
}

int __meminit offline_page_cgroup(unsigned long start_pfn,
			unsigned long nr_pages, int nid)
{
	unsigned long start, end, pfn;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_cgroup(pfn);
	return 0;
}

static int __meminit page_cgroup_callback(struct notifier_block *self,
			       unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		ret = online_page_cgroup(mn->start_pfn,
				   mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_OFFLINE:
		offline_page_cgroup(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_CANCEL_ONLINE:
	case MEM_GOING_OFFLINE:
		break;
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}

	return notifier_from_errno(ret);
}

#endif

void __init page_cgroup_init(void)
{
	unsigned long pfn;
	int nid;

	if (mem_cgroup_disabled())
		return;

	for_each_node_state(nid, N_HIGH_MEMORY) {
		unsigned long start_pfn, end_pfn;

		start_pfn = node_start_pfn(nid);
		end_pfn = node_end_pfn(nid);
		/*
		 * start_pfn and end_pfn may not be section-aligned, and
		 * the page->flags of out-of-node pages are not
		 * initialized.  So we scan [start_pfn, the largest
		 * section-aligned pfn < end_pfn) here.
		 */
		for (pfn = start_pfn;
		     pfn < end_pfn;
		     pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {

			if (!pfn_valid(pfn))
				continue;
			/*
			 * Nodes' pfns can overlap; some architectures
			 * lay out nodes like this:
			 * -------------pfn-------------->
			 * N0 | N1 | N2 | N0 | N1 | N2 | ....
			 */
			if (pfn_to_nid(pfn) != nid)
				continue;
			if (init_section_page_cgroup(pfn, nid))
				goto oom;
		}
	}
	hotplug_memory_notifier(page_cgroup_callback, 0);
	printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
	printk(KERN_INFO "please try 'cgroup_disable=memory' option if you "
			 "don't want memory cgroups\n");
	return;
oom:
	printk(KERN_CRIT "try 'cgroup_disable=memory' boot option\n");
	panic("Out of memory");
}

void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
	return;
}

#endif

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP

static DEFINE_MUTEX(swap_cgroup_mutex);
struct swap_cgroup_ctrl {
	struct page **map;
	unsigned long length;
	spinlock_t lock;
};

struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];

struct swap_cgroup {
	unsigned short id;
};
#define SC_PER_PAGE	(PAGE_SIZE/sizeof(struct swap_cgroup))
#define SC_POS_MASK	(SC_PER_PAGE - 1)

/*
 * SwapCgroup implements "lookup" and "exchange" operations.
 * In typical usage, swap_cgroup is accessed via memcg's charge/uncharge
 * operations against SwapCache. At swap_free(), it is accessed directly
 * from the swap code.
 *
 * This means:
 *  - there is no race in "exchange" when accessed via SwapCache,
 *    because the SwapCache (and its swp_entry) is under lock.
 *  - when called via swap_free(), there is no user of the entry and
 *    hence no race.
 * So no lock is needed around "exchange" itself.
 *
 * TODO: these buffers could be pushed out to HIGHMEM.
 */

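/*
 * Worked example of the index math used below (illustrative, assuming
 * PAGE_SIZE == 4096 and sizeof(struct swap_cgroup) == 2): SC_PER_PAGE
 * is 2048, so the record for swap offset 5000 lives in
 * map[5000 / 2048] == map[2], at position 5000 & SC_POS_MASK == 904
 * within that page.
 */
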
/*
 * Allocate the buffer for swap_cgroup.
 */
static int swap_cgroup_prepare(int type)
{
	struct page *page;
	struct swap_cgroup_ctrl *ctrl;
	unsigned long idx, max;

	ctrl = &swap_cgroup_ctrl[type];

	for (idx = 0; idx < ctrl->length; idx++) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			goto not_enough_page;
		ctrl->map[idx] = page;
	}
	return 0;
not_enough_page:
	max = idx;
	for (idx = 0; idx < max; idx++)
		__free_page(ctrl->map[idx]);

	return -ENOMEM;
}

/**
 * swap_cgroup_cmpxchg - cmpxchg mem_cgroup's id for this swp_entry.
 * @ent: swap entry to be cmpxchg'd
 * @old: old id
 * @new: new id
 *
 * Returns the old id on success, 0 on failure.
 * (No mem_cgroup uses 0 as its id.)
 */
unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
					unsigned short old, unsigned short new)
{
	int type = swp_type(ent);
	unsigned long offset = swp_offset(ent);
	unsigned long idx = offset / SC_PER_PAGE;
	unsigned long pos = offset & SC_POS_MASK;
	struct swap_cgroup_ctrl *ctrl;
	struct page *mappage;
	struct swap_cgroup *sc;
	unsigned long flags;
	unsigned short retval;

	ctrl = &swap_cgroup_ctrl[type];

	mappage = ctrl->map[idx];
	sc = page_address(mappage);
	sc += pos;
	spin_lock_irqsave(&ctrl->lock, flags);
	retval = sc->id;
	if (retval == old)
		sc->id = new;
	else
		retval = 0;
	spin_unlock_irqrestore(&ctrl->lock, flags);
	return retval;
}

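/*
 * Example (illustrative sketch; the ids are hypothetical): move a
 * swapped-out charge from memcg id 3 to id 7 only if it is still
 * owned by 3; a return value of 0 means another updater won the race:
 *
 *	if (swap_cgroup_cmpxchg(ent, 3, 7) == 0)
 *		...retry or give up, the owner changed under us...
 */
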
/**
 * swap_cgroup_record - record mem_cgroup for this swp_entry.
 * @ent: swap entry to be recorded into
 * @id: mem_cgroup's css id to be recorded
 *
 * Returns the old value on success, 0 on failure.
 * (Of course, the old value can itself be 0.)
 */
unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
{
	int type = swp_type(ent);
	unsigned long offset = swp_offset(ent);
	unsigned long idx = offset / SC_PER_PAGE;
	unsigned long pos = offset & SC_POS_MASK;
	struct swap_cgroup_ctrl *ctrl;
	struct page *mappage;
	struct swap_cgroup *sc;
	unsigned short old;
	unsigned long flags;

	ctrl = &swap_cgroup_ctrl[type];

	mappage = ctrl->map[idx];
	sc = page_address(mappage);
	sc += pos;
	spin_lock_irqsave(&ctrl->lock, flags);
	old = sc->id;
	sc->id = id;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	return old;
}

/**
 * lookup_swap_cgroup - lookup mem_cgroup tied to a swap entry
 * @ent: swap entry to be looked up.
 *
 * Returns the CSS ID of the mem_cgroup on success, 0 on failure
 * (0 is an invalid ID).
 */
unsigned short lookup_swap_cgroup(swp_entry_t ent)
{
	int type = swp_type(ent);
	unsigned long offset = swp_offset(ent);
	unsigned long idx = offset / SC_PER_PAGE;
	unsigned long pos = offset & SC_POS_MASK;
	struct swap_cgroup_ctrl *ctrl;
	struct page *mappage;
	struct swap_cgroup *sc;
	unsigned short ret;

	ctrl = &swap_cgroup_ctrl[type];
	mappage = ctrl->map[idx];
	sc = page_address(mappage);
	sc += pos;
	ret = sc->id;
	return ret;
}

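/*
 * Example (illustrative sketch): at swap-slot free time, the recorded
 * owner is looked up so its charge can be dropped; id 0 means the
 * entry was never charged:
 *
 *	id = lookup_swap_cgroup(ent);
 *	if (id)
 *		...uncharge the mem_cgroup whose css id is "id"...
 */
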
int swap_cgroup_swapon(int type, unsigned long max_pages)
{
	void *array;
	unsigned long array_size;
	unsigned long length;
	struct swap_cgroup_ctrl *ctrl;

	if (!do_swap_account)
		return 0;

	length = DIV_ROUND_UP(max_pages, SC_PER_PAGE);
	array_size = length * sizeof(void *);

	array = vzalloc(array_size);
	if (!array)
		goto nomem;

	ctrl = &swap_cgroup_ctrl[type];
	mutex_lock(&swap_cgroup_mutex);
	ctrl->length = length;
	ctrl->map = array;
	spin_lock_init(&ctrl->lock);
	if (swap_cgroup_prepare(type)) {
		/* memory shortage */
		ctrl->map = NULL;
		ctrl->length = 0;
		mutex_unlock(&swap_cgroup_mutex);
		vfree(array);
		goto nomem;
	}
	mutex_unlock(&swap_cgroup_mutex);

	return 0;
nomem:
	printk(KERN_INFO "couldn't allocate enough memory for swap_cgroup.\n");
	printk(KERN_INFO
		"swap_cgroup can be disabled by swapaccount=0 boot option\n");
	return -ENOMEM;
}

void swap_cgroup_swapoff(int type)
{
	struct page **map;
	unsigned long i, length;
	struct swap_cgroup_ctrl *ctrl;

	if (!do_swap_account)
		return;

	mutex_lock(&swap_cgroup_mutex);
	ctrl = &swap_cgroup_ctrl[type];
	map = ctrl->map;
	length = ctrl->length;
	ctrl->map = NULL;
	ctrl->length = 0;
	mutex_unlock(&swap_cgroup_mutex);

	if (map) {
		for (i = 0; i < length; i++) {
			struct page *page = map[i];

			if (page)
				__free_page(page);
		}
		vfree(map);
	}
}

#endif