#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/bit_spinlock.h>
#include <linux/page_cgroup.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/cgroup.h>
#include <linux/swapops.h>
#include <linux/kmemleak.h>

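/*
 * Reset one page_cgroup entry and remember which array it lives in
 * (node id for flatmem, section number for sparsemem), so that
 * lookup_cgroup_page() can map it back to its struct page.
 */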
static void __meminit init_page_cgroup(struct page_cgroup *pc, unsigned long id)
{
	pc->flags = 0;
	set_page_cgroup_array_id(pc, id);
	pc->mem_cgroup = NULL;
}
static unsigned long total_usage;

#if !defined(CONFIG_SPARSEMEM)


void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
	pgdat->node_page_cgroup = NULL;
}

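/*
 * Flatmem lookup: the per-node page_cgroup array is indexed by the page's
 * offset from the node's first pfn.
 */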
struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long offset;
	struct page_cgroup *base;

	base = NODE_DATA(page_to_nid(page))->node_page_cgroup;
	if (unlikely(!base))
		return NULL;

	offset = pfn - NODE_DATA(page_to_nid(page))->node_start_pfn;
	return base + offset;
}

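/*
 * Reverse mapping: recover the struct page from a page_cgroup using the
 * node id stored at init time and the offset within that node's array.
 */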
struct page *lookup_cgroup_page(struct page_cgroup *pc)
{
	unsigned long pfn;
	struct page *page;
	pg_data_t *pgdat;

	pgdat = NODE_DATA(page_cgroup_array_id(pc));
	pfn = pc - pgdat->node_page_cgroup + pgdat->node_start_pfn;
	page = pfn_to_page(pfn);
	VM_BUG_ON(pc != lookup_page_cgroup(page));
	return page;
}

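/*
 * Allocate one page_cgroup per page spanned by the node, from bootmem.
 * Returns 0 on success (or if the node spans no pages), -ENOMEM on failure.
 */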
static int __init alloc_node_page_cgroup(int nid)
{
	struct page_cgroup *base, *pc;
	unsigned long table_size;
	unsigned long start_pfn, nr_pages, index;

	start_pfn = NODE_DATA(nid)->node_start_pfn;
	nr_pages = NODE_DATA(nid)->node_spanned_pages;

	if (!nr_pages)
		return 0;

	table_size = sizeof(struct page_cgroup) * nr_pages;

	base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	if (!base)
		return -ENOMEM;
	for (index = 0; index < nr_pages; index++) {
		pc = base + index;
		init_page_cgroup(pc, nid);
	}
	NODE_DATA(nid)->node_page_cgroup = base;
	total_usage += table_size;
	return 0;
}

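/*
 * Set up the flatmem page_cgroup arrays for all online nodes at boot.
 * Does nothing when memory cgroups are disabled; panics if the arrays
 * cannot be allocated.
 */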
void __init page_cgroup_init_flatmem(void)
{
	int nid, fail;

	if (mem_cgroup_disabled())
		return;

	for_each_online_node(nid) {
		fail = alloc_node_page_cgroup(nid);
		if (fail)
			goto fail;
	}
	printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
	printk(KERN_INFO "please try 'cgroup_disable=memory' option if you"
		" don't want memory cgroups\n");
	return;
fail:
	printk(KERN_CRIT "allocation of page_cgroup failed.\n");
	printk(KERN_CRIT "please try 'cgroup_disable=memory' boot option\n");
	panic("Out of memory");
}

#else /* CONFIG_SPARSEMEM */

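/*
 * Sparsemem lookup: each mem_section stores (base - pfn), so adding the
 * pfn yields the page_cgroup for that page directly.
 */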
struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	struct mem_section *section = __pfn_to_section(pfn);

	if (!section->page_cgroup)
		return NULL;
	return section->page_cgroup + pfn;
}

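/*
 * Reverse mapping for sparsemem: the stored section number plus the offset
 * encoded in the biased base pointer recovers the pfn and its struct page.
 */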
struct page *lookup_cgroup_page(struct page_cgroup *pc)
{
	struct mem_section *section;
	struct page *page;
	unsigned long nr;

	nr = page_cgroup_array_id(pc);
	section = __nr_to_section(nr);
	page = pfn_to_page(pc - section->page_cgroup);
	VM_BUG_ON(pc != lookup_page_cgroup(page));
	return page;
}

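/*
 * Allocate a section's page_cgroup table: try physically contiguous pages
 * on the target node first (quietly, __GFP_NOWARN), then fall back to
 * vmalloc.
 */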
static void *__meminit alloc_page_cgroup(size_t size, int nid)
{
	void *addr = NULL;
	gfp_t flags = GFP_KERNEL | __GFP_NOWARN;

	addr = alloc_pages_exact_nid(nid, size, flags);
	if (addr) {
		kmemleak_alloc(addr, size, 1, flags);
		return addr;
	}

	if (node_state(nid, N_HIGH_MEMORY))
		addr = vmalloc_node(size, nid);
	else
		addr = vmalloc(size);

	return addr;
}

#ifdef CONFIG_MEMORY_HOTPLUG
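/*
 * Free a table allocated by alloc_page_cgroup(), using the matching
 * release path for the vmalloc and page-allocator cases.
 */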
static void free_page_cgroup(void *addr)
{
	if (is_vmalloc_addr(addr)) {
		vfree(addr);
	} else {
		struct page *page = virt_to_page(addr);
		size_t table_size =
			sizeof(struct page_cgroup) * PAGES_PER_SECTION;

		BUG_ON(PageReserved(page));
		free_pages_exact(addr, table_size);
	}
}
#endif

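/*
 * Allocate and initialize the page_cgroup table for the section containing
 * "pfn", then publish it as a biased pointer (base - section start pfn) in
 * the mem_section. Returns 0 if the section is already set up.
 */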
static int __meminit init_section_page_cgroup(unsigned long pfn, int nid)
{
	struct page_cgroup *base, *pc;
	struct mem_section *section;
	unsigned long table_size;
	unsigned long nr;
	int index;

	nr = pfn_to_section_nr(pfn);
	section = __nr_to_section(nr);

	if (section->page_cgroup)
		return 0;

	table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
	base = alloc_page_cgroup(table_size, nid);

	/*
	 * The value stored in section->page_cgroup is (base - pfn)
	 * and it does not point to the memory block allocated above,
	 * causing kmemleak false positives.
	 */
	kmemleak_not_leak(base);

	if (!base) {
		printk(KERN_ERR "page cgroup allocation failure\n");
		return -ENOMEM;
	}

	for (index = 0; index < PAGES_PER_SECTION; index++) {
		pc = base + index;
		init_page_cgroup(pc, nr);
	}
	/*
	 * The passed "pfn" may not be aligned to SECTION. For the calculation
	 * we need to apply a mask.
	 */
	pfn &= PAGE_SECTION_MASK;
	section->page_cgroup = base - pfn;
	total_usage += table_size;
	return 0;
}
#ifdef CONFIG_MEMORY_HOTPLUG
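/*
 * Tear down the page_cgroup table of the section containing "pfn", if any,
 * and clear the mem_section pointer.
 */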
void __free_page_cgroup(unsigned long pfn)
{
	struct mem_section *ms;
	struct page_cgroup *base;

	ms = __pfn_to_section(pfn);
	if (!ms || !ms->page_cgroup)
		return;
	base = ms->page_cgroup + pfn;
	free_page_cgroup(base);
	ms->page_cgroup = NULL;
}

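/*
 * Memory hotplug: populate page_cgroup tables for every present section in
 * the range being onlined. On failure, roll back the sections already done.
 */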
int __meminit online_page_cgroup(unsigned long start_pfn,
			unsigned long nr_pages,
			int nid)
{
	unsigned long start, end, pfn;
	int fail = 0;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	if (nid == -1) {
		/*
		 * "nid" was not specified, but the node already exists and
		 * contains valid memory: "start_pfn" is the pfn passed to
		 * online_pages() and should exist, so take the node from it.
		 */
		nid = pfn_to_nid(start_pfn);
		VM_BUG_ON(!node_state(nid, N_ONLINE));
	}

	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
		if (!pfn_present(pfn))
			continue;
		fail = init_section_page_cgroup(pfn, nid);
	}
	if (!fail)
		return 0;

	/* rollback */
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_cgroup(pfn);

	return -ENOMEM;
}

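/*
 * Memory hotplug: free the page_cgroup tables covering the range being
 * offlined, one section at a time.
 */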
int __meminit offline_page_cgroup(unsigned long start_pfn,
		unsigned long nr_pages, int nid)
{
	unsigned long start, end, pfn;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_cgroup(pfn);
	return 0;
}

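/*
 * Memory hotplug notifier: allocate tables when a range is about to come
 * online, free them once a range has gone offline.
 */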
static int __meminit page_cgroup_callback(struct notifier_block *self,
			       unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;
	int ret = 0;
	switch (action) {
	case MEM_GOING_ONLINE:
		ret = online_page_cgroup(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_OFFLINE:
		offline_page_cgroup(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_CANCEL_ONLINE:
	case MEM_GOING_OFFLINE:
		break;
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}

	return notifier_from_errno(ret);
}

#endif

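/*
 * Boot-time setup for sparsemem: walk every node with memory and allocate a
 * page_cgroup table for each section that actually belongs to that node.
 */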
void __init page_cgroup_init(void)
{
	unsigned long pfn;
	int nid;

	if (mem_cgroup_disabled())
		return;

	for_each_node_state(nid, N_HIGH_MEMORY) {
		unsigned long start_pfn, end_pfn;

		start_pfn = node_start_pfn(nid);
		end_pfn = node_end_pfn(nid);
		/*
		 * start_pfn and end_pfn may not be aligned to SECTION and the
		 * page->flags of out-of-node pages are not initialized.  So we
		 * scan [start_pfn, the biggest section's pfn < end_pfn) here.
		 */
		for (pfn = start_pfn;
		     pfn < end_pfn;
		     pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {

			if (!pfn_valid(pfn))
				continue;
			/*
			 * Nodes' pfns can overlap. Some architectures have a
			 * node layout such as
			 * -------------pfn-------------->
			 * N0 | N1 | N2 | N0 | N1 | N2 | ...
			 */
			if (pfn_to_nid(pfn) != nid)
				continue;
			if (init_section_page_cgroup(pfn, nid))
				goto oom;
		}
	}
	hotplug_memory_notifier(page_cgroup_callback, 0);
	printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
	printk(KERN_INFO "please try 'cgroup_disable=memory' option if you "
			 "don't want memory cgroups\n");
	return;
oom:
	printk(KERN_CRIT "try 'cgroup_disable=memory' boot option\n");
	panic("Out of memory");
}

void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
	return;
}

#endif


#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP

static DEFINE_MUTEX(swap_cgroup_mutex);
struct swap_cgroup_ctrl {
	struct page **map;
	unsigned long length;
	spinlock_t lock;
};

static struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];

struct swap_cgroup {
	unsigned short id;
};
#define SC_PER_PAGE (PAGE_SIZE/sizeof(struct swap_cgroup))
#define SC_POS_MASK (SC_PER_PAGE - 1)

/*
 * SwapCgroup implements "lookup" and "exchange" operations.
 * In typical usage, this swap_cgroup is accessed via memcg's charge/uncharge
 * against SwapCache. At swap_free(), this is accessed directly from swap.
 *
 * This means:
 *  - there is no race in "exchange" when we are accessed via SwapCache,
 *    because SwapCache (and its swp_entry) is under lock.
 *  - when called via swap_free(), there is no user of this entry and no race.
 * Then, we don't need a lock around "exchange".
 *
 * TODO: we can push these buffers out to HIGHMEM.
 */

/*
 * allocate buffer for swap_cgroup.
 */
static int swap_cgroup_prepare(int type)
{
	struct page *page;
	struct swap_cgroup_ctrl *ctrl;
	unsigned long idx, max;

	ctrl = &swap_cgroup_ctrl[type];

	for (idx = 0; idx < ctrl->length; idx++) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			goto not_enough_page;
		ctrl->map[idx] = page;
	}
	return 0;
not_enough_page:
	max = idx;
	for (idx = 0; idx < max; idx++)
		__free_page(ctrl->map[idx]);

	return -ENOMEM;
}

/**
 * swap_cgroup_cmpxchg - cmpxchg mem_cgroup's id for this swp_entry.
 * @ent: swap entry to be cmpxchged
 * @old: old id
 * @new: new id
 *
 * Returns the old id on success, 0 on failure.
 * (There is no mem_cgroup using 0 as its id.)
 */
unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
					unsigned short old, unsigned short new)
{
	int type = swp_type(ent);
	unsigned long offset = swp_offset(ent);
	unsigned long idx = offset / SC_PER_PAGE;
	unsigned long pos = offset & SC_POS_MASK;
	struct swap_cgroup_ctrl *ctrl;
	struct page *mappage;
	struct swap_cgroup *sc;
	unsigned long flags;
	unsigned short retval;

	ctrl = &swap_cgroup_ctrl[type];

	mappage = ctrl->map[idx];
	sc = page_address(mappage);
	sc += pos;
	spin_lock_irqsave(&ctrl->lock, flags);
	retval = sc->id;
	if (retval == old)
		sc->id = new;
	else
		retval = 0;
	spin_unlock_irqrestore(&ctrl->lock, flags);
	return retval;
}

/**
 * swap_cgroup_record - record mem_cgroup for this swp_entry.
 * @ent: swap entry to be recorded into
 * @id: mem_cgroup's css ID to be recorded
 *
 * Returns the old value on success, 0 on failure.
 * (Of course, the old value can be 0.)
 */
unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
{
	int type = swp_type(ent);
	unsigned long offset = swp_offset(ent);
	unsigned long idx = offset / SC_PER_PAGE;
	unsigned long pos = offset & SC_POS_MASK;
	struct swap_cgroup_ctrl *ctrl;
	struct page *mappage;
	struct swap_cgroup *sc;
	unsigned short old;
	unsigned long flags;

	ctrl = &swap_cgroup_ctrl[type];

	mappage = ctrl->map[idx];
	sc = page_address(mappage);
	sc += pos;
	spin_lock_irqsave(&ctrl->lock, flags);
	old = sc->id;
	sc->id = id;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	return old;
}

/**
 * lookup_swap_cgroup - lookup mem_cgroup tied to swap entry
 * @ent: swap entry to be looked up.
 *
 * Returns the CSS ID of the mem_cgroup on success, 0 on failure
 * (0 is an invalid ID).
 */
unsigned short lookup_swap_cgroup(swp_entry_t ent)
{
	int type = swp_type(ent);
	unsigned long offset = swp_offset(ent);
	unsigned long idx = offset / SC_PER_PAGE;
	unsigned long pos = offset & SC_POS_MASK;
	struct swap_cgroup_ctrl *ctrl;
	struct page *mappage;
	struct swap_cgroup *sc;
	unsigned short ret;

	ctrl = &swap_cgroup_ctrl[type];
	mappage = ctrl->map[idx];
	sc = page_address(mappage);
	sc += pos;
	ret = sc->id;
	return ret;
}

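/*
 * Called at swapon: size and allocate the page-pointer array for this swap
 * device (one swap_cgroup entry per swap slot), then populate it via
 * swap_cgroup_prepare(). Prints a hint about swapaccount=0 on failure.
 */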
int swap_cgroup_swapon(int type, unsigned long max_pages)
{
	void *array;
	unsigned long array_size;
	unsigned long length;
	struct swap_cgroup_ctrl *ctrl;

	if (!do_swap_account)
		return 0;

	length = DIV_ROUND_UP(max_pages, SC_PER_PAGE);
	array_size = length * sizeof(void *);

	array = vzalloc(array_size);
	if (!array)
		goto nomem;

	ctrl = &swap_cgroup_ctrl[type];
	mutex_lock(&swap_cgroup_mutex);
	ctrl->length = length;
	ctrl->map = array;
	spin_lock_init(&ctrl->lock);
	if (swap_cgroup_prepare(type)) {
		/* memory shortage */
		ctrl->map = NULL;
		ctrl->length = 0;
		mutex_unlock(&swap_cgroup_mutex);
		vfree(array);
		goto nomem;
	}
	mutex_unlock(&swap_cgroup_mutex);

	return 0;
nomem:
	printk(KERN_INFO "couldn't allocate enough memory for swap_cgroup.\n");
	printk(KERN_INFO
		"swap_cgroup can be disabled by swapaccount=0 boot option\n");
	return -ENOMEM;
}

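/*
 * Called at swapoff: detach the device's map under the mutex, then free the
 * record pages and the pointer array outside the lock.
 */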
void swap_cgroup_swapoff(int type)
{
	struct page **map;
	unsigned long i, length;
	struct swap_cgroup_ctrl *ctrl;

	if (!do_swap_account)
		return;

	mutex_lock(&swap_cgroup_mutex);
	ctrl = &swap_cgroup_ctrl[type];
	map = ctrl->map;
	length = ctrl->length;
	ctrl->map = NULL;
	ctrl->length = 0;
	mutex_unlock(&swap_cgroup_mutex);

	if (map) {
		for (i = 0; i < length; i++) {
			struct page *page = map[i];
			if (page)
				__free_page(page);
		}
		vfree(map);
	}
}

#endif