#ifndef __LINUX_PAGE_CGROUP_H
#define __LINUX_PAGE_CGROUP_H

enum {
	/* flags for mem_cgroup */
	PCG_LOCK,  /* Lock for pc->mem_cgroup and following bits. */
	PCG_USED, /* this object is in use. */
	PCG_MIGRATION, /* under page migration */
	__NR_PCG_FLAGS,
};

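/*
 * PCG_LOCK is not an ordinary status bit: lock_page_cgroup() below uses it
 * as a bit spinlock guarding pc->mem_cgroup and the other flags.  The enum
 * is placed outside the __GENERATING_BOUNDS_H guard so that it stays
 * visible while <generated/bounds.h> is produced.
 */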

#ifndef __GENERATING_BOUNDS_H
#include <generated/bounds.h>

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
#include <linux/bit_spinlock.h>

/*
 * Page Cgroup can be considered as an extended mem_map.
 * A page_cgroup is associated with every page descriptor and records which
 * mem_cgroup the page is charged to.  All page_cgroups are allocated at
 * boot or on memory hotplug, so the page_cgroup for a pfn always exists.
 */
struct page_cgroup {
	unsigned long flags;
	struct mem_cgroup *mem_cgroup;
};

void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat);

#ifdef CONFIG_SPARSEMEM
static inline void __init page_cgroup_init_flatmem(void)
{
}
extern void __init page_cgroup_init(void);
#else
void __init page_cgroup_init_flatmem(void);
static inline void __init page_cgroup_init(void)
{
}
#endif
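/*
 * Only one of the two init functions above does real allocation work for a
 * given memory model; the other is the empty stub.
 */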

struct page_cgroup *lookup_page_cgroup(struct page *page);
struct page *lookup_cgroup_page(struct page_cgroup *pc);

#define TESTPCGFLAG(uname, lname)					\
static inline int PageCgroup##uname(struct page_cgroup *pc)		\
	{ return test_bit(PCG_##lname, &pc->flags); }

#define SETPCGFLAG(uname, lname)					\
static inline void SetPageCgroup##uname(struct page_cgroup *pc)	\
	{ set_bit(PCG_##lname, &pc->flags); }

#define CLEARPCGFLAG(uname, lname)					\
static inline void ClearPageCgroup##uname(struct page_cgroup *pc)	\
	{ clear_bit(PCG_##lname, &pc->flags); }

#define TESTCLEARPCGFLAG(uname, lname)					\
static inline int TestClearPageCgroup##uname(struct page_cgroup *pc)	\
	{ return test_and_clear_bit(PCG_##lname, &pc->flags); }

TESTPCGFLAG(Used, USED)
CLEARPCGFLAG(Used, USED)
SETPCGFLAG(Used, USED)

SETPCGFLAG(Migration, MIGRATION)
CLEARPCGFLAG(Migration, MIGRATION)
TESTPCGFLAG(Migration, MIGRATION)

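/*
 * The instantiations above generate page-flag style accessors:
 * PageCgroupUsed(), SetPageCgroupUsed(), ClearPageCgroupUsed(),
 * PageCgroupMigration(), SetPageCgroupMigration() and
 * ClearPageCgroupMigration().
 */
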
static inline void lock_page_cgroup(struct page_cgroup *pc)
{
	/*
	 * Don't take this lock in IRQ context.
	 * It protects pc->mem_cgroup and the USED and MIGRATION bits.
	 */
	bit_spin_lock(PCG_LOCK, &pc->flags);
}

static inline void unlock_page_cgroup(struct page_cgroup *pc)
{
	bit_spin_unlock(PCG_LOCK, &pc->flags);
}

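/*
 * Typical usage, as an illustrative sketch only (the charge paths in
 * mm/memcontrol.c are the authoritative users); "memcg" here stands for a
 * mem_cgroup pointer the caller already holds:
 *
 *	struct page_cgroup *pc = lookup_page_cgroup(page);
 *
 *	lock_page_cgroup(pc);
 *	if (!PageCgroupUsed(pc)) {
 *		pc->mem_cgroup = memcg;
 *		SetPageCgroupUsed(pc);
 *	}
 *	unlock_page_cgroup(pc);
 */
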
#else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct page_cgroup;

static inline void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
}

static inline struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	return NULL;
}

static inline void page_cgroup_init(void)
{
}

static inline void __init page_cgroup_init_flatmem(void)
{
}

#endif /* CONFIG_CGROUP_MEM_RES_CTLR */

#include <linux/swap.h>

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
					unsigned short old, unsigned short new);
extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id);
extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent);
extern int swap_cgroup_swapon(int type, unsigned long max_pages);
extern void swap_cgroup_swapoff(int type);
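
/*
 * Typical pairing, as an illustrative sketch rather than a fixed contract:
 * the swap-out side records the owning cgroup's id for a swap entry, and
 * the swap-in / free side looks it up again:
 *
 *	swap_cgroup_record(ent, id);
 *	...
 *	id = lookup_swap_cgroup_id(ent);
 *
 * swap_cgroup_swapon() and swap_cgroup_swapoff() set up and tear down the
 * per-swap-device records behind these calls.
 */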
#else

static inline
unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
{
	return 0;
}

static inline
unsigned short lookup_swap_cgroup_id(swp_entry_t ent)
{
	return 0;
}

static inline int
swap_cgroup_swapon(int type, unsigned long max_pages)
{
	return 0;
}

static inline void swap_cgroup_swapoff(int type)
{
	return;
}

#endif /* CONFIG_CGROUP_MEM_RES_CTLR_SWAP */

#endif /* !__GENERATING_BOUNDS_H */

#endif /* __LINUX_PAGE_CGROUP_H */