#ifndef __LINUX_PAGE_CGROUP_H
#define __LINUX_PAGE_CGROUP_H

enum {
	/* flags for mem_cgroup */
	PCG_LOCK,  /* Lock for pc->mem_cgroup and following bits. */
	PCG_CACHE, /* charged as cache */
	PCG_USED, /* this object is in use. */
	PCG_MIGRATION, /* under page migration */
	/* flags for mem_cgroup and file and I/O status */
	PCG_MOVE_LOCK, /* protects the following bits against races with move_account() */
	PCG_FILE_MAPPED, /* page is accounted as "mapped" */
	/* No lock in page_cgroup */
	PCG_ACCT_LRU, /* page has been accounted for (under lru_lock) */
	__NR_PCG_FLAGS,
};

#ifndef __GENERATING_BOUNDS_H
#include <generated/bounds.h>

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
#include <linux/bit_spinlock.h>

/*
 * Page Cgroup can be considered an extended mem_map.
 * A page_cgroup is associated with every page descriptor and records
 * which cgroup the page is charged to. All page_cgroups are allocated
 * at boot or at memory hotplug time, so the page_cgroup for a given
 * pfn always exists.
 */
struct page_cgroup {
	unsigned long flags;
	struct mem_cgroup *mem_cgroup;
	struct list_head lru;		/* per cgroup LRU list */
};

void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat);

#ifdef CONFIG_SPARSEMEM
static inline void __init page_cgroup_init_flatmem(void)
{
}
extern void __init page_cgroup_init(void);
#else
void __init page_cgroup_init_flatmem(void);
static inline void __init page_cgroup_init(void)
{
}
#endif

struct page_cgroup *lookup_page_cgroup(struct page *page);
struct page *lookup_cgroup_page(struct page_cgroup *pc);

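/*
 * Illustrative sketch, not part of the original header: these two
 * helpers translate between a struct page and its page_cgroup, so the
 * round trip below is expected to hand back the original page.
 *
 *	struct page_cgroup *pc = lookup_page_cgroup(page);
 *	struct page *same = lookup_cgroup_page(pc);	(same == page)
 */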

#define TESTPCGFLAG(uname, lname)			\
static inline int PageCgroup##uname(struct page_cgroup *pc)	\
	{ return test_bit(PCG_##lname, &pc->flags); }

#define SETPCGFLAG(uname, lname)			\
static inline void SetPageCgroup##uname(struct page_cgroup *pc)\
	{ set_bit(PCG_##lname, &pc->flags); }

#define CLEARPCGFLAG(uname, lname)			\
static inline void ClearPageCgroup##uname(struct page_cgroup *pc)	\
	{ clear_bit(PCG_##lname, &pc->flags); }

#define TESTCLEARPCGFLAG(uname, lname)			\
static inline int TestClearPageCgroup##uname(struct page_cgroup *pc)	\
	{ return test_and_clear_bit(PCG_##lname, &pc->flags); }

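/*
 * For reference: TESTPCGFLAG(Used, USED) expands to
 *
 *	static inline int PageCgroupUsed(struct page_cgroup *pc)
 *	{ return test_bit(PCG_USED, &pc->flags); }
 *
 * so each invocation below generates one Set/Clear/Test(AndClear)
 * accessor for a single PCG_* bit, in the style of the PageFoo()
 * helpers in page-flags.h.
 */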

/* Cache flag is set only once (at allocation) */
TESTPCGFLAG(Cache, CACHE)
CLEARPCGFLAG(Cache, CACHE)
SETPCGFLAG(Cache, CACHE)

TESTPCGFLAG(Used, USED)
CLEARPCGFLAG(Used, USED)
SETPCGFLAG(Used, USED)

SETPCGFLAG(AcctLRU, ACCT_LRU)
CLEARPCGFLAG(AcctLRU, ACCT_LRU)
TESTPCGFLAG(AcctLRU, ACCT_LRU)
TESTCLEARPCGFLAG(AcctLRU, ACCT_LRU)

SETPCGFLAG(FileMapped, FILE_MAPPED)
CLEARPCGFLAG(FileMapped, FILE_MAPPED)
TESTPCGFLAG(FileMapped, FILE_MAPPED)

SETPCGFLAG(Migration, MIGRATION)
CLEARPCGFLAG(Migration, MIGRATION)
TESTPCGFLAG(Migration, MIGRATION)

static inline void lock_page_cgroup(struct page_cgroup *pc)
{
	/*
	 * Don't take this lock in IRQ context.
	 * This lock protects pc->mem_cgroup and the USED, CACHE and
	 * MIGRATION bits.
	 */
	bit_spin_lock(PCG_LOCK, &pc->flags);
}

static inline void unlock_page_cgroup(struct page_cgroup *pc)
{
	bit_spin_unlock(PCG_LOCK, &pc->flags);
}

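/*
 * Illustrative sketch, not part of the original header: a typical
 * charge path looks up the page_cgroup and takes the per-pc lock
 * before touching pc->mem_cgroup ('memcg' below is supplied by the
 * caller):
 *
 *	struct page_cgroup *pc = lookup_page_cgroup(page);
 *
 *	lock_page_cgroup(pc);
 *	if (!PageCgroupUsed(pc)) {
 *		pc->mem_cgroup = memcg;
 *		SetPageCgroupUsed(pc);
 *	}
 *	unlock_page_cgroup(pc);
 */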

static inline void move_lock_page_cgroup(struct page_cgroup *pc,
	unsigned long *flags)
{
	/*
	 * Updates to the page-cache statistics bits in pc->flags can come
	 * from both process context and IRQ context, so disable IRQs to
	 * avoid deadlock.
	 */
	local_irq_save(*flags);
	bit_spin_lock(PCG_MOVE_LOCK, &pc->flags);
}

static inline void move_unlock_page_cgroup(struct page_cgroup *pc,
	unsigned long *flags)
{
	bit_spin_unlock(PCG_MOVE_LOCK, &pc->flags);
	local_irq_restore(*flags);
}

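/*
 * Illustrative sketch, not part of the original header: a statistics
 * update that can race with move_account() takes the move lock with
 * IRQs disabled, e.g. around a FILE_MAPPED accounting change:
 *
 *	unsigned long flags;
 *
 *	move_lock_page_cgroup(pc, &flags);
 *	if (PageCgroupFileMapped(pc))
 *		... update the per-memcg file statistics ...
 *	move_unlock_page_cgroup(pc, &flags);
 */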

#ifdef CONFIG_SPARSEMEM
#define PCG_ARRAYID_WIDTH	SECTIONS_SHIFT
#else
#define PCG_ARRAYID_WIDTH	NODES_SHIFT
#endif

#if (PCG_ARRAYID_WIDTH > BITS_PER_LONG - NR_PCG_FLAGS)
#error Not enough space left in pc->flags to store page_cgroup array IDs
#endif

/* pc->flags: ARRAY-ID | FLAGS */

#define PCG_ARRAYID_MASK	((1UL << PCG_ARRAYID_WIDTH) - 1)

#define PCG_ARRAYID_OFFSET	(BITS_PER_LONG - PCG_ARRAYID_WIDTH)
/*
 * Zero the shift count for non-existent fields, to prevent compiler
 * warnings and ensure references are optimized away.
 */
#define PCG_ARRAYID_SHIFT	(PCG_ARRAYID_OFFSET * (PCG_ARRAYID_WIDTH != 0))

static inline void set_page_cgroup_array_id(struct page_cgroup *pc,
					    unsigned long id)
{
	pc->flags &= ~(PCG_ARRAYID_MASK << PCG_ARRAYID_SHIFT);
	pc->flags |= (id & PCG_ARRAYID_MASK) << PCG_ARRAYID_SHIFT;
}

static inline unsigned long page_cgroup_array_id(struct page_cgroup *pc)
{
	return (pc->flags >> PCG_ARRAYID_SHIFT) & PCG_ARRAYID_MASK;
}

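/*
 * Worked example, not part of the original header: assume, purely for
 * illustration, BITS_PER_LONG == 64 and PCG_ARRAYID_WIDTH == 16. The
 * array ID then lives in the top 16 bits of pc->flags and the PCG_*
 * flags in the low bits, so
 *
 *	set_page_cgroup_array_id(pc, 5);
 *	id = page_cgroup_array_id(pc);	(id == 5, flag bits untouched)
 *
 * The stored ID identifies the mem_section (with SPARSEMEM) or node
 * that owns this page_cgroup, which is what lets lookup_cgroup_page()
 * map a page_cgroup back to its struct page.
 */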

#else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct page_cgroup;

static inline void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
}

static inline struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	return NULL;
}

static inline void page_cgroup_init(void)
{
}

static inline void __init page_cgroup_init_flatmem(void)
{
}

#endif /* CONFIG_CGROUP_MEM_RES_CTLR */

#include <linux/swap.h>

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
					unsigned short old, unsigned short new);
extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id);
extern unsigned short lookup_swap_cgroup(swp_entry_t ent);
extern int swap_cgroup_swapon(int type, unsigned long max_pages);
extern void swap_cgroup_swapoff(int type);

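/*
 * Illustrative sketch, not part of the original header: on swap-out
 * the memory controller records the owning cgroup's css ID against the
 * swap entry, and on swap-in it looks that ID up again to know whom to
 * charge ('memcg' below is supplied by the caller):
 *
 *	swap_cgroup_record(ent, css_id(&memcg->css));	at swap-out
 *	id = lookup_swap_cgroup(ent);			at swap-in
 *
 * swap_cgroup_swapon() and swap_cgroup_swapoff() allocate and free the
 * backing array when a swap area is enabled or disabled.
 */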
#else

static inline
unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
{
	return 0;
}

static inline
unsigned short lookup_swap_cgroup(swp_entry_t ent)
{
	return 0;
}

static inline int
swap_cgroup_swapon(int type, unsigned long max_pages)
{
	return 0;
}

static inline void swap_cgroup_swapoff(int type)
{
	return;
}

#endif /* CONFIG_CGROUP_MEM_RES_CTLR_SWAP */

#endif /* !__GENERATING_BOUNDS_H */

#endif /* __LINUX_PAGE_CGROUP_H */