#ifndef __LINUX_PAGE_CGROUP_H
#define __LINUX_PAGE_CGROUP_H

enum {
	/* flags for mem_cgroup */
	PCG_LOCK,  /* Lock for pc->mem_cgroup and following bits. */
	PCG_CACHE, /* charged as cache */
	PCG_USED,  /* this object is in use. */
	PCG_MIGRATION, /* under page migration */
	/* flags for mem_cgroup and file and I/O status */
	PCG_MOVE_LOCK, /* Serializes move_account vs. the following bits */
	PCG_FILE_MAPPED, /* page is accounted as "mapped" */
	/* No lock in page_cgroup */
	PCG_ACCT_LRU, /* page has been accounted for (under lru_lock) */
	__NR_PCG_FLAGS,
};
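
/*
 * NR_PCG_FLAGS, consumed by the PCG_ARRAYID_* sanity check below, is
 * derived from __NR_PCG_FLAGS via <generated/bounds.h>.
 */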

#ifndef __GENERATING_BOUNDS_H
#include <generated/bounds.h>

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
#include <linux/bit_spinlock.h>

/*
 * Page cgroup can be considered as an extended mem_map.
 * A page_cgroup is associated with every page descriptor and
 * identifies which cgroup the page is charged to.
 * All page_cgroups are allocated at boot or on memory hotplug,
 * so the page_cgroup for a given pfn always exists.
 */
struct page_cgroup {
	unsigned long flags;
	struct mem_cgroup *mem_cgroup;
	struct list_head lru;		/* per cgroup LRU list */
};
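
/*
 * The low bits of pc->flags hold the PCG_* bits above; the upper bits
 * hold the page_cgroup array id (see "pc->flags: ARRAY-ID | FLAGS"
 * further down).
 */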

void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat);

#ifdef CONFIG_SPARSEMEM
static inline void __init page_cgroup_init_flatmem(void)
{
}
extern void __init page_cgroup_init(void);
#else
void __init page_cgroup_init_flatmem(void);
static inline void __init page_cgroup_init(void)
{
}
#endif

struct page_cgroup *lookup_page_cgroup(struct page *page);
struct page *lookup_cgroup_page(struct page_cgroup *pc);

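/*
 * lookup_page_cgroup() maps a struct page to its page_cgroup;
 * lookup_cgroup_page() is the inverse and relies on the array id
 * encoded in pc->flags (see the PCG_ARRAYID_* helpers below).
 */
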
#define TESTPCGFLAG(uname, lname)			\
static inline int PageCgroup##uname(struct page_cgroup *pc)	\
	{ return test_bit(PCG_##lname, &pc->flags); }

#define SETPCGFLAG(uname, lname)			\
static inline void SetPageCgroup##uname(struct page_cgroup *pc)\
	{ set_bit(PCG_##lname, &pc->flags); }

#define CLEARPCGFLAG(uname, lname)			\
static inline void ClearPageCgroup##uname(struct page_cgroup *pc)	\
	{ clear_bit(PCG_##lname, &pc->flags); }

#define TESTCLEARPCGFLAG(uname, lname)			\
static inline int TestClearPageCgroup##uname(struct page_cgroup *pc)	\
	{ return test_and_clear_bit(PCG_##lname, &pc->flags); }

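/*
 * Illustrative expansion: TESTPCGFLAG(Used, USED) generates
 *
 *	static inline int PageCgroupUsed(struct page_cgroup *pc)
 *		{ return test_bit(PCG_USED, &pc->flags); }
 *
 * so each instantiation below provides atomic test/set/clear accessors
 * for one PCG_* bit.
 */
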
/* Cache flag is set only once (at allocation) */
TESTPCGFLAG(Cache, CACHE)
CLEARPCGFLAG(Cache, CACHE)
SETPCGFLAG(Cache, CACHE)

TESTPCGFLAG(Used, USED)
CLEARPCGFLAG(Used, USED)
SETPCGFLAG(Used, USED)

SETPCGFLAG(AcctLRU, ACCT_LRU)
CLEARPCGFLAG(AcctLRU, ACCT_LRU)
TESTPCGFLAG(AcctLRU, ACCT_LRU)
TESTCLEARPCGFLAG(AcctLRU, ACCT_LRU)

SETPCGFLAG(FileMapped, FILE_MAPPED)
CLEARPCGFLAG(FileMapped, FILE_MAPPED)
TESTPCGFLAG(FileMapped, FILE_MAPPED)

SETPCGFLAG(Migration, MIGRATION)
CLEARPCGFLAG(Migration, MIGRATION)
TESTPCGFLAG(Migration, MIGRATION)

static inline void lock_page_cgroup(struct page_cgroup *pc)
{
	/*
	 * Don't take this lock in IRQ context.
	 * It protects pc->mem_cgroup and the USED, CACHE and MIGRATION bits.
	 */
	bit_spin_lock(PCG_LOCK, &pc->flags);
}

static inline void unlock_page_cgroup(struct page_cgroup *pc)
{
	bit_spin_unlock(PCG_LOCK, &pc->flags);
}

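/*
 * Typical caller pattern (an illustrative sketch, not something this
 * header declares): hold the bit spinlock while testing the USED bit
 * and reading pc->mem_cgroup:
 *
 *	lock_page_cgroup(pc);
 *	if (PageCgroupUsed(pc))
 *		memcg = pc->mem_cgroup;
 *	unlock_page_cgroup(pc);
 */
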
static inline void move_lock_page_cgroup(struct page_cgroup *pc,
	unsigned long *flags)
{
	/*
	 * Updates to the file-stat bits in pc->flags come from both
	 * process context and IRQ context.  Disable IRQs to avoid deadlock.
	 */
	local_irq_save(*flags);
	bit_spin_lock(PCG_MOVE_LOCK, &pc->flags);
}

static inline void move_unlock_page_cgroup(struct page_cgroup *pc,
	unsigned long *flags)
{
	bit_spin_unlock(PCG_MOVE_LOCK, &pc->flags);
	local_irq_restore(*flags);
}

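/*
 * Illustrative use of the move lock (a sketch, assuming a file-stat
 * update path such as FILE_MAPPED accounting); IRQs are saved and
 * disabled by move_lock_page_cgroup() itself:
 *
 *	unsigned long flags;
 *
 *	move_lock_page_cgroup(pc, &flags);
 *	if (PageCgroupUsed(pc))
 *		SetPageCgroupFileMapped(pc);
 *	move_unlock_page_cgroup(pc, &flags);
 */
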
#ifdef CONFIG_SPARSEMEM
#define PCG_ARRAYID_WIDTH	SECTIONS_SHIFT
#else
#define PCG_ARRAYID_WIDTH	NODES_SHIFT
#endif

#if (PCG_ARRAYID_WIDTH > BITS_PER_LONG - NR_PCG_FLAGS)
#error Not enough space left in pc->flags to store page_cgroup array IDs
#endif

/* pc->flags: ARRAY-ID | FLAGS */

#define PCG_ARRAYID_MASK	((1UL << PCG_ARRAYID_WIDTH) - 1)

#define PCG_ARRAYID_OFFSET	(BITS_PER_LONG - PCG_ARRAYID_WIDTH)
/*
 * Zero the shift count for non-existent fields, to prevent compiler
 * warnings and ensure references are optimized away.
 */
#define PCG_ARRAYID_SHIFT	(PCG_ARRAYID_OFFSET * (PCG_ARRAYID_WIDTH != 0))

static inline void set_page_cgroup_array_id(struct page_cgroup *pc,
					    unsigned long id)
{
	pc->flags &= ~(PCG_ARRAYID_MASK << PCG_ARRAYID_SHIFT);
	pc->flags |= (id & PCG_ARRAYID_MASK) << PCG_ARRAYID_SHIFT;
}

static inline unsigned long page_cgroup_array_id(struct page_cgroup *pc)
{
	return (pc->flags >> PCG_ARRAYID_SHIFT) & PCG_ARRAYID_MASK;
}

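/*
 * Worked example (illustrative, assuming BITS_PER_LONG == 64 and
 * PCG_ARRAYID_WIDTH == 10): PCG_ARRAYID_OFFSET and PCG_ARRAYID_SHIFT
 * both evaluate to 54 and PCG_ARRAYID_MASK to 0x3ff, so
 * set_page_cgroup_array_id() stores the id in bits 54..63 and leaves
 * the low flag bits untouched.
 */
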
#else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct page_cgroup;

static inline void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
}

static inline struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	return NULL;
}

static inline void page_cgroup_init(void)
{
}

static inline void __init page_cgroup_init_flatmem(void)
{
}

#endif /* CONFIG_CGROUP_MEM_RES_CTLR */

#include <linux/swap.h>

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
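/*
 * Swap accounting: these helpers record and look up the cgroup id that
 * owns a swap entry; swap_cgroup_cmpxchg() replaces the recorded id
 * with @new only if it still equals @old (compare-and-exchange
 * semantics).
 */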
extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
					unsigned short old, unsigned short new);
extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id);
extern unsigned short lookup_swap_cgroup(swp_entry_t ent);
extern int swap_cgroup_swapon(int type, unsigned long max_pages);
extern void swap_cgroup_swapoff(int type);
#else

static inline
unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
{
	return 0;
}

static inline
unsigned short lookup_swap_cgroup(swp_entry_t ent)
{
	return 0;
}

static inline int
swap_cgroup_swapon(int type, unsigned long max_pages)
{
	return 0;
}

static inline void swap_cgroup_swapoff(int type)
{
	return;
}

#endif /* CONFIG_CGROUP_MEM_RES_CTLR_SWAP */

#endif /* !__GENERATING_BOUNDS_H */

#endif /* __LINUX_PAGE_CGROUP_H */