/*
 * linux/page_cgroup.h — per-page bookkeeping for the memory controller
 * (mem_cgroup): one struct page_cgroup per page descriptor, plus the
 * swap-entry -> cgroup-id tracking used by the swap controller.
 */
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -07001#ifndef __LINUX_PAGE_CGROUP_H
2#define __LINUX_PAGE_CGROUP_H
3
/*
 * Bit numbers within page_cgroup->flags.  Manipulated atomically via the
 * *PCGFLAG() accessor macros below (test_bit/set_bit family), except where
 * noted.  __NR_PCG_FLAGS is the count of bits, not a flag itself (it is
 * presumably consumed by the <generated/bounds.h> machinery — the header
 * includes it under !__GENERATING_BOUNDS_H).
 */
enum {
	/* flags for mem_cgroup */
	PCG_LOCK,  /* Lock for pc->mem_cgroup and following bits. */
	PCG_CACHE, /* charged as cache */
	PCG_USED, /* this object is in use. */
	PCG_MIGRATION, /* under page migration */
	/* flags for mem_cgroup and file and I/O status */
	PCG_MOVE_LOCK, /* For race between move_account v.s. following bits */
	PCG_FILE_MAPPED, /* page is accounted as "mapped" */
	/* No lock in page_cgroup */
	PCG_ACCT_LRU, /* page has been accounted for (under lru_lock) */
	__NR_PCG_FLAGS, /* number of flag bits — keep last */
};
17
18#ifndef __GENERATING_BOUNDS_H
19#include <generated/bounds.h>
20
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -070021#ifdef CONFIG_CGROUP_MEM_RES_CTLR
22#include <linux/bit_spinlock.h>
Johannes Weiner6b3ae582011-03-23 16:42:30 -070023
/*
 * Page Cgroup can be considered as an extended mem_map: a page_cgroup is
 * associated with every page descriptor and identifies which memory cgroup
 * the page belongs to.  All page_cgroups are allocated at boot or at a
 * memory-hotplug event, so the page_cgroup for a valid pfn always exists.
 */
struct page_cgroup {
	unsigned long flags;		/* PCG_* bits (see enum above) */
	struct mem_cgroup *mem_cgroup;	/* owning cgroup; guarded by PCG_LOCK */
};
35
/* Early per-node page_cgroup setup hook (defined outside this header). */
void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat);

#ifdef CONFIG_SPARSEMEM
/*
 * With SPARSEMEM, the flatmem initializer is a no-op and the real setup is
 * the out-of-line page_cgroup_init(); without SPARSEMEM it is the reverse.
 * Exactly one of the two does real work in any configuration.
 */
static inline void __init page_cgroup_init_flatmem(void)
{
}
extern void __init page_cgroup_init(void);
#else
void __init page_cgroup_init_flatmem(void);
static inline void __init page_cgroup_init(void)
{
}
#endif

/* Translate a page descriptor to its page_cgroup, and back. */
struct page_cgroup *lookup_page_cgroup(struct page *page);
struct page *lookup_cgroup_page(struct page_cgroup *pc);
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -070052
/*
 * Generators for the flag accessors, analogous to the PageXxx() helpers in
 * <linux/page-flags.h>:
 *   TESTPCGFLAG(Foo, FOO)      -> PageCgroupFoo(pc)
 *   SETPCGFLAG(Foo, FOO)       -> SetPageCgroupFoo(pc)
 *   CLEARPCGFLAG(Foo, FOO)     -> ClearPageCgroupFoo(pc)
 *   TESTCLEARPCGFLAG(Foo, FOO) -> TestClearPageCgroupFoo(pc)
 * All operate atomically on bit PCG_FOO of pc->flags.
 */
#define TESTPCGFLAG(uname, lname)			\
static inline int PageCgroup##uname(struct page_cgroup *pc)	\
	{ return test_bit(PCG_##lname, &pc->flags); }

#define SETPCGFLAG(uname, lname)			\
static inline void SetPageCgroup##uname(struct page_cgroup *pc)\
	{ set_bit(PCG_##lname, &pc->flags); }

#define CLEARPCGFLAG(uname, lname)			\
static inline void ClearPageCgroup##uname(struct page_cgroup *pc)	\
	{ clear_bit(PCG_##lname, &pc->flags); }

#define TESTCLEARPCGFLAG(uname, lname)			\
static inline int TestClearPageCgroup##uname(struct page_cgroup *pc)	\
	{ return test_and_clear_bit(PCG_##lname, &pc->flags); }
68
/* Cache flag is set only once (at allocation) */
TESTPCGFLAG(Cache, CACHE)
CLEARPCGFLAG(Cache, CACHE)
SETPCGFLAG(Cache, CACHE)

TESTPCGFLAG(Used, USED)
CLEARPCGFLAG(Used, USED)
SETPCGFLAG(Used, USED)

/* LRU accounting — the only flag that also gets a test-and-clear accessor. */
SETPCGFLAG(AcctLRU, ACCT_LRU)
CLEARPCGFLAG(AcctLRU, ACCT_LRU)
TESTPCGFLAG(AcctLRU, ACCT_LRU)
TESTCLEARPCGFLAG(AcctLRU, ACCT_LRU)

/* "page is accounted as mapped" — file/IO statistics bit. */
SETPCGFLAG(FileMapped, FILE_MAPPED)
CLEARPCGFLAG(FileMapped, FILE_MAPPED)
TESTPCGFLAG(FileMapped, FILE_MAPPED)

SETPCGFLAG(Migration, MIGRATION)
CLEARPCGFLAG(Migration, MIGRATION)
TESTPCGFLAG(Migration, MIGRATION)
91
/* Take the per-page_cgroup bit spinlock (PCG_LOCK). */
static inline void lock_page_cgroup(struct page_cgroup *pc)
{
	/*
	 * Don't take this lock in IRQ context — it does not disable IRQs
	 * (see move_lock_page_cgroup() for the IRQ-safe lock).
	 * This lock protects pc->mem_cgroup and the USED, CACHE and
	 * MIGRATION bits.
	 */
	bit_spin_lock(PCG_LOCK, &pc->flags);
}
100
/* Release the PCG_LOCK bit spinlock taken by lock_page_cgroup(). */
static inline void unlock_page_cgroup(struct page_cgroup *pc)
{
	bit_spin_unlock(PCG_LOCK, &pc->flags);
}
105
/*
 * Take PCG_MOVE_LOCK with local IRQs disabled.  The saved IRQ state is
 * returned through *flags and must be handed back to
 * move_unlock_page_cgroup().
 */
static inline void move_lock_page_cgroup(struct page_cgroup *pc,
	unsigned long *flags)
{
	/*
	 * We know updates to pc->flags of page cache's stats are from both of
	 * usual context or IRQ context. Disable IRQ to avoid deadlock.
	 */
	local_irq_save(*flags);
	bit_spin_lock(PCG_MOVE_LOCK, &pc->flags);
}
116
/*
 * Drop PCG_MOVE_LOCK and restore the IRQ state that was saved into *flags
 * by the matching move_lock_page_cgroup().
 */
static inline void move_unlock_page_cgroup(struct page_cgroup *pc,
	unsigned long *flags)
{
	bit_spin_unlock(PCG_MOVE_LOCK, &pc->flags);
	local_irq_restore(*flags);
}
123
#else /* CONFIG_CGROUP_MEM_RES_CTLR */
/*
 * Memory controller compiled out: struct page_cgroup stays an opaque
 * forward declaration and every hook collapses to a no-op, with
 * lookup_page_cgroup() reporting "no page_cgroup" via NULL.
 */
struct page_cgroup;

static inline void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
}

static inline struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	return NULL;
}

static inline void page_cgroup_init(void)
{
}

static inline void __init page_cgroup_init_flatmem(void)
{
}

#endif /* CONFIG_CGROUP_MEM_RES_CTLR */
KAMEZAWA Hiroyuki27a7faa2009-01-07 18:07:58 -0800145
#include <linux/swap.h>

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
/*
 * Swap controller: maps a swap entry to an unsigned short mem_cgroup id.
 * The !CONFIG stubs below return 0, so id 0 presumably means "no cgroup"
 * — verify against the callers.  NOTE(review): swap_cgroup_cmpxchg() has
 * no !CONFIG stub; its callers are presumably themselves conditional on
 * this config — confirm before adding new users.
 */
extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
					unsigned short old, unsigned short new);
extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id);
extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent);
extern int swap_cgroup_swapon(int type, unsigned long max_pages);
extern void swap_cgroup_swapoff(int type);
#else

static inline
unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
{
	return 0;
}

static inline
unsigned short lookup_swap_cgroup_id(swp_entry_t ent)
{
	return 0;
}

/* Stub reports success without allocating any tracking state. */
static inline int
swap_cgroup_swapon(int type, unsigned long max_pages)
{
	return 0;
}

static inline void swap_cgroup_swapoff(int type)
{
	return;
}

#endif /* CONFIG_CGROUP_MEM_RES_CTLR_SWAP */
181
182#endif /* !__GENERATING_BOUNDS_H */
183
184#endif /* __LINUX_PAGE_CGROUP_H */