/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;

#ifdef CONFIG_CGROUP_MEM_RES_CTLR

extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask);
/*
 * For swap handling: charging happens in two phases. A caller first
 * calls one of the try_charge variants, then either commits the charge
 * once the page is in place or cancels it on failure.
 */
extern int mem_cgroup_try_charge(struct mm_struct *mm,
		gfp_t gfp_mask, struct mem_cgroup **ptr);
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t gfp_mask, struct mem_cgroup **ptr);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
		struct mem_cgroup *ptr);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr);
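/*
 * Illustrative two-phase sequence (a sketch only; the authoritative
 * callers live in the swapin path of mm/memory.c):
 *
 *	struct mem_cgroup *ptr;
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr))
 *		goto fail;		- charge refused
 *	...install the page in the page tables...
 *	mem_cgroup_commit_charge_swapin(page, ptr);
 *	or, if installation failed after the try:
 *	mem_cgroup_cancel_charge_swapin(ptr);
 */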

extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask);
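/*
 * Keep a page's position on its per-cgroup LRU in sync with the global
 * zone LRU; callers are generally expected to hold the zone's lru_lock.
 */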
extern void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru(struct page *page);
extern void mem_cgroup_move_lists(struct page *page,
				  enum lru_list from, enum lru_list to);
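/*
 * Every successful charge above must eventually be balanced by the
 * matching uncharge below when the page is freed or removed.
 */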
extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);
extern int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask);

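/*
 * The cgroup-aware counterpart of isolate_lru_pages(): vmscan uses it
 * to pull candidate pages off a mem_cgroup's per-zone LRU lists.
 */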
extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active, int file);
extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);

extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

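/*
 * True when @mm's owner task belongs to @cgroup; used, for example, by
 * reclaim paths to decide whether an mm is relevant to a given cgroup.
 */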
static inline
int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
{
	struct mem_cgroup *mem;
	rcu_read_lock();
	mem = mem_cgroup_from_task(mm->owner);
	rcu_read_unlock();
	return cgroup == mem;
}

extern int
mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr);
extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
	struct page *oldpage, struct page *newpage);
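/*
 * Illustrative migration sequence (a sketch; the real caller is
 * mm/migrate.c):
 *
 *	struct mem_cgroup *ptr = NULL;
 *
 *	if (mem_cgroup_prepare_migration(page, &ptr))
 *		...failed, e.g. -ENOMEM: abort the migration...
 *	...copy contents and remap to newpage...
 *	mem_cgroup_end_migration(ptr, page, newpage);
 */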

/*
 * For memory reclaim.
 */
extern int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem);
extern long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem);

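/*
 * Per-cgroup analogue of zone->prev_priority: remember the scanning
 * priority of the previous reclaim pass so the next one can resume there.
 */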
extern int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem);
extern void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
							int priority);
extern void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
							int priority);

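/*
 * How many pages vmscan should scan from @lru of @mem within @zone at
 * the given reclaim @priority.
 */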
extern long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone,
					int priority, enum lru_list lru);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
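/* Nonzero when swap usage is accounted; the noswapaccount boot option clears it. */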
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
	if (mem_cgroup_subsys.disabled)
		return true;
	return false;
}
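/*
 * Typical fast-path bail-out, as used throughout mm/memcontrol.c:
 *
 *	if (mem_cgroup_disabled())
 *		return;
 */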

#else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct mem_cgroup;

static inline int mem_cgroup_newpage_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_cache_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_try_charge(struct mm_struct *mm,
					gfp_t gfp_mask, struct mem_cgroup **ptr)
{
	return 0;
}

static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t gfp_mask, struct mem_cgroup **ptr)
{
	return 0;
}

static inline void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *ptr)
{
}

static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr)
{
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

static inline int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline void mem_cgroup_add_lru_list(struct page *page,
						enum lru_list lru)
{
}

static inline void mem_cgroup_del_lru_list(struct page *page,
						enum lru_list lru)
{
}

static inline void mem_cgroup_rotate_lru_list(struct page *page,
						enum lru_list lru)
{
}

static inline void mem_cgroup_del_lru(struct page *page)
{
}

static inline void
mem_cgroup_move_lists(struct page *page, enum lru_list from, enum lru_list to)
{
}

static inline int mm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *mem)
{
	return 1;
}

static inline int task_in_mem_cgroup(struct task_struct *task,
				     const struct mem_cgroup *mem)
{
	return 1;
}

static inline int
mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
{
	return 0;
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *mem,
					    struct page *oldpage,
					    struct page *newpage)
{
}

static inline int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
{
	return 0;
}

static inline long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem)
{
	return 0;
}

static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
{
	return 0;
}

static inline void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
						    int priority)
{
}

static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
						      int priority)
{
}

static inline long mem_cgroup_calc_reclaim(struct mem_cgroup *mem,
					   struct zone *zone, int priority,
					   enum lru_list lru)
{
	return 0;
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}
#endif /* CONFIG_CGROUP_MEM_RES_CTLR */

#endif /* _LINUX_MEMCONTROL_H */