/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>

struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;

/* Stats that can be updated by the kernel. */
enum mem_cgroup_page_stat_item {
	MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
};

struct mem_cgroup_reclaim_cookie {
	struct zone *zone;
	int priority;
	unsigned int generation;
};

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
/*
 * All "charge" functions with gfp_mask should use GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg does
 * not allocate memory itself but reclaims memory from all available zones,
 * so the "where I want memory from" bits of gfp_mask have no meaning. Any
 * bits of that field could therefore be accepted, but having a rule is
 * better: a charge function's gfp_mask should be set to GFP_KERNEL or to
 * (gfp_mask & GFP_RECLAIM_MASK) to avoid ambiguous code.
 * (Of course, if memcg does allocate memory in the future, GFP_KERNEL is sane.)
 */

extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask);
/* for swap handling */
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t mask, struct mem_cgroup **memcgp);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *memcg);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg);
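/*
 * Sketch of the three-step swapin charge protocol (illustrative only;
 * "mm", "page", "mapped_ok" and the surrounding fault-path context are
 * assumed, not part of this header): try the charge before mapping the
 * page, then either commit it or back it out.
 *
 *	struct mem_cgroup *memcg;
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &memcg))
 *		goto oom;
 *	// ... establish the pte for the page ...
 *	if (mapped_ok)
 *		mem_cgroup_commit_charge_swapin(page, memcg);
 *	else
 *		mem_cgroup_cancel_charge_swapin(memcg);
 */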

extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask);
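/*
 * Minimal charging sketch following the gfp_mask rule above (assumes a
 * fault-path caller with "page" and "mm" in hand; illustrative only):
 *
 *	if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
 *		return VM_FAULT_OOM;
 *
 * A caller carrying its own gfp_mask would pass
 * (gfp_mask & GFP_RECLAIM_MASK) instead of GFP_KERNEL.
 */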

struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
struct lruvec *mem_cgroup_lru_add_list(struct zone *, struct page *,
				       enum lru_list);
void mem_cgroup_lru_del_list(struct page *, enum lru_list);
struct lruvec *mem_cgroup_lru_move_lists(struct zone *, struct page *,
					 enum lru_list, enum lru_list);

/* For coalescing uncharges to reduce memcg overhead */
extern void mem_cgroup_uncharge_start(void);
extern void mem_cgroup_uncharge_end(void);
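/*
 * Batched uncharge sketch (illustrative; the page list is assumed).
 * Bracketing a run of uncharges lets memcg coalesce the counter updates
 * instead of paying for them one page at a time.
 *
 *	mem_cgroup_uncharge_start();
 *	list_for_each_entry(page, &pages_to_free, lru)
 *		mem_cgroup_uncharge_page(page);
 *	mem_cgroup_uncharge_end();
 */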

extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);

extern void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
				     int order);
bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
				  struct mem_cgroup *memcg);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);

extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
extern struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont);

static inline
int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
{
	struct mem_cgroup *memcg;
	int match;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference((mm)->owner));
	match = __mem_cgroup_same_or_subtree(cgroup, memcg);
	rcu_read_unlock();
	return match;
}

extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);

extern int
mem_cgroup_prepare_migration(struct page *page,
	struct page *newpage, struct mem_cgroup **memcgp, gfp_t gfp_mask);
extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
	struct page *oldpage, struct page *newpage, bool migration_ok);
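/*
 * Migration charge hand-off, sketched under assumed names ("page",
 * "newpage", "migration_ok"); not a definitive recipe:
 *
 *	struct mem_cgroup *memcg;
 *
 *	if (mem_cgroup_prepare_migration(page, newpage, &memcg, GFP_KERNEL))
 *		goto put_back;	// could not charge the new page
 *	// ... copy contents and switch the mapping over ...
 *	mem_cgroup_end_migration(memcg, page, newpage, migration_ok);
 */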

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
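/*
 * Hierarchy walk, sketched ("root" and the per-group work are assumed).
 * Each mem_cgroup_iter() call pins the returned group and unpins the
 * previous one, so a walk that stops early must call
 * mem_cgroup_iter_break() to drop the last reference.
 *
 *	struct mem_cgroup *iter;
 *
 *	for (iter = mem_cgroup_iter(root, NULL, NULL);
 *	     iter;
 *	     iter = mem_cgroup_iter(root, iter, NULL)) {
 *		// ... reclaim from or inspect iter ...
 *		if (done) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */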

/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
int mem_cgroup_inactive_file_is_low(struct lruvec *lruvec);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_get_lruvec_size(struct lruvec *lruvec, enum lru_list);
struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
				      struct task_struct *p);
extern void mem_cgroup_replace_page_cache(struct page *oldpage,
					  struct page *newpage);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
	if (mem_cgroup_subsys.disabled)
		return true;
	return false;
}

void __mem_cgroup_begin_update_page_stat(struct page *page, bool *locked,
					 unsigned long *flags);

extern atomic_t memcg_moving;

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
	if (mem_cgroup_disabled())
		return;
	rcu_read_lock();
	*locked = false;
	if (atomic_read(&memcg_moving))
		__mem_cgroup_begin_update_page_stat(page, locked, flags);
}

void __mem_cgroup_end_update_page_stat(struct page *page,
				       unsigned long *flags);
static inline void mem_cgroup_end_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
	if (mem_cgroup_disabled())
		return;
	if (*locked)
		__mem_cgroup_end_update_page_stat(page, flags);
	rcu_read_unlock();
}

void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_page_stat_item idx,
				 int val);

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, -1);
}
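/*
 * Typical stat-update bracket (a sketch; the rmap-style caller context
 * is assumed). The begin/end pair keeps the page's memcg stable against
 * concurrent task moving while the counter is updated:
 *
 *	bool locked;
 *	unsigned long flags;
 *
 *	mem_cgroup_begin_update_page_stat(page, &locked, &flags);
 *	mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
 *	mem_cgroup_end_update_page_stat(page, &locked, &flags);
 */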

unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned);
u64 mem_cgroup_get_limit(struct mem_cgroup *memcg);

void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#ifdef CONFIG_DEBUG_VM
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif
#else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct mem_cgroup;

static inline int mem_cgroup_newpage_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_cache_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t gfp_mask, struct mem_cgroup **memcgp)
{
	return 0;
}

static inline void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_uncharge_start(void)
{
}

static inline void mem_cgroup_uncharge_end(void)
{
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
						    struct mem_cgroup *memcg)
{
	return &zone->lruvec;
}

static inline struct lruvec *mem_cgroup_lru_add_list(struct zone *zone,
						     struct page *page,
						     enum lru_list lru)
{
	return &zone->lruvec;
}

static inline void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
{
}

static inline struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
						       struct page *page,
						       enum lru_list from,
						       enum lru_list to)
{
	return &zone->lruvec;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline int mm_match_cgroup(struct mm_struct *mm,
		struct mem_cgroup *memcg)
{
	return 1;
}

static inline int task_in_mem_cgroup(struct task_struct *task,
				     const struct mem_cgroup *memcg)
{
	return 1;
}

static inline struct cgroup_subsys_state
		*mem_cgroup_css(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline int
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
	struct mem_cgroup **memcgp, gfp_t gfp_mask)
{
	return 0;
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
		struct page *oldpage, struct page *newpage, bool migration_ok)
{
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
{
	return 1;
}

static inline int
mem_cgroup_inactive_file_is_low(struct lruvec *lruvec)
{
	return 1;
}

static inline unsigned long
mem_cgroup_get_lruvec_size(struct lruvec *lruvec, enum lru_list lru)
{
	return 0;
}

static inline struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page)
{
	return NULL;
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_end_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline
u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}
static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
				struct page *newpage)
{
}
#endif /* CONFIG_CGROUP_MEM_RES_CTLR */

#if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
static inline bool
mem_cgroup_bad_page_check(struct page *page)
{
	return false;
}

static inline void
mem_cgroup_print_bad_page(struct page *page)
{
}
#endif

enum {
	UNDER_LIMIT,
	SOFT_LIMIT,
	OVER_LIMIT,
};

struct sock;
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
void sock_update_memcg(struct sock *sk);
void sock_release_memcg(struct sock *sk);
#else
static inline void sock_update_memcg(struct sock *sk)
{
}
static inline void sock_release_memcg(struct sock *sk)
{
}
#endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */
#endif /* _LINUX_MEMCONTROL_H */