/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>

struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;

/* Stats that can be updated by kernel. */
enum mem_cgroup_page_stat_item {
        MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
};

struct mem_cgroup_reclaim_cookie {
        struct zone *zone;
        int priority;
        unsigned int generation;
};

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
/*
 * All "charge" functions with a gfp_mask should use GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg does
 * not allocate memory itself but reclaims memory from all available zones,
 * so the "where do I want memory from" bits of gfp_mask have no meaning.
 * Any bits would therefore work, but having a rule keeps callers
 * unambiguous: a charge function's gfp_mask should be set to GFP_KERNEL
 * or (gfp_mask & GFP_RECLAIM_MASK).
 * (Of course, if memcg does memory allocation in the future, GFP_KERNEL
 * is sane.)
 */

extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
                                gfp_t gfp_mask);
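/*
 * Illustrative sketch (not part of the original header): charging a newly
 * allocated anonymous page from a fault path.  Per the rule above, the
 * charge is requested with GFP_KERNEL; the zone-placement bits of the
 * allocation's own gfp_mask are irrelevant to memcg.  The surrounding
 * allocation and error handling are simplified assumptions.
 *
 *      page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
 *      if (!page)
 *              return VM_FAULT_OOM;
 *      if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
 *              put_page(page);
 *              return VM_FAULT_OOM;
 *      }
 *      ... map the page into the page tables ...
 */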
/* for swap handling */
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
                struct page *page, gfp_t mask, struct mem_cgroup **memcgp);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
                                        struct mem_cgroup *memcg);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg);
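/*
 * Illustrative sketch of the swapin charge protocol (a summary of the
 * intended calling convention, not a verbatim caller): the charge is first
 * reserved with "try", then committed once the page is actually mapped, or
 * cancelled if the fault is abandoned.  The abort condition below is a
 * placeholder.
 *
 *      struct mem_cgroup *memcg;
 *
 *      if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &memcg))
 *              return VM_FAULT_OOM;
 *      if (fault_must_be_abandoned) {
 *              mem_cgroup_cancel_charge_swapin(memcg);
 *              return 0;
 *      }
 *      mem_cgroup_commit_charge_swapin(page, memcg);
 */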

extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
                                        gfp_t gfp_mask);

struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
struct lruvec *mem_cgroup_lru_add_list(struct zone *, struct page *,
                                       enum lru_list);
void mem_cgroup_lru_del_list(struct page *, enum lru_list);
void mem_cgroup_lru_del(struct page *);
struct lruvec *mem_cgroup_lru_move_lists(struct zone *, struct page *,
                                         enum lru_list, enum lru_list);

/* For coalescing uncharges, to reduce memcg overhead */
extern void mem_cgroup_uncharge_start(void);
extern void mem_cgroup_uncharge_end(void);

extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);
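/*
 * Illustrative batching sketch (not taken from any specific caller):
 * bracketing a loop of uncharges with uncharge_start()/uncharge_end()
 * lets memcg coalesce its internal accounting updates instead of paying
 * the full cost once per page.
 *
 *      mem_cgroup_uncharge_start();
 *      for (i = 0; i < nr_pages; i++)
 *              mem_cgroup_uncharge_cache_page(pages[i]);
 *      mem_cgroup_uncharge_end();
 */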

extern void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);

extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
extern struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont);

static inline
int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
{
        struct mem_cgroup *memcg;
        rcu_read_lock();
        memcg = mem_cgroup_from_task(rcu_dereference((mm)->owner));
        rcu_read_unlock();
        return cgroup == memcg;
}

extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);

extern int
mem_cgroup_prepare_migration(struct page *page,
        struct page *newpage, struct mem_cgroup **memcgp, gfp_t gfp_mask);
extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
        struct page *oldpage, struct page *newpage, bool migration_ok);

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
                                   struct mem_cgroup *,
                                   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
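/*
 * Illustrative hierarchy-walk sketch (the iterator's calling convention as
 * assumed here, not a verbatim caller): visit every memcg beneath @root; a
 * walk that stops early must be finished with mem_cgroup_iter_break().
 * Passing a mem_cgroup_reclaim_cookie instead of NULL lets concurrent
 * reclaimers share iteration state.  The "done" condition is a placeholder.
 *
 *      struct mem_cgroup *memcg;
 *
 *      for (memcg = mem_cgroup_iter(root, NULL, NULL);
 *           memcg;
 *           memcg = mem_cgroup_iter(root, memcg, NULL)) {
 *              if (done) {
 *                      mem_cgroup_iter_break(root, memcg);
 *                      break;
 *              }
 *      }
 */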

/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg,
                                    struct zone *zone);
int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg,
                                    struct zone *zone);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg,
                                        int nid, int zid, unsigned int lrumask);
struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
                                                      struct zone *zone);
struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
                                      struct task_struct *p);
extern void mem_cgroup_replace_page_cache(struct page *oldpage,
                                          struct page *newpage);

extern void mem_cgroup_reset_owner(struct page *page);
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
        if (mem_cgroup_subsys.disabled)
                return true;
        return false;
}

void mem_cgroup_update_page_stat(struct page *page,
                                 enum mem_cgroup_page_stat_item idx,
                                 int val);

static inline void mem_cgroup_inc_page_stat(struct page *page,
                                            enum mem_cgroup_page_stat_item idx)
{
        mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
                                            enum mem_cgroup_page_stat_item idx)
{
        mem_cgroup_update_page_stat(page, idx, -1);
}
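/*
 * Illustrative sketch, modelled on the file-rmap accounting (an assumed
 * caller, not copied from this tree): the per-memcg MEMCG_NR_FILE_MAPPED
 * counter is bumped alongside the global zone statistic when a file page
 * gains its first mapping.
 *
 *      if (atomic_inc_and_test(&page->_mapcount)) {
 *              __inc_zone_page_state(page, NR_FILE_MAPPED);
 *              mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
 *      }
 */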

unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
                                                gfp_t gfp_mask,
                                                unsigned long *total_scanned);
u64 mem_cgroup_get_limit(struct mem_cgroup *memcg);

void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#ifdef CONFIG_DEBUG_VM
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif
#else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct mem_cgroup;

static inline int mem_cgroup_newpage_charge(struct page *page,
                                        struct mm_struct *mm, gfp_t gfp_mask)
{
        return 0;
}

static inline int mem_cgroup_cache_charge(struct page *page,
                                        struct mm_struct *mm, gfp_t gfp_mask)
{
        return 0;
}

static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
                struct page *page, gfp_t gfp_mask, struct mem_cgroup **memcgp)
{
        return 0;
}

static inline void mem_cgroup_commit_charge_swapin(struct page *page,
                                        struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_uncharge_start(void)
{
}

static inline void mem_cgroup_uncharge_end(void)
{
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
                                                    struct mem_cgroup *memcg)
{
        return &zone->lruvec;
}

static inline struct lruvec *mem_cgroup_lru_add_list(struct zone *zone,
                                                     struct page *page,
                                                     enum lru_list lru)
{
        return &zone->lruvec;
}

static inline void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
{
}

static inline void mem_cgroup_lru_del(struct page *page)
{
}

static inline struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
                                                       struct page *page,
                                                       enum lru_list from,
                                                       enum lru_list to)
{
        return &zone->lruvec;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
        return NULL;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
        return NULL;
}

static inline int mm_match_cgroup(struct mm_struct *mm,
                struct mem_cgroup *memcg)
{
        return 1;
}

static inline int task_in_mem_cgroup(struct task_struct *task,
                                     const struct mem_cgroup *memcg)
{
        return 1;
}

static inline struct cgroup_subsys_state
                *mem_cgroup_css(struct mem_cgroup *memcg)
{
        return NULL;
}

static inline int
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
        struct mem_cgroup **memcgp, gfp_t gfp_mask)
{
        return 0;
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
                struct page *oldpage, struct page *newpage, bool migration_ok)
{
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
                struct mem_cgroup *prev,
                struct mem_cgroup_reclaim_cookie *reclaim)
{
        return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
                                         struct mem_cgroup *prev)
{
}

static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *memcg)
{
        return 0;
}

static inline void mem_cgroup_note_reclaim_priority(struct mem_cgroup *memcg,
                                                    int priority)
{
}

static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *memcg,
                                                      int priority)
{
}

static inline bool mem_cgroup_disabled(void)
{
        return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone)
{
        return 1;
}

static inline int
mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg, struct zone *zone)
{
        return 1;
}

static inline unsigned long
mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
                             unsigned int lru_mask)
{
        return 0;
}

static inline struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone)
{
        return NULL;
}

static inline struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page)
{
        return NULL;
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
                                            enum mem_cgroup_page_stat_item idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
                                            enum mem_cgroup_page_stat_item idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
                                            gfp_t gfp_mask,
                                            unsigned long *total_scanned)
{
        return 0;
}

static inline
u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
{
        return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}

static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
                                                 struct page *newpage)
{
}

static inline void mem_cgroup_reset_owner(struct page *page)
{
}
#endif /* CONFIG_CGROUP_MEM_RES_CTLR */

#if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
static inline bool
mem_cgroup_bad_page_check(struct page *page)
{
        return false;
}

static inline void
mem_cgroup_print_bad_page(struct page *page)
{
}
#endif

enum {
        UNDER_LIMIT,
        SOFT_LIMIT,
        OVER_LIMIT,
};

struct sock;
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
void sock_update_memcg(struct sock *sk);
void sock_release_memcg(struct sock *sk);
#else
static inline void sock_update_memcg(struct sock *sk)
{
}
static inline void sock_release_memcg(struct sock *sk)
{
}
#endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */
#endif /* _LINUX_MEMCONTROL_H */