/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>

struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;

/* Stats that can be updated by kernel. */
enum mem_cgroup_page_stat_item {
	MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
};

extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active, int file);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
/*
 * All "charge" functions with a gfp_mask should use GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg
 * doesn't allocate memory itself; it only reclaims memory from all
 * available zones. The "where do I want memory from" bits of gfp_mask
 * therefore have no meaning, and any bits of that field could be used,
 * but having a rule is better: a charge function's gfp_mask should be
 * set to GFP_KERNEL or (gfp_mask & GFP_RECLAIM_MASK) to avoid ambiguity.
 * (Of course, if memcg does allocate memory in the future, GFP_KERNEL
 * is sane.)
 */

extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask);
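
/*
 * Example (a minimal sketch, not part of this header): charging a newly
 * allocated anonymous page in a fault path, following the gfp_mask rule
 * documented above. The caller context (page, mm) is assumed.
 *
 *	if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
 *		return VM_FAULT_OOM;
 */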
/* for swap handling */
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t mask, struct mem_cgroup **ptr);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
	struct mem_cgroup *ptr);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr);

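/*
 * Example (a sketch of a swap-in fault path; the caller context is
 * assumed): the try/commit/cancel protocol brackets the point where the
 * page is actually mapped, so a failed mapping can roll the charge back.
 *
 *	struct mem_cgroup *ptr;
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr))
 *		return VM_FAULT_OOM;
 *	... map the page into the page tables ...
 *	if (mapping failed)
 *		mem_cgroup_cancel_charge_swapin(ptr);
 *	else
 *		mem_cgroup_commit_charge_swapin(page, ptr);
 */
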
extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask);
extern void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_rotate_reclaimable_page(struct page *page);
extern void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru(struct page *page);
extern void mem_cgroup_move_lists(struct page *page,
				enum lru_list from, enum lru_list to);

/* For coalescing uncharges, to reduce memcg overhead */
extern void mem_cgroup_uncharge_start(void);
extern void mem_cgroup_uncharge_end(void);

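/*
 * Example (a sketch; the loop body is assumed): bracketing a batch of
 * uncharges, e.g. while tearing down a range of pages, so the per-page
 * memcg bookkeeping can be coalesced.
 *
 *	mem_cgroup_uncharge_start();
 *	for each page in the batch:
 *		mem_cgroup_uncharge_page(page);
 *	mem_cgroup_uncharge_end();
 */
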
extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);
extern int mem_cgroup_shmem_charge_fallback(struct page *page,
			struct mm_struct *mm, gfp_t gfp_mask);

extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

static inline
int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
{
	struct mem_cgroup *mem;
	rcu_read_lock();
	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
	rcu_read_unlock();
	return cgroup == mem;
}

extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem);

extern int
mem_cgroup_prepare_migration(struct page *page,
	struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask);
extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
	struct page *oldpage, struct page *newpage, bool migration_ok);

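/*
 * Example (a sketch of a page-migration path; the caller context and
 * failure label are assumed): the charge follows the page to newpage
 * only if the migration actually succeeds.
 *
 *	struct mem_cgroup *mem;
 *
 *	if (mem_cgroup_prepare_migration(page, newpage, &mem, GFP_KERNEL))
 *		goto out;	(charge failed, abort the migration)
 *	... copy contents and switch mappings over to newpage ...
 *	mem_cgroup_end_migration(mem, page, newpage, migration_ok);
 */
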
/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg);
int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg,
					struct zone *zone,
					enum lru_list lru);
struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
						      struct zone *zone);
struct zone_reclaim_stat *
mem_cgroup_get_reclaim_stat_from_page(struct page *page);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
					struct task_struct *p);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
	if (mem_cgroup_subsys.disabled)
		return true;
	return false;
}

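/*
 * Example (a sketch; some_memcg_hook() is a hypothetical caller, not a
 * real function): memcg hooks typically bail out first thing when the
 * controller is disabled, keeping the disabled-case cost near zero.
 *
 *	void some_memcg_hook(struct page *page)
 *	{
 *		if (mem_cgroup_disabled())
 *			return;
 *		... update memcg state for the page ...
 *	}
 */
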
void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_page_stat_item idx,
				 int val);

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, -1);
}

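/*
 * Example (a sketch of an rmap-style caller; context is assumed):
 * keeping MEMCG_NR_FILE_MAPPED in step with a file page being mapped
 * and unmapped.
 *
 *	mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);	(on map)
 *	mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_MAPPED);	(on unmap)
 */
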
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
						gfp_t gfp_mask,
						unsigned long *total_scanned);
u64 mem_cgroup_get_limit(struct mem_cgroup *mem);

void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail);
#endif

#ifdef CONFIG_DEBUG_VM
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif
#else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct mem_cgroup;

static inline int mem_cgroup_newpage_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_cache_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t gfp_mask, struct mem_cgroup **ptr)
{
	return 0;
}

static inline void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *ptr)
{
}

static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr)
{
}

static inline void mem_cgroup_uncharge_start(void)
{
}

static inline void mem_cgroup_uncharge_end(void)
{
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

static inline int mem_cgroup_shmem_charge_fallback(struct page *page,
			struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
{
}

static inline void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
{
}

static inline void mem_cgroup_rotate_reclaimable_page(struct page *page)
{
}

static inline void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
{
}

static inline void mem_cgroup_del_lru(struct page *page)
{
}

static inline void
mem_cgroup_move_lists(struct page *page, enum lru_list from, enum lru_list to)
{
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline int mm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *mem)
{
	return 1;
}

static inline int task_in_mem_cgroup(struct task_struct *task,
				     const struct mem_cgroup *mem)
{
	return 1;
}

static inline struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
{
	return NULL;
}

static inline int
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
	struct mem_cgroup **ptr, gfp_t gfp_mask)
{
	return 0;
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *mem,
	struct page *oldpage, struct page *newpage, bool migration_ok)
{
}

static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
{
	return 0;
}

static inline void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
						int priority)
{
}

static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
						int priority)
{
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
{
	return 1;
}

static inline int
mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
{
	return 1;
}

static inline unsigned long
mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, struct zone *zone,
			     enum lru_list lru)
{
	return 0;
}

static inline struct zone_reclaim_stat *
mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone)
{
	return NULL;
}

static inline struct zone_reclaim_stat *
mem_cgroup_get_reclaim_stat_from_page(struct page *page)
{
	return NULL;
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline
u64 mem_cgroup_get_limit(struct mem_cgroup *mem)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head,
					       struct page *tail)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}
#endif /* CONFIG_CGROUP_MEM_RES_CTLR */

#if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
static inline bool
mem_cgroup_bad_page_check(struct page *page)
{
	return false;
}

static inline void
mem_cgroup_print_bad_page(struct page *page)
{
}
#endif

#endif /* _LINUX_MEMCONTROL_H */