/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;

/* Stats that can be updated by the kernel. */
enum mem_cgroup_page_stat_item {
	MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
};

extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active, int file);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
/*
 * All "charge" functions that take a gfp_mask should use GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg does
 * not allocate memory itself but reclaims memory from all available zones,
 * so the "where do I want memory from" bits of gfp_mask have no meaning.
 * Any bits of that field could therefore be used, but having a rule is
 * better: a charge function's gfp_mask should be set to GFP_KERNEL or
 * gfp_mask & GFP_RECLAIM_MASK to avoid ambiguous code.
 * (Of course, if memcg does memory allocation in the future, GFP_KERNEL is
 * sane.)
 */
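/*
 * Illustrative sketch, not part of this header: a caller holding an arbitrary
 * gfp_mask would follow the rule above by masking it before charging, e.g.
 *
 *	int charge_file_page(struct page *page, struct mm_struct *mm,
 *			     gfp_t gfp_mask)
 *	{
 *		return mem_cgroup_cache_charge(page, mm,
 *					       gfp_mask & GFP_RECLAIM_MASK);
 *	}
 *
 * charge_file_page() is a hypothetical helper shown only to illustrate the
 * convention; GFP_RECLAIM_MASK is assumed visible to the caller (it is
 * defined in mm/internal.h).
 */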

extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask);
/* for swap handling */
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t mask, struct mem_cgroup **ptr);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *ptr);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr);
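/*
 * Illustrative sketch, not part of this header: the three swapin calls above
 * form a two-phase charge protocol. A fault path would look roughly like
 *
 *	struct mem_cgroup *ptr;
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr))
 *		goto oom;
 *	if (install_the_pte(page))
 *		mem_cgroup_commit_charge_swapin(page, ptr);
 *	else
 *		mem_cgroup_cancel_charge_swapin(ptr);
 *
 * install_the_pte() is a hypothetical placeholder for whatever actually maps
 * the page; only the mem_cgroup_* calls are real.
 */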

extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask);
extern void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru(struct page *page);
extern void mem_cgroup_move_lists(struct page *page,
				  enum lru_list from, enum lru_list to);

/* For coalescing uncharges, to reduce memcg overhead */
extern void mem_cgroup_uncharge_start(void);
extern void mem_cgroup_uncharge_end(void);
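/*
 * Illustrative sketch, not part of this header: callers that uncharge many
 * pages back to back (truncation, for example) can bracket the loop so the
 * uncharges are coalesced into fewer res_counter updates:
 *
 *	mem_cgroup_uncharge_start();
 *	for each page being released
 *		mem_cgroup_uncharge_cache_page(page);
 *	mem_cgroup_uncharge_end();
 *
 * The "for each page" line is pseudocode; the three mem_cgroup_* calls are
 * the real API.
 */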

extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);
extern int mem_cgroup_shmem_charge_fallback(struct page *page,
			struct mm_struct *mm, gfp_t gfp_mask);

extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

static inline
int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
{
	struct mem_cgroup *mem;
	rcu_read_lock();
	mem = mem_cgroup_from_task(rcu_dereference((mm)->owner));
	rcu_read_unlock();
	return cgroup == mem;
}

extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem);

extern int
mem_cgroup_prepare_migration(struct page *page,
	struct page *newpage, struct mem_cgroup **ptr);
extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
	struct page *oldpage, struct page *newpage);

/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg);
int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg);
unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
				       struct zone *zone,
				       enum lru_list lru);
struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
						      struct zone *zone);
struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
				      struct task_struct *p);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
	if (mem_cgroup_subsys.disabled)
		return true;
	return false;
}

void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_page_stat_item idx,
				 int val);

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, -1);
}
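/*
 * Illustrative sketch, not part of this header: the inc/dec helpers are meant
 * to be paired around a page state change. For example, rmap accounting of a
 * file-backed page does, in effect,
 *
 *	mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
 *		...page stays mapped for a while...
 *	mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_MAPPED);
 *
 * The real call sites are in mm/rmap.c; this only shows the intended pairing.
 */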

unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
						gfp_t gfp_mask);
u64 mem_cgroup_get_limit(struct mem_cgroup *mem);

#else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct mem_cgroup;

static inline int mem_cgroup_newpage_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_cache_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t gfp_mask, struct mem_cgroup **ptr)
{
	return 0;
}

static inline void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *ptr)
{
}

static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr)
{
}

static inline void mem_cgroup_uncharge_start(void)
{
}

static inline void mem_cgroup_uncharge_end(void)
{
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

static inline int mem_cgroup_shmem_charge_fallback(struct page *page,
			struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline void mem_cgroup_add_lru_list(struct page *page, int lru)
{
}

static inline void mem_cgroup_del_lru_list(struct page *page, int lru)
{
}

static inline void mem_cgroup_rotate_lru_list(struct page *page, int lru)
{
}

static inline void mem_cgroup_del_lru(struct page *page)
{
}

static inline void
mem_cgroup_move_lists(struct page *page, enum lru_list from, enum lru_list to)
{
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline int mm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *mem)
{
	return 1;
}

static inline int task_in_mem_cgroup(struct task_struct *task,
				     const struct mem_cgroup *mem)
{
	return 1;
}

static inline struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
{
	return NULL;
}

static inline int
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
	struct mem_cgroup **ptr)
{
	return 0;
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *mem,
					    struct page *oldpage,
					    struct page *newpage)
{
}

static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
{
	return 0;
}

static inline void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
						int priority)
{
}

static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
						int priority)
{
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
{
	return 1;
}

static inline int
mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
{
	return 1;
}

static inline unsigned long
mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, struct zone *zone,
			 enum lru_list lru)
{
	return 0;
}

static inline struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone)
{
	return NULL;
}

static inline struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page)
{
	return NULL;
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask)
{
	return 0;
}

static inline
u64 mem_cgroup_get_limit(struct mem_cgroup *mem)
{
	return 0;
}

#endif /* CONFIG_CGROUP_MEM_RES_CTLR */

#endif /* _LINUX_MEMCONTROL_H */