/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H

struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;

#ifdef CONFIG_CGROUP_MEM_RES_CTLR

#define page_reset_bad_cgroup(page)	((page)->page_cgroup = 0)

extern struct page_cgroup *page_get_page_cgroup(struct page *page);
extern int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask);
extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask);
extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);
extern void mem_cgroup_move_lists(struct page *page, bool active);
extern int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask);

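/*
 * Illustrative only (a sketch, not taken from any caller in this tree):
 * a new page is charged against the owning mm's memory cgroup before it
 * becomes visible, and uncharged again on the error path or when the
 * page is released.  Both charge functions return 0 on success and a
 * negative errno on failure; the "out_release" label below is
 * hypothetical.
 *
 *	if (mem_cgroup_charge(page, mm, GFP_KERNEL))
 *		goto out_release;
 *	...
 *	mem_cgroup_uncharge_page(page);
 */
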
extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active);
extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);

extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

#define mm_match_cgroup(mm, cgroup)	\
	((cgroup) == mem_cgroup_from_task((mm)->owner))

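/*
 * mm_match_cgroup() tests whether the task owning @mm is accounted to
 * @cgroup.  A sketch of the kind of filter a per-cgroup reclaim walker
 * might apply (simplified; "memcg" and the surrounding vma loop are
 * assumptions, not taken from this header):
 *
 *	if (memcg && !mm_match_cgroup(vma->vm_mm, memcg))
 *		continue;
 */
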
extern int
mem_cgroup_prepare_migration(struct page *page, struct page *newpage);
extern void mem_cgroup_end_migration(struct page *page);

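/*
 * Page migration sketch (illustrative and simplified; which page the
 * caller hands to mem_cgroup_end_migration() is an assumption here, the
 * authoritative pattern is the one in mm/migrate.c): the charge on the
 * old page is prepared for transfer before the copy, and the transfer
 * is finished once migration completes.
 *
 *	if (mem_cgroup_prepare_migration(page, newpage))
 *		goto out;
 *	...
 *	mem_cgroup_end_migration(newpage);
 */
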
/*
 * For memory reclaim.
 */
extern int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem);
extern long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem);

extern int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem);
extern void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
							int priority);
extern void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
							int priority);

extern long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem,
					struct zone *zone, int priority);
extern long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
					struct zone *zone, int priority);

#else /* CONFIG_CGROUP_MEM_RES_CTLR */
static inline void page_reset_bad_cgroup(struct page *page)
{
}

static inline struct page_cgroup *page_get_page_cgroup(struct page *page)
{
	return NULL;
}

static inline int mem_cgroup_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_cache_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

static inline int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline void mem_cgroup_move_lists(struct page *page, bool active)
{
}

static inline int mm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *mem)
{
	return 1;
}

static inline int task_in_mem_cgroup(struct task_struct *task,
				     const struct mem_cgroup *mem)
{
	return 1;
}

static inline int
mem_cgroup_prepare_migration(struct page *page, struct page *newpage)
{
	return 0;
}

static inline void mem_cgroup_end_migration(struct page *page)
{
}

static inline int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
{
	return 0;
}

static inline long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem)
{
	return 0;
}

static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
{
	return 0;
}

static inline void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
						int priority)
{
}

static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
						int priority)
{
}

static inline long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem,
					struct zone *zone, int priority)
{
	return 0;
}

static inline long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
					struct zone *zone, int priority)
{
	return 0;
}
#endif /* CONFIG_CGROUP_MEM_RES_CTLR */

#endif /* _LINUX_MEMCONTROL_H */