/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>

struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;

/* Stats that can be updated by the kernel. */
enum mem_cgroup_page_stat_item {
	MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
};

struct mem_cgroup_reclaim_cookie {
	struct zone *zone;
	int priority;
	unsigned int generation;
};

#ifdef CONFIG_MEMCG
/*
 * All "charge" functions that take a gfp_mask should use GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg does
 * not allocate memory itself but reclaims memory from all available zones,
 * so the "where I want memory from" bits of gfp_mask have no meaning. Any
 * bits of that field would therefore work, but following one rule avoids
 * ambiguity: a charge function's gfp_mask should be GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK).
 * (Of course, if memcg ever allocates memory itself, GFP_KERNEL is sane.)
 */
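
/*
 * Illustrative sketch of a call site (assumed, not part of this header):
 * a fault path charging a freshly allocated page, following the gfp_mask
 * rule above. The surrounding fault handling is omitted.
 *
 *	if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
 *		return VM_FAULT_OOM;
 */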

extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask);
/* for swap handling */
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t mask, struct mem_cgroup **memcgp);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *memcg);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg);
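
/*
 * Illustrative sketch of a swap-in call site (assumed, not part of this
 * header): the charge is split into try/commit so it can be backed out
 * with cancel if the fault fails between the two phases:
 *
 *	struct mem_cgroup *memcg;
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &memcg))
 *		goto out_fail;
 *	... install the page into the page tables; this may still fail ...
 *	if (error)
 *		mem_cgroup_cancel_charge_swapin(memcg);
 *	else
 *		mem_cgroup_commit_charge_swapin(page, memcg);
 */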

extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask);

struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);

/* For coalescing uncharges to reduce memcg overhead */
extern void mem_cgroup_uncharge_start(void);
extern void mem_cgroup_uncharge_end(void);
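
/*
 * Illustrative sketch (assumed caller; the list name is made up): wrapping
 * a batch of frees in uncharge_start/end lets memcg coalesce the uncharges
 * instead of updating its counters once per page:
 *
 *	mem_cgroup_uncharge_start();
 *	list_for_each_entry(page, &pages_to_free, lru)
 *		mem_cgroup_uncharge_page(page);
 *	mem_cgroup_uncharge_end();
 */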

extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);

bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
				  struct mem_cgroup *memcg);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);

extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
extern struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont);

static inline
bool mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	match = __mem_cgroup_same_or_subtree(memcg, task_memcg);
	rcu_read_unlock();
	return match;
}

extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);

extern void
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
			     struct mem_cgroup **memcgp);
extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
	struct page *oldpage, struct page *newpage, bool migration_ok);

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
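
/*
 * Illustrative sketch (assumed caller, not part of this header): walking
 * the hierarchy under @root. Passing a reclaim cookie lets successive
 * walks share per-zone iteration state; passing NULL visits every group.
 * Break out early with mem_cgroup_iter_break() so the iterator's
 * reference on the current group is dropped:
 *
 *	struct mem_cgroup *memcg;
 *
 *	memcg = mem_cgroup_iter(root, NULL, NULL);
 *	while (memcg) {
 *		if (done) {
 *			mem_cgroup_iter_break(root, memcg);
 *			break;
 *		}
 *		memcg = mem_cgroup_iter(root, memcg, NULL);
 *	}
 */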

/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
int mem_cgroup_inactive_file_is_low(struct lruvec *lruvec);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list);
void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
					struct task_struct *p);
extern void mem_cgroup_replace_page_cache(struct page *oldpage,
					struct page *newpage);

#ifdef CONFIG_MEMCG_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
	if (mem_cgroup_subsys.disabled)
		return true;
	return false;
}

void __mem_cgroup_begin_update_page_stat(struct page *page, bool *locked,
					 unsigned long *flags);

extern atomic_t memcg_moving;

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
	if (mem_cgroup_disabled())
		return;
	rcu_read_lock();
	*locked = false;
	if (atomic_read(&memcg_moving))
		__mem_cgroup_begin_update_page_stat(page, locked, flags);
}

void __mem_cgroup_end_update_page_stat(struct page *page,
				unsigned long *flags);
static inline void mem_cgroup_end_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
	if (mem_cgroup_disabled())
		return;
	if (*locked)
		__mem_cgroup_end_update_page_stat(page, flags);
	rcu_read_unlock();
}

void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_page_stat_item idx,
				 int val);

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, -1);
}
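
/*
 * Illustrative sketch (assumed caller, not part of this header): page
 * statistics must be updated under the begin/end pair so the page cannot
 * be moved to another memcg while the counter is being modified:
 *
 *	bool locked;
 *	unsigned long flags;
 *
 *	mem_cgroup_begin_update_page_stat(page, &locked, &flags);
 *	if (page_mapped(page))
 *		mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
 *	mem_cgroup_end_update_page_stat(page, &locked, &flags);
 */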

unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
						gfp_t gfp_mask,
						unsigned long *total_scanned);

void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
					     enum vm_event_item idx)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_count_vm_event(mm, idx);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#ifdef CONFIG_DEBUG_VM
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif
#else /* CONFIG_MEMCG */
struct mem_cgroup;

static inline int mem_cgroup_newpage_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_cache_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t gfp_mask, struct mem_cgroup **memcgp)
{
	return 0;
}

static inline void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_uncharge_start(void)
{
}

static inline void mem_cgroup_uncharge_end(void)
{
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
						    struct mem_cgroup *memcg)
{
	return &zone->lruvec;
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
						    struct zone *zone)
{
	return &zone->lruvec;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
		struct mem_cgroup *memcg)
{
	return true;
}

static inline int task_in_mem_cgroup(struct task_struct *task,
				     const struct mem_cgroup *memcg)
{
	return 1;
}

static inline struct cgroup_subsys_state
		*mem_cgroup_css(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline void
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
			     struct mem_cgroup **memcgp)
{
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
		struct page *oldpage, struct page *newpage, bool migration_ok)
{
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
{
	return 1;
}

static inline int
mem_cgroup_inactive_file_is_low(struct lruvec *lruvec)
{
	return 1;
}

static inline unsigned long
mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	return 0;
}

static inline void
mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
			   int increment)
{
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_end_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}
static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
				struct page *newpage)
{
}
#endif /* CONFIG_MEMCG */

#if !defined(CONFIG_MEMCG) || !defined(CONFIG_DEBUG_VM)
static inline bool
mem_cgroup_bad_page_check(struct page *page)
{
	return false;
}

static inline void
mem_cgroup_print_bad_page(struct page *page)
{
}
#endif

enum {
	UNDER_LIMIT,
	SOFT_LIMIT,
	OVER_LIMIT,
};

struct sock;
#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
void sock_update_memcg(struct sock *sk);
void sock_release_memcg(struct sock *sk);
#else
static inline void sock_update_memcg(struct sock *sk)
{
}
static inline void sock_release_memcg(struct sock *sk)
{
}
#endif /* CONFIG_INET && CONFIG_MEMCG_KMEM */
#endif /* _LINUX_MEMCONTROL_H */