/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/mm_inline.h>
#include <linux/page_cgroup.h>
#include "internal.h"

#include <asm/uaccess.h>

struct cgroup_subsys mem_cgroup_subsys __read_mostly;
#define MEM_CGROUP_RECLAIM_RETRIES	5

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
int do_swap_account __read_mostly;
static int really_do_swap_account __initdata = 1; /* to remember the boot option */
#else
#define do_swap_account		(0)
#endif

/*
 * Statistics for memory cgroup.
 */
enum mem_cgroup_stat_index {
	/*
	 * For the cgroup as a whole, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE,		/* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,		/* # of pages charged as rss */
	MEM_CGROUP_STAT_PGPGIN_COUNT,	/* # of pages paged in */
	MEM_CGROUP_STAT_PGPGOUT_COUNT,	/* # of pages paged out */

	MEM_CGROUP_STAT_NSTATS,
};

struct mem_cgroup_stat_cpu {
	s64 count[MEM_CGROUP_STAT_NSTATS];
} ____cacheline_aligned_in_smp;

struct mem_cgroup_stat {
	struct mem_cgroup_stat_cpu cpustat[0];
};
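/*
 * Layout note (a descriptive sketch, not new behaviour): cpustat[] is a
 * zero-length array, so struct mem_cgroup_stat carries no storage of its
 * own. The per-cpu slots are allocated together with the enclosing struct
 * mem_cgroup, roughly sizeof(struct mem_cgroup) +
 * nr_cpu_ids * sizeof(struct mem_cgroup_stat_cpu), which is why the stat
 * member must be placed at the end of struct mem_cgroup below.
 */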

/*
 * For accounting in contexts where irqs are already disabled; no need to
 * bump the preempt count here.
 */
static inline void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat_cpu *stat,
		enum mem_cgroup_stat_index idx, int val)
{
	stat->count[idx] += val;
}

static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
		enum mem_cgroup_stat_index idx)
{
	int cpu;
	s64 ret = 0;
	for_each_possible_cpu(cpu)
		ret += stat->cpustat[cpu].count[idx];
	return ret;
}

/*
 * per-zone information in memory controller.
 */
struct mem_cgroup_per_zone {
	/*
	 * Per-cgroup LRU lists; synchronized with the global LRU under
	 * zone->lru_lock.
	 */
	struct list_head	lists[NR_LRU_LISTS];
	unsigned long		count[NR_LRU_LISTS];
};
/* Macro for accessing counter */
#define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])

struct mem_cgroup_per_node {
	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};

struct mem_cgroup_lru_info {
	struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
};

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. Maybe even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark; this is
 * a feature that will be implemented much later in the future.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for memory usage
	 */
	struct res_counter res;
	/*
	 * the counter to account for mem+swap usage.
	 */
	struct res_counter memsw;
	/*
	 * Per cgroup active and inactive list, similar to the
	 * per zone LRU lists.
	 */
	struct mem_cgroup_lru_info info;

	int	prev_priority;	/* for recording reclaim priority */
	int		obsolete;
	atomic_t	refcnt;
	/*
	 * statistics. This must be placed at the end of memcg.
	 */
	struct mem_cgroup_stat stat;
};

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_MAPPED,
	MEM_CGROUP_CHARGE_TYPE_SHMEM,	/* used by page migration of shmem */
	MEM_CGROUP_CHARGE_TYPE_FORCE,	/* used by force_empty */
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	NR_CHARGE_TYPE,
};

/* shorthand used only in this file (for easy reading) */
#define PCGF_CACHE	(1UL << PCG_CACHE)
#define PCGF_USED	(1UL << PCG_USED)
#define PCGF_LOCK	(1UL << PCG_LOCK)
static const unsigned long
pcg_default_flags[NR_CHARGE_TYPE] = {
	PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* File Cache */
	PCGF_USED | PCGF_LOCK, /* Anon */
	PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* Shmem */
	0, /* FORCE */
};

/* for encoding cft->private value on file */
#define _MEM			(0)
#define _MEMSWAP		(1)
#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_TYPE(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
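/*
 * Worked example of the encoding above (illustrative only): a cftype
 * private value of MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT) packs the counter
 * type into the upper 16 bits and the res_counter attribute into the lower
 * 16 bits, so MEMFILE_TYPE() yields _MEMSWAP and MEMFILE_ATTR() yields
 * RES_LIMIT again (see mem_cgroup_read() below).
 */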

static void mem_cgroup_get(struct mem_cgroup *mem);
static void mem_cgroup_put(struct mem_cgroup *mem);

static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
					 struct page_cgroup *pc,
					 bool charge)
{
	int val = charge ? 1 : -1;
	struct mem_cgroup_stat *stat = &mem->stat;
	struct mem_cgroup_stat_cpu *cpustat;
	int cpu = get_cpu();

	cpustat = &stat->cpustat[cpu];
	if (PageCgroupCache(pc))
		__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_CACHE, val);
	else
		__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_RSS, val);

	if (charge)
		__mem_cgroup_stat_add_safe(cpustat,
				MEM_CGROUP_STAT_PGPGIN_COUNT, 1);
	else
		__mem_cgroup_stat_add_safe(cpustat,
				MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
	put_cpu();
}

static struct mem_cgroup_per_zone *
mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
{
	return &mem->info.nodeinfo[nid]->zoneinfo[zid];
}

static struct mem_cgroup_per_zone *
page_cgroup_zoneinfo(struct page_cgroup *pc)
{
	struct mem_cgroup *mem = pc->mem_cgroup;
	int nid = page_cgroup_nid(pc);
	int zid = page_cgroup_zid(pc);

	return mem_cgroup_zoneinfo(mem, nid, zid);
}

static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
					enum lru_list idx)
{
	int nid, zid;
	struct mem_cgroup_per_zone *mz;
	u64 total = 0;

	for_each_online_node(nid)
		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
			mz = mem_cgroup_zoneinfo(mem, nid, zid);
			total += MEM_CGROUP_ZSTAT(mz, idx);
		}
	return total;
}

static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
{
	return container_of(cgroup_subsys_state(cont,
				mem_cgroup_subsys_id), struct mem_cgroup,
				css);
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
				struct mem_cgroup, css);
}

/*
 * The following LRU functions are allowed to be used without PCG_LOCK.
 * These operations are called by the global LRU routines, independently of
 * memcg. What we have to take care of here is the validity of
 * pc->mem_cgroup.
 *
 * Changes to pc->mem_cgroup happen when
 * 1. charging
 * 2. moving an account
 * In the typical case, "charge" is done before add-to-LRU. The exception is
 * SwapCache, which is added to the LRU before being charged.
 * If the PCG_USED bit is not set, the page_cgroup is not added to this
 * private LRU.
 * When moving an account, the page is not on the LRU; it's isolated.
 */

void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
{
	struct page_cgroup *pc;
	struct mem_cgroup *mem;
	struct mem_cgroup_per_zone *mz;

	if (mem_cgroup_subsys.disabled)
		return;
	pc = lookup_page_cgroup(page);
	/* can happen while we handle swapcache. */
	if (list_empty(&pc->lru))
		return;
	mz = page_cgroup_zoneinfo(pc);
	mem = pc->mem_cgroup;
	MEM_CGROUP_ZSTAT(mz, lru) -= 1;
	list_del_init(&pc->lru);
	return;
}

void mem_cgroup_del_lru(struct page *page)
{
	mem_cgroup_del_lru_list(page, page_lru(page));
}

void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
{
	struct mem_cgroup_per_zone *mz;
	struct page_cgroup *pc;

	if (mem_cgroup_subsys.disabled)
		return;

	pc = lookup_page_cgroup(page);
	/* barrier to sync with "charge"; pairs with the smp_wmb() there */
	smp_rmb();
	/* unused page is not rotated. */
	if (!PageCgroupUsed(pc))
		return;
	mz = page_cgroup_zoneinfo(pc);
	list_move(&pc->lru, &mz->lists[lru]);
}

void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
{
	struct page_cgroup *pc;
	struct mem_cgroup_per_zone *mz;

	if (mem_cgroup_subsys.disabled)
		return;
	pc = lookup_page_cgroup(page);
	/* barrier to sync with "charge" */
	smp_rmb();
	if (!PageCgroupUsed(pc))
		return;

	mz = page_cgroup_zoneinfo(pc);
	MEM_CGROUP_ZSTAT(mz, lru) += 1;
	list_add(&pc->lru, &mz->lists[lru]);
}
/*
 * Used to put swapcache onto the right LRU. Be careful when calling this
 * function: zone->lru_lock must not be held and irqs must not be disabled.
 */
static void mem_cgroup_lru_fixup(struct page *page)
{
	if (!isolate_lru_page(page))
		putback_lru_page(page);
}

void mem_cgroup_move_lists(struct page *page,
			   enum lru_list from, enum lru_list to)
{
	if (mem_cgroup_subsys.disabled)
		return;
	mem_cgroup_del_lru_list(page, from);
	mem_cgroup_add_lru_list(page, to);
}

int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
{
	int ret;

	task_lock(task);
	ret = task->mm && mm_match_cgroup(task->mm, mem);
	task_unlock(task);
	return ret;
}

/*
 * Calculate the mapped ratio under the memory controller. This will be used
 * in vmscan.c for determining whether we have to reclaim mapped pages.
 */
int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
{
	long total, rss;

	/*
	 * usage is recorded in bytes. But, here, we assume the number of
	 * physical pages can be represented by "long" on any arch.
	 */
	total = (long) (mem->res.usage >> PAGE_SHIFT) + 1L;
	rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
	return (int)((rss * 100L) / total);
}

/*
 * prev_priority control: used in the memory reclaim path.
 */
int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
{
	return mem->prev_priority;
}

void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
{
	if (priority < mem->prev_priority)
		mem->prev_priority = priority;
}

void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
{
	mem->prev_priority = priority;
}

/*
 * Calculate the number of pages to be scanned at this priority/zone.
 * See also vmscan.c.
 *
 * priority starts from "DEF_PRIORITY" and is decremented on each loop.
 * (see include/linux/mmzone.h)
 */
long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone,
					int priority, enum lru_list lru)
{
	long nr_pages;
	int nid = zone->zone_pgdat->node_id;
	int zid = zone_idx(zone);
	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);

	nr_pages = MEM_CGROUP_ZSTAT(mz, lru);

	return (nr_pages >> priority);
}

unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active, int file)
{
	unsigned long nr_taken = 0;
	struct page *page;
	unsigned long scan;
	LIST_HEAD(pc_list);
	struct list_head *src;
	struct page_cgroup *pc, *tmp;
	int nid = z->zone_pgdat->node_id;
	int zid = zone_idx(z);
	struct mem_cgroup_per_zone *mz;
	int lru = LRU_FILE * !!file + !!active;

	BUG_ON(!mem_cont);
	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
	src = &mz->lists[lru];

	scan = 0;
	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
		if (scan >= nr_to_scan)
			break;

		page = pc->page;
		if (unlikely(!PageCgroupUsed(pc)))
			continue;
		if (unlikely(!PageLRU(page)))
			continue;

		scan++;
		if (__isolate_lru_page(page, mode, file) == 0) {
			list_move(&page->lru, dst);
			nr_taken++;
		}
	}

	*scanned = scan;
	return nr_taken;
}

/*
 * Unlike the exported interface, an "oom" parameter is added; if oom == true,
 * the OOM killer can be invoked.
 */
static int __mem_cgroup_try_charge(struct mm_struct *mm,
			gfp_t gfp_mask, struct mem_cgroup **memcg,
			bool oom)
{
	struct mem_cgroup *mem;
	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
	/*
	 * We always charge the cgroup the mm_struct belongs to.
	 * The mm_struct's mem_cgroup changes on task migration if the
	 * thread group leader migrates. It's possible that mm is not
	 * set; if so, charge the init_mm (happens for pagecache usage).
	 */
	if (likely(!*memcg)) {
		rcu_read_lock();
		mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
		if (unlikely(!mem)) {
			rcu_read_unlock();
			return 0;
		}
		/*
		 * For every charge from the cgroup, increment reference count
		 */
		css_get(&mem->css);
		*memcg = mem;
		rcu_read_unlock();
	} else {
		mem = *memcg;
		css_get(&mem->css);
	}

	while (1) {
		int ret;
		bool noswap = false;

		ret = res_counter_charge(&mem->res, PAGE_SIZE);
		if (likely(!ret)) {
			if (!do_swap_account)
				break;
			ret = res_counter_charge(&mem->memsw, PAGE_SIZE);
			if (likely(!ret))
				break;
			/* mem+swap counter fails */
			res_counter_uncharge(&mem->res, PAGE_SIZE);
			noswap = true;
		}
		if (!(gfp_mask & __GFP_WAIT))
			goto nomem;

		if (try_to_free_mem_cgroup_pages(mem, gfp_mask, noswap))
			continue;

		/*
		 * try_to_free_mem_cgroup_pages() might not give us a full
		 * picture of reclaim. Some pages are reclaimed and might be
		 * moved to swap cache or just unmapped from the cgroup.
		 * Check the limit again to see if the reclaim reduced the
		 * current usage of the cgroup before giving up.
		 */
		if (!do_swap_account &&
			res_counter_check_under_limit(&mem->res))
			continue;
		if (do_swap_account &&
			res_counter_check_under_limit(&mem->memsw))
			continue;

		if (!nr_retries--) {
			if (oom)
				mem_cgroup_out_of_memory(mem, gfp_mask);
			goto nomem;
		}
	}
	return 0;
nomem:
	css_put(&mem->css);
	return -ENOMEM;
}

/**
 * mem_cgroup_try_charge - get a charge of PAGE_SIZE.
 * @mm: the mm_struct which is charged against (when *memcg is NULL).
 * @gfp_mask: gfp_mask for reclaim.
 * @memcg: a pointer to the memory cgroup which is charged against.
 *
 * Charge against the memory cgroup pointed to by *memcg. If *memcg == NULL,
 * the memory cgroup is estimated from @mm and stored in *memcg.
 *
 * Returns 0 on success and -ENOMEM on failure.
 * This call can invoke the OOM killer.
 */

int mem_cgroup_try_charge(struct mm_struct *mm,
			  gfp_t mask, struct mem_cgroup **memcg)
{
	return __mem_cgroup_try_charge(mm, mask, memcg, true);
}

/*
 * Commit a charge obtained by mem_cgroup_try_charge() and make the
 * page_cgroup enter the USED state. If it is already USED, uncharge and
 * return.
 */
static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
				     struct page_cgroup *pc,
				     enum charge_type ctype)
{
	/* try_charge() can return NULL to *memcg, taking care of it. */
	if (!mem)
		return;

	lock_page_cgroup(pc);
	if (unlikely(PageCgroupUsed(pc))) {
		unlock_page_cgroup(pc);
		res_counter_uncharge(&mem->res, PAGE_SIZE);
		if (do_swap_account)
			res_counter_uncharge(&mem->memsw, PAGE_SIZE);
		css_put(&mem->css);
		return;
	}
	pc->mem_cgroup = mem;
	/*
	 * Make pc->mem_cgroup visible before the USED bit is set in
	 * pc->flags; paired with the smp_rmb() in the LRU functions above.
	 */
	smp_wmb();
	pc->flags = pcg_default_flags[ctype];

	mem_cgroup_charge_statistics(mem, pc, true);

	unlock_page_cgroup(pc);
}

/**
 * mem_cgroup_move_account - move the account of the page
 * @pc:	page_cgroup of the page.
 * @from: mem_cgroup which the page is moved from.
 * @to:	mem_cgroup which the page is moved to. @from != @to.
 *
 * The caller must confirm the following:
 * - the page is not on the LRU (isolate_lru_page() is useful.)
 *
 * Returns 0 on success, or -EBUSY when the lock is busy or "pc" is unstable.
 *
 * This function does "uncharge" from the old cgroup but doesn't do "charge"
 * to the new cgroup. That should be done by the caller.
 */
static int mem_cgroup_move_account(struct page_cgroup *pc,
	struct mem_cgroup *from, struct mem_cgroup *to)
{
	struct mem_cgroup_per_zone *from_mz, *to_mz;
	int nid, zid;
	int ret = -EBUSY;

	VM_BUG_ON(from == to);
	VM_BUG_ON(PageLRU(pc->page));

	nid = page_cgroup_nid(pc);
	zid = page_cgroup_zid(pc);
	from_mz = mem_cgroup_zoneinfo(from, nid, zid);
	to_mz = mem_cgroup_zoneinfo(to, nid, zid);

	if (!trylock_page_cgroup(pc))
		return ret;

	if (!PageCgroupUsed(pc))
		goto out;

	if (pc->mem_cgroup != from)
		goto out;

	css_put(&from->css);
	res_counter_uncharge(&from->res, PAGE_SIZE);
	mem_cgroup_charge_statistics(from, pc, false);
	if (do_swap_account)
		res_counter_uncharge(&from->memsw, PAGE_SIZE);
	pc->mem_cgroup = to;
	mem_cgroup_charge_statistics(to, pc, true);
	css_get(&to->css);
	ret = 0;
out:
	unlock_page_cgroup(pc);
	return ret;
}

/*
 * Move charges to the parent cgroup.
 */
static int mem_cgroup_move_parent(struct page_cgroup *pc,
				  struct mem_cgroup *child,
				  gfp_t gfp_mask)
{
	struct page *page = pc->page;
	struct cgroup *cg = child->css.cgroup;
	struct cgroup *pcg = cg->parent;
	struct mem_cgroup *parent;
	int ret;

	/* Is ROOT ? */
	if (!pcg)
		return -EINVAL;

	parent = mem_cgroup_from_cont(pcg);

	ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
	if (ret)
		return ret;

	if (!get_page_unless_zero(page))
		return -EBUSY;

	ret = isolate_lru_page(page);
	if (ret)
		goto cancel;

	ret = mem_cgroup_move_account(pc, child, parent);

	/* drop the extra refcount from try_charge() (move_account takes one) */
	css_put(&parent->css);
	putback_lru_page(page);
	if (!ret) {
		put_page(page);
		return 0;
	}
	/* uncharge if the move fails */
cancel:
	res_counter_uncharge(&parent->res, PAGE_SIZE);
	if (do_swap_account)
		res_counter_uncharge(&parent->memsw, PAGE_SIZE);
	put_page(page);
	return ret;
}

/*
 * Charge the memory controller for page usage.
 * Return
 * 0 if the charge was successful
 * < 0 if the cgroup is over its limit
 */
static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask, enum charge_type ctype,
				struct mem_cgroup *memcg)
{
	struct mem_cgroup *mem;
	struct page_cgroup *pc;
	int ret;

	pc = lookup_page_cgroup(page);
	/* can happen at boot */
	if (unlikely(!pc))
		return 0;
	prefetchw(pc);

	mem = memcg;
	ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
	if (ret)
		return ret;

	__mem_cgroup_commit_charge(mem, pc, ctype);
	return 0;
}

int mem_cgroup_newpage_charge(struct page *page,
			      struct mm_struct *mm, gfp_t gfp_mask)
{
	if (mem_cgroup_subsys.disabled)
		return 0;
	if (PageCompound(page))
		return 0;
	/*
	 * If already mapped, we don't have to account.
	 * If page cache, page->mapping has an address_space.
	 * But page->mapping may hold an out-of-use anon_vma pointer;
	 * detect that with a PageAnon() check: a newly-mapped anon page's
	 * page->mapping is NULL.
	 */
	if (page_mapped(page) || (page->mapping && !PageAnon(page)))
		return 0;
	if (unlikely(!mm))
		mm = &init_mm;
	return mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
}

int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask)
{
	if (mem_cgroup_subsys.disabled)
		return 0;
	if (PageCompound(page))
		return 0;
	/*
	 * Corner case handling. This is usually called from
	 * add_to_page_cache(). But some filesystems (shmem) precharge the
	 * page before calling it and then call add_to_page_cache() with
	 * GFP_NOWAIT.
	 *
	 * So in the GFP_NOWAIT case, the page may already be charged before
	 * add_to_page_cache() runs (see shmem.c). Check it here to avoid
	 * charging twice. (It works, but pays a slightly larger cost.)
	 */
	if (!(gfp_mask & __GFP_WAIT)) {
		struct page_cgroup *pc;

		pc = lookup_page_cgroup(page);
		if (!pc)
			return 0;
		lock_page_cgroup(pc);
		if (PageCgroupUsed(pc)) {
			unlock_page_cgroup(pc);
			return 0;
		}
		unlock_page_cgroup(pc);
	}

	if (unlikely(!mm))
		mm = &init_mm;

	if (page_is_file_cache(page))
		return mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
	else
		return mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_SHMEM, NULL);
}

int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
				 struct page *page,
				 gfp_t mask, struct mem_cgroup **ptr)
{
	struct mem_cgroup *mem;
	swp_entry_t ent;

	if (mem_cgroup_subsys.disabled)
		return 0;

	if (!do_swap_account)
		goto charge_cur_mm;

	/*
	 * A racing thread's fault, or swapoff, may have already updated
	 * the pte, and even removed page from swap cache: return success
	 * to go on to do_swap_page()'s pte_same() test, which should fail.
	 */
	if (!PageSwapCache(page))
		return 0;

	ent.val = page_private(page);

	mem = lookup_swap_cgroup(ent);
	if (!mem || mem->obsolete)
		goto charge_cur_mm;
	*ptr = mem;
	return __mem_cgroup_try_charge(NULL, mask, ptr, true);
charge_cur_mm:
	if (unlikely(!mm))
		mm = &init_mm;
	return __mem_cgroup_try_charge(mm, mask, ptr, true);
}

#ifdef CONFIG_SWAP

int mem_cgroup_cache_charge_swapin(struct page *page,
			struct mm_struct *mm, gfp_t mask, bool locked)
{
	int ret = 0;

	if (mem_cgroup_subsys.disabled)
		return 0;
	if (unlikely(!mm))
		mm = &init_mm;
	if (!locked)
		lock_page(page);
	/*
	 * If not locked, the page can be dropped from SwapCache before
	 * we reach here.
	 */
	if (PageSwapCache(page)) {
		struct mem_cgroup *mem = NULL;
		swp_entry_t ent;

		ent.val = page_private(page);
		if (do_swap_account) {
			mem = lookup_swap_cgroup(ent);
			if (mem && mem->obsolete)
				mem = NULL;
			if (mem)
				mm = NULL;
		}
		ret = mem_cgroup_charge_common(page, mm, mask,
				MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);

		if (!ret && do_swap_account) {
			/* avoid double counting */
			mem = swap_cgroup_record(ent, NULL);
			if (mem) {
				res_counter_uncharge(&mem->memsw, PAGE_SIZE);
				mem_cgroup_put(mem);
			}
		}
	}
	if (!locked)
		unlock_page(page);
	/* add this page (page_cgroup) to the LRU we want. */
	mem_cgroup_lru_fixup(page);

	return ret;
}
#endif

void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
{
	struct page_cgroup *pc;

	if (mem_cgroup_subsys.disabled)
		return;
	if (!ptr)
		return;
	pc = lookup_page_cgroup(page);
	__mem_cgroup_commit_charge(ptr, pc, MEM_CGROUP_CHARGE_TYPE_MAPPED);
	/*
	 * Now the swap page is in memory, which means this page may be
	 * counted both as mem and swap: a double count. Fix it by
	 * uncharging from memsw. This SwapCache is stable because we're
	 * still under lock_page().
	 */
	if (do_swap_account) {
		swp_entry_t ent = {.val = page_private(page)};
		struct mem_cgroup *memcg;
		memcg = swap_cgroup_record(ent, NULL);
		if (memcg) {
			/* If memcg is obsolete, memcg can be != ptr */
			res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
			mem_cgroup_put(memcg);
		}
	}
	/* add this page (page_cgroup) to the LRU we want. */
	mem_cgroup_lru_fixup(page);
}

void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
{
	if (mem_cgroup_subsys.disabled)
		return;
	if (!mem)
		return;
	res_counter_uncharge(&mem->res, PAGE_SIZE);
	if (do_swap_account)
		res_counter_uncharge(&mem->memsw, PAGE_SIZE);
	css_put(&mem->css);
}
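
/*
 * Typical caller flow for the swap-in charge API above (an illustrative
 * sketch, not code from this file; the real caller is the fault path,
 * e.g. do_swap_page()):
 *
 *	struct mem_cgroup *ptr = NULL;
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_HIGHUSER_MOVABLE, &ptr))
 *		goto oom;	(charge failed)
 *	...lock the page and map it...
 *	mem_cgroup_commit_charge_swapin(page, ptr);	(on success)
 *	or, when the fault is aborted:
 *	mem_cgroup_cancel_charge_swapin(ptr);		(undo the charge)
 */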

/*
 * uncharge if !page_mapped(page)
 */
static struct mem_cgroup *
__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
{
	struct page_cgroup *pc;
	struct mem_cgroup *mem = NULL;
	struct mem_cgroup_per_zone *mz;

	if (mem_cgroup_subsys.disabled)
		return NULL;

	if (PageSwapCache(page))
		return NULL;

	/*
	 * Check if our page_cgroup is valid.
	 */
	pc = lookup_page_cgroup(page);
	if (unlikely(!pc || !PageCgroupUsed(pc)))
		return NULL;

	lock_page_cgroup(pc);

	mem = pc->mem_cgroup;

	if (!PageCgroupUsed(pc))
		goto unlock_out;

	switch (ctype) {
	case MEM_CGROUP_CHARGE_TYPE_MAPPED:
		if (page_mapped(page))
			goto unlock_out;
		break;
	case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
		if (!PageAnon(page)) {	/* Shared memory */
			if (page->mapping && !page_is_file_cache(page))
				goto unlock_out;
		} else if (page_mapped(page)) /* Anon */
			goto unlock_out;
		break;
	default:
		break;
	}

	res_counter_uncharge(&mem->res, PAGE_SIZE);
	if (do_swap_account && (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT))
		res_counter_uncharge(&mem->memsw, PAGE_SIZE);

	mem_cgroup_charge_statistics(mem, pc, false);
	ClearPageCgroupUsed(pc);

	mz = page_cgroup_zoneinfo(pc);
	unlock_page_cgroup(pc);

	css_put(&mem->css);

	return mem;

unlock_out:
	unlock_page_cgroup(pc);
	return NULL;
}

void mem_cgroup_uncharge_page(struct page *page)
{
	/* early check. */
	if (page_mapped(page))
		return;
	if (page->mapping && !PageAnon(page))
		return;
	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
}

void mem_cgroup_uncharge_cache_page(struct page *page)
{
	VM_BUG_ON(page_mapped(page));
	VM_BUG_ON(page->mapping);
	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
}

/*
 * Called from __delete_from_swap_cache(); drops the "page" account.
 * The memcg information is recorded in the swap_cgroup of "ent".
 */
void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
{
	struct mem_cgroup *memcg;

	memcg = __mem_cgroup_uncharge_common(page,
					MEM_CGROUP_CHARGE_TYPE_SWAPOUT);
	/* record memcg information */
	if (do_swap_account && memcg) {
		swap_cgroup_record(ent, memcg);
		mem_cgroup_get(memcg);
	}
}

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
/*
 * Called from swap_entry_free(); removes the record in swap_cgroup and
 * uncharges the "memsw" account.
 */
void mem_cgroup_uncharge_swap(swp_entry_t ent)
{
	struct mem_cgroup *memcg;

	if (!do_swap_account)
		return;

	memcg = swap_cgroup_record(ent, NULL);
	if (memcg) {
		res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
		mem_cgroup_put(memcg);
	}
}
#endif

/*
 * Before starting migration, account PAGE_SIZE to the mem_cgroup that the
 * old page belongs to.
 */
int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
{
	struct page_cgroup *pc;
	struct mem_cgroup *mem = NULL;
	int ret = 0;

	if (mem_cgroup_subsys.disabled)
		return 0;

	pc = lookup_page_cgroup(page);
	lock_page_cgroup(pc);
	if (PageCgroupUsed(pc)) {
		mem = pc->mem_cgroup;
		css_get(&mem->css);
	}
	unlock_page_cgroup(pc);

	if (mem) {
		ret = mem_cgroup_try_charge(NULL, GFP_HIGHUSER_MOVABLE, &mem);
		css_put(&mem->css);
	}
	*ptr = mem;
	return ret;
}

/* remove the redundant charge if migration failed */
void mem_cgroup_end_migration(struct mem_cgroup *mem,
		struct page *oldpage, struct page *newpage)
{
	struct page *target, *unused;
	struct page_cgroup *pc;
	enum charge_type ctype;

	if (!mem)
		return;

	/* at migration success, oldpage->mapping is NULL. */
	if (oldpage->mapping) {
		target = oldpage;
		unused = NULL;
	} else {
		target = newpage;
		unused = oldpage;
	}

	if (PageAnon(target))
		ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
	else if (page_is_file_cache(target))
		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
	else
		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;

	/* the unused page is not on the radix-tree now. */
	if (unused)
		__mem_cgroup_uncharge_common(unused, ctype);

	pc = lookup_page_cgroup(target);
	/*
	 * __mem_cgroup_commit_charge() checks the PCG_USED bit of the
	 * page_cgroup, so double-counting is effectively avoided.
	 */
	__mem_cgroup_commit_charge(mem, pc, ctype);

	/*
	 * Both oldpage and newpage are still under lock_page(), so we don't
	 * have to care about races in the radix-tree. But we have to be
	 * careful about whether this page is mapped or not.
	 *
	 * There is a case for !page_mapped(): at the start of migration,
	 * oldpage was mapped, but now it's zapped. We know the *target*
	 * page is not freed/reused under us, and
	 * mem_cgroup_uncharge_page() does all the necessary checks.
	 */
	if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
		mem_cgroup_uncharge_page(target);
}

/*
 * Try to shrink memory usage under the specified memory controller.
 * This is typically used to reclaim shmem pages, to reduce the side
 * effects of page allocation from shmem, which some mem_cgroups use.
 */
int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
{
	struct mem_cgroup *mem;
	int progress = 0;
	int retry = MEM_CGROUP_RECLAIM_RETRIES;

	if (mem_cgroup_subsys.disabled)
		return 0;
	if (!mm)
		return 0;

	rcu_read_lock();
	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (unlikely(!mem)) {
		rcu_read_unlock();
		return 0;
	}
	css_get(&mem->css);
	rcu_read_unlock();

	do {
		progress = try_to_free_mem_cgroup_pages(mem, gfp_mask, true);
		progress += res_counter_check_under_limit(&mem->res);
	} while (!progress && --retry);

	css_put(&mem->css);
	if (!retry)
		return -ENOMEM;
	return 0;
}

static DEFINE_MUTEX(set_limit_mutex);

static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
				   unsigned long long val)
{
	int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
	int progress;
	u64 memswlimit;
	int ret = 0;

	while (retry_count) {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		/*
		 * Rather than hiding this in some function, do it
		 * open-coded so it is clear what really happens here.
		 * We have to guarantee mem->res.limit <= mem->memsw.limit.
		 */
		mutex_lock(&set_limit_mutex);
		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
		if (memswlimit < val) {
			ret = -EINVAL;
			mutex_unlock(&set_limit_mutex);
			break;
		}
		ret = res_counter_set_limit(&memcg->res, val);
		mutex_unlock(&set_limit_mutex);

		if (!ret)
			break;

		progress = try_to_free_mem_cgroup_pages(memcg,
				GFP_HIGHUSER_MOVABLE, false);
		if (!progress)
			retry_count--;
	}
	return ret;
}

int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
				  unsigned long long val)
{
	int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
	u64 memlimit, oldusage, curusage;
	int ret;

	if (!do_swap_account)
		return -EINVAL;

	while (retry_count) {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		/*
		 * Rather than hiding this in some function, do it
		 * open-coded so it is clear what really happens here.
		 * We have to guarantee mem->res.limit <= mem->memsw.limit.
		 */
		mutex_lock(&set_limit_mutex);
		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
		if (memlimit > val) {
			ret = -EINVAL;
			mutex_unlock(&set_limit_mutex);
			break;
		}
		ret = res_counter_set_limit(&memcg->memsw, val);
		mutex_unlock(&set_limit_mutex);

		if (!ret)
			break;

		oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
		try_to_free_mem_cgroup_pages(memcg, GFP_HIGHUSER_MOVABLE, true);
		curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
		if (curusage >= oldusage)
			retry_count--;
	}
	return ret;
}
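
/*
 * Usage note for the two resize helpers above (a descriptive sketch): the
 * checks made under set_limit_mutex enforce res.limit <= memsw.limit, so
 * raising memory.limit_in_bytes beyond memory.memsw.limit_in_bytes returns
 * -EINVAL; the memsw limit has to be raised first.
 */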

/*
 * This routine traverses the page_cgroups on the given list and drops them
 * all. It does not reclaim the pages themselves; it only removes the
 * page_cgroups.
 */
static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
				int node, int zid, enum lru_list lru)
{
	struct zone *zone;
	struct mem_cgroup_per_zone *mz;
	struct page_cgroup *pc, *busy;
	unsigned long flags, loop;
	struct list_head *list;
	int ret = 0;

	zone = &NODE_DATA(node)->node_zones[zid];
	mz = mem_cgroup_zoneinfo(mem, node, zid);
	list = &mz->lists[lru];

	loop = MEM_CGROUP_ZSTAT(mz, lru);
	/* give some margin against EBUSY etc... */
	loop += 256;
	busy = NULL;
	while (loop--) {
		ret = 0;
		spin_lock_irqsave(&zone->lru_lock, flags);
		if (list_empty(list)) {
			spin_unlock_irqrestore(&zone->lru_lock, flags);
			break;
		}
		pc = list_entry(list->prev, struct page_cgroup, lru);
		if (busy == pc) {
			list_move(&pc->lru, list);
			busy = NULL;
			spin_unlock_irqrestore(&zone->lru_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&zone->lru_lock, flags);

		ret = mem_cgroup_move_parent(pc, mem, GFP_HIGHUSER_MOVABLE);
		if (ret == -ENOMEM)
			break;

		if (ret == -EBUSY || ret == -EINVAL) {
			/* found lock contention or "pc" is obsolete. */
			busy = pc;
			cond_resched();
		} else
			busy = NULL;
	}

	if (!ret && !list_empty(list))
		return -EBUSY;
	return ret;
}
1296
1297/*
1298 * make mem_cgroup's charge to be 0 if there is no task.
1299 * This enables deleting this mem_cgroup.
1300 */
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08001301static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
KAMEZAWA Hiroyukicc847582008-02-07 00:14:16 -08001302{
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08001303 int ret;
1304 int node, zid, shrink;
1305 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08001306 struct cgroup *cgrp = mem->css.cgroup;
Hugh Dickins8869b8f2008-03-04 14:29:09 -08001307
KAMEZAWA Hiroyukicc847582008-02-07 00:14:16 -08001308 css_get(&mem->css);
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08001309
1310 shrink = 0;
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08001311 /* should free all ? */
1312 if (free_all)
1313 goto try_to_free;
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08001314move_account:
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08001315 while (mem->res.usage > 0) {
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08001316 ret = -EBUSY;
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08001317 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
KAMEZAWA Hiroyukicc847582008-02-07 00:14:16 -08001318 goto out;
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08001319 ret = -EINTR;
1320 if (signal_pending(current))
1321 goto out;
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -07001322		/* Make sure all *used* pages are on an LRU list. */
1323 lru_add_drain_all();
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08001324 ret = 0;
1325 for_each_node_state(node, N_POSSIBLE) {
1326 for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
Christoph Lameterb69408e2008-10-18 20:26:14 -07001327 enum lru_list l;
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08001328 for_each_lru(l) {
1329 ret = mem_cgroup_force_empty_list(mem,
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08001330 node, zid, l);
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08001331 if (ret)
1332 break;
1333 }
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08001334 }
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08001335 if (ret)
1336 break;
1337 }
1338		/* it seems the parent cgroup doesn't have enough memory */
1339 if (ret == -ENOMEM)
1340 goto try_to_free;
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -07001341 cond_resched();
KAMEZAWA Hiroyukicc847582008-02-07 00:14:16 -08001342 }
1343 ret = 0;
1344out:
1345 css_put(&mem->css);
1346 return ret;
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08001347
1348try_to_free:
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08001349	/* return -EBUSY if there is a task, a child cgroup, or if we come here twice */
1350 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08001351 ret = -EBUSY;
1352 goto out;
1353 }
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08001354	/* we call try-to-free pages to make this cgroup empty */
1355 lru_add_drain_all();
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08001356 /* try to free all pages in this cgroup */
1357 shrink = 1;
1358 while (nr_retries && mem->res.usage > 0) {
1359 int progress;
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08001360
1361 if (signal_pending(current)) {
1362 ret = -EINTR;
1363 goto out;
1364 }
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08001365 progress = try_to_free_mem_cgroup_pages(mem,
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08001366 GFP_HIGHUSER_MOVABLE, false);
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08001367 if (!progress) {
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08001368 nr_retries--;
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08001369 /* maybe some writeback is necessary */
1370 congestion_wait(WRITE, HZ/10);
1371 }
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08001372
1373 }
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08001374 lru_add_drain();
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08001375	/* try move_account... there may be some *locked* pages. */
1376 if (mem->res.usage)
1377 goto move_account;
1378 ret = 0;
1379 goto out;
KAMEZAWA Hiroyukicc847582008-02-07 00:14:16 -08001380}
1381
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08001382int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
1383{
1384 return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
1385}
1386
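/*
 * Common read handler for the memory.* and memory.memsw.* files:
 * MEMFILE_TYPE()/MEMFILE_ATTR() unpack the counter type (_MEM or
 * _MEMSWAP) and the attribute encoded in cft->private, and the
 * matching res_counter field is returned.
 */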
Paul Menage2c3daa72008-04-29 00:59:58 -07001388static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
Balbir Singh8cdea7c2008-02-07 00:13:50 -08001389{
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08001390 struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
1391 u64 val = 0;
1392 int type, name;
1393
1394 type = MEMFILE_TYPE(cft->private);
1395 name = MEMFILE_ATTR(cft->private);
1396 switch (type) {
1397 case _MEM:
1398 val = res_counter_read_u64(&mem->res, name);
1399 break;
1400 case _MEMSWAP:
1401 if (do_swap_account)
1402 val = res_counter_read_u64(&mem->memsw, name);
1403 break;
1404 default:
1405 BUG();
1406 break;
1407 }
1408 return val;
Balbir Singh8cdea7c2008-02-07 00:13:50 -08001409}
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07001410/*
1411 * The only user of this function is the write handler for
1412 * RES_LIMIT.
1413 */
Paul Menage856c13a2008-07-25 01:47:04 -07001414static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
1415 const char *buffer)
Balbir Singh8cdea7c2008-02-07 00:13:50 -08001416{
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07001417 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08001418 int type, name;
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07001419 unsigned long long val;
1420 int ret;
1421
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08001422 type = MEMFILE_TYPE(cft->private);
1423 name = MEMFILE_ATTR(cft->private);
1424 switch (name) {
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07001425 case RES_LIMIT:
1426		/* this function does all the necessary parsing... reuse it */
1427 ret = res_counter_memparse_write_strategy(buffer, &val);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08001428 if (ret)
1429 break;
1430 if (type == _MEM)
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07001431 ret = mem_cgroup_resize_limit(memcg, val);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08001432 else
1433 ret = mem_cgroup_resize_memsw_limit(memcg, val);
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07001434 break;
1435 default:
1436 ret = -EINVAL; /* should be BUG() ? */
1437 break;
1438 }
1439 return ret;
Balbir Singh8cdea7c2008-02-07 00:13:50 -08001440}
1441
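/*
 * Trigger handler for max_usage_in_bytes and failcnt: resets either
 * the max-usage watermark or the failure counter of the res_counter
 * selected by the MEMFILE_* bits packed into the event code.
 */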
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07001442static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07001443{
1444 struct mem_cgroup *mem;
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08001445 int type, name;
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07001446
1447 mem = mem_cgroup_from_cont(cont);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08001448 type = MEMFILE_TYPE(event);
1449 name = MEMFILE_ATTR(event);
1450 switch (name) {
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07001451 case RES_MAX_USAGE:
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08001452 if (type == _MEM)
1453 res_counter_reset_max(&mem->res);
1454 else
1455 res_counter_reset_max(&mem->memsw);
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07001456 break;
1457 case RES_FAILCNT:
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08001458 if (type == _MEM)
1459 res_counter_reset_failcnt(&mem->res);
1460 else
1461 res_counter_reset_failcnt(&mem->memsw);
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07001462 break;
1463 }
Pavel Emelyanov85cc59d2008-04-29 01:00:20 -07001464 return 0;
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07001465}
1466
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08001467static const struct mem_cgroup_stat_desc {
1468 const char *msg;
1469 u64 unit;
1470} mem_cgroup_stat_desc[] = {
1471 [MEM_CGROUP_STAT_CACHE] = { "cache", PAGE_SIZE, },
1472 [MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, },
Balaji Rao55e462b2008-05-01 04:35:12 -07001473 [MEM_CGROUP_STAT_PGPGIN_COUNT] = {"pgpgin", 1, },
1474 [MEM_CGROUP_STAT_PGPGOUT_COUNT] = {"pgpgout", 1, },
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08001475};
1476
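/*
 * memory.stat read handler: dumps the per-cpu statistic counters,
 * scaled by their unit, followed by the per-LRU page counts
 * converted to bytes.
 */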
Paul Menagec64745c2008-04-29 01:00:02 -07001477static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
1478 struct cgroup_map_cb *cb)
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08001479{
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08001480 struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
1481 struct mem_cgroup_stat *stat = &mem_cont->stat;
1482 int i;
1483
1484 for (i = 0; i < ARRAY_SIZE(stat->cpustat[0].count); i++) {
1485 s64 val;
1486
1487 val = mem_cgroup_read_stat(stat, i);
1488 val *= mem_cgroup_stat_desc[i].unit;
Paul Menagec64745c2008-04-29 01:00:02 -07001489 cb->fill(cb, mem_cgroup_stat_desc[i].msg, val);
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08001490 }
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08001491 /* showing # of active pages */
1492 {
Rik van Riel4f98a2f2008-10-18 20:26:32 -07001493 unsigned long active_anon, inactive_anon;
1494 unsigned long active_file, inactive_file;
Lee Schermerhorn7b854122008-10-18 20:26:40 -07001495 unsigned long unevictable;
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08001496
Rik van Riel4f98a2f2008-10-18 20:26:32 -07001497 inactive_anon = mem_cgroup_get_all_zonestat(mem_cont,
1498 LRU_INACTIVE_ANON);
1499 active_anon = mem_cgroup_get_all_zonestat(mem_cont,
1500 LRU_ACTIVE_ANON);
1501 inactive_file = mem_cgroup_get_all_zonestat(mem_cont,
1502 LRU_INACTIVE_FILE);
1503 active_file = mem_cgroup_get_all_zonestat(mem_cont,
1504 LRU_ACTIVE_FILE);
Lee Schermerhorn7b854122008-10-18 20:26:40 -07001505 unevictable = mem_cgroup_get_all_zonestat(mem_cont,
1506 LRU_UNEVICTABLE);
1507
Rik van Riel4f98a2f2008-10-18 20:26:32 -07001508 cb->fill(cb, "active_anon", (active_anon) * PAGE_SIZE);
1509 cb->fill(cb, "inactive_anon", (inactive_anon) * PAGE_SIZE);
1510 cb->fill(cb, "active_file", (active_file) * PAGE_SIZE);
1511 cb->fill(cb, "inactive_file", (inactive_file) * PAGE_SIZE);
Lee Schermerhorn7b854122008-10-18 20:26:40 -07001512 cb->fill(cb, "unevictable", unevictable * PAGE_SIZE);
1513
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08001514 }
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08001515 return 0;
1516}
1517
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08001518
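/* control files created in every memory cgroup's directory */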
Balbir Singh8cdea7c2008-02-07 00:13:50 -08001519static struct cftype mem_cgroup_files[] = {
1520 {
Balbir Singh0eea1032008-02-07 00:13:57 -08001521 .name = "usage_in_bytes",
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08001522 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
Paul Menage2c3daa72008-04-29 00:59:58 -07001523 .read_u64 = mem_cgroup_read,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08001524 },
1525 {
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07001526 .name = "max_usage_in_bytes",
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08001527 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07001528 .trigger = mem_cgroup_reset,
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07001529 .read_u64 = mem_cgroup_read,
1530 },
1531 {
Balbir Singh0eea1032008-02-07 00:13:57 -08001532 .name = "limit_in_bytes",
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08001533 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
Paul Menage856c13a2008-07-25 01:47:04 -07001534 .write_string = mem_cgroup_write,
Paul Menage2c3daa72008-04-29 00:59:58 -07001535 .read_u64 = mem_cgroup_read,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08001536 },
1537 {
1538 .name = "failcnt",
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08001539 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07001540 .trigger = mem_cgroup_reset,
Paul Menage2c3daa72008-04-29 00:59:58 -07001541 .read_u64 = mem_cgroup_read,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08001542 },
Balbir Singh8697d332008-02-07 00:13:59 -08001543 {
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08001544 .name = "stat",
Paul Menagec64745c2008-04-29 01:00:02 -07001545 .read_map = mem_control_stat_show,
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08001546 },
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08001547 {
1548 .name = "force_empty",
1549 .trigger = mem_cgroup_force_empty_write,
1550 },
Balbir Singh8cdea7c2008-02-07 00:13:50 -08001551};
1552
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08001553#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
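/*
 * mem+swap counterparts of the control files above, backed by the
 * memsw res_counter instead of the plain memory one.
 */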
1554static struct cftype memsw_cgroup_files[] = {
1555 {
1556 .name = "memsw.usage_in_bytes",
1557 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
1558 .read_u64 = mem_cgroup_read,
1559 },
1560 {
1561 .name = "memsw.max_usage_in_bytes",
1562 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
1563 .trigger = mem_cgroup_reset,
1564 .read_u64 = mem_cgroup_read,
1565 },
1566 {
1567 .name = "memsw.limit_in_bytes",
1568 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
1569 .write_string = mem_cgroup_write,
1570 .read_u64 = mem_cgroup_read,
1571 },
1572 {
1573 .name = "memsw.failcnt",
1574 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
1575 .trigger = mem_cgroup_reset,
1576 .read_u64 = mem_cgroup_read,
1577 },
1578};
1579
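/* the memsw.* files exist only when swap accounting is active */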
1580static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
1581{
1582 if (!do_swap_account)
1583 return 0;
1584 return cgroup_add_files(cont, ss, memsw_cgroup_files,
1585 ARRAY_SIZE(memsw_cgroup_files));
1586};
1587#else
1588static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
1589{
1590 return 0;
1591}
1592#endif
1593
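/*
 * Allocate the per-node bookkeeping for one node and initialize the
 * per-zone LRU lists it contains; called for each possible node when
 * a mem_cgroup is created.
 */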
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08001594static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
1595{
1596 struct mem_cgroup_per_node *pn;
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08001597 struct mem_cgroup_per_zone *mz;
Christoph Lameterb69408e2008-10-18 20:26:14 -07001598 enum lru_list l;
KAMEZAWA Hiroyuki41e33552008-04-08 17:41:54 -07001599 int zone, tmp = node;
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08001600 /*
1601	 * This routine is called against possible nodes, but it is a
1602	 * BUG to call kmalloc() against an offline node.
1603	 *
1604	 * TODO: this routine can waste a lot of memory for nodes which
1605	 * will never be onlined. It would be better to use a memory
1606	 * hotplug callback function.
1607 */
KAMEZAWA Hiroyuki41e33552008-04-08 17:41:54 -07001608 if (!node_state(node, N_NORMAL_MEMORY))
1609 tmp = -1;
1610 pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08001611 if (!pn)
1612 return 1;
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08001613
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08001614 mem->info.nodeinfo[node] = pn;
1615 memset(pn, 0, sizeof(*pn));
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08001616
1617 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
1618 mz = &pn->zoneinfo[zone];
Christoph Lameterb69408e2008-10-18 20:26:14 -07001619 for_each_lru(l)
1620 INIT_LIST_HEAD(&mz->lists[l]);
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08001621 }
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08001622 return 0;
1623}
1624
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08001625static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
1626{
1627 kfree(mem->info.nodeinfo[node]);
1628}
1629
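/*
 * A mem_cgroup is allocated with its per-cpu statistics appended to
 * it: one struct mem_cgroup_stat_cpu for each possible cpu id.
 */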
Jan Blunckc8dad2b2009-01-07 18:07:53 -08001630static int mem_cgroup_size(void)
1631{
1632 int cpustat_size = nr_cpu_ids * sizeof(struct mem_cgroup_stat_cpu);
1633 return sizeof(struct mem_cgroup) + cpustat_size;
1634}
1635
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07001636static struct mem_cgroup *mem_cgroup_alloc(void)
1637{
1638 struct mem_cgroup *mem;
Jan Blunckc8dad2b2009-01-07 18:07:53 -08001639 int size = mem_cgroup_size();
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07001640
Jan Blunckc8dad2b2009-01-07 18:07:53 -08001641 if (size < PAGE_SIZE)
1642 mem = kmalloc(size, GFP_KERNEL);
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07001643 else
Jan Blunckc8dad2b2009-01-07 18:07:53 -08001644 mem = vmalloc(size);
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07001645
1646 if (mem)
Jan Blunckc8dad2b2009-01-07 18:07:53 -08001647 memset(mem, 0, size);
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07001648 return mem;
1649}
1650
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08001651/*
1652 * At destroying mem_cgroup, references from swap_cgroup can remain.
1653 * (scanning all at force_empty is too costly...)
1654 *
1655 * Instead of clearing all references at force_empty, we remember
1656 * the number of references from swap_cgroup and free the mem_cgroup
1657 * when it goes down to 0.
1658 *
1659 * When the mem_cgroup is destroyed, mem->obsolete is set to 1 and
1660 * any swap entry which points to this memcg will be ignored at swapin.
1661 *
1662 * Removal of cgroup itself succeeds regardless of refs from swap.
1663 */
1664
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07001665static void mem_cgroup_free(struct mem_cgroup *mem)
1666{
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08001667 int node;
1668
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08001669 if (atomic_read(&mem->refcnt) > 0)
1670 return;
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08001671
1673 for_each_node_state(node, N_POSSIBLE)
1674 free_mem_cgroup_per_zone_info(mem, node);
1675
Jan Blunckc8dad2b2009-01-07 18:07:53 -08001676 if (mem_cgroup_size() < PAGE_SIZE)
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07001677 kfree(mem);
1678 else
1679 vfree(mem);
1680}
1681
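/*
 * Reference counting for the lazy-free scheme above: the final put
 * actually frees the mem_cgroup only after pre_destroy has marked
 * it obsolete.
 */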
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08001682static void mem_cgroup_get(struct mem_cgroup *mem)
1683{
1684 atomic_inc(&mem->refcnt);
1685}
1686
1687static void mem_cgroup_put(struct mem_cgroup *mem)
1688{
1689 if (atomic_dec_and_test(&mem->refcnt)) {
1690 if (!mem->obsolete)
1691 return;
1692 mem_cgroup_free(mem);
1693 }
1694}
1695
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07001696
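/*
 * Swap accounting is compiled in but must still be activated at boot;
 * really_do_swap_account reflects the "noswapaccount" boot option
 * handled at the bottom of this file.
 */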
KAMEZAWA Hiroyukic0777192009-01-07 18:07:57 -08001697#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
1698static void __init enable_swap_cgroup(void)
1699{
1700 if (!mem_cgroup_subsys.disabled && really_do_swap_account)
1701 do_swap_account = 1;
1702}
1703#else
1704static void __init enable_swap_cgroup(void)
1705{
1706}
1707#endif
1708
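/*
 * Create callback: allocates the mem_cgroup, initializes the memory
 * and mem+swap res_counters and the per-node zone info. Creating the
 * root cgroup also decides whether swap accounting is enabled.
 */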
Balbir Singh8cdea7c2008-02-07 00:13:50 -08001709static struct cgroup_subsys_state *
1710mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
1711{
1712 struct mem_cgroup *mem;
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08001713 int node;
Balbir Singh8cdea7c2008-02-07 00:13:50 -08001714
Jan Blunckc8dad2b2009-01-07 18:07:53 -08001715 mem = mem_cgroup_alloc();
1716 if (!mem)
1717 return ERR_PTR(-ENOMEM);
Pavel Emelianov78fb7462008-02-07 00:13:51 -08001718
Balbir Singh8cdea7c2008-02-07 00:13:50 -08001719 res_counter_init(&mem->res);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08001720 res_counter_init(&mem->memsw);
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08001721
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08001722 for_each_node_state(node, N_POSSIBLE)
1723 if (alloc_mem_cgroup_per_zone_info(mem, node))
1724 goto free_out;
KAMEZAWA Hiroyukic0777192009-01-07 18:07:57 -08001725 /* root ? */
1726 if (cont->parent == NULL)
1727 enable_swap_cgroup();
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08001728
Balbir Singh8cdea7c2008-02-07 00:13:50 -08001729 return &mem->css;
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08001730free_out:
1731 for_each_node_state(node, N_POSSIBLE)
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08001732 free_mem_cgroup_per_zone_info(mem, node);
Jan Blunckc8dad2b2009-01-07 18:07:53 -08001733 mem_cgroup_free(mem);
Li Zefan2dda81c2008-02-23 15:24:14 -08001734 return ERR_PTR(-ENOMEM);
Balbir Singh8cdea7c2008-02-07 00:13:50 -08001735}
1736
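/*
 * pre_destroy callback: mark the group obsolete for the swap
 * references described above, then move or free its remaining
 * charges via mem_cgroup_force_empty().
 */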
KAMEZAWA Hiroyukidf878fb2008-02-07 00:14:28 -08001737static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
1738 struct cgroup *cont)
1739{
1740 struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08001741 mem->obsolete = 1;
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08001742 mem_cgroup_force_empty(mem, false);
KAMEZAWA Hiroyukidf878fb2008-02-07 00:14:28 -08001743}
1744
Balbir Singh8cdea7c2008-02-07 00:13:50 -08001745static void mem_cgroup_destroy(struct cgroup_subsys *ss,
1746 struct cgroup *cont)
1747{
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07001748 mem_cgroup_free(mem_cgroup_from_cont(cont));
Balbir Singh8cdea7c2008-02-07 00:13:50 -08001749}
1750
1751static int mem_cgroup_populate(struct cgroup_subsys *ss,
1752 struct cgroup *cont)
1753{
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08001754 int ret;
1755
1756 ret = cgroup_add_files(cont, ss, mem_cgroup_files,
1757 ARRAY_SIZE(mem_cgroup_files));
1758
1759 if (!ret)
1760 ret = register_memsw_files(cont, ss);
1761 return ret;
Balbir Singh8cdea7c2008-02-07 00:13:50 -08001762}
1763
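/*
 * Attach callback: only thread group leaders may be moved, and no
 * charges are transferred here yet, so this is effectively just a
 * policy check.
 */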
Balbir Singh67e465a2008-02-07 00:13:54 -08001764static void mem_cgroup_move_task(struct cgroup_subsys *ss,
1765 struct cgroup *cont,
1766 struct cgroup *old_cont,
1767 struct task_struct *p)
1768{
1769 struct mm_struct *mm;
1770 struct mem_cgroup *mem, *old_mem;
1771
1772 mm = get_task_mm(p);
1773 if (mm == NULL)
1774 return;
1775
1776 mem = mem_cgroup_from_cont(cont);
1777 old_mem = mem_cgroup_from_cont(old_cont);
1778
Balbir Singh67e465a2008-02-07 00:13:54 -08001779 /*
1780 * Only thread group leaders are allowed to migrate, the mm_struct is
1781 * in effect owned by the leader
1782 */
Pavel Emelyanov52ea27e2008-03-19 17:00:45 -07001783 if (!thread_group_leader(p))
Balbir Singh67e465a2008-02-07 00:13:54 -08001784 goto out;
1785
Balbir Singh67e465a2008-02-07 00:13:54 -08001786out:
1787 mmput(mm);
Balbir Singh67e465a2008-02-07 00:13:54 -08001788}
1789
Balbir Singh8cdea7c2008-02-07 00:13:50 -08001790struct cgroup_subsys mem_cgroup_subsys = {
1791 .name = "memory",
1792 .subsys_id = mem_cgroup_subsys_id,
1793 .create = mem_cgroup_create,
KAMEZAWA Hiroyukidf878fb2008-02-07 00:14:28 -08001794 .pre_destroy = mem_cgroup_pre_destroy,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08001795 .destroy = mem_cgroup_destroy,
1796 .populate = mem_cgroup_populate,
Balbir Singh67e465a2008-02-07 00:13:54 -08001797 .attach = mem_cgroup_move_task,
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08001798 .early_init = 0,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08001799};
KAMEZAWA Hiroyukic0777192009-01-07 18:07:57 -08001800
1801#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
1802
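/* "noswapaccount" on the kernel command line disables swap accounting */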
1803static int __init disable_swap_account(char *s)
1804{
1805 really_do_swap_account = 0;
1806 return 1;
1807}
1808__setup("noswapaccount", disable_swap_account);
1809#endif