/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * Native page reclaim
 * Charge lifetime sanitation
 * Lockless page tracking & accounting
 * Unified hierarchy configuration model
 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmpressure.h>
#include <linux/mm_inline.h>
#include <linux/swap_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/file.h>
#include <linux/tracehook.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include "slab.h"

#include <asm/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

struct mem_cgroup *root_mem_cgroup __read_mostly;

#define MEM_CGROUP_RECLAIM_RETRIES	5

/* Socket memory accounting disabled? */
static bool cgroup_memory_nosocket;

/* Kernel memory accounting disabled? */
static bool cgroup_memory_nokmem;

/* Whether the swap controller is active */
#ifdef CONFIG_MEMCG_SWAP
int do_swap_account __read_mostly;
#else
#define do_swap_account		0
#endif

/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && do_swap_account;
}

static const char * const mem_cgroup_stat_names[] = {
	"cache",
	"rss",
	"rss_huge",
	"mapped_file",
	"dirty",
	"writeback",
	"swap",
};

static const char * const mem_cgroup_events_names[] = {
	"pgpgin",
	"pgpgout",
	"pgfault",
	"pgmajfault",
};

static const char * const mem_cgroup_lru_names[] = {
	"inactive_anon",
	"active_anon",
	"inactive_file",
	"active_file",
	"unevictable",
};

#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024
#define NUMAINFO_EVENTS_TARGET	1024

/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation
 */

struct mem_cgroup_tree_per_zone {
	struct rb_root rb_root;
	spinlock_t lock;
};

struct mem_cgroup_tree_per_node {
	struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

/*
 * cgroup_event represents events which userspace wants to receive.
 */
struct mem_cgroup_event {
	/*
	 * memcg which the event belongs to.
	 */
	struct mem_cgroup *memcg;
	/*
	 * eventfd to signal userspace about the event.
	 */
	struct eventfd_ctx *eventfd;
	/*
	 * Each of these stored in a list by the cgroup.
	 */
	struct list_head list;
	/*
	 * register_event() callback will be used to add a new userspace
	 * waiter for changes related to this event.  Use eventfd_signal()
	 * on eventfd to send notification to userspace.
	 */
	int (*register_event)(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args);
	/*
	 * unregister_event() callback will be called when userspace closes
	 * the eventfd or when the cgroup is removed.  This callback must be
	 * set if you want to provide notification functionality.
	 */
	void (*unregister_event)(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd);
	/*
	 * All fields below are needed to unregister the event when
	 * userspace closes the eventfd.
	 */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_t wait;
	struct work_struct remove;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);

/* Stuff for moving charges at task migration. */
/*
 * Types of charges to be moved.
 */
#define MOVE_ANON	0x1U
#define MOVE_FILE	0x2U
#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t	  lock; /* for from, to */
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long flags;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_ANON,
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
	NR_CHARGE_TYPE,
};

/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_OOM_TYPE,
	_KMEM,
	_TCP,
};

#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
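/*
 * Illustrative example (not used elsewhere in this file):
 * MEMFILE_PRIVATE(_MEMSWAP, 3) packs the res_type into the upper 16 bits
 * and the attribute into the lower 16 bits, so MEMFILE_TYPE() recovers
 * _MEMSWAP and MEMFILE_ATTR() recovers 3 from the packed value.
 */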
/* Used for OOM notifier */
#define OOM_CONTROL		(0)

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
{
	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

#ifndef CONFIG_SLOB
/*
 * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
 * The main reason for not using the cgroup id for this:
 *  this works better in sparse environments, where we have a lot of memcgs,
 *  but only a few kmem-limited. Or also, if we have, for instance, 200
 *  memcgs, and none but the 200th is kmem-limited, we'd have to have a
 *  200 entry array for that.
 *
 * The current size of the caches array is stored in memcg_nr_cache_ids. It
 * will double each time we have to increase it.
 */
static DEFINE_IDA(memcg_cache_ida);
int memcg_nr_cache_ids;

/* Protects memcg_nr_cache_ids */
static DECLARE_RWSEM(memcg_cache_ids_sem);

void memcg_get_cache_ids(void)
{
	down_read(&memcg_cache_ids_sem);
}

void memcg_put_cache_ids(void)
{
	up_read(&memcg_cache_ids_sem);
}

/*
 * MIN_SIZE is different from 1, because we would like to avoid going through
 * the alloc/free process all the time. In a small machine, 4 kmem-limited
 * cgroups is a reasonable guess. In the future, it could be a parameter or
 * tunable, but that is strictly not necessary.
 *
 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
 * this constant directly from cgroup, but it is understandable that this is
 * better kept as an internal representation in cgroup.c. In any case, the
 * cgrp_id space is not getting any smaller, and we don't have to necessarily
 * increase ours as well if it increases.
 */
#define MEMCG_CACHES_MIN_SIZE 4
#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX

/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
 * conditional to this static branch, we'll have to allow modules that do
 * kmem_cache_alloc and the like to see this symbol as well.
 */
DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
EXPORT_SYMBOL(memcg_kmem_enabled_key);

#endif /* !CONFIG_SLOB */

static struct mem_cgroup_per_zone *
mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone)
{
	int nid = zone_to_nid(zone);
	int zid = zone_idx(zone);

	return &memcg->nodeinfo[nid]->zoneinfo[zid];
}

/**
 * mem_cgroup_css_from_page - css of the memcg associated with a page
 * @page: page of interest
 *
 * If memcg is bound to the default hierarchy, css of the memcg associated
 * with @page is returned.  The returned css remains associated with @page
 * until it is released.
 *
 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 * is returned.
 */
struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
{
	struct mem_cgroup *memcg;

	memcg = page->mem_cgroup;

	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memcg = root_mem_cgroup;

	return &memcg->css;
}

/**
 * page_cgroup_ino - return inode number of the memcg a page is charged to
 * @page: the page
 *
 * Look up the closest online ancestor of the memory cgroup @page is charged to
 * and return its inode number or 0 if @page is not charged to any cgroup. It
 * is safe to call this function without holding a reference to @page.
 *
 * Note, this function is inherently racy, because there is nothing to prevent
 * the cgroup inode from getting torn down and potentially reallocated a moment
 * after page_cgroup_ino() returns, so it only should be used by callers that
 * do not care (such as procfs interfaces).
 */
ino_t page_cgroup_ino(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long ino = 0;

	rcu_read_lock();
	memcg = READ_ONCE(page->mem_cgroup);
	while (memcg && !(memcg->css.flags & CSS_ONLINE))
		memcg = parent_mem_cgroup(memcg);
	if (memcg)
		ino = cgroup_ino(memcg->css.cgroup);
	rcu_read_unlock();
	return ino;
}

static struct mem_cgroup_per_zone *
mem_cgroup_page_zoneinfo(struct mem_cgroup *memcg, struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return &memcg->nodeinfo[nid]->zoneinfo[zid];
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_node_zone(int nid, int zid)
{
	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_zone *mz,
					 struct mem_cgroup_tree_per_zone *mctz,
					 unsigned long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_zone *mz_node;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
				   tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess)
			p = &(*p)->rb_left;
		/*
		 * We can't avoid mem cgroups that are over their soft
		 * limit by the same amount
		 */
		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
			p = &(*p)->rb_right;
	}
	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}

static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,
					 struct mem_cgroup_tree_per_zone *mctz)
{
	if (!mz->on_tree)
		return;
	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,
				       struct mem_cgroup_tree_per_zone *mctz)
{
	unsigned long flags;

	spin_lock_irqsave(&mctz->lock, flags);
	__mem_cgroup_remove_exceeded(mz, mctz);
	spin_unlock_irqrestore(&mctz->lock, flags);
}

static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{
	unsigned long nr_pages = page_counter_read(&memcg->memory);
	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
	unsigned long excess = 0;

	if (nr_pages > soft_limit)
		excess = nr_pages - soft_limit;

	return excess;
}

static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
	unsigned long excess;
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup_tree_per_zone *mctz;

	mctz = soft_limit_tree_from_page(page);
	/*
	 * Necessary to update all ancestors when hierarchy is used,
	 * because their event counter is not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = mem_cgroup_page_zoneinfo(memcg, page);
		excess = soft_limit_excess(memcg);
		/*
		 * We have to update the tree if mz is on RB-tree or
		 * mem is over its softlimit.
		 */
		if (excess || mz->on_tree) {
			unsigned long flags;

			spin_lock_irqsave(&mctz->lock, flags);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(mz, mctz, excess);
			spin_unlock_irqrestore(&mctz->lock, flags);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
	struct mem_cgroup_tree_per_zone *mctz;
	struct mem_cgroup_per_zone *mz;
	int nid, zid;

	for_each_node(nid) {
		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
			mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
			mctz = soft_limit_tree_node_zone(nid, zid);
			mem_cgroup_remove_exceeded(mz, mctz);
		}
	}
}

static struct mem_cgroup_per_zone *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct rb_node *rightmost = NULL;
	struct mem_cgroup_per_zone *mz;

retry:
	mz = NULL;
	rightmost = rb_last(&mctz->rb_root);
	if (!rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
	/*
	 * Remove the node now but someone else can add it back;
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz, mctz);
	if (!soft_limit_excess(mz->memcg) ||
	    !css_tryget_online(&mz->memcg->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_zone *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct mem_cgroup_per_zone *mz;

	spin_lock_irq(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock_irq(&mctz->lock);
	return mz;
}

/*
 * Return the page count for a single (non-recursive) @memcg.
 *
 * Implementation note: reading percpu statistics for memcg.
 *
 * Both vmstat[] and percpu_counter use thresholds and periodic
 * synchronization to implement "quick" reads.  There is a trade-off between
 * reading cost and precision of the value, so we may eventually implement
 * a periodic synchronization of the counters in memcg as well.
 *
 * But this _read() function is currently used for the user interface.  Users
 * account memory through memory cgroups and _always_ require an exact value.
 * Even with a quick-and-fuzzy read, we would still have to visit all online
 * cpus and compute the sum, so for now the extra synchronization is not
 * implemented (it is only implemented for cpu hotplug).
 *
 * If kernel-internal users appear that can make use of a not-exact value,
 * and reading all cpu values becomes a performance bottleneck in some
 * common workload, thresholds and synchronization as in vmstat[] should be
 * implemented.
 */
static unsigned long
mem_cgroup_read_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx)
{
	long val = 0;
	int cpu;

	/* Per-cpu values can be negative, use a signed accumulator */
	for_each_possible_cpu(cpu)
		val += per_cpu(memcg->stat->count[idx], cpu);
	/*
	 * Summing races with updates, so val may be negative.  Avoid exposing
	 * transient negative values.
	 */
	if (val < 0)
		val = 0;
	return val;
}

static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
					    enum mem_cgroup_events_index idx)
{
	unsigned long val = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		val += per_cpu(memcg->stat->events[idx], cpu);
	return val;
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
					 struct page *page,
					 bool compound, int nr_pages)
{
	/*
	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
	 * counted as CACHE even if it's on ANON LRU.
	 */
	if (PageAnon(page))
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
			       nr_pages);
	else
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
			       nr_pages);

	if (compound) {
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
			       nr_pages);
	}

	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
	else {
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
}

static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
						  int nid,
						  unsigned int lru_mask)
{
	unsigned long nr = 0;
	int zid;

	VM_BUG_ON((unsigned)nid >= nr_node_ids);

	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		struct mem_cgroup_per_zone *mz;
		enum lru_list lru;

		for_each_lru(lru) {
			if (!(BIT(lru) & lru_mask))
				continue;
			mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
			nr += mz->lru_size[lru];
		}
	}
	return nr;
}

static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
					     unsigned int lru_mask)
{
	unsigned long nr = 0;
	int nid;

	for_each_node_state(nid, N_MEMORY)
		nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
	return nr;
}

static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				       enum mem_cgroup_events_target target)
{
	unsigned long val, next;

	val = __this_cpu_read(memcg->stat->nr_page_events);
	next = __this_cpu_read(memcg->stat->targets[target]);
	/* from time_after() in jiffies.h */
	if ((long)next - (long)val < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_SOFTLIMIT:
			next = val + SOFTLIMIT_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_NUMAINFO:
			next = val + NUMAINFO_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->stat->targets[target], next);
		return true;
	}
	return false;
}

/*
 * Check events in order.
 */
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
{
	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
		bool do_softlimit;
		bool do_numainfo __maybe_unused;

		do_softlimit = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_SOFTLIMIT);
#if MAX_NUMNODES > 1
		do_numainfo = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_NUMAINFO);
#endif
		mem_cgroup_threshold(memcg);
		if (unlikely(do_softlimit))
			mem_cgroup_update_tree(memcg, page);
#if MAX_NUMNODES > 1
		if (unlikely(do_numainfo))
			atomic_inc(&memcg->numainfo_events);
#endif
	}
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
}
EXPORT_SYMBOL(mem_cgroup_from_task);

static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg = NULL;

	rcu_read_lock();
	do {
		/*
		 * Page cache insertions can happen without an
		 * actual mm context, e.g. during disk probing
		 * on boot, loopback IO, acct() writes etc.
		 */
		if (unlikely(!mm))
			memcg = root_mem_cgroup;
		else {
			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
			if (unlikely(!memcg))
				memcg = root_mem_cgroup;
		}
	} while (!css_tryget_online(&memcg->css));
	rcu_read_unlock();
	return memcg;
}

/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a zone and a priority level in @reclaim to
 * divide up the memcgs in the hierarchy among all concurrent
 * reclaimers operating on the same zone and priority.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
	struct cgroup_subsys_state *css = NULL;
	struct mem_cgroup *memcg = NULL;
	struct mem_cgroup *pos = NULL;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	if (prev && !reclaim)
		pos = prev;

	if (!root->use_hierarchy && root != root_mem_cgroup) {
		if (prev)
			goto out;
		return root;
	}

	rcu_read_lock();

	if (reclaim) {
		struct mem_cgroup_per_zone *mz;

		mz = mem_cgroup_zone_zoneinfo(root, reclaim->zone);
		iter = &mz->iter[reclaim->priority];

		if (prev && reclaim->generation != iter->generation)
			goto out_unlock;

		while (1) {
			pos = READ_ONCE(iter->position);
			if (!pos || css_tryget(&pos->css))
				break;
			/*
			 * css reference reached zero, so iter->position will
			 * be cleared by ->css_released. However, we should not
			 * rely on this happening soon, because ->css_released
			 * is called from a work queue, and by busy-waiting we
			 * might block it. So we clear iter->position right
			 * away.
			 */
			(void)cmpxchg(&iter->position, pos, NULL);
		}
	}

	if (pos)
		css = &pos->css;

	for (;;) {
		css = css_next_descendant_pre(css, &root->css);
		if (!css) {
			/*
			 * Reclaimers share the hierarchy walk, and a
			 * new one might jump in right at the end of
			 * the hierarchy - make sure they see at least
			 * one group and restart from the beginning.
			 */
			if (!prev)
				continue;
			break;
		}

		/*
		 * Verify the css and acquire a reference.  The root
		 * is provided by the caller, so we know it's alive
		 * and kicking, and don't take an extra reference.
		 */
		memcg = mem_cgroup_from_css(css);

		if (css == &root->css)
			break;

		if (css_tryget(css))
			break;

		memcg = NULL;
	}

	if (reclaim) {
		/*
		 * The position could have already been updated by a competing
		 * thread, so check that the value hasn't changed since we read
		 * it to avoid reclaiming from the same cgroup twice.
		 */
		(void)cmpxchg(&iter->position, pos, memcg);

		if (pos)
			css_put(&pos->css);

		if (!memcg)
			iter->generation++;
		else if (!prev)
			reclaim->generation = iter->generation;
	}

out_unlock:
	rcu_read_unlock();
out:
	if (prev && prev != root)
		css_put(&prev->css);

	return memcg;
}

/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}

static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup *memcg = dead_memcg;
	struct mem_cgroup_reclaim_iter *iter;
	struct mem_cgroup_per_zone *mz;
	int nid, zid;
	int i;

	while ((memcg = parent_mem_cgroup(memcg))) {
		for_each_node(nid) {
			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
				mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
				for (i = 0; i <= DEF_PRIORITY; i++) {
					iter = &mz->iter[i];
					cmpxchg(&iter->position,
						dead_memcg, NULL);
				}
			}
		}
	}
}

/*
 * Iteration constructs for visiting all cgroups (under a tree).  If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)			\
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(NULL, iter, NULL))
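/*
 * Illustrative sketch (not part of the original code) of the iteration
 * protocol described above: a walk that bails out early must hand the
 * last returned memcg to mem_cgroup_iter_break() so its css reference
 * is dropped.  should_stop() stands in for caller-specific logic:
 *
 *	for_each_mem_cgroup_tree(iter, root) {
 *		if (should_stop(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */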

/**
 * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
 * @zone: zone of the wanted lruvec
 * @memcg: memcg of the wanted lruvec
 *
 * Returns the lru list vector holding pages for the given @zone and
 * @memcg. This can be the global zone lruvec, if the memory controller
 * is disabled.
 */
struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
				      struct mem_cgroup *memcg)
{
	struct mem_cgroup_per_zone *mz;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &zone->lruvec;
		goto out;
	}

	mz = mem_cgroup_zone_zoneinfo(memcg, zone);
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->zone here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->zone != zone))
		lruvec->zone = zone;
	return lruvec;
}

/**
 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
 * @page: the page
 * @zone: zone of the page
 *
 * This function is only safe when following the LRU page isolation
 * and putback protocol: the LRU lock must be held, and the page must
 * either be PageLRU() or the caller must have isolated/allocated it.
 */
struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
{
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &zone->lruvec;
		goto out;
	}

	memcg = page->mem_cgroup;
	/*
	 * Swapcache readahead pages are added to the LRU - and
	 * possibly migrated - before they are charged.
	 */
	if (!memcg)
		memcg = root_mem_cgroup;

	mz = mem_cgroup_page_zoneinfo(memcg, page);
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->zone here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->zone != zone))
		lruvec->zone = zone;
	return lruvec;
}

/**
 * mem_cgroup_update_lru_size - account for adding or removing an lru page
 * @lruvec: mem_cgroup per zone lru vector
 * @lru: index of lru list the page is sitting on
 * @nr_pages: positive when adding or negative when removing
 *
 * This function must be called when a page is added to or removed from an
 * lru list.
 */
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int nr_pages)
{
	struct mem_cgroup_per_zone *mz;
	unsigned long *lru_size;

	if (mem_cgroup_disabled())
		return;

	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
	lru_size = mz->lru_size + lru;
	*lru_size += nr_pages;
	VM_BUG_ON((long)(*lru_size) < 0);
}
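/*
 * Illustrative usage (sketch, not from the original file): when adding a
 * page to an lru list, callers typically pass the page's size in pages,
 * e.g. mem_cgroup_update_lru_size(lruvec, lru, hpage_nr_pages(page)),
 * and the negated count when removing it.
 */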

bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	struct task_struct *p;
	bool ret;

	p = find_lock_task_mm(task);
	if (p) {
		task_memcg = get_mem_cgroup_from_mm(p->mm);
		task_unlock(p);
	} else {
		/*
		 * All threads may have already detached their mm's, but the oom
		 * killer still needs to detect if they have already been oom
		 * killed to prevent needlessly killing additional tasks.
		 */
		rcu_read_lock();
		task_memcg = mem_cgroup_from_task(task);
		css_get(&task_memcg->css);
		rcu_read_unlock();
	}
	ret = mem_cgroup_is_descendant(task_memcg, memcg);
	css_put(&task_memcg->css);
	return ret;
}

/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @memcg: the memory cgroup
 *
 * Returns the maximum amount of memory @memcg can be charged with, in
 * pages.
 */
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
{
	unsigned long margin = 0;
	unsigned long count;
	unsigned long limit;

	count = page_counter_read(&memcg->memory);
	limit = READ_ONCE(memcg->memory.limit);
	if (count < limit)
		margin = limit - count;

	if (do_memsw_account()) {
		count = page_counter_read(&memcg->memsw);
		limit = READ_ONCE(memcg->memsw.limit);
		if (count <= limit)
			margin = min(margin, limit - count);
	}

	return margin;
}
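/*
 * Worked example (illustrative numbers): with memory usage at 300 pages
 * against a 500 page limit, and memory+swap usage at 900 pages against a
 * 1000 page limit, the margin is min(500 - 300, 1000 - 900) = 100 pages.
 */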
1097
KAMEZAWA Hiroyuki619d0942012-03-21 16:34:23 -07001098/*
Qiang Huangbdcbb652014-06-04 16:08:21 -07001099 * A routine for checking "mem" is under move_account() or not.
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001100 *
Qiang Huangbdcbb652014-06-04 16:08:21 -07001101 * Checking a cgroup is mc.from or mc.to or under hierarchy of
1102 * moving cgroups. This is for waiting at high-memory pressure
1103 * caused by "move".
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001104 */
static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
{
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	bool ret = false;
	/*
	 * Unlike task_move routines, we access mc.to, mc.from not under
	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
	 */
	spin_lock(&mc.lock);
	from = mc.from;
	to = mc.to;
	if (!from)
		goto unlock;

	ret = mem_cgroup_is_descendant(from, memcg) ||
		mem_cgroup_is_descendant(to, memcg);
unlock:
	spin_unlock(&mc.lock);
	return ret;
}

static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
{
	if (mc.moving_task && current != mc.moving_task) {
		if (mem_cgroup_under_move(memcg)) {
			DEFINE_WAIT(wait);
			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
			/* moving charge context might have finished. */
			if (mc.moving_task)
				schedule();
			finish_wait(&mc.waitq, &wait);
			return true;
		}
	}
	return false;
}

#define K(x) ((x) << (PAGE_SHIFT-10))
/**
 * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
	/* oom_info_lock ensures that parallel ooms do not interleave */
	static DEFINE_MUTEX(oom_info_lock);
	struct mem_cgroup *iter;
	unsigned int i;

	mutex_lock(&oom_info_lock);
	rcu_read_lock();

	if (p) {
		pr_info("Task in ");
		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
		pr_cont(" killed as a result of limit of ");
	} else {
		pr_info("Memory limit reached of cgroup ");
	}

	pr_cont_cgroup_path(memcg->css.cgroup);
	pr_cont("\n");

	rcu_read_unlock();

	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memory)),
		K((u64)memcg->memory.limit), memcg->memory.failcnt);
	pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memsw)),
		K((u64)memcg->memsw.limit), memcg->memsw.failcnt);
	pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->kmem)),
		K((u64)memcg->kmem.limit), memcg->kmem.failcnt);

	for_each_mem_cgroup_tree(iter, memcg) {
		pr_info("Memory cgroup stats for ");
		pr_cont_cgroup_path(iter->css.cgroup);
		pr_cont(":");

		for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
			if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
				continue;
			pr_cont(" %s:%luKB", mem_cgroup_stat_names[i],
				K(mem_cgroup_read_stat(iter, i)));
		}

		for (i = 0; i < NR_LRU_LISTS; i++)
			pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
				K(mem_cgroup_nr_lru_pages(iter, BIT(i))));

		pr_cont("\n");
	}
	mutex_unlock(&oom_info_lock);
}

KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07001206/*
 1207 * This function returns the number of memcgs under the hierarchy tree.
 1208 * Returns 1 (self count) if there are no children.
1209 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001210static int mem_cgroup_count_children(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07001211{
1212 int num = 0;
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001213 struct mem_cgroup *iter;
1214
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001215 for_each_mem_cgroup_tree(iter, memcg)
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001216 num++;
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07001217 return num;
1218}
1219
Balbir Singh6d61ef42009-01-07 18:08:06 -08001220/*
David Rientjesa63d83f2010-08-09 17:19:46 -07001221 * Return the memory (and swap, if configured) limit for a memcg.
1222 */
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001223static unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
David Rientjesa63d83f2010-08-09 17:19:46 -07001224{
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001225 unsigned long limit;
David Rientjesa63d83f2010-08-09 17:19:46 -07001226
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001227 limit = memcg->memory.limit;
Michal Hocko9a5a8f12012-11-16 14:14:49 -08001228 if (mem_cgroup_swappiness(memcg)) {
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001229 unsigned long memsw_limit;
Vladimir Davydov37e84352016-01-20 15:02:56 -08001230 unsigned long swap_limit;
Michal Hocko9a5a8f12012-11-16 14:14:49 -08001231
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001232 memsw_limit = memcg->memsw.limit;
Vladimir Davydov37e84352016-01-20 15:02:56 -08001233 swap_limit = memcg->swap.limit;
1234 swap_limit = min(swap_limit, (unsigned long)total_swap_pages);
1235 limit = min(limit + swap_limit, memsw_limit);
Michal Hocko9a5a8f12012-11-16 14:14:49 -08001236 }
Michal Hocko9a5a8f12012-11-16 14:14:49 -08001237 return limit;
David Rientjesa63d83f2010-08-09 17:19:46 -07001238}
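/*
 * Illustrative example for mem_cgroup_get_limit() (made-up numbers,
 * assuming non-zero swappiness): with memory.limit = 1000 pages,
 * swap.limit unlimited but only 500 swap pages in the system, and
 * memsw.limit = 1200 pages, the effective limit is
 * min(1000 + 500, 1200) = 1200 pages.
 */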
1239
David Rientjes19965462012-12-11 16:00:26 -08001240static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1241 int order)
David Rientjes9cbb78b2012-07-31 16:43:44 -07001242{
David Rientjes6e0fc462015-09-08 15:00:36 -07001243 struct oom_control oc = {
1244 .zonelist = NULL,
1245 .nodemask = NULL,
1246 .gfp_mask = gfp_mask,
1247 .order = order,
David Rientjes6e0fc462015-09-08 15:00:36 -07001248 };
David Rientjes9cbb78b2012-07-31 16:43:44 -07001249 struct mem_cgroup *iter;
1250 unsigned long chosen_points = 0;
1251 unsigned long totalpages;
1252 unsigned int points = 0;
1253 struct task_struct *chosen = NULL;
1254
Johannes Weinerdc564012015-06-24 16:57:19 -07001255 mutex_lock(&oom_lock);
1256
David Rientjes876aafb2012-07-31 16:43:48 -07001257 /*
David Rientjes465adcf2013-04-29 15:08:45 -07001258 * If current has a pending SIGKILL or is exiting, then automatically
1259 * select it. The goal is to allow it to allocate so that it may
1260 * quickly exit and free its memory.
David Rientjes876aafb2012-07-31 16:43:48 -07001261 */
Oleg Nesterovd003f372014-12-12 16:56:24 -08001262 if (fatal_signal_pending(current) || task_will_free_mem(current)) {
Johannes Weiner16e95192015-06-24 16:57:07 -07001263 mark_oom_victim(current);
Johannes Weinerdc564012015-06-24 16:57:19 -07001264 goto unlock;
David Rientjes876aafb2012-07-31 16:43:48 -07001265 }
1266
David Rientjes6e0fc462015-09-08 15:00:36 -07001267 check_panic_on_oom(&oc, CONSTRAINT_MEMCG, memcg);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001268 totalpages = mem_cgroup_get_limit(memcg) ? : 1;
David Rientjes9cbb78b2012-07-31 16:43:44 -07001269 for_each_mem_cgroup_tree(iter, memcg) {
Tejun Heo72ec7022013-08-08 20:11:26 -04001270 struct css_task_iter it;
David Rientjes9cbb78b2012-07-31 16:43:44 -07001271 struct task_struct *task;
1272
Tejun Heo72ec7022013-08-08 20:11:26 -04001273 css_task_iter_start(&iter->css, &it);
1274 while ((task = css_task_iter_next(&it))) {
David Rientjes6e0fc462015-09-08 15:00:36 -07001275 switch (oom_scan_process_thread(&oc, task, totalpages)) {
David Rientjes9cbb78b2012-07-31 16:43:44 -07001276 case OOM_SCAN_SELECT:
1277 if (chosen)
1278 put_task_struct(chosen);
1279 chosen = task;
1280 chosen_points = ULONG_MAX;
1281 get_task_struct(chosen);
1282 /* fall through */
1283 case OOM_SCAN_CONTINUE:
1284 continue;
1285 case OOM_SCAN_ABORT:
Tejun Heo72ec7022013-08-08 20:11:26 -04001286 css_task_iter_end(&it);
David Rientjes9cbb78b2012-07-31 16:43:44 -07001287 mem_cgroup_iter_break(memcg, iter);
1288 if (chosen)
1289 put_task_struct(chosen);
Johannes Weinerdc564012015-06-24 16:57:19 -07001290 goto unlock;
David Rientjes9cbb78b2012-07-31 16:43:44 -07001291 case OOM_SCAN_OK:
1292 break;
1293 };
1294 points = oom_badness(task, memcg, NULL, totalpages);
David Rientjesd49ad932014-01-23 15:53:34 -08001295 if (!points || points < chosen_points)
1296 continue;
1297 /* Prefer thread group leaders for display purposes */
1298 if (points == chosen_points &&
1299 thread_group_leader(chosen))
1300 continue;
1301
1302 if (chosen)
1303 put_task_struct(chosen);
1304 chosen = task;
1305 chosen_points = points;
1306 get_task_struct(chosen);
David Rientjes9cbb78b2012-07-31 16:43:44 -07001307 }
Tejun Heo72ec7022013-08-08 20:11:26 -04001308 css_task_iter_end(&it);
David Rientjes9cbb78b2012-07-31 16:43:44 -07001309 }
1310
Johannes Weinerdc564012015-06-24 16:57:19 -07001311 if (chosen) {
1312 points = chosen_points * 1000 / totalpages;
David Rientjes6e0fc462015-09-08 15:00:36 -07001313 oom_kill_process(&oc, chosen, points, totalpages, memcg,
1314 "Memory cgroup out of memory");
Johannes Weinerdc564012015-06-24 16:57:19 -07001315 }
1316unlock:
1317 mutex_unlock(&oom_lock);
David Rientjes9cbb78b2012-07-31 16:43:44 -07001318}
1319
Michele Curtiae6e71d2014-12-12 16:56:35 -08001320#if MAX_NUMNODES > 1
1321
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001322/**
1323 * test_mem_cgroup_node_reclaimable
Wanpeng Lidad75572012-06-20 12:53:01 -07001324 * @memcg: the target memcg
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001325 * @nid: the node ID to be checked.
 1326 * @noswap: specify true here if the user wants file-only information.
1327 *
1328 * This function returns whether the specified memcg contains any
1329 * reclaimable pages on a node. Returns true if there are any reclaimable
1330 * pages in the node.
1331 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001332static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001333 int nid, bool noswap)
1334{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001335 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001336 return true;
1337 if (noswap || !total_swap_pages)
1338 return false;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001339 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001340 return true;
1341 return false;
1342
1343}
Ying Han889976d2011-05-26 16:25:33 -07001344
1345/*
1346 * Always updating the nodemask is not very good - even if we have an empty
1347 * list or the wrong list here, we can start from some node and traverse all
1348 * nodes based on the zonelist. So update the list loosely once per 10 secs.
1349 *
1350 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001351static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
Ying Han889976d2011-05-26 16:25:33 -07001352{
1353 int nid;
KAMEZAWA Hiroyuki453a9bf2011-07-08 15:39:43 -07001354 /*
 1355 * numainfo_events > 0 means there were at least NUMAINFO_EVENTS_TARGET
1356 * pagein/pageout changes since the last update.
1357 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001358 if (!atomic_read(&memcg->numainfo_events))
KAMEZAWA Hiroyuki453a9bf2011-07-08 15:39:43 -07001359 return;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001360 if (atomic_inc_return(&memcg->numainfo_updating) > 1)
Ying Han889976d2011-05-26 16:25:33 -07001361 return;
1362
Ying Han889976d2011-05-26 16:25:33 -07001363 /* make a nodemask where this memcg uses memory from */
Lai Jiangshan31aaea42012-12-12 13:51:27 -08001364 memcg->scan_nodes = node_states[N_MEMORY];
Ying Han889976d2011-05-26 16:25:33 -07001365
Lai Jiangshan31aaea42012-12-12 13:51:27 -08001366 for_each_node_mask(nid, node_states[N_MEMORY]) {
Ying Han889976d2011-05-26 16:25:33 -07001367
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001368 if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
1369 node_clear(nid, memcg->scan_nodes);
Ying Han889976d2011-05-26 16:25:33 -07001370 }
KAMEZAWA Hiroyuki453a9bf2011-07-08 15:39:43 -07001371
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001372 atomic_set(&memcg->numainfo_events, 0);
1373 atomic_set(&memcg->numainfo_updating, 0);
Ying Han889976d2011-05-26 16:25:33 -07001374}
1375
1376/*
 1377 * Selecting a node where we start reclaim from. Because what we need is just
 1378 * to reduce the usage counter, starting from anywhere is OK. Considering
 1379 * memory reclaim from the current node, there are pros and cons.
 1380 *
 1381 * Freeing memory from the current node means freeing memory from a node which
 1382 * we'll use or have used. So, it may hurt the LRU. And if several threads
 1383 * hit the limit, they will see contention on a node. But freeing from a remote
 1384 * node means more cost for memory reclaim because of memory latency.
 1385 *
 1386 * For now, we use round-robin. A better algorithm is welcome.
1387 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001388int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
Ying Han889976d2011-05-26 16:25:33 -07001389{
1390 int node;
1391
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001392 mem_cgroup_may_update_nodemask(memcg);
1393 node = memcg->last_scanned_node;
Ying Han889976d2011-05-26 16:25:33 -07001394
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001395 node = next_node(node, memcg->scan_nodes);
Ying Han889976d2011-05-26 16:25:33 -07001396 if (node == MAX_NUMNODES)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001397 node = first_node(memcg->scan_nodes);
Ying Han889976d2011-05-26 16:25:33 -07001398 /*
 1399 * We call this when we hit the limit, not when pages are added to the LRU.
 1400 * No LRU may hold pages because all pages are UNEVICTABLE or the
 1401 * memcg is too small and all pages are not on the LRU. In that case,
 1402 * we use the current node.
1403 */
1404 if (unlikely(node == MAX_NUMNODES))
1405 node = numa_node_id();
1406
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001407 memcg->last_scanned_node = node;
Ying Han889976d2011-05-26 16:25:33 -07001408 return node;
1409}
Ying Han889976d2011-05-26 16:25:33 -07001410#else
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001411int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
Ying Han889976d2011-05-26 16:25:33 -07001412{
1413 return 0;
1414}
1415#endif
1416
Andrew Morton0608f432013-09-24 15:27:41 -07001417static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1418 struct zone *zone,
1419 gfp_t gfp_mask,
1420 unsigned long *total_scanned)
Balbir Singh6d61ef42009-01-07 18:08:06 -08001421{
Andrew Morton0608f432013-09-24 15:27:41 -07001422 struct mem_cgroup *victim = NULL;
1423 int total = 0;
1424 int loop = 0;
1425 unsigned long excess;
1426 unsigned long nr_scanned;
1427 struct mem_cgroup_reclaim_cookie reclaim = {
1428 .zone = zone,
1429 .priority = 0,
1430 };
Johannes Weiner9d11ea92011-03-23 16:42:21 -07001431
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001432 excess = soft_limit_excess(root_memcg);
Balbir Singh6d61ef42009-01-07 18:08:06 -08001433
Andrew Morton0608f432013-09-24 15:27:41 -07001434 while (1) {
1435 victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1436 if (!victim) {
1437 loop++;
1438 if (loop >= 2) {
1439 /*
1440 * If we have not been able to reclaim
 1441 * anything, it might be because there are
1442 * no reclaimable pages under this hierarchy
1443 */
1444 if (!total)
1445 break;
1446 /*
 1447 * We want to do more targeted reclaim:
 1448 * excess >> 2 is not so excessive that we
 1449 * reclaim too much, nor so small that we keep
 1450 * coming back to reclaim from this cgroup.
1451 */
1452 if (total >= (excess >> 2) ||
1453 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1454 break;
1455 }
1456 continue;
1457 }
Andrew Morton0608f432013-09-24 15:27:41 -07001458 total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
1459 zone, &nr_scanned);
1460 *total_scanned += nr_scanned;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001461 if (!soft_limit_excess(root_memcg))
Andrew Morton0608f432013-09-24 15:27:41 -07001462 break;
Balbir Singh6d61ef42009-01-07 18:08:06 -08001463 }
Andrew Morton0608f432013-09-24 15:27:41 -07001464 mem_cgroup_iter_break(root_memcg, victim);
1465 return total;
Balbir Singh6d61ef42009-01-07 18:08:06 -08001466}
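/*
 * Rough example of the soft reclaim cut-off above (made-up numbers):
 * with a soft-limit excess of 400 pages, the loop stops once at least
 * excess >> 2 = 100 pages have been reclaimed, or after
 * MEM_CGROUP_MAX_RECLAIM_LOOPS passes over the hierarchy without
 * further progress.
 */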
1467
Johannes Weiner0056f4e2013-10-31 16:34:14 -07001468#ifdef CONFIG_LOCKDEP
1469static struct lockdep_map memcg_oom_lock_dep_map = {
1470 .name = "memcg_oom_lock",
1471};
1472#endif
1473
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001474static DEFINE_SPINLOCK(memcg_oom_lock);
1475
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001476/*
1477 * Check OOM-Killer is already running under our hierarchy.
1478 * If someone is running, return false.
1479 */
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001480static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001481{
Michal Hocko79dfdac2011-07-26 16:08:23 -07001482 struct mem_cgroup *iter, *failed = NULL;
KAMEZAWA Hiroyukia636b322009-01-07 18:08:08 -08001483
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001484 spin_lock(&memcg_oom_lock);
1485
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001486 for_each_mem_cgroup_tree(iter, memcg) {
Johannes Weiner23751be2011-08-25 15:59:16 -07001487 if (iter->oom_lock) {
Michal Hocko79dfdac2011-07-26 16:08:23 -07001488 /*
 1489 * this subtree of our hierarchy is already locked,
 1490 * so we cannot grant the lock.
1491 */
Michal Hocko79dfdac2011-07-26 16:08:23 -07001492 failed = iter;
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001493 mem_cgroup_iter_break(memcg, iter);
1494 break;
Johannes Weiner23751be2011-08-25 15:59:16 -07001495 } else
1496 iter->oom_lock = true;
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001497 }
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001498
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001499 if (failed) {
1500 /*
 1501 * OK, we failed to lock the whole subtree, so we have
 1502 * to clean up what we set up, up to the failing cgroup.
1503 */
1504 for_each_mem_cgroup_tree(iter, memcg) {
1505 if (iter == failed) {
1506 mem_cgroup_iter_break(memcg, iter);
1507 break;
1508 }
1509 iter->oom_lock = false;
Michal Hocko79dfdac2011-07-26 16:08:23 -07001510 }
Johannes Weiner0056f4e2013-10-31 16:34:14 -07001511 } else
1512 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001513
1514 spin_unlock(&memcg_oom_lock);
1515
1516 return !failed;
KAMEZAWA Hiroyukia636b322009-01-07 18:08:08 -08001517}
KAMEZAWA Hiroyuki0b7f5692009-04-02 16:57:38 -07001518
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001519static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki0b7f5692009-04-02 16:57:38 -07001520{
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001521 struct mem_cgroup *iter;
1522
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001523 spin_lock(&memcg_oom_lock);
Johannes Weiner0056f4e2013-10-31 16:34:14 -07001524 mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001525 for_each_mem_cgroup_tree(iter, memcg)
Michal Hocko79dfdac2011-07-26 16:08:23 -07001526 iter->oom_lock = false;
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001527 spin_unlock(&memcg_oom_lock);
Michal Hocko79dfdac2011-07-26 16:08:23 -07001528}
1529
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001530static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
Michal Hocko79dfdac2011-07-26 16:08:23 -07001531{
1532 struct mem_cgroup *iter;
1533
Tejun Heoc2b42d32015-06-24 16:58:23 -07001534 spin_lock(&memcg_oom_lock);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001535 for_each_mem_cgroup_tree(iter, memcg)
Tejun Heoc2b42d32015-06-24 16:58:23 -07001536 iter->under_oom++;
1537 spin_unlock(&memcg_oom_lock);
Michal Hocko79dfdac2011-07-26 16:08:23 -07001538}
1539
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001540static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
Michal Hocko79dfdac2011-07-26 16:08:23 -07001541{
1542 struct mem_cgroup *iter;
1543
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001544 /*
1545 * When a new child is created while the hierarchy is under oom,
Tejun Heoc2b42d32015-06-24 16:58:23 -07001546 * mem_cgroup_oom_lock() may not be called. Watch for underflow.
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001547 */
Tejun Heoc2b42d32015-06-24 16:58:23 -07001548 spin_lock(&memcg_oom_lock);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001549 for_each_mem_cgroup_tree(iter, memcg)
Tejun Heoc2b42d32015-06-24 16:58:23 -07001550 if (iter->under_oom > 0)
1551 iter->under_oom--;
1552 spin_unlock(&memcg_oom_lock);
KAMEZAWA Hiroyuki0b7f5692009-04-02 16:57:38 -07001553}
1554
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001555static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1556
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001557struct oom_wait_info {
Hugh Dickinsd79154b2012-03-21 16:34:18 -07001558 struct mem_cgroup *memcg;
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001559 wait_queue_t wait;
1560};
1561
1562static int memcg_oom_wake_function(wait_queue_t *wait,
1563 unsigned mode, int sync, void *arg)
1564{
Hugh Dickinsd79154b2012-03-21 16:34:18 -07001565 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1566 struct mem_cgroup *oom_wait_memcg;
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001567 struct oom_wait_info *oom_wait_info;
1568
1569 oom_wait_info = container_of(wait, struct oom_wait_info, wait);
Hugh Dickinsd79154b2012-03-21 16:34:18 -07001570 oom_wait_memcg = oom_wait_info->memcg;
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001571
Johannes Weiner2314b422014-12-10 15:44:33 -08001572 if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1573 !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001574 return 0;
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001575 return autoremove_wake_function(wait, mode, sync, arg);
1576}
1577
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001578static void memcg_oom_recover(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07001579{
Tejun Heoc2b42d32015-06-24 16:58:23 -07001580 /*
1581 * For the following lockless ->under_oom test, the only required
1582 * guarantee is that it must see the state asserted by an OOM when
1583 * this function is called as a result of userland actions
1584 * triggered by the notification of the OOM. This is trivially
1585 * achieved by invoking mem_cgroup_mark_under_oom() before
1586 * triggering notification.
1587 */
1588 if (memcg && memcg->under_oom)
Tejun Heof4b90b702015-06-24 16:58:21 -07001589 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07001590}
1591
Johannes Weiner3812c8c2013-09-12 15:13:44 -07001592static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001593{
Tejun Heo626ebc42015-11-05 18:46:09 -08001594 if (!current->memcg_may_oom)
Johannes Weiner3812c8c2013-09-12 15:13:44 -07001595 return;
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001596 /*
Johannes Weiner49426422013-10-16 13:46:59 -07001597 * We are in the middle of the charge context here, so we
1598 * don't want to block when potentially sitting on a callstack
1599 * that holds all kinds of filesystem and mm locks.
1600 *
1601 * Also, the caller may handle a failed allocation gracefully
1602 * (like optional page cache readahead) and so an OOM killer
1603 * invocation might not even be necessary.
1604 *
1605 * That's why we don't do anything here except remember the
1606 * OOM context and then deal with it at the end of the page
1607 * fault when the stack is unwound, the locks are released,
1608 * and when we know whether the fault was overall successful.
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001609 */
Johannes Weiner49426422013-10-16 13:46:59 -07001610 css_get(&memcg->css);
Tejun Heo626ebc42015-11-05 18:46:09 -08001611 current->memcg_in_oom = memcg;
1612 current->memcg_oom_gfp_mask = mask;
1613 current->memcg_oom_order = order;
Johannes Weiner49426422013-10-16 13:46:59 -07001614}
1615
1616/**
1617 * mem_cgroup_oom_synchronize - complete memcg OOM handling
1618 * @handle: actually kill/wait or just clean up the OOM state
1619 *
1620 * This has to be called at the end of a page fault if the memcg OOM
1621 * handler was enabled.
1622 *
1623 * Memcg supports userspace OOM handling where failed allocations must
1624 * sleep on a waitqueue until the userspace task resolves the
1625 * situation. Sleeping directly in the charge context with all kinds
1626 * of locks held is not a good idea, instead we remember an OOM state
1627 * in the task and mem_cgroup_oom_synchronize() has to be called at
1628 * the end of the page fault to complete the OOM handling.
1629 *
1630 * Returns %true if an ongoing memcg OOM situation was detected and
1631 * completed, %false otherwise.
1632 */
1633bool mem_cgroup_oom_synchronize(bool handle)
1634{
Tejun Heo626ebc42015-11-05 18:46:09 -08001635 struct mem_cgroup *memcg = current->memcg_in_oom;
Johannes Weiner49426422013-10-16 13:46:59 -07001636 struct oom_wait_info owait;
1637 bool locked;
1638
1639 /* OOM is global, do not handle */
1640 if (!memcg)
1641 return false;
1642
Michal Hockoc32b3cb2015-02-11 15:26:24 -08001643 if (!handle || oom_killer_disabled)
Johannes Weiner49426422013-10-16 13:46:59 -07001644 goto cleanup;
1645
1646 owait.memcg = memcg;
1647 owait.wait.flags = 0;
1648 owait.wait.func = memcg_oom_wake_function;
1649 owait.wait.private = current;
1650 INIT_LIST_HEAD(&owait.wait.task_list);
1651
1652 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001653 mem_cgroup_mark_under_oom(memcg);
1654
1655 locked = mem_cgroup_oom_trylock(memcg);
1656
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07001657 if (locked)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001658 mem_cgroup_oom_notify(memcg);
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001659
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001660 if (locked && !memcg->oom_kill_disable) {
1661 mem_cgroup_unmark_under_oom(memcg);
Johannes Weiner49426422013-10-16 13:46:59 -07001662 finish_wait(&memcg_oom_waitq, &owait.wait);
Tejun Heo626ebc42015-11-05 18:46:09 -08001663 mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
1664 current->memcg_oom_order);
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07001665 } else {
Johannes Weiner3812c8c2013-09-12 15:13:44 -07001666 schedule();
Johannes Weiner49426422013-10-16 13:46:59 -07001667 mem_cgroup_unmark_under_oom(memcg);
1668 finish_wait(&memcg_oom_waitq, &owait.wait);
1669 }
1670
1671 if (locked) {
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001672 mem_cgroup_oom_unlock(memcg);
1673 /*
1674 * There is no guarantee that an OOM-lock contender
1675 * sees the wakeups triggered by the OOM kill
 1676 * uncharges. Wake any sleepers explicitly.
1677 */
1678 memcg_oom_recover(memcg);
1679 }
Johannes Weiner49426422013-10-16 13:46:59 -07001680cleanup:
Tejun Heo626ebc42015-11-05 18:46:09 -08001681 current->memcg_in_oom = NULL;
Johannes Weiner3812c8c2013-09-12 15:13:44 -07001682 css_put(&memcg->css);
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001683 return true;
KAMEZAWA Hiroyuki0b7f5692009-04-02 16:57:38 -07001684}
1685
Johannes Weinerd7365e72014-10-29 14:50:48 -07001686/**
Johannes Weiner81f8c3a2016-03-15 14:57:04 -07001687 * lock_page_memcg - lock a page->mem_cgroup binding
1688 * @page: the page
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001689 *
Johannes Weiner81f8c3a2016-03-15 14:57:04 -07001690 * This function protects unlocked LRU pages from being moved to
1691 * another cgroup and stabilizes their page->mem_cgroup binding.
Balbir Singhd69b0422009-06-17 16:26:34 -07001692 */
Johannes Weiner62cccb82016-03-15 14:57:22 -07001693void lock_page_memcg(struct page *page)
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07001694{
1695 struct mem_cgroup *memcg;
Johannes Weiner6de22612015-02-11 15:25:01 -08001696 unsigned long flags;
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07001697
Johannes Weiner6de22612015-02-11 15:25:01 -08001698 /*
1699 * The RCU lock is held throughout the transaction. The fast
1700 * path can get away without acquiring the memcg->move_lock
1701 * because page moving starts with an RCU grace period.
Johannes Weiner6de22612015-02-11 15:25:01 -08001702 */
Johannes Weinerd7365e72014-10-29 14:50:48 -07001703 rcu_read_lock();
1704
1705 if (mem_cgroup_disabled())
Johannes Weiner62cccb82016-03-15 14:57:22 -07001706 return;
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07001707again:
Johannes Weiner1306a852014-12-10 15:44:52 -08001708 memcg = page->mem_cgroup;
Johannes Weiner29833312014-12-10 15:44:02 -08001709 if (unlikely(!memcg))
Johannes Weiner62cccb82016-03-15 14:57:22 -07001710 return;
Johannes Weinerd7365e72014-10-29 14:50:48 -07001711
Qiang Huangbdcbb652014-06-04 16:08:21 -07001712 if (atomic_read(&memcg->moving_account) <= 0)
Johannes Weiner62cccb82016-03-15 14:57:22 -07001713 return;
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07001714
Johannes Weiner6de22612015-02-11 15:25:01 -08001715 spin_lock_irqsave(&memcg->move_lock, flags);
Johannes Weiner1306a852014-12-10 15:44:52 -08001716 if (memcg != page->mem_cgroup) {
Johannes Weiner6de22612015-02-11 15:25:01 -08001717 spin_unlock_irqrestore(&memcg->move_lock, flags);
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07001718 goto again;
1719 }
Johannes Weiner6de22612015-02-11 15:25:01 -08001720
1721 /*
1722 * When charge migration first begins, we can have locked and
1723 * unlocked page stat updates happening concurrently. Track
Johannes Weiner81f8c3a2016-03-15 14:57:04 -07001724 * the task that holds the lock for unlock_page_memcg().
Johannes Weiner6de22612015-02-11 15:25:01 -08001725 */
1726 memcg->move_lock_task = current;
1727 memcg->move_lock_flags = flags;
Johannes Weinerd7365e72014-10-29 14:50:48 -07001728
Johannes Weiner62cccb82016-03-15 14:57:22 -07001729 return;
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07001730}
Johannes Weiner81f8c3a2016-03-15 14:57:04 -07001731EXPORT_SYMBOL(lock_page_memcg);
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07001732
Johannes Weinerd7365e72014-10-29 14:50:48 -07001733/**
Johannes Weiner81f8c3a2016-03-15 14:57:04 -07001734 * unlock_page_memcg - unlock a page->mem_cgroup binding
Johannes Weiner62cccb82016-03-15 14:57:22 -07001735 * @page: the page
Johannes Weinerd7365e72014-10-29 14:50:48 -07001736 */
Johannes Weiner62cccb82016-03-15 14:57:22 -07001737void unlock_page_memcg(struct page *page)
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07001738{
Johannes Weiner62cccb82016-03-15 14:57:22 -07001739 struct mem_cgroup *memcg = page->mem_cgroup;
1740
Johannes Weiner6de22612015-02-11 15:25:01 -08001741 if (memcg && memcg->move_lock_task == current) {
1742 unsigned long flags = memcg->move_lock_flags;
1743
1744 memcg->move_lock_task = NULL;
1745 memcg->move_lock_flags = 0;
1746
1747 spin_unlock_irqrestore(&memcg->move_lock, flags);
1748 }
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07001749
Johannes Weinerd7365e72014-10-29 14:50:48 -07001750 rcu_read_unlock();
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07001751}
Johannes Weiner81f8c3a2016-03-15 14:57:04 -07001752EXPORT_SYMBOL(unlock_page_memcg);
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07001753
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08001754/*
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001755 * Size of the first charge trial. "32" comes from vmscan.c's magic value.
 1756 * TODO: it may be necessary to use bigger numbers on big iron.
1757 */
Johannes Weiner7ec99d62011-03-23 16:42:36 -07001758#define CHARGE_BATCH 32U
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001759struct memcg_stock_pcp {
 1760 struct mem_cgroup *cached; /* this is never the root cgroup */
Johannes Weiner11c9ea42011-03-23 16:42:34 -07001761 unsigned int nr_pages;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001762 struct work_struct work;
KAMEZAWA Hiroyuki26fe6162011-06-15 15:08:45 -07001763 unsigned long flags;
Kirill A. Shutemova0db00f2012-05-29 15:06:56 -07001764#define FLUSHING_CACHED_CHARGE 0
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001765};
1766static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
Michal Hocko9f50fad2011-08-09 11:56:26 +02001767static DEFINE_MUTEX(percpu_charge_mutex);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001768
Suleiman Souhlala0956d52012-12-18 14:21:36 -08001769/**
1770 * consume_stock: Try to consume stocked charge on this cpu.
1771 * @memcg: memcg to consume from.
1772 * @nr_pages: how many pages to charge.
1773 *
1774 * The charges will only happen if @memcg matches the current cpu's memcg
1775 * stock, and at least @nr_pages are available in that stock. Failure to
1776 * service an allocation will refill the stock.
1777 *
1778 * returns true if successful, false otherwise.
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001779 */
Suleiman Souhlala0956d52012-12-18 14:21:36 -08001780static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001781{
1782 struct memcg_stock_pcp *stock;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001783 bool ret = false;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001784
Suleiman Souhlala0956d52012-12-18 14:21:36 -08001785 if (nr_pages > CHARGE_BATCH)
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001786 return ret;
Suleiman Souhlala0956d52012-12-18 14:21:36 -08001787
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001788 stock = &get_cpu_var(memcg_stock);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001789 if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
Suleiman Souhlala0956d52012-12-18 14:21:36 -08001790 stock->nr_pages -= nr_pages;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001791 ret = true;
1792 }
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001793 put_cpu_var(memcg_stock);
1794 return ret;
1795}
1796
1797/*
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001798 * Returns stocks cached in percpu and resets the cached information.
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001799 */
1800static void drain_stock(struct memcg_stock_pcp *stock)
1801{
1802 struct mem_cgroup *old = stock->cached;
1803
Johannes Weiner11c9ea42011-03-23 16:42:34 -07001804 if (stock->nr_pages) {
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001805 page_counter_uncharge(&old->memory, stock->nr_pages);
Johannes Weiner7941d212016-01-14 15:21:23 -08001806 if (do_memsw_account())
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001807 page_counter_uncharge(&old->memsw, stock->nr_pages);
Johannes Weinere8ea14c2014-12-10 15:42:42 -08001808 css_put_many(&old->css, stock->nr_pages);
Johannes Weiner11c9ea42011-03-23 16:42:34 -07001809 stock->nr_pages = 0;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001810 }
1811 stock->cached = NULL;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001812}
1813
1814/*
 1815 * This must be called with preemption disabled or by
 1816 * a thread which is pinned to the local cpu.
1817 */
1818static void drain_local_stock(struct work_struct *dummy)
1819{
Christoph Lameter7c8e0182014-06-04 16:07:56 -07001820 struct memcg_stock_pcp *stock = this_cpu_ptr(&memcg_stock);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001821 drain_stock(stock);
KAMEZAWA Hiroyuki26fe6162011-06-15 15:08:45 -07001822 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001823}
1824
1825/*
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001826 * Cache charges (nr_pages) in the local per-cpu area.
Greg Thelen320cc512010-03-15 15:27:28 +01001827 * They will be consumed by the consume_stock() function later.
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001828 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001829static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001830{
1831 struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
1832
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001833 if (stock->cached != memcg) { /* reset if necessary */
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001834 drain_stock(stock);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001835 stock->cached = memcg;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001836 }
Johannes Weiner11c9ea42011-03-23 16:42:34 -07001837 stock->nr_pages += nr_pages;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001838 put_cpu_var(memcg_stock);
1839}
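/*
 * Illustrative interplay of the stock helpers above (made-up numbers):
 * when try_charge() below charges a single page with an empty stock,
 * the surplus CHARGE_BATCH - 1 = 31 pages end up here via
 * refill_stock(), and subsequent small charges on this cpu are served
 * by consume_stock() without touching the page counters.
 */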
1840
1841/*
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001842 * Drains all per-CPU charge caches for the given root_memcg and the subtree
Johannes Weiner6d3d6aa2014-12-10 15:42:50 -08001843 * of the hierarchy under it.
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001844 */
Johannes Weiner6d3d6aa2014-12-10 15:42:50 -08001845static void drain_all_stock(struct mem_cgroup *root_memcg)
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001846{
KAMEZAWA Hiroyuki26fe6162011-06-15 15:08:45 -07001847 int cpu, curcpu;
Michal Hockod38144b2011-07-26 16:08:28 -07001848
Johannes Weiner6d3d6aa2014-12-10 15:42:50 -08001849 /* If someone's already draining, avoid running more workers. */
1850 if (!mutex_trylock(&percpu_charge_mutex))
1851 return;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001852 /* Notify other cpus that system-wide "drain" is running */
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001853 get_online_cpus();
Johannes Weiner5af12d02011-08-25 15:59:07 -07001854 curcpu = get_cpu();
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001855 for_each_online_cpu(cpu) {
1856 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001857 struct mem_cgroup *memcg;
KAMEZAWA Hiroyuki26fe6162011-06-15 15:08:45 -07001858
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001859 memcg = stock->cached;
1860 if (!memcg || !stock->nr_pages)
KAMEZAWA Hiroyuki26fe6162011-06-15 15:08:45 -07001861 continue;
Johannes Weiner2314b422014-12-10 15:44:33 -08001862 if (!mem_cgroup_is_descendant(memcg, root_memcg))
Michal Hocko3e920412011-07-26 16:08:29 -07001863 continue;
Michal Hockod1a05b62011-07-26 16:08:27 -07001864 if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
1865 if (cpu == curcpu)
1866 drain_local_stock(&stock->work);
1867 else
1868 schedule_work_on(cpu, &stock->work);
1869 }
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001870 }
Johannes Weiner5af12d02011-08-25 15:59:07 -07001871 put_cpu();
Andrew Mortonf894ffa2013-09-12 15:13:35 -07001872 put_online_cpus();
Michal Hocko9f50fad2011-08-09 11:56:26 +02001873 mutex_unlock(&percpu_charge_mutex);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001874}
1875
Paul Gortmaker0db06282013-06-19 14:53:51 -04001876static int memcg_cpu_hotplug_callback(struct notifier_block *nb,
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001877 unsigned long action,
1878 void *hcpu)
1879{
1880 int cpu = (unsigned long)hcpu;
1881 struct memcg_stock_pcp *stock;
1882
KAMEZAWA Hiroyuki619d0942012-03-21 16:34:23 -07001883 if (action == CPU_ONLINE)
KAMEZAWA Hiroyuki1489eba2010-10-27 15:33:42 -07001884 return NOTIFY_OK;
KAMEZAWA Hiroyuki1489eba2010-10-27 15:33:42 -07001885
Kirill A. Shutemovd8330492012-04-12 12:49:11 -07001886 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001887 return NOTIFY_OK;
KAMEZAWA Hiroyuki711d3d22010-10-27 15:33:42 -07001888
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001889 stock = &per_cpu(memcg_stock, cpu);
1890 drain_stock(stock);
1891 return NOTIFY_OK;
1892}
1893
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08001894static void reclaim_high(struct mem_cgroup *memcg,
1895 unsigned int nr_pages,
1896 gfp_t gfp_mask)
1897{
1898 do {
1899 if (page_counter_read(&memcg->memory) <= memcg->high)
1900 continue;
1901 mem_cgroup_events(memcg, MEMCG_HIGH, 1);
1902 try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
1903 } while ((memcg = parent_mem_cgroup(memcg)));
1904}
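/*
 * Example of the walk in reclaim_high() (illustrative): a leaf memcg
 * over its high limit gets a MEMCG_HIGH event and direct reclaim of
 * nr_pages, while ancestors still below their own high setting are
 * skipped; the loop always continues up to the root.
 */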
1905
1906static void high_work_func(struct work_struct *work)
1907{
1908 struct mem_cgroup *memcg;
1909
1910 memcg = container_of(work, struct mem_cgroup, high_work);
1911 reclaim_high(memcg, CHARGE_BATCH, GFP_KERNEL);
1912}
1913
Tejun Heob23afb92015-11-05 18:46:11 -08001914/*
 1915 * Scheduled by try_charge() to be executed from the userland return path;
 1916 * it reclaims memory over the high limit.
1917 */
1918void mem_cgroup_handle_over_high(void)
1919{
1920 unsigned int nr_pages = current->memcg_nr_pages_over_high;
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08001921 struct mem_cgroup *memcg;
Tejun Heob23afb92015-11-05 18:46:11 -08001922
1923 if (likely(!nr_pages))
1924 return;
1925
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08001926 memcg = get_mem_cgroup_from_mm(current->mm);
1927 reclaim_high(memcg, nr_pages, GFP_KERNEL);
Tejun Heob23afb92015-11-05 18:46:11 -08001928 css_put(&memcg->css);
1929 current->memcg_nr_pages_over_high = 0;
1930}
1931
Johannes Weiner00501b52014-08-08 14:19:20 -07001932static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
1933 unsigned int nr_pages)
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08001934{
Johannes Weiner7ec99d62011-03-23 16:42:36 -07001935 unsigned int batch = max(CHARGE_BATCH, nr_pages);
Johannes Weiner9b130612014-08-06 16:05:51 -07001936 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
Johannes Weiner6539cc02014-08-06 16:05:42 -07001937 struct mem_cgroup *mem_over_limit;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001938 struct page_counter *counter;
Johannes Weiner6539cc02014-08-06 16:05:42 -07001939 unsigned long nr_reclaimed;
Johannes Weinerb70a2a22014-10-09 15:28:56 -07001940 bool may_swap = true;
1941 bool drained = false;
KAMEZAWA Hiroyukia636b322009-01-07 18:08:08 -08001942
Johannes Weinerce00a962014-09-05 08:43:57 -04001943 if (mem_cgroup_is_root(memcg))
Tejun Heo10d53c72015-11-05 18:46:17 -08001944 return 0;
Johannes Weiner6539cc02014-08-06 16:05:42 -07001945retry:
Michal Hockob6b6cc72014-04-07 15:37:44 -07001946 if (consume_stock(memcg, nr_pages))
Tejun Heo10d53c72015-11-05 18:46:17 -08001947 return 0;
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08001948
Johannes Weiner7941d212016-01-14 15:21:23 -08001949 if (!do_memsw_account() ||
Johannes Weiner6071ca52015-11-05 18:50:26 -08001950 page_counter_try_charge(&memcg->memsw, batch, &counter)) {
1951 if (page_counter_try_charge(&memcg->memory, batch, &counter))
Johannes Weiner6539cc02014-08-06 16:05:42 -07001952 goto done_restock;
Johannes Weiner7941d212016-01-14 15:21:23 -08001953 if (do_memsw_account())
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001954 page_counter_uncharge(&memcg->memsw, batch);
1955 mem_over_limit = mem_cgroup_from_counter(counter, memory);
Johannes Weiner3fbe7242014-10-09 15:28:54 -07001956 } else {
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001957 mem_over_limit = mem_cgroup_from_counter(counter, memsw);
Johannes Weinerb70a2a22014-10-09 15:28:56 -07001958 may_swap = false;
Johannes Weiner3fbe7242014-10-09 15:28:54 -07001959 }
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08001960
Johannes Weiner6539cc02014-08-06 16:05:42 -07001961 if (batch > nr_pages) {
1962 batch = nr_pages;
1963 goto retry;
1964 }
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001965
Johannes Weiner06b078f2014-08-06 16:05:44 -07001966 /*
1967 * Unlike in global OOM situations, memcg is not in a physical
1968 * memory shortage. Allow dying and OOM-killed tasks to
1969 * bypass the last charges so that they can exit quickly and
1970 * free their memory.
1971 */
1972 if (unlikely(test_thread_flag(TIF_MEMDIE) ||
1973 fatal_signal_pending(current) ||
1974 current->flags & PF_EXITING))
Tejun Heo10d53c72015-11-05 18:46:17 -08001975 goto force;
Johannes Weiner06b078f2014-08-06 16:05:44 -07001976
1977 if (unlikely(task_in_memcg_oom(current)))
1978 goto nomem;
1979
Mel Gormand0164ad2015-11-06 16:28:21 -08001980 if (!gfpflags_allow_blocking(gfp_mask))
Johannes Weiner6539cc02014-08-06 16:05:42 -07001981 goto nomem;
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001982
Johannes Weiner241994e2015-02-11 15:26:06 -08001983 mem_cgroup_events(mem_over_limit, MEMCG_MAX, 1);
1984
Johannes Weinerb70a2a22014-10-09 15:28:56 -07001985 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
1986 gfp_mask, may_swap);
Johannes Weiner6539cc02014-08-06 16:05:42 -07001987
Johannes Weiner61e02c72014-08-06 16:08:16 -07001988 if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
Johannes Weiner6539cc02014-08-06 16:05:42 -07001989 goto retry;
Johannes Weiner28c34c22014-08-06 16:05:47 -07001990
Johannes Weinerb70a2a22014-10-09 15:28:56 -07001991 if (!drained) {
Johannes Weiner6d3d6aa2014-12-10 15:42:50 -08001992 drain_all_stock(mem_over_limit);
Johannes Weinerb70a2a22014-10-09 15:28:56 -07001993 drained = true;
1994 goto retry;
1995 }
1996
Johannes Weiner28c34c22014-08-06 16:05:47 -07001997 if (gfp_mask & __GFP_NORETRY)
1998 goto nomem;
Johannes Weiner6539cc02014-08-06 16:05:42 -07001999 /*
2000 * Even though the limit is exceeded at this point, reclaim
2001 * may have been able to free some pages. Retry the charge
2002 * before killing the task.
2003 *
2004 * Only for regular pages, though: huge pages are rather
2005 * unlikely to succeed so close to the limit, and we fall back
2006 * to regular pages anyway in case of failure.
2007 */
Johannes Weiner61e02c72014-08-06 16:08:16 -07002008 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
Johannes Weiner6539cc02014-08-06 16:05:42 -07002009 goto retry;
2010 /*
 2011 * During a task move, charges can be doubly counted. So, it's
 2012 * better to wait until the end of the task move if something is going on.
2013 */
2014 if (mem_cgroup_wait_acct_move(mem_over_limit))
2015 goto retry;
2016
Johannes Weiner9b130612014-08-06 16:05:51 -07002017 if (nr_retries--)
2018 goto retry;
2019
Johannes Weiner06b078f2014-08-06 16:05:44 -07002020 if (gfp_mask & __GFP_NOFAIL)
Tejun Heo10d53c72015-11-05 18:46:17 -08002021 goto force;
Johannes Weiner06b078f2014-08-06 16:05:44 -07002022
Johannes Weiner6539cc02014-08-06 16:05:42 -07002023 if (fatal_signal_pending(current))
Tejun Heo10d53c72015-11-05 18:46:17 -08002024 goto force;
Johannes Weiner6539cc02014-08-06 16:05:42 -07002025
Johannes Weiner241994e2015-02-11 15:26:06 -08002026 mem_cgroup_events(mem_over_limit, MEMCG_OOM, 1);
2027
Jerome Marchand3608de02015-11-05 18:47:29 -08002028 mem_cgroup_oom(mem_over_limit, gfp_mask,
2029 get_order(nr_pages * PAGE_SIZE));
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002030nomem:
Johannes Weiner6d1fdc42014-04-07 15:37:45 -07002031 if (!(gfp_mask & __GFP_NOFAIL))
Johannes Weiner3168ecb2013-10-31 16:34:13 -07002032 return -ENOMEM;
Tejun Heo10d53c72015-11-05 18:46:17 -08002033force:
2034 /*
2035 * The allocation either can't fail or will lead to more memory
 2036 * being freed very soon. Allow memory usage to go over the limit
2037 * temporarily by force charging it.
2038 */
2039 page_counter_charge(&memcg->memory, nr_pages);
Johannes Weiner7941d212016-01-14 15:21:23 -08002040 if (do_memsw_account())
Tejun Heo10d53c72015-11-05 18:46:17 -08002041 page_counter_charge(&memcg->memsw, nr_pages);
2042 css_get_many(&memcg->css, nr_pages);
2043
2044 return 0;
Johannes Weiner6539cc02014-08-06 16:05:42 -07002045
2046done_restock:
Johannes Weinere8ea14c2014-12-10 15:42:42 -08002047 css_get_many(&memcg->css, batch);
Johannes Weiner6539cc02014-08-06 16:05:42 -07002048 if (batch > nr_pages)
2049 refill_stock(memcg, batch - nr_pages);
Tejun Heob23afb92015-11-05 18:46:11 -08002050
Johannes Weiner241994e2015-02-11 15:26:06 -08002051 /*
Tejun Heob23afb92015-11-05 18:46:11 -08002052 * If the hierarchy is above the normal consumption range, schedule
2053 * reclaim on returning to userland. We can perform reclaim here
Mel Gorman71baba42015-11-06 16:28:28 -08002054 * if __GFP_RECLAIM is set, but let's always punt for simplicity and so that
Tejun Heob23afb92015-11-05 18:46:11 -08002055 * GFP_KERNEL can consistently be used during reclaim. @memcg is
2056 * not recorded as it most likely matches current's and won't
2057 * change in the meantime. As high limit is checked again before
2058 * reclaim, the cost of mismatch is negligible.
Johannes Weiner241994e2015-02-11 15:26:06 -08002059 */
2060 do {
Tejun Heob23afb92015-11-05 18:46:11 -08002061 if (page_counter_read(&memcg->memory) > memcg->high) {
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08002062 /* Don't bother a random interrupted task */
2063 if (in_interrupt()) {
2064 schedule_work(&memcg->high_work);
2065 break;
2066 }
Vladimir Davydov9516a182015-12-11 13:40:24 -08002067 current->memcg_nr_pages_over_high += batch;
Tejun Heob23afb92015-11-05 18:46:11 -08002068 set_notify_resume(current);
2069 break;
2070 }
Johannes Weiner241994e2015-02-11 15:26:06 -08002071 } while ((memcg = parent_mem_cgroup(memcg)));
Tejun Heo10d53c72015-11-05 18:46:17 -08002072
2073 return 0;
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002074}
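/*
 * Rough sketch of the charge path above for a single GFP_KERNEL page
 * (illustrative, not exhaustive): consume_stock() is tried first; on a
 * miss a whole CHARGE_BATCH (32 pages) is charged to the counters and
 * the surplus refills the per-cpu stock; only when a counter hits its
 * limit do we reclaim, drain the stocks, retry up to
 * MEM_CGROUP_RECLAIM_RETRIES times and, failing that, enter the memcg
 * OOM path or force the charge for __GFP_NOFAIL.
 */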
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08002075
Johannes Weiner00501b52014-08-08 14:19:20 -07002076static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
Daisuke Nishimuraa3032a22009-12-15 16:47:10 -08002077{
Johannes Weinerce00a962014-09-05 08:43:57 -04002078 if (mem_cgroup_is_root(memcg))
2079 return;
2080
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002081 page_counter_uncharge(&memcg->memory, nr_pages);
Johannes Weiner7941d212016-01-14 15:21:23 -08002082 if (do_memsw_account())
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002083 page_counter_uncharge(&memcg->memsw, nr_pages);
Daisuke Nishimuraa3032a22009-12-15 16:47:10 -08002084
Johannes Weinere8ea14c2014-12-10 15:42:42 -08002085 css_put_many(&memcg->css, nr_pages);
KAMEZAWA Hiroyukid01dd172012-05-29 15:07:03 -07002086}
2087
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002088static void lock_page_lru(struct page *page, int *isolated)
2089{
2090 struct zone *zone = page_zone(page);
2091
2092 spin_lock_irq(&zone->lru_lock);
2093 if (PageLRU(page)) {
2094 struct lruvec *lruvec;
2095
2096 lruvec = mem_cgroup_page_lruvec(page, zone);
2097 ClearPageLRU(page);
2098 del_page_from_lru_list(page, lruvec, page_lru(page));
2099 *isolated = 1;
2100 } else
2101 *isolated = 0;
2102}
2103
2104static void unlock_page_lru(struct page *page, int isolated)
2105{
2106 struct zone *zone = page_zone(page);
2107
2108 if (isolated) {
2109 struct lruvec *lruvec;
2110
2111 lruvec = mem_cgroup_page_lruvec(page, zone);
2112 VM_BUG_ON_PAGE(PageLRU(page), page);
2113 SetPageLRU(page);
2114 add_page_to_lru_list(page, lruvec, page_lru(page));
2115 }
2116 spin_unlock_irq(&zone->lru_lock);
2117}
2118
Johannes Weiner00501b52014-08-08 14:19:20 -07002119static void commit_charge(struct page *page, struct mem_cgroup *memcg,
Johannes Weiner6abb5a82014-08-08 14:19:33 -07002120 bool lrucare)
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002121{
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002122 int isolated;
Hugh Dickins9ce70c02012-03-05 14:59:16 -08002123
Johannes Weiner1306a852014-12-10 15:44:52 -08002124 VM_BUG_ON_PAGE(page->mem_cgroup, page);
Hugh Dickins9ce70c02012-03-05 14:59:16 -08002125
2126 /*
 2127 * In some cases, e.g. SwapCache and FUSE (splice_buf->radixtree), the page
2128 * may already be on some other mem_cgroup's LRU. Take care of it.
2129 */
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002130 if (lrucare)
2131 lock_page_lru(page, &isolated);
Hugh Dickins9ce70c02012-03-05 14:59:16 -08002132
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002133 /*
2134 * Nobody should be changing or seriously looking at
Johannes Weiner1306a852014-12-10 15:44:52 -08002135 * page->mem_cgroup at this point:
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002136 *
2137 * - the page is uncharged
2138 *
2139 * - the page is off-LRU
2140 *
2141 * - an anonymous fault has exclusive page access, except for
2142 * a locked page table
2143 *
2144 * - a page cache insertion, a swapin fault, or a migration
2145 * have the page locked
2146 */
Johannes Weiner1306a852014-12-10 15:44:52 -08002147 page->mem_cgroup = memcg;
Hugh Dickins3be91272008-02-07 00:14:19 -08002148
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002149 if (lrucare)
2150 unlock_page_lru(page, isolated);
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08002151}
2152
Johannes Weiner127424c2016-01-20 15:02:32 -08002153#ifndef CONFIG_SLOB
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002154static int memcg_alloc_cache_id(void)
Glauber Costa55007d82012-12-18 14:22:38 -08002155{
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002156 int id, size;
2157 int err;
Glauber Costa55007d82012-12-18 14:22:38 -08002158
Vladimir Davydovdbcf73e2015-02-12 14:58:57 -08002159 id = ida_simple_get(&memcg_cache_ida,
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002160 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
2161 if (id < 0)
2162 return id;
2163
Vladimir Davydovdbcf73e2015-02-12 14:58:57 -08002164 if (id < memcg_nr_cache_ids)
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002165 return id;
2166
2167 /*
2168 * There's no space for the new id in memcg_caches arrays,
2169 * so we have to grow them.
2170 */
Vladimir Davydov05257a12015-02-12 14:59:01 -08002171 down_write(&memcg_cache_ids_sem);
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002172
2173 size = 2 * (id + 1);
Glauber Costa55007d82012-12-18 14:22:38 -08002174 if (size < MEMCG_CACHES_MIN_SIZE)
2175 size = MEMCG_CACHES_MIN_SIZE;
2176 else if (size > MEMCG_CACHES_MAX_SIZE)
2177 size = MEMCG_CACHES_MAX_SIZE;
2178
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002179 err = memcg_update_all_caches(size);
Vladimir Davydov05257a12015-02-12 14:59:01 -08002180 if (!err)
Vladimir Davydov60d3fd32015-02-12 14:59:10 -08002181 err = memcg_update_all_list_lrus(size);
2182 if (!err)
Vladimir Davydov05257a12015-02-12 14:59:01 -08002183 memcg_nr_cache_ids = size;
2184
2185 up_write(&memcg_cache_ids_sem);
2186
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002187 if (err) {
Vladimir Davydovdbcf73e2015-02-12 14:58:57 -08002188 ida_simple_remove(&memcg_cache_ida, id);
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002189 return err;
2190 }
2191 return id;
2192}
2193
2194static void memcg_free_cache_id(int id)
2195{
Vladimir Davydovdbcf73e2015-02-12 14:58:57 -08002196 ida_simple_remove(&memcg_cache_ida, id);
Glauber Costa55007d82012-12-18 14:22:38 -08002197}
2198
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002199struct memcg_kmem_cache_create_work {
Vladimir Davydov5722d092014-04-07 15:39:24 -07002200 struct mem_cgroup *memcg;
2201 struct kmem_cache *cachep;
2202 struct work_struct work;
2203};
2204
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002205static void memcg_kmem_cache_create_func(struct work_struct *w)
Glauber Costad7f25f82012-12-18 14:22:40 -08002206{
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002207 struct memcg_kmem_cache_create_work *cw =
2208 container_of(w, struct memcg_kmem_cache_create_work, work);
Vladimir Davydov5722d092014-04-07 15:39:24 -07002209 struct mem_cgroup *memcg = cw->memcg;
2210 struct kmem_cache *cachep = cw->cachep;
Glauber Costad7f25f82012-12-18 14:22:40 -08002211
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002212 memcg_create_kmem_cache(memcg, cachep);
Vladimir Davydovbd673142014-06-04 16:07:40 -07002213
Vladimir Davydov5722d092014-04-07 15:39:24 -07002214 css_put(&memcg->css);
Glauber Costad7f25f82012-12-18 14:22:40 -08002215 kfree(cw);
2216}
2217
2218/*
2219 * Enqueue the creation of a per-memcg kmem_cache.
Glauber Costad7f25f82012-12-18 14:22:40 -08002220 */
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002221static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2222 struct kmem_cache *cachep)
Glauber Costad7f25f82012-12-18 14:22:40 -08002223{
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002224 struct memcg_kmem_cache_create_work *cw;
Glauber Costad7f25f82012-12-18 14:22:40 -08002225
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07002226 cw = kmalloc(sizeof(*cw), GFP_NOWAIT);
Vladimir Davydov8135be52014-12-12 16:56:38 -08002227 if (!cw)
Glauber Costad7f25f82012-12-18 14:22:40 -08002228 return;
Vladimir Davydov8135be52014-12-12 16:56:38 -08002229
2230 css_get(&memcg->css);
Glauber Costad7f25f82012-12-18 14:22:40 -08002231
2232 cw->memcg = memcg;
2233 cw->cachep = cachep;
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002234 INIT_WORK(&cw->work, memcg_kmem_cache_create_func);
Glauber Costad7f25f82012-12-18 14:22:40 -08002235
Glauber Costad7f25f82012-12-18 14:22:40 -08002236 schedule_work(&cw->work);
2237}
2238
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002239static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2240 struct kmem_cache *cachep)
Glauber Costa0e9d92f2012-12-18 14:22:42 -08002241{
2242 /*
2243 * We need to stop accounting when we kmalloc, because if the
2244 * corresponding kmalloc cache is not yet created, the first allocation
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002245 * in __memcg_schedule_kmem_cache_create will recurse.
Glauber Costa0e9d92f2012-12-18 14:22:42 -08002246 *
2247 * However, it is better to enclose the whole function. Depending on
2248 * the debugging options enabled, INIT_WORK(), for instance, can
 2249 * trigger an allocation. This, too, will make us recurse. Because at
2250 * this point we can't allow ourselves back into memcg_kmem_get_cache,
2251 * the safest choice is to do it like this, wrapping the whole function.
2252 */
Vladimir Davydov6f185c22014-12-12 16:55:15 -08002253 current->memcg_kmem_skip_account = 1;
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002254 __memcg_schedule_kmem_cache_create(memcg, cachep);
Vladimir Davydov6f185c22014-12-12 16:55:15 -08002255 current->memcg_kmem_skip_account = 0;
Glauber Costa0e9d92f2012-12-18 14:22:42 -08002256}
Vladimir Davydovc67a8a62014-06-04 16:07:39 -07002257
Glauber Costad7f25f82012-12-18 14:22:40 -08002258/*
2259 * Return the kmem_cache we're supposed to use for a slab allocation.
2260 * We try to use the current memcg's version of the cache.
2261 *
 2262 * If the cache does not exist yet, i.e. we are the first user of it,
2263 * we either create it immediately, if possible, or create it asynchronously
2264 * in a workqueue.
2265 * In the latter case, we will let the current allocation go through with
2266 * the original cache.
2267 *
2268 * Can't be called in interrupt context or from kernel threads.
2269 * This function needs to be called with rcu_read_lock() held.
2270 */
Vladimir Davydov230e9fc2016-01-14 15:18:15 -08002271struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
Glauber Costad7f25f82012-12-18 14:22:40 -08002272{
2273 struct mem_cgroup *memcg;
Vladimir Davydov959c8962014-01-23 15:52:59 -08002274 struct kmem_cache *memcg_cachep;
Vladimir Davydov2a4db7e2015-02-12 14:59:32 -08002275 int kmemcg_id;
Glauber Costad7f25f82012-12-18 14:22:40 -08002276
Vladimir Davydovf7ce3192015-02-12 14:59:20 -08002277 VM_BUG_ON(!is_root_cache(cachep));
Glauber Costad7f25f82012-12-18 14:22:40 -08002278
Vladimir Davydov230e9fc2016-01-14 15:18:15 -08002279 if (cachep->flags & SLAB_ACCOUNT)
2280 gfp |= __GFP_ACCOUNT;
2281
2282 if (!(gfp & __GFP_ACCOUNT))
2283 return cachep;
2284
Vladimir Davydov9d100c52014-12-12 16:54:53 -08002285 if (current->memcg_kmem_skip_account)
Glauber Costa0e9d92f2012-12-18 14:22:42 -08002286 return cachep;
2287
Vladimir Davydov8135be52014-12-12 16:56:38 -08002288 memcg = get_mem_cgroup_from_mm(current->mm);
Jason Low4db0c3c2015-04-15 16:14:08 -07002289 kmemcg_id = READ_ONCE(memcg->kmemcg_id);
Vladimir Davydov2a4db7e2015-02-12 14:59:32 -08002290 if (kmemcg_id < 0)
Li Zefanca0dde92013-04-29 15:08:57 -07002291 goto out;
Glauber Costad7f25f82012-12-18 14:22:40 -08002292
Vladimir Davydov2a4db7e2015-02-12 14:59:32 -08002293 memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id);
Vladimir Davydov8135be52014-12-12 16:56:38 -08002294 if (likely(memcg_cachep))
2295 return memcg_cachep;
Li Zefanca0dde92013-04-29 15:08:57 -07002296
2297 /*
2298 * If we are in a safe context (can wait, and not in interrupt
2299	 * context), we could be predictable and return right away.
2300 * This would guarantee that the allocation being performed
2301 * already belongs in the new cache.
2302 *
2303	 * However, there are some clashes that can arise from locking.
2304 * For instance, because we acquire the slab_mutex while doing
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07002305 * memcg_create_kmem_cache, this means no further allocation
2306 * could happen with the slab_mutex held. So it's better to
2307 * defer everything.
Li Zefanca0dde92013-04-29 15:08:57 -07002308 */
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002309 memcg_schedule_kmem_cache_create(memcg, cachep);
Li Zefanca0dde92013-04-29 15:08:57 -07002310out:
Vladimir Davydov8135be52014-12-12 16:56:38 -08002311 css_put(&memcg->css);
Li Zefanca0dde92013-04-29 15:08:57 -07002312 return cachep;
Glauber Costad7f25f82012-12-18 14:22:40 -08002313}
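/*
 * For illustration, and not taken from the actual slab callers: a slab
 * allocation path is expected to bracket its use of the returned cache
 * roughly as below, so that the css reference taken by this function is
 * dropped once the object has been allocated ("allocate_from" is a
 * hypothetical helper):
 *
 *	cachep = memcg_kmem_get_cache(cachep, gfpflags);
 *	obj = allocate_from(cachep, gfpflags);
 *	memcg_kmem_put_cache(cachep);
 *
 * Whether the root cache or a per-memcg child cache comes back, the
 * allocation simply proceeds on whatever cache was returned.
 */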
Glauber Costad7f25f82012-12-18 14:22:40 -08002314
Vladimir Davydov8135be52014-12-12 16:56:38 -08002315void __memcg_kmem_put_cache(struct kmem_cache *cachep)
2316{
2317 if (!is_root_cache(cachep))
Vladimir Davydovf7ce3192015-02-12 14:59:20 -08002318 css_put(&cachep->memcg_params.memcg->css);
Vladimir Davydov8135be52014-12-12 16:56:38 -08002319}
2320
Vladimir Davydovf3ccb2c42015-11-05 18:49:01 -08002321int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
2322 struct mem_cgroup *memcg)
2323{
2324 unsigned int nr_pages = 1 << order;
2325 struct page_counter *counter;
Johannes Weiner6071ca52015-11-05 18:50:26 -08002326 int ret;
Vladimir Davydovf3ccb2c42015-11-05 18:49:01 -08002327
Johannes Weiner567e9ab2016-01-20 15:02:24 -08002328 if (!memcg_kmem_online(memcg))
Vladimir Davydovf3ccb2c42015-11-05 18:49:01 -08002329 return 0;
2330
Vladimir Davydovf3ccb2c42015-11-05 18:49:01 -08002331 ret = try_charge(memcg, gfp, nr_pages);
Johannes Weiner52c29b02016-01-20 15:02:35 -08002332 if (ret)
Vladimir Davydovf3ccb2c42015-11-05 18:49:01 -08002333 return ret;
Johannes Weiner52c29b02016-01-20 15:02:35 -08002334
2335 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
2336 !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
2337 cancel_charge(memcg, nr_pages);
2338 return -ENOMEM;
Vladimir Davydovf3ccb2c42015-11-05 18:49:01 -08002339 }
2340
2341 page->mem_cgroup = memcg;
2342
2343 return 0;
2344}
2345
Vladimir Davydovd05e83a2015-11-05 18:48:59 -08002346int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002347{
2348 struct mem_cgroup *memcg;
2349 int ret;
2350
Johannes Weinerdf381972014-04-07 15:37:43 -07002351 memcg = get_mem_cgroup_from_mm(current->mm);
Vladimir Davydovf3ccb2c42015-11-05 18:49:01 -08002352 ret = __memcg_kmem_charge_memcg(page, gfp, order, memcg);
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002353 css_put(&memcg->css);
Vladimir Davydovd05e83a2015-11-05 18:48:59 -08002354 return ret;
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002355}
2356
Vladimir Davydovd05e83a2015-11-05 18:48:59 -08002357void __memcg_kmem_uncharge(struct page *page, int order)
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002358{
Johannes Weiner1306a852014-12-10 15:44:52 -08002359 struct mem_cgroup *memcg = page->mem_cgroup;
Vladimir Davydovf3ccb2c42015-11-05 18:49:01 -08002360 unsigned int nr_pages = 1 << order;
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002361
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002362 if (!memcg)
2363 return;
2364
Sasha Levin309381fea2014-01-23 15:52:54 -08002365 VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
Johannes Weiner29833312014-12-10 15:44:02 -08002366
Johannes Weiner52c29b02016-01-20 15:02:35 -08002367 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2368 page_counter_uncharge(&memcg->kmem, nr_pages);
2369
Vladimir Davydovf3ccb2c42015-11-05 18:49:01 -08002370 page_counter_uncharge(&memcg->memory, nr_pages);
Johannes Weiner7941d212016-01-14 15:21:23 -08002371 if (do_memsw_account())
Vladimir Davydovf3ccb2c42015-11-05 18:49:01 -08002372 page_counter_uncharge(&memcg->memsw, nr_pages);
2373
Johannes Weiner1306a852014-12-10 15:44:52 -08002374 page->mem_cgroup = NULL;
Vladimir Davydovf3ccb2c42015-11-05 18:49:01 -08002375 css_put_many(&memcg->css, nr_pages);
Vladimir Davydov60d3fd32015-02-12 14:59:10 -08002376}
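/*
 * For illustration only, using the two functions defined above and
 * omitting the enablement checks their callers normally perform: charge
 * and uncharge bracket the lifetime of an accounted page allocation.
 *
 *	page = alloc_pages(gfp, order);
 *	if (page && __memcg_kmem_charge(page, gfp, order)) {
 *		__free_pages(page, order);	// charge failed, back out
 *		page = NULL;
 *	}
 *	...
 *	__memcg_kmem_uncharge(page, order);	// before freeing the page
 *
 * The page->mem_cgroup pointer set on a successful charge is what lets
 * the uncharge side find the right counters without any lookup.
 */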
Johannes Weiner127424c2016-01-20 15:02:32 -08002377#endif /* !CONFIG_SLOB */
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002378
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002379#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2380
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002381/*
2382 * Because tail pages are not marked as "used", set them up here. We're under
Kirill A. Shutemov3ac808f2016-01-15 16:53:07 -08002383 * zone->lru_lock and migration entries are set up in all page mappings.
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002384 */
KAMEZAWA Hiroyukie94c8a92012-01-12 17:18:20 -08002385void mem_cgroup_split_huge_fixup(struct page *head)
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002386{
KAMEZAWA Hiroyukie94c8a92012-01-12 17:18:20 -08002387 int i;
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002388
KAMEZAWA Hiroyuki3d37c4a2011-01-25 15:07:28 -08002389 if (mem_cgroup_disabled())
2390 return;
David Rientjesb070e652013-05-07 16:18:09 -07002391
Johannes Weiner29833312014-12-10 15:44:02 -08002392 for (i = 1; i < HPAGE_PMD_NR; i++)
Johannes Weiner1306a852014-12-10 15:44:52 -08002393 head[i].mem_cgroup = head->mem_cgroup;
Michal Hockob9982f82014-12-10 15:43:51 -08002394
Johannes Weiner1306a852014-12-10 15:44:52 -08002395 __this_cpu_sub(head->mem_cgroup->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
David Rientjesb070e652013-05-07 16:18:09 -07002396 HPAGE_PMD_NR);
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002397}
Hugh Dickins12d27102012-01-12 17:19:52 -08002398#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002399
Andrew Mortonc255a452012-07-31 16:43:02 -07002400#ifdef CONFIG_MEMCG_SWAP
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002401static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
2402 bool charge)
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002403{
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002404 int val = (charge) ? 1 : -1;
2405 this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002406}
Daisuke Nishimura02491442010-03-10 15:22:17 -08002407
2408/**
2409 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
2410 * @entry: swap entry to be moved
2411 * @from: mem_cgroup which the entry is moved from
2412 * @to: mem_cgroup which the entry is moved to
2413 *
2414 * It succeeds only when the swap_cgroup's record for this entry is the same
2415 * as the mem_cgroup's id of @from.
2416 *
2417 * Returns 0 on success, -EINVAL on failure.
2418 *
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002419 * The caller must have charged to @to, IOW, called page_counter_charge() on
Daisuke Nishimura02491442010-03-10 15:22:17 -08002420 * both the memory and memsw counters, and called css_get().
2421 */
2422static int mem_cgroup_move_swap_account(swp_entry_t entry,
Hugh Dickinse91cbb42012-05-29 15:06:51 -07002423 struct mem_cgroup *from, struct mem_cgroup *to)
Daisuke Nishimura02491442010-03-10 15:22:17 -08002424{
2425 unsigned short old_id, new_id;
2426
Li Zefan34c00c32013-09-23 16:56:01 +08002427 old_id = mem_cgroup_id(from);
2428 new_id = mem_cgroup_id(to);
Daisuke Nishimura02491442010-03-10 15:22:17 -08002429
2430 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
Daisuke Nishimura02491442010-03-10 15:22:17 -08002431 mem_cgroup_swap_statistics(from, false);
Daisuke Nishimura02491442010-03-10 15:22:17 -08002432 mem_cgroup_swap_statistics(to, true);
Daisuke Nishimura02491442010-03-10 15:22:17 -08002433 return 0;
2434 }
2435 return -EINVAL;
2436}
2437#else
2438static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
Hugh Dickinse91cbb42012-05-29 15:06:51 -07002439 struct mem_cgroup *from, struct mem_cgroup *to)
Daisuke Nishimura02491442010-03-10 15:22:17 -08002440{
2441 return -EINVAL;
2442}
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002443#endif
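/*
 * For illustration (a standalone model, not the real swap_cgroup code):
 * the swap_cgroup map effectively stores one owner id per swap entry,
 * and the move above is a compare-and-exchange on that id.
 *
 *	unsigned short owner[NR_ENTRIES];	// hypothetical per-entry records
 *
 *	int move_record(unsigned long ent, unsigned short from, unsigned short to)
 *	{
 *		if (owner[ent] != from)		// record no longer names @from
 *			return -EINVAL;
 *		owner[ent] = to;		// done atomically in the real code
 *		return 0;
 *	}
 */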
2444
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002445static DEFINE_MUTEX(memcg_limit_mutex);
Daisuke Nishimuraf212ad72011-03-23 16:42:25 -07002446
KOSAKI Motohirod38d2a72009-01-06 14:39:44 -08002447static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002448 unsigned long limit)
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07002449{
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002450 unsigned long curusage;
2451 unsigned long oldusage;
2452 bool enlarge = false;
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07002453 int retry_count;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002454 int ret;
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07002455
2456 /*
2457	 * To keep hierarchical_reclaim simple, how long we should retry
2458	 * depends on the caller. We set our retry count to be a function
2459	 * of the number of children we should visit in this loop.
2460 */
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002461 retry_count = MEM_CGROUP_RECLAIM_RETRIES *
2462 mem_cgroup_count_children(memcg);
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07002463
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002464 oldusage = page_counter_read(&memcg->memory);
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07002465
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002466 do {
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07002467 if (signal_pending(current)) {
2468 ret = -EINTR;
2469 break;
2470 }
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002471
2472 mutex_lock(&memcg_limit_mutex);
2473 if (limit > memcg->memsw.limit) {
2474 mutex_unlock(&memcg_limit_mutex);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002475 ret = -EINVAL;
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07002476 break;
2477 }
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002478 if (limit > memcg->memory.limit)
2479 enlarge = true;
2480 ret = page_counter_limit(&memcg->memory, limit);
2481 mutex_unlock(&memcg_limit_mutex);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002482
2483 if (!ret)
2484 break;
2485
Johannes Weinerb70a2a22014-10-09 15:28:56 -07002486 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true);
2487
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002488 curusage = page_counter_read(&memcg->memory);
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07002489		/* Usage is reduced? */
Andrew Mortonf894ffa2013-09-12 15:13:35 -07002490 if (curusage >= oldusage)
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07002491 retry_count--;
2492 else
2493 oldusage = curusage;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002494 } while (retry_count);
2495
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07002496 if (!ret && enlarge)
2497 memcg_oom_recover(memcg);
KOSAKI Motohiro14797e22009-01-07 18:08:18 -08002498
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002499 return ret;
2500}
2501
Li Zefan338c8432009-06-17 16:27:15 -07002502static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002503 unsigned long limit)
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002504{
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002505 unsigned long curusage;
2506 unsigned long oldusage;
2507 bool enlarge = false;
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07002508 int retry_count;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002509 int ret;
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002510
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07002511	/* see mem_cgroup_resize_limit */
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002512 retry_count = MEM_CGROUP_RECLAIM_RETRIES *
2513 mem_cgroup_count_children(memcg);
2514
2515 oldusage = page_counter_read(&memcg->memsw);
2516
2517 do {
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002518 if (signal_pending(current)) {
2519 ret = -EINTR;
2520 break;
2521 }
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002522
2523 mutex_lock(&memcg_limit_mutex);
2524 if (limit < memcg->memory.limit) {
2525 mutex_unlock(&memcg_limit_mutex);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002526 ret = -EINVAL;
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002527 break;
2528 }
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002529 if (limit > memcg->memsw.limit)
2530 enlarge = true;
2531 ret = page_counter_limit(&memcg->memsw, limit);
2532 mutex_unlock(&memcg_limit_mutex);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002533
2534 if (!ret)
2535 break;
2536
Johannes Weinerb70a2a22014-10-09 15:28:56 -07002537 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false);
2538
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002539 curusage = page_counter_read(&memcg->memsw);
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07002540		/* Usage is reduced? */
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002541 if (curusage >= oldusage)
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07002542 retry_count--;
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07002543 else
2544 oldusage = curusage;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002545 } while (retry_count);
2546
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07002547 if (!ret && enlarge)
2548 memcg_oom_recover(memcg);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002549
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07002550 return ret;
2551}
2552
Andrew Morton0608f432013-09-24 15:27:41 -07002553unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
2554 gfp_t gfp_mask,
2555 unsigned long *total_scanned)
2556{
2557 unsigned long nr_reclaimed = 0;
2558 struct mem_cgroup_per_zone *mz, *next_mz = NULL;
2559 unsigned long reclaimed;
2560 int loop = 0;
2561 struct mem_cgroup_tree_per_zone *mctz;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002562 unsigned long excess;
Andrew Morton0608f432013-09-24 15:27:41 -07002563 unsigned long nr_scanned;
2564
2565 if (order > 0)
2566 return 0;
2567
2568 mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
2569 /*
2570	 * This loop can run a while, especially if mem_cgroups continuously
2571 * keep exceeding their soft limit and putting the system under
2572 * pressure
2573 */
2574 do {
2575 if (next_mz)
2576 mz = next_mz;
2577 else
2578 mz = mem_cgroup_largest_soft_limit_node(mctz);
2579 if (!mz)
2580 break;
2581
2582 nr_scanned = 0;
2583 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone,
2584 gfp_mask, &nr_scanned);
2585 nr_reclaimed += reclaimed;
2586 *total_scanned += nr_scanned;
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002587 spin_lock_irq(&mctz->lock);
Vladimir Davydovbc2f2e72014-12-10 15:43:40 -08002588 __mem_cgroup_remove_exceeded(mz, mctz);
Andrew Morton0608f432013-09-24 15:27:41 -07002589
2590 /*
2591 * If we failed to reclaim anything from this memory cgroup
2592 * it is time to move on to the next cgroup
2593 */
2594 next_mz = NULL;
Vladimir Davydovbc2f2e72014-12-10 15:43:40 -08002595 if (!reclaimed)
2596 next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
2597
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002598 excess = soft_limit_excess(mz->memcg);
Andrew Morton0608f432013-09-24 15:27:41 -07002599 /*
2600 * One school of thought says that we should not add
2601 * back the node to the tree if reclaim returns 0.
2602		 * But our reclaim could return 0 simply because, due
2603		 * to priority, we are exposing a smaller subset of
2604 * memory to reclaim from. Consider this as a longer
2605 * term TODO.
2606 */
2607 /* If excess == 0, no tree ops */
Johannes Weinercf2c8122014-06-06 14:38:21 -07002608 __mem_cgroup_insert_exceeded(mz, mctz, excess);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002609 spin_unlock_irq(&mctz->lock);
Andrew Morton0608f432013-09-24 15:27:41 -07002610 css_put(&mz->memcg->css);
2611 loop++;
2612 /*
2613 * Could not reclaim anything and there are no more
2614 * mem cgroups to try or we seem to be looping without
2615 * reclaiming anything.
2616 */
2617 if (!nr_reclaimed &&
2618 (next_mz == NULL ||
2619 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
2620 break;
2621 } while (!nr_reclaimed);
2622 if (next_mz)
2623 css_put(&next_mz->memcg->css);
2624 return nr_reclaimed;
2625}
2626
Tejun Heoea280e72014-05-16 13:22:48 -04002627/*
2628 * Test whether @memcg has children, dead or alive. Note that this
2629 * function doesn't care whether @memcg has use_hierarchy enabled and
2630 * returns %true if there are child csses according to the cgroup
2631 * hierarchy. Testing use_hierarchy is the caller's responsibility.
2632 */
Glauber Costab5f99b52013-02-22 16:34:53 -08002633static inline bool memcg_has_children(struct mem_cgroup *memcg)
2634{
Tejun Heoea280e72014-05-16 13:22:48 -04002635 bool ret;
2636
Tejun Heoea280e72014-05-16 13:22:48 -04002637 rcu_read_lock();
2638 ret = css_next_child(NULL, &memcg->css);
2639 rcu_read_unlock();
2640 return ret;
Glauber Costab5f99b52013-02-22 16:34:53 -08002641}
2642
2643/*
Michal Hockoc26251f2012-10-26 13:37:28 +02002644 * Reclaims as many pages from the given memcg as possible and moves
2645 * the rest to the parent.
2646 *
2647 * Caller is responsible for holding css reference for memcg.
2648 */
2649static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
2650{
2651 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
Michal Hockoc26251f2012-10-26 13:37:28 +02002652
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08002653	/* we call try-to-free pages to make this cgroup empty */
2654 lru_add_drain_all();
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002655 /* try to free all pages in this cgroup */
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002656 while (nr_retries && page_counter_read(&memcg->memory)) {
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002657 int progress;
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08002658
Michal Hockoc26251f2012-10-26 13:37:28 +02002659 if (signal_pending(current))
2660 return -EINTR;
2661
Johannes Weinerb70a2a22014-10-09 15:28:56 -07002662 progress = try_to_free_mem_cgroup_pages(memcg, 1,
2663 GFP_KERNEL, true);
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08002664 if (!progress) {
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002665 nr_retries--;
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08002666 /* maybe some writeback is necessary */
Jens Axboe8aa7e842009-07-09 14:52:32 +02002667 congestion_wait(BLK_RW_ASYNC, HZ/10);
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08002668 }
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002669
2670 }
Michal Hockoab5196c2012-10-26 13:37:32 +02002671
2672 return 0;
KAMEZAWA Hiroyukicc847582008-02-07 00:14:16 -08002673}
2674
Tejun Heo6770c642014-05-13 12:16:21 -04002675static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
2676 char *buf, size_t nbytes,
2677 loff_t off)
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08002678{
Tejun Heo6770c642014-05-13 12:16:21 -04002679 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
Michal Hockoc26251f2012-10-26 13:37:28 +02002680
Michal Hockod8423012012-10-26 13:37:29 +02002681 if (mem_cgroup_is_root(memcg))
2682 return -EINVAL;
Tejun Heo6770c642014-05-13 12:16:21 -04002683 return mem_cgroup_force_empty(memcg) ?: nbytes;
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08002684}
2685
Tejun Heo182446d2013-08-08 20:11:24 -04002686static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
2687 struct cftype *cft)
Balbir Singh18f59ea2009-01-07 18:08:07 -08002688{
Tejun Heo182446d2013-08-08 20:11:24 -04002689 return mem_cgroup_from_css(css)->use_hierarchy;
Balbir Singh18f59ea2009-01-07 18:08:07 -08002690}
2691
Tejun Heo182446d2013-08-08 20:11:24 -04002692static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
2693 struct cftype *cft, u64 val)
Balbir Singh18f59ea2009-01-07 18:08:07 -08002694{
2695 int retval = 0;
Tejun Heo182446d2013-08-08 20:11:24 -04002696 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Tejun Heo5c9d5352014-05-16 13:22:48 -04002697 struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);
Balbir Singh18f59ea2009-01-07 18:08:07 -08002698
Glauber Costa567fb432012-07-31 16:43:07 -07002699 if (memcg->use_hierarchy == val)
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08002700 return 0;
Glauber Costa567fb432012-07-31 16:43:07 -07002701
Balbir Singh18f59ea2009-01-07 18:08:07 -08002702 /*
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02002703 * If parent's use_hierarchy is set, we can't make any modifications
Balbir Singh18f59ea2009-01-07 18:08:07 -08002704 * in the child subtrees. If it is unset, then the change can
2705 * occur, provided the current cgroup has no children.
2706 *
2707	 * For the root cgroup, parent_memcg is NULL; we allow the value to be
2708 * set if there are no children.
2709 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002710 if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
Balbir Singh18f59ea2009-01-07 18:08:07 -08002711 (val == 1 || val == 0)) {
Tejun Heoea280e72014-05-16 13:22:48 -04002712 if (!memcg_has_children(memcg))
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002713 memcg->use_hierarchy = val;
Balbir Singh18f59ea2009-01-07 18:08:07 -08002714 else
2715 retval = -EBUSY;
2716 } else
2717 retval = -EINVAL;
Glauber Costa567fb432012-07-31 16:43:07 -07002718
Balbir Singh18f59ea2009-01-07 18:08:07 -08002719 return retval;
2720}
2721
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002722static unsigned long tree_stat(struct mem_cgroup *memcg,
2723 enum mem_cgroup_stat_index idx)
Johannes Weinerce00a962014-09-05 08:43:57 -04002724{
2725 struct mem_cgroup *iter;
Greg Thelen484ebb32015-10-01 15:37:05 -07002726 unsigned long val = 0;
Johannes Weinerce00a962014-09-05 08:43:57 -04002727
Johannes Weinerce00a962014-09-05 08:43:57 -04002728 for_each_mem_cgroup_tree(iter, memcg)
2729 val += mem_cgroup_read_stat(iter, idx);
2730
Johannes Weinerce00a962014-09-05 08:43:57 -04002731 return val;
2732}
2733
Johannes Weiner587d9f72016-01-20 15:03:19 -08002734static unsigned long tree_events(struct mem_cgroup *memcg,
2735 enum mem_cgroup_events_index idx)
2736{
2737 struct mem_cgroup *iter;
2738 unsigned long val = 0;
2739
2740 for_each_mem_cgroup_tree(iter, memcg)
2741 val += mem_cgroup_read_events(iter, idx);
2742
2743 return val;
2744}
2745
Andrew Morton6f646152015-11-06 16:28:58 -08002746static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
Johannes Weinerce00a962014-09-05 08:43:57 -04002747{
Michal Hockoc12176d2015-11-05 18:50:29 -08002748 unsigned long val;
Johannes Weinerce00a962014-09-05 08:43:57 -04002749
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002750 if (mem_cgroup_is_root(memcg)) {
2751 val = tree_stat(memcg, MEM_CGROUP_STAT_CACHE);
2752 val += tree_stat(memcg, MEM_CGROUP_STAT_RSS);
2753 if (swap)
2754 val += tree_stat(memcg, MEM_CGROUP_STAT_SWAP);
2755 } else {
Johannes Weinerce00a962014-09-05 08:43:57 -04002756 if (!swap)
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002757 val = page_counter_read(&memcg->memory);
Johannes Weinerce00a962014-09-05 08:43:57 -04002758 else
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002759 val = page_counter_read(&memcg->memsw);
Johannes Weinerce00a962014-09-05 08:43:57 -04002760 }
Michal Hockoc12176d2015-11-05 18:50:29 -08002761 return val;
Johannes Weinerce00a962014-09-05 08:43:57 -04002762}
2763
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002764enum {
2765 RES_USAGE,
2766 RES_LIMIT,
2767 RES_MAX_USAGE,
2768 RES_FAILCNT,
2769 RES_SOFT_LIMIT,
2770};
Johannes Weinerce00a962014-09-05 08:43:57 -04002771
Tejun Heo791badb2013-12-05 12:28:02 -05002772static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
Johannes Weiner05b84302014-08-06 16:05:59 -07002773 struct cftype *cft)
Balbir Singh8cdea7c2008-02-07 00:13:50 -08002774{
Tejun Heo182446d2013-08-08 20:11:24 -04002775 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002776 struct page_counter *counter;
Tejun Heoaf36f902012-04-01 12:09:55 -07002777
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002778 switch (MEMFILE_TYPE(cft->private)) {
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002779 case _MEM:
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002780 counter = &memcg->memory;
Glauber Costa510fc4e2012-12-18 14:21:47 -08002781 break;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002782 case _MEMSWAP:
2783 counter = &memcg->memsw;
2784 break;
2785 case _KMEM:
2786 counter = &memcg->kmem;
2787 break;
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08002788 case _TCP:
Johannes Weiner0db15292016-01-20 15:02:50 -08002789 counter = &memcg->tcpmem;
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08002790 break;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002791 default:
2792 BUG();
2793 }
2794
2795 switch (MEMFILE_ATTR(cft->private)) {
2796 case RES_USAGE:
2797 if (counter == &memcg->memory)
Michal Hockoc12176d2015-11-05 18:50:29 -08002798 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002799 if (counter == &memcg->memsw)
Michal Hockoc12176d2015-11-05 18:50:29 -08002800 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002801 return (u64)page_counter_read(counter) * PAGE_SIZE;
2802 case RES_LIMIT:
2803 return (u64)counter->limit * PAGE_SIZE;
2804 case RES_MAX_USAGE:
2805 return (u64)counter->watermark * PAGE_SIZE;
2806 case RES_FAILCNT:
2807 return counter->failcnt;
2808 case RES_SOFT_LIMIT:
2809 return (u64)memcg->soft_limit * PAGE_SIZE;
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002810 default:
2811 BUG();
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002812 }
Balbir Singh8cdea7c2008-02-07 00:13:50 -08002813}
Glauber Costa510fc4e2012-12-18 14:21:47 -08002814
Johannes Weiner127424c2016-01-20 15:02:32 -08002815#ifndef CONFIG_SLOB
Johannes Weiner567e9ab2016-01-20 15:02:24 -08002816static int memcg_online_kmem(struct mem_cgroup *memcg)
Vladimir Davydovd6441632014-01-23 15:53:09 -08002817{
Vladimir Davydovd6441632014-01-23 15:53:09 -08002818 int memcg_id;
2819
Vladimir Davydov2a4db7e2015-02-12 14:59:32 -08002820 BUG_ON(memcg->kmemcg_id >= 0);
Johannes Weiner567e9ab2016-01-20 15:02:24 -08002821 BUG_ON(memcg->kmem_state);
Vladimir Davydovd6441632014-01-23 15:53:09 -08002822
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002823 memcg_id = memcg_alloc_cache_id();
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08002824 if (memcg_id < 0)
2825 return memcg_id;
Vladimir Davydovd6441632014-01-23 15:53:09 -08002826
Johannes Weineref129472016-01-14 15:21:34 -08002827 static_branch_inc(&memcg_kmem_enabled_key);
Vladimir Davydovd6441632014-01-23 15:53:09 -08002828 /*
Johannes Weiner567e9ab2016-01-20 15:02:24 -08002829 * A memory cgroup is considered kmem-online as soon as it gets
Vladimir Davydov900a38f2014-12-12 16:55:10 -08002830 * kmemcg_id. Setting the id after enabling static branching will
Vladimir Davydovd6441632014-01-23 15:53:09 -08002831 * guarantee no one starts accounting before all call sites are
2832 * patched.
2833 */
Vladimir Davydov900a38f2014-12-12 16:55:10 -08002834 memcg->kmemcg_id = memcg_id;
Johannes Weiner567e9ab2016-01-20 15:02:24 -08002835 memcg->kmem_state = KMEM_ONLINE;
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08002836
2837 return 0;
Vladimir Davydovd6441632014-01-23 15:53:09 -08002838}
2839
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08002840static int memcg_propagate_kmem(struct mem_cgroup *parent,
2841 struct mem_cgroup *memcg)
Glauber Costa510fc4e2012-12-18 14:21:47 -08002842{
Glauber Costa55007d82012-12-18 14:22:38 -08002843 int ret = 0;
Glauber Costa55007d82012-12-18 14:22:38 -08002844
Vladimir Davydov8c0145b2014-12-10 15:43:48 -08002845 mutex_lock(&memcg_limit_mutex);
Glauber Costaa8964b92012-12-18 14:22:09 -08002846 /*
Johannes Weiner567e9ab2016-01-20 15:02:24 -08002847 * If the parent cgroup is not kmem-online now, it cannot be
2848 * onlined after this point, because it has at least one child
2849 * already.
Glauber Costaa8964b92012-12-18 14:22:09 -08002850 */
Vladimir Davydov04823c82016-01-20 15:02:38 -08002851 if (memcg_kmem_online(parent) ||
2852 (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nokmem))
Johannes Weiner567e9ab2016-01-20 15:02:24 -08002853 ret = memcg_online_kmem(memcg);
Vladimir Davydov8c0145b2014-12-10 15:43:48 -08002854 mutex_unlock(&memcg_limit_mutex);
Glauber Costa55007d82012-12-18 14:22:38 -08002855 return ret;
Glauber Costa510fc4e2012-12-18 14:21:47 -08002856}
Johannes Weiner8e0a8912016-01-20 15:02:26 -08002857
Johannes Weiner8e0a8912016-01-20 15:02:26 -08002858static void memcg_offline_kmem(struct mem_cgroup *memcg)
2859{
2860 struct cgroup_subsys_state *css;
2861 struct mem_cgroup *parent, *child;
2862 int kmemcg_id;
2863
2864 if (memcg->kmem_state != KMEM_ONLINE)
2865 return;
2866 /*
2867 * Clear the online state before clearing memcg_caches array
2868 * entries. The slab_mutex in memcg_deactivate_kmem_caches()
2869 * guarantees that no cache will be created for this cgroup
2870 * after we are done (see memcg_create_kmem_cache()).
2871 */
2872 memcg->kmem_state = KMEM_ALLOCATED;
2873
2874 memcg_deactivate_kmem_caches(memcg);
2875
2876 kmemcg_id = memcg->kmemcg_id;
2877 BUG_ON(kmemcg_id < 0);
2878
2879 parent = parent_mem_cgroup(memcg);
2880 if (!parent)
2881 parent = root_mem_cgroup;
2882
2883 /*
2884 * Change kmemcg_id of this cgroup and all its descendants to the
2885 * parent's id, and then move all entries from this cgroup's list_lrus
2886	 * to those of the parent. After we have finished, all list_lrus
2887 * corresponding to this cgroup are guaranteed to remain empty. The
2888 * ordering is imposed by list_lru_node->lock taken by
2889 * memcg_drain_all_list_lrus().
2890 */
2891 css_for_each_descendant_pre(css, &memcg->css) {
2892 child = mem_cgroup_from_css(css);
2893 BUG_ON(child->kmemcg_id != kmemcg_id);
2894 child->kmemcg_id = parent->kmemcg_id;
2895 if (!memcg->use_hierarchy)
2896 break;
2897 }
2898 memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);
2899
2900 memcg_free_cache_id(kmemcg_id);
2901}
2902
2903static void memcg_free_kmem(struct mem_cgroup *memcg)
2904{
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08002905 /* css_alloc() failed, offlining didn't happen */
2906 if (unlikely(memcg->kmem_state == KMEM_ONLINE))
2907 memcg_offline_kmem(memcg);
2908
Johannes Weiner8e0a8912016-01-20 15:02:26 -08002909 if (memcg->kmem_state == KMEM_ALLOCATED) {
2910 memcg_destroy_kmem_caches(memcg);
2911 static_branch_dec(&memcg_kmem_enabled_key);
2912 WARN_ON(page_counter_read(&memcg->kmem));
2913 }
Johannes Weiner8e0a8912016-01-20 15:02:26 -08002914}
Vladimir Davydovd6441632014-01-23 15:53:09 -08002915#else
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08002916static int memcg_propagate_kmem(struct mem_cgroup *parent, struct mem_cgroup *memcg)
2917{
2918 return 0;
2919}
2920static int memcg_online_kmem(struct mem_cgroup *memcg)
Johannes Weiner127424c2016-01-20 15:02:32 -08002921{
2922 return 0;
2923}
2924static void memcg_offline_kmem(struct mem_cgroup *memcg)
2925{
2926}
2927static void memcg_free_kmem(struct mem_cgroup *memcg)
2928{
2929}
2930#endif /* !CONFIG_SLOB */
2931
Johannes Weiner127424c2016-01-20 15:02:32 -08002932static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
2933 unsigned long limit)
2934{
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08002935 int ret = 0;
Johannes Weiner127424c2016-01-20 15:02:32 -08002936
2937 mutex_lock(&memcg_limit_mutex);
2938 /* Top-level cgroup doesn't propagate from root */
2939 if (!memcg_kmem_online(memcg)) {
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08002940 if (cgroup_is_populated(memcg->css.cgroup) ||
2941 (memcg->use_hierarchy && memcg_has_children(memcg)))
2942 ret = -EBUSY;
2943 if (ret)
2944 goto out;
Johannes Weiner127424c2016-01-20 15:02:32 -08002945 ret = memcg_online_kmem(memcg);
2946 if (ret)
2947 goto out;
2948 }
2949 ret = page_counter_limit(&memcg->kmem, limit);
2950out:
2951 mutex_unlock(&memcg_limit_mutex);
2952 return ret;
2953}
Glauber Costa510fc4e2012-12-18 14:21:47 -08002954
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08002955static int memcg_update_tcp_limit(struct mem_cgroup *memcg, unsigned long limit)
2956{
2957 int ret;
2958
2959 mutex_lock(&memcg_limit_mutex);
2960
Johannes Weiner0db15292016-01-20 15:02:50 -08002961 ret = page_counter_limit(&memcg->tcpmem, limit);
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08002962 if (ret)
2963 goto out;
2964
Johannes Weiner0db15292016-01-20 15:02:50 -08002965 if (!memcg->tcpmem_active) {
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08002966 /*
2967 * The active flag needs to be written after the static_key
2968 * update. This is what guarantees that the socket activation
2969 * function is the last one to run. See sock_update_memcg() for
2970 * details, and note that we don't mark any socket as belonging
2971 * to this memcg until that flag is up.
2972 *
2973 * We need to do this, because static_keys will span multiple
2974 * sites, but we can't control their order. If we mark a socket
2975 * as accounted, but the accounting functions are not patched in
2976 * yet, we'll lose accounting.
2977 *
2978 * We never race with the readers in sock_update_memcg(),
2979		 * because when this value changes, the code to process it is not
2980 * patched in yet.
2981 */
2982 static_branch_inc(&memcg_sockets_enabled_key);
Johannes Weiner0db15292016-01-20 15:02:50 -08002983 memcg->tcpmem_active = true;
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08002984 }
2985out:
2986 mutex_unlock(&memcg_limit_mutex);
2987 return ret;
2988}
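/*
 * The ordering the comment above relies on, sketched (illustrative, not
 * the actual socket code):
 *
 *	writer				reader, per socket
 *	------				------------------
 *	static_branch_inc(&key);	if (static_branch_unlikely(&key) &&
 *	memcg->tcpmem_active = true;	    memcg->tcpmem_active)
 *						account this socket;
 *
 * Because the flag is written only after the static key has been bumped,
 * a reader that sees the flag will not mark a socket as accounted while
 * the accounting call sites are still unpatched.
 */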
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08002989
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07002990/*
2991 * The user of this function is...
2992 * RES_LIMIT.
2993 */
Tejun Heo451af502014-05-13 12:16:21 -04002994static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
2995 char *buf, size_t nbytes, loff_t off)
Balbir Singh8cdea7c2008-02-07 00:13:50 -08002996{
Tejun Heo451af502014-05-13 12:16:21 -04002997 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002998 unsigned long nr_pages;
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07002999 int ret;
3000
Tejun Heo451af502014-05-13 12:16:21 -04003001 buf = strstrip(buf);
Johannes Weiner650c5e52015-02-11 15:26:03 -08003002 ret = page_counter_memparse(buf, "-1", &nr_pages);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003003 if (ret)
3004 return ret;
Tejun Heoaf36f902012-04-01 12:09:55 -07003005
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003006 switch (MEMFILE_ATTR(of_cft(of)->private)) {
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003007 case RES_LIMIT:
Balbir Singh4b3bde42009-09-23 15:56:32 -07003008 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3009 ret = -EINVAL;
3010 break;
3011 }
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003012 switch (MEMFILE_TYPE(of_cft(of)->private)) {
3013 case _MEM:
3014 ret = mem_cgroup_resize_limit(memcg, nr_pages);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003015 break;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003016 case _MEMSWAP:
3017 ret = mem_cgroup_resize_memsw_limit(memcg, nr_pages);
3018 break;
3019 case _KMEM:
3020 ret = memcg_update_kmem_limit(memcg, nr_pages);
3021 break;
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08003022 case _TCP:
3023 ret = memcg_update_tcp_limit(memcg, nr_pages);
3024 break;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003025 }
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003026 break;
Balbir Singh296c81d2009-09-23 15:56:36 -07003027 case RES_SOFT_LIMIT:
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003028 memcg->soft_limit = nr_pages;
3029 ret = 0;
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003030 break;
3031 }
Tejun Heo451af502014-05-13 12:16:21 -04003032 return ret ?: nbytes;
Balbir Singh8cdea7c2008-02-07 00:13:50 -08003033}
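/*
 * A minimal userspace sketch of what reaches this handler, assuming a
 * legacy (cgroup v1) memory controller mounted at /sys/fs/cgroup/memory
 * and an already created group "demo" (path and group name illustrative):
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/sys/fs/cgroup/memory/demo/memory.limit_in_bytes", "w");
 *
 *		if (!f)
 *			return 1;
 *		fputs("256M\n", f);	// parsed by page_counter_memparse()
 *		return fclose(f) ? 1 : 0;
 *	}
 */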
3034
Tejun Heo6770c642014-05-13 12:16:21 -04003035static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
3036 size_t nbytes, loff_t off)
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07003037{
Tejun Heo6770c642014-05-13 12:16:21 -04003038 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003039 struct page_counter *counter;
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07003040
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003041 switch (MEMFILE_TYPE(of_cft(of)->private)) {
3042 case _MEM:
3043 counter = &memcg->memory;
3044 break;
3045 case _MEMSWAP:
3046 counter = &memcg->memsw;
3047 break;
3048 case _KMEM:
3049 counter = &memcg->kmem;
3050 break;
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08003051 case _TCP:
Johannes Weiner0db15292016-01-20 15:02:50 -08003052 counter = &memcg->tcpmem;
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08003053 break;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003054 default:
3055 BUG();
3056 }
Tejun Heoaf36f902012-04-01 12:09:55 -07003057
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003058 switch (MEMFILE_ATTR(of_cft(of)->private)) {
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07003059 case RES_MAX_USAGE:
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003060 page_counter_reset_watermark(counter);
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07003061 break;
3062 case RES_FAILCNT:
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003063 counter->failcnt = 0;
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07003064 break;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003065 default:
3066 BUG();
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07003067 }
Balbir Singhf64c3f52009-09-23 15:56:37 -07003068
Tejun Heo6770c642014-05-13 12:16:21 -04003069 return nbytes;
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07003070}
3071
Tejun Heo182446d2013-08-08 20:11:24 -04003072static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003073 struct cftype *cft)
3074{
Tejun Heo182446d2013-08-08 20:11:24 -04003075 return mem_cgroup_from_css(css)->move_charge_at_immigrate;
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003076}
3077
Daisuke Nishimura02491442010-03-10 15:22:17 -08003078#ifdef CONFIG_MMU
Tejun Heo182446d2013-08-08 20:11:24 -04003079static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003080 struct cftype *cft, u64 val)
3081{
Tejun Heo182446d2013-08-08 20:11:24 -04003082 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003083
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08003084 if (val & ~MOVE_MASK)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003085 return -EINVAL;
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003086
Glauber Costaee5e8472013-02-22 16:34:50 -08003087 /*
3088 * No kind of locking is needed in here, because ->can_attach() will
3089	 * check this value once at the beginning of the process, and then carry
3090 * on with stale data. This means that changes to this value will only
3091 * affect task migrations starting after the change.
3092 */
3093 memcg->move_charge_at_immigrate = val;
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003094 return 0;
3095}
Daisuke Nishimura02491442010-03-10 15:22:17 -08003096#else
Tejun Heo182446d2013-08-08 20:11:24 -04003097static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
Daisuke Nishimura02491442010-03-10 15:22:17 -08003098 struct cftype *cft, u64 val)
3099{
3100 return -ENOSYS;
3101}
3102#endif
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003103
Ying Han406eb0c2011-05-26 16:25:37 -07003104#ifdef CONFIG_NUMA
Tejun Heo2da8ca82013-12-05 12:28:04 -05003105static int memcg_numa_stat_show(struct seq_file *m, void *v)
Ying Han406eb0c2011-05-26 16:25:37 -07003106{
Greg Thelen25485de2013-11-12 15:07:40 -08003107 struct numa_stat {
3108 const char *name;
3109 unsigned int lru_mask;
3110 };
3111
3112 static const struct numa_stat stats[] = {
3113 { "total", LRU_ALL },
3114 { "file", LRU_ALL_FILE },
3115 { "anon", LRU_ALL_ANON },
3116 { "unevictable", BIT(LRU_UNEVICTABLE) },
3117 };
3118 const struct numa_stat *stat;
Ying Han406eb0c2011-05-26 16:25:37 -07003119 int nid;
Greg Thelen25485de2013-11-12 15:07:40 -08003120 unsigned long nr;
Tejun Heo2da8ca82013-12-05 12:28:04 -05003121 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
Ying Han406eb0c2011-05-26 16:25:37 -07003122
Greg Thelen25485de2013-11-12 15:07:40 -08003123 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3124 nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
3125 seq_printf(m, "%s=%lu", stat->name, nr);
3126 for_each_node_state(nid, N_MEMORY) {
3127 nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
3128 stat->lru_mask);
3129 seq_printf(m, " N%d=%lu", nid, nr);
3130 }
3131 seq_putc(m, '\n');
Ying Han406eb0c2011-05-26 16:25:37 -07003132 }
Ying Han406eb0c2011-05-26 16:25:37 -07003133
Ying Han071aee12013-11-12 15:07:41 -08003134 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3135 struct mem_cgroup *iter;
Ying Han406eb0c2011-05-26 16:25:37 -07003136
Ying Han071aee12013-11-12 15:07:41 -08003137 nr = 0;
3138 for_each_mem_cgroup_tree(iter, memcg)
3139 nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
3140 seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
3141 for_each_node_state(nid, N_MEMORY) {
3142 nr = 0;
3143 for_each_mem_cgroup_tree(iter, memcg)
3144 nr += mem_cgroup_node_nr_lru_pages(
3145 iter, nid, stat->lru_mask);
3146 seq_printf(m, " N%d=%lu", nid, nr);
3147 }
3148 seq_putc(m, '\n');
Ying Han406eb0c2011-05-26 16:25:37 -07003149 }
Ying Han406eb0c2011-05-26 16:25:37 -07003150
Ying Han406eb0c2011-05-26 16:25:37 -07003151 return 0;
3152}
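/*
 * The loops above give memory.numa_stat output of the following shape
 * (two NUMA nodes, values purely illustrative):
 *
 *	total=5240 N0=3120 N1=2120
 *	file=2380 N0=1400 N1=980
 *	anon=2840 N0=1700 N1=1140
 *	unevictable=20 N0=20 N1=0
 *	hierarchical_total=8960 N0=5200 N1=3760
 *	hierarchical_file=...
 *	...
 */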
3153#endif /* CONFIG_NUMA */
3154
Tejun Heo2da8ca82013-12-05 12:28:04 -05003155static int memcg_stat_show(struct seq_file *m, void *v)
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08003156{
Tejun Heo2da8ca82013-12-05 12:28:04 -05003157 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003158 unsigned long memory, memsw;
Johannes Weineraf7c4b02012-05-29 15:07:08 -07003159 struct mem_cgroup *mi;
3160 unsigned int i;
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08003161
Greg Thelen0ca44b12015-02-11 15:25:58 -08003162 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_stat_names) !=
3163 MEM_CGROUP_STAT_NSTATS);
3164 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_events_names) !=
3165 MEM_CGROUP_EVENTS_NSTATS);
Rickard Strandqvist70bc0682014-12-12 16:56:41 -08003166 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
3167
Johannes Weineraf7c4b02012-05-29 15:07:08 -07003168 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
Johannes Weiner7941d212016-01-14 15:21:23 -08003169 if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account())
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07003170 continue;
Greg Thelen484ebb32015-10-01 15:37:05 -07003171 seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i],
Johannes Weineraf7c4b02012-05-29 15:07:08 -07003172 mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07003173 }
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08003174
Johannes Weineraf7c4b02012-05-29 15:07:08 -07003175 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
3176 seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
3177 mem_cgroup_read_events(memcg, i));
3178
3179 for (i = 0; i < NR_LRU_LISTS; i++)
3180 seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
3181 mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);
3182
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07003183 /* Hierarchical information */
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003184 memory = memsw = PAGE_COUNTER_MAX;
3185 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
3186 memory = min(memory, mi->memory.limit);
3187 memsw = min(memsw, mi->memsw.limit);
KAMEZAWA Hiroyukifee7b542009-01-07 18:08:26 -08003188 }
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003189 seq_printf(m, "hierarchical_memory_limit %llu\n",
3190 (u64)memory * PAGE_SIZE);
Johannes Weiner7941d212016-01-14 15:21:23 -08003191 if (do_memsw_account())
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003192 seq_printf(m, "hierarchical_memsw_limit %llu\n",
3193 (u64)memsw * PAGE_SIZE);
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003194
Johannes Weineraf7c4b02012-05-29 15:07:08 -07003195 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
Greg Thelen484ebb32015-10-01 15:37:05 -07003196 unsigned long long val = 0;
Johannes Weineraf7c4b02012-05-29 15:07:08 -07003197
Johannes Weiner7941d212016-01-14 15:21:23 -08003198 if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account())
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07003199 continue;
Johannes Weineraf7c4b02012-05-29 15:07:08 -07003200 for_each_mem_cgroup_tree(mi, memcg)
3201 val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
Greg Thelen484ebb32015-10-01 15:37:05 -07003202 seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val);
Johannes Weineraf7c4b02012-05-29 15:07:08 -07003203 }
3204
3205 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
3206 unsigned long long val = 0;
3207
3208 for_each_mem_cgroup_tree(mi, memcg)
3209 val += mem_cgroup_read_events(mi, i);
3210 seq_printf(m, "total_%s %llu\n",
3211 mem_cgroup_events_names[i], val);
3212 }
3213
3214 for (i = 0; i < NR_LRU_LISTS; i++) {
3215 unsigned long long val = 0;
3216
3217 for_each_mem_cgroup_tree(mi, memcg)
3218 val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
3219 seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07003220 }
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07003221
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003222#ifdef CONFIG_DEBUG_VM
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003223 {
3224 int nid, zid;
3225 struct mem_cgroup_per_zone *mz;
Hugh Dickins89abfab2012-05-29 15:06:53 -07003226 struct zone_reclaim_stat *rstat;
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003227 unsigned long recent_rotated[2] = {0, 0};
3228 unsigned long recent_scanned[2] = {0, 0};
3229
3230 for_each_online_node(nid)
3231 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
Jianyu Zhane2318752014-06-06 14:38:20 -07003232 mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
Hugh Dickins89abfab2012-05-29 15:06:53 -07003233 rstat = &mz->lruvec.reclaim_stat;
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003234
Hugh Dickins89abfab2012-05-29 15:06:53 -07003235 recent_rotated[0] += rstat->recent_rotated[0];
3236 recent_rotated[1] += rstat->recent_rotated[1];
3237 recent_scanned[0] += rstat->recent_scanned[0];
3238 recent_scanned[1] += rstat->recent_scanned[1];
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003239 }
Johannes Weiner78ccf5b2012-05-29 15:07:06 -07003240 seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
3241 seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
3242 seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
3243 seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003244 }
3245#endif
3246
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08003247 return 0;
3248}
3249
Tejun Heo182446d2013-08-08 20:11:24 -04003250static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
3251 struct cftype *cft)
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003252{
Tejun Heo182446d2013-08-08 20:11:24 -04003253 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003254
KAMEZAWA Hiroyuki1f4c0252011-07-26 16:08:21 -07003255 return mem_cgroup_swappiness(memcg);
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003256}
3257
Tejun Heo182446d2013-08-08 20:11:24 -04003258static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
3259 struct cftype *cft, u64 val)
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003260{
Tejun Heo182446d2013-08-08 20:11:24 -04003261 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Li Zefan068b38c2009-01-15 13:51:26 -08003262
Johannes Weiner3dae7fe2014-06-04 16:07:01 -07003263 if (val > 100)
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003264 return -EINVAL;
3265
Linus Torvalds14208b02014-06-09 15:03:33 -07003266 if (css->parent)
Johannes Weiner3dae7fe2014-06-04 16:07:01 -07003267 memcg->swappiness = val;
3268 else
3269 vm_swappiness = val;
Li Zefan068b38c2009-01-15 13:51:26 -08003270
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003271 return 0;
3272}
3273
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003274static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
3275{
3276 struct mem_cgroup_threshold_ary *t;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003277 unsigned long usage;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003278 int i;
3279
3280 rcu_read_lock();
3281 if (!swap)
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003282 t = rcu_dereference(memcg->thresholds.primary);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003283 else
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003284 t = rcu_dereference(memcg->memsw_thresholds.primary);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003285
3286 if (!t)
3287 goto unlock;
3288
Johannes Weinerce00a962014-09-05 08:43:57 -04003289 usage = mem_cgroup_usage(memcg, swap);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003290
3291 /*
Sha Zhengju748dad32012-05-29 15:06:57 -07003292	 * current_threshold points to the threshold just below or equal to usage.
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003293	 * If that is not the case, a threshold was crossed after the last
3294	 * call of __mem_cgroup_threshold().
3295 */
Phil Carmody5407a562010-05-26 14:42:42 -07003296 i = t->current_threshold;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003297
3298 /*
3299 * Iterate backward over array of thresholds starting from
3300 * current_threshold and check if a threshold is crossed.
3301	 * If none of the thresholds below usage is crossed, we read
3302 * only one element of the array here.
3303 */
3304 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
3305 eventfd_signal(t->entries[i].eventfd, 1);
3306
3307 /* i = current_threshold + 1 */
3308 i++;
3309
3310 /*
3311 * Iterate forward over array of thresholds starting from
3312 * current_threshold+1 and check if a threshold is crossed.
3313	 * If none of the thresholds above usage is crossed, we read
3314 * only one element of the array here.
3315 */
3316 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
3317 eventfd_signal(t->entries[i].eventfd, 1);
3318
3319 /* Update current_threshold */
Phil Carmody5407a562010-05-26 14:42:42 -07003320 t->current_threshold = i - 1;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003321unlock:
3322 rcu_read_unlock();
3323}
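/*
 * A standalone sketch of the scan above, for illustration: given a sorted
 * array thr[0..size-1] and the previously recorded index cur, signal every
 * threshold that the new usage has crossed and record the new position.
 *
 *	int i = cur;
 *
 *	while (i >= 0 && thr[i] > usage)	// usage fell below thr[i]
 *		notify(i--);
 *	i++;
 *	while (i < size && thr[i] <= usage)	// usage rose above thr[i]
 *		notify(i++);
 *	cur = i - 1;
 *
 * When usage stays between the same two neighbouring thresholds, each loop
 * looks at exactly one element and nothing is signalled.
 */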
3324
3325static void mem_cgroup_threshold(struct mem_cgroup *memcg)
3326{
Kirill A. Shutemovad4ca5f2010-10-07 12:59:27 -07003327 while (memcg) {
3328 __mem_cgroup_threshold(memcg, false);
Johannes Weiner7941d212016-01-14 15:21:23 -08003329 if (do_memsw_account())
Kirill A. Shutemovad4ca5f2010-10-07 12:59:27 -07003330 __mem_cgroup_threshold(memcg, true);
3331
3332 memcg = parent_mem_cgroup(memcg);
3333 }
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003334}
3335
3336static int compare_thresholds(const void *a, const void *b)
3337{
3338 const struct mem_cgroup_threshold *_a = a;
3339 const struct mem_cgroup_threshold *_b = b;
3340
Greg Thelen2bff24a2013-09-11 14:23:08 -07003341 if (_a->threshold > _b->threshold)
3342 return 1;
3343
3344 if (_a->threshold < _b->threshold)
3345 return -1;
3346
3347 return 0;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003348}
3349
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003350static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003351{
3352 struct mem_cgroup_eventfd_list *ev;
3353
Michal Hocko2bcf2e92014-07-30 16:08:33 -07003354 spin_lock(&memcg_oom_lock);
3355
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003356 list_for_each_entry(ev, &memcg->oom_notify, list)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003357 eventfd_signal(ev->eventfd, 1);
Michal Hocko2bcf2e92014-07-30 16:08:33 -07003358
3359 spin_unlock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003360 return 0;
3361}
3362
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003363static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003364{
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07003365 struct mem_cgroup *iter;
3366
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003367 for_each_mem_cgroup_tree(iter, memcg)
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07003368 mem_cgroup_oom_notify_cb(iter);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003369}
3370
Tejun Heo59b6f872013-11-22 18:20:43 -05003371static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003372 struct eventfd_ctx *eventfd, const char *args, enum res_type type)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003373{
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003374 struct mem_cgroup_thresholds *thresholds;
3375 struct mem_cgroup_threshold_ary *new;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003376 unsigned long threshold;
3377 unsigned long usage;
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003378 int i, size, ret;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003379
Johannes Weiner650c5e52015-02-11 15:26:03 -08003380 ret = page_counter_memparse(args, "-1", &threshold);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003381 if (ret)
3382 return ret;
3383
3384 mutex_lock(&memcg->thresholds_lock);
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003385
Johannes Weiner05b84302014-08-06 16:05:59 -07003386 if (type == _MEM) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003387 thresholds = &memcg->thresholds;
Johannes Weinerce00a962014-09-05 08:43:57 -04003388 usage = mem_cgroup_usage(memcg, false);
Johannes Weiner05b84302014-08-06 16:05:59 -07003389 } else if (type == _MEMSWAP) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003390 thresholds = &memcg->memsw_thresholds;
Johannes Weinerce00a962014-09-05 08:43:57 -04003391 usage = mem_cgroup_usage(memcg, true);
Johannes Weiner05b84302014-08-06 16:05:59 -07003392 } else
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003393 BUG();
3394
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003395 /* Check if a threshold crossed before adding a new one */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003396 if (thresholds->primary)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003397 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3398
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003399 size = thresholds->primary ? thresholds->primary->size + 1 : 1;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003400
3401 /* Allocate memory for new array of thresholds */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003402 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003403 GFP_KERNEL);
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003404 if (!new) {
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003405 ret = -ENOMEM;
3406 goto unlock;
3407 }
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003408 new->size = size;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003409
3410 /* Copy thresholds (if any) to new array */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003411 if (thresholds->primary) {
3412 memcpy(new->entries, thresholds->primary->entries, (size - 1) *
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003413 sizeof(struct mem_cgroup_threshold));
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003414 }
3415
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003416 /* Add new threshold */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003417 new->entries[size - 1].eventfd = eventfd;
3418 new->entries[size - 1].threshold = threshold;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003419
3420	/* Sort thresholds. Registering a new threshold isn't time-critical */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003421 sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003422 compare_thresholds, NULL);
3423
3424 /* Find current threshold */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003425 new->current_threshold = -1;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003426 for (i = 0; i < size; i++) {
Sha Zhengju748dad32012-05-29 15:06:57 -07003427 if (new->entries[i].threshold <= usage) {
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003428 /*
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003429 * new->current_threshold will not be used until
3430 * rcu_assign_pointer(), so it's safe to increment
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003431 * it here.
3432 */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003433 ++new->current_threshold;
Sha Zhengju748dad32012-05-29 15:06:57 -07003434 } else
3435 break;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003436 }
3437
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003438 /* Free old spare buffer and save old primary buffer as spare */
3439 kfree(thresholds->spare);
3440 thresholds->spare = thresholds->primary;
3441
3442 rcu_assign_pointer(thresholds->primary, new);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003443
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07003444	/* Make sure that nobody still uses the old thresholds array */
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003445 synchronize_rcu();
3446
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003447unlock:
3448 mutex_unlock(&memcg->thresholds_lock);
3449
3450 return ret;
3451}
3452
Tejun Heo59b6f872013-11-22 18:20:43 -05003453static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003454 struct eventfd_ctx *eventfd, const char *args)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003455{
Tejun Heo59b6f872013-11-22 18:20:43 -05003456 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
Tejun Heo347c4a82013-11-22 18:20:43 -05003457}
3458
Tejun Heo59b6f872013-11-22 18:20:43 -05003459static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003460 struct eventfd_ctx *eventfd, const char *args)
3461{
Tejun Heo59b6f872013-11-22 18:20:43 -05003462 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
Tejun Heo347c4a82013-11-22 18:20:43 -05003463}
3464
Tejun Heo59b6f872013-11-22 18:20:43 -05003465static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003466 struct eventfd_ctx *eventfd, enum res_type type)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003467{
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003468 struct mem_cgroup_thresholds *thresholds;
3469 struct mem_cgroup_threshold_ary *new;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003470 unsigned long usage;
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003471 int i, j, size;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003472
3473 mutex_lock(&memcg->thresholds_lock);
Johannes Weiner05b84302014-08-06 16:05:59 -07003474
3475 if (type == _MEM) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003476 thresholds = &memcg->thresholds;
Johannes Weinerce00a962014-09-05 08:43:57 -04003477 usage = mem_cgroup_usage(memcg, false);
Johannes Weiner05b84302014-08-06 16:05:59 -07003478 } else if (type == _MEMSWAP) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003479 thresholds = &memcg->memsw_thresholds;
Johannes Weinerce00a962014-09-05 08:43:57 -04003480 usage = mem_cgroup_usage(memcg, true);
Johannes Weiner05b84302014-08-06 16:05:59 -07003481 } else
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003482 BUG();
3483
Anton Vorontsov371528c2012-02-24 05:14:46 +04003484 if (!thresholds->primary)
3485 goto unlock;
3486
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003487	/* Check if a threshold was crossed before removing */
3488 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3489
3490	/* Calculate the new number of thresholds */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003491 size = 0;
3492 for (i = 0; i < thresholds->primary->size; i++) {
3493 if (thresholds->primary->entries[i].eventfd != eventfd)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003494 size++;
3495 }
3496
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003497 new = thresholds->spare;
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07003498
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003499 /* Set thresholds array to NULL if we don't have thresholds */
3500 if (!size) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003501 kfree(new);
3502 new = NULL;
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07003503 goto swap_buffers;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003504 }
3505
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003506 new->size = size;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003507
3508 /* Copy thresholds and find current threshold */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003509 new->current_threshold = -1;
3510 for (i = 0, j = 0; i < thresholds->primary->size; i++) {
3511 if (thresholds->primary->entries[i].eventfd == eventfd)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003512 continue;
3513
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003514 new->entries[j] = thresholds->primary->entries[i];
Sha Zhengju748dad32012-05-29 15:06:57 -07003515 if (new->entries[j].threshold <= usage) {
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003516 /*
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003517 * new->current_threshold will not be used
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003518 * until rcu_assign_pointer(), so it's safe to increment
3519 * it here.
3520 */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003521 ++new->current_threshold;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003522 }
3523 j++;
3524 }
3525
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07003526swap_buffers:
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003527 /* Swap primary and spare array */
3528 thresholds->spare = thresholds->primary;
Sha Zhengju8c757762012-05-10 13:01:45 -07003529
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003530 rcu_assign_pointer(thresholds->primary, new);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003531
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07003532	/* Make sure that nobody still uses the old thresholds array */
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003533 synchronize_rcu();
Martijn Coenen6611d8d2016-01-15 16:57:49 -08003534
3535 /* If all events are unregistered, free the spare array */
3536 if (!new) {
3537 kfree(thresholds->spare);
3538 thresholds->spare = NULL;
3539 }
Anton Vorontsov371528c2012-02-24 05:14:46 +04003540unlock:
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003541 mutex_unlock(&memcg->thresholds_lock);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003542}
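/*
 * A minimal userspace sketch of the primary/spare double-buffer pattern the
 * two threshold functions above rely on. This is illustrative only: the
 * struct names are invented, the update is single-threaded, and the
 * in-kernel publish/quiesce steps (rcu_assign_pointer() followed by
 * synchronize_rcu()) are reduced to a plain pointer assignment here.
 */
#include <stdlib.h>
#include <string.h>

struct ary {
	int size;
	unsigned long entries[];
};

struct thresholds {
	struct ary *primary;	/* array readers currently see */
	struct ary *spare;	/* previous array, reused on the next update */
};

static int add_entry(struct thresholds *t, unsigned long val)
{
	int size = t->primary ? t->primary->size + 1 : 1;
	struct ary *new = malloc(sizeof(*new) + size * sizeof(unsigned long));

	if (!new)
		return -1;
	new->size = size;
	if (t->primary)
		memcpy(new->entries, t->primary->entries,
		       (size - 1) * sizeof(unsigned long));
	new->entries[size - 1] = val;

	free(t->spare);		/* drop the stale spare */
	t->spare = t->primary;	/* old primary becomes the spare */
	t->primary = new;	/* publish; rcu_assign_pointer() in-kernel */
	return 0;
}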
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003543
Tejun Heo59b6f872013-11-22 18:20:43 -05003544static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003545 struct eventfd_ctx *eventfd)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003546{
Tejun Heo59b6f872013-11-22 18:20:43 -05003547 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
Tejun Heo347c4a82013-11-22 18:20:43 -05003548}
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003549
Tejun Heo59b6f872013-11-22 18:20:43 -05003550static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003551 struct eventfd_ctx *eventfd)
3552{
Tejun Heo59b6f872013-11-22 18:20:43 -05003553 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
Tejun Heo347c4a82013-11-22 18:20:43 -05003554}
3555
Tejun Heo59b6f872013-11-22 18:20:43 -05003556static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003557 struct eventfd_ctx *eventfd, const char *args)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003558{
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003559 struct mem_cgroup_eventfd_list *event;
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003560
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003561 event = kmalloc(sizeof(*event), GFP_KERNEL);
3562 if (!event)
3563 return -ENOMEM;
3564
Michal Hocko1af8efe2011-07-26 16:08:24 -07003565 spin_lock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003566
3567 event->eventfd = eventfd;
3568 list_add(&event->list, &memcg->oom_notify);
3569
3570	/* already in OOM? */
Tejun Heoc2b42d32015-06-24 16:58:23 -07003571 if (memcg->under_oom)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003572 eventfd_signal(eventfd, 1);
Michal Hocko1af8efe2011-07-26 16:08:24 -07003573 spin_unlock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003574
3575 return 0;
3576}
3577
Tejun Heo59b6f872013-11-22 18:20:43 -05003578static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003579 struct eventfd_ctx *eventfd)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003580{
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003581 struct mem_cgroup_eventfd_list *ev, *tmp;
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003582
Michal Hocko1af8efe2011-07-26 16:08:24 -07003583 spin_lock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003584
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003585 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003586 if (ev->eventfd == eventfd) {
3587 list_del(&ev->list);
3588 kfree(ev);
3589 }
3590 }
3591
Michal Hocko1af8efe2011-07-26 16:08:24 -07003592 spin_unlock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003593}
3594
Tejun Heo2da8ca82013-12-05 12:28:04 -05003595static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003596{
Tejun Heo2da8ca82013-12-05 12:28:04 -05003597 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003598
Tejun Heo791badb2013-12-05 12:28:02 -05003599 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
Tejun Heoc2b42d32015-06-24 16:58:23 -07003600 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003601 return 0;
3602}
3603
Tejun Heo182446d2013-08-08 20:11:24 -04003604static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003605 struct cftype *cft, u64 val)
3606{
Tejun Heo182446d2013-08-08 20:11:24 -04003607 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003608
3609	/* cannot be set on the root cgroup and only 0 and 1 are allowed */
Linus Torvalds14208b02014-06-09 15:03:33 -07003610 if (!css->parent || !((val == 0) || (val == 1)))
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003611 return -EINVAL;
3612
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003613 memcg->oom_kill_disable = val;
KAMEZAWA Hiroyuki4d845eb2010-06-29 15:05:18 -07003614 if (!val)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003615 memcg_oom_recover(memcg);
Johannes Weiner3dae7fe2014-06-04 16:07:01 -07003616
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003617 return 0;
3618}
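/*
 * Hedged userspace sketch of the legacy interface served by the two handlers
 * above: reading memory.oom_control returns the two seq_printf() lines, and
 * writing "1" sets oom_kill_disable (the write is rejected for the root
 * cgroup). The cgroup path is a made-up example.
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/fs/cgroup/memory/test/memory.oom_control";
	char line[64];
	FILE *f = fopen(path, "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* "oom_kill_disable 0", "under_oom 0" */
	fclose(f);

	f = fopen(path, "w");
	if (!f)
		return 1;
	fputs("1\n", f);		/* disable the OOM killer for this memcg */
	return fclose(f) ? 1 : 0;
}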
3619
Tejun Heo52ebea72015-05-22 17:13:37 -04003620#ifdef CONFIG_CGROUP_WRITEBACK
3621
3622struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg)
3623{
3624 return &memcg->cgwb_list;
3625}
3626
Tejun Heo841710a2015-05-22 18:23:33 -04003627static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3628{
3629 return wb_domain_init(&memcg->cgwb_domain, gfp);
3630}
3631
3632static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3633{
3634 wb_domain_exit(&memcg->cgwb_domain);
3635}
3636
Tejun Heo2529bb32015-05-22 18:23:34 -04003637static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3638{
3639 wb_domain_size_changed(&memcg->cgwb_domain);
3640}
3641
Tejun Heo841710a2015-05-22 18:23:33 -04003642struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
3643{
3644 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3645
3646 if (!memcg->css.parent)
3647 return NULL;
3648
3649 return &memcg->cgwb_domain;
3650}
3651
Tejun Heoc2aa7232015-05-22 18:23:35 -04003652/**
3653 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
3654 * @wb: bdi_writeback in question
Tejun Heoc5edf9c2015-09-29 13:04:26 -04003655 * @pfilepages: out parameter for number of file pages
3656 * @pheadroom: out parameter for number of allocatable pages according to memcg
Tejun Heoc2aa7232015-05-22 18:23:35 -04003657 * @pdirty: out parameter for number of dirty pages
3658 * @pwriteback: out parameter for number of pages under writeback
3659 *
Tejun Heoc5edf9c2015-09-29 13:04:26 -04003660 * Determine the numbers of file, headroom, dirty, and writeback pages in
3661 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom
3662 * is a bit more involved.
Tejun Heoc2aa7232015-05-22 18:23:35 -04003663 *
Tejun Heoc5edf9c2015-09-29 13:04:26 -04003664 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the
3665 * headroom is calculated as the lowest headroom of itself and the
3666 * ancestors. Note that this doesn't consider the actual amount of
3667 * available memory in the system. The caller should further cap
3668 * *@pheadroom accordingly.
Tejun Heoc2aa7232015-05-22 18:23:35 -04003669 */
Tejun Heoc5edf9c2015-09-29 13:04:26 -04003670void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
3671 unsigned long *pheadroom, unsigned long *pdirty,
3672 unsigned long *pwriteback)
Tejun Heoc2aa7232015-05-22 18:23:35 -04003673{
3674 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3675 struct mem_cgroup *parent;
Tejun Heoc2aa7232015-05-22 18:23:35 -04003676
3677 *pdirty = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_DIRTY);
3678
3679 /* this should eventually include NR_UNSTABLE_NFS */
3680 *pwriteback = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
Tejun Heoc5edf9c2015-09-29 13:04:26 -04003681 *pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
3682 (1 << LRU_ACTIVE_FILE));
3683 *pheadroom = PAGE_COUNTER_MAX;
Tejun Heoc2aa7232015-05-22 18:23:35 -04003684
Tejun Heoc2aa7232015-05-22 18:23:35 -04003685 while ((parent = parent_mem_cgroup(memcg))) {
3686 unsigned long ceiling = min(memcg->memory.limit, memcg->high);
3687 unsigned long used = page_counter_read(&memcg->memory);
3688
Tejun Heoc5edf9c2015-09-29 13:04:26 -04003689 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
Tejun Heoc2aa7232015-05-22 18:23:35 -04003690 memcg = parent;
3691 }
Tejun Heoc2aa7232015-05-22 18:23:35 -04003692}
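/*
 * A small standalone sketch of the headroom clamp in the loop above, for a
 * hypothetical two-level hierarchy. The numbers are invented for
 * illustration and the units are arbitrary.
 */
#include <stdio.h>

static unsigned long headroom(unsigned long limit, unsigned long high,
			      unsigned long used)
{
	unsigned long ceiling = limit < high ? limit : high;

	return ceiling - (used < ceiling ? used : ceiling);
}

int main(void)
{
	/* child: limit 512, high 256, used 200 -> headroom 56 */
	unsigned long child = headroom(512, 256, 200);
	/* parent: limit 1024, high 1024, used 900 -> headroom 124 */
	unsigned long parent = headroom(1024, 1024, 900);

	/* the effective headroom is the minimum along the path: 56 */
	printf("%lu\n", child < parent ? child : parent);
	return 0;
}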
3693
Tejun Heo841710a2015-05-22 18:23:33 -04003694#else /* CONFIG_CGROUP_WRITEBACK */
3695
3696static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3697{
3698 return 0;
3699}
3700
3701static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3702{
3703}
3704
Tejun Heo2529bb32015-05-22 18:23:34 -04003705static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3706{
3707}
3708
Tejun Heo52ebea72015-05-22 17:13:37 -04003709#endif /* CONFIG_CGROUP_WRITEBACK */
3710
Tejun Heo79bd9812013-11-22 18:20:42 -05003711/*
Tejun Heo3bc942f2013-11-22 18:20:44 -05003712 * DO NOT USE IN NEW FILES.
3713 *
3714 * "cgroup.event_control" implementation.
3715 *
3716 * This is way over-engineered. It tries to support fully configurable
3717 * events for each user. Such a level of flexibility is completely
3718 * unnecessary, especially in light of the planned unified hierarchy.
3719 *
3720 * Please deprecate this and replace with something simpler if at all
3721 * possible.
3722 */
3723
3724/*
Tejun Heo79bd9812013-11-22 18:20:42 -05003725 * Unregister event and free resources.
3726 *
3727 * Gets called from workqueue.
3728 */
Tejun Heo3bc942f2013-11-22 18:20:44 -05003729static void memcg_event_remove(struct work_struct *work)
Tejun Heo79bd9812013-11-22 18:20:42 -05003730{
Tejun Heo3bc942f2013-11-22 18:20:44 -05003731 struct mem_cgroup_event *event =
3732 container_of(work, struct mem_cgroup_event, remove);
Tejun Heo59b6f872013-11-22 18:20:43 -05003733 struct mem_cgroup *memcg = event->memcg;
Tejun Heo79bd9812013-11-22 18:20:42 -05003734
3735 remove_wait_queue(event->wqh, &event->wait);
3736
Tejun Heo59b6f872013-11-22 18:20:43 -05003737 event->unregister_event(memcg, event->eventfd);
Tejun Heo79bd9812013-11-22 18:20:42 -05003738
3739 /* Notify userspace the event is going away. */
3740 eventfd_signal(event->eventfd, 1);
3741
3742 eventfd_ctx_put(event->eventfd);
3743 kfree(event);
Tejun Heo59b6f872013-11-22 18:20:43 -05003744 css_put(&memcg->css);
Tejun Heo79bd9812013-11-22 18:20:42 -05003745}
3746
3747/*
3748 * Gets called on POLLHUP on eventfd when user closes it.
3749 *
3750 * Called with wqh->lock held and interrupts disabled.
3751 */
Tejun Heo3bc942f2013-11-22 18:20:44 -05003752static int memcg_event_wake(wait_queue_t *wait, unsigned mode,
3753 int sync, void *key)
Tejun Heo79bd9812013-11-22 18:20:42 -05003754{
Tejun Heo3bc942f2013-11-22 18:20:44 -05003755 struct mem_cgroup_event *event =
3756 container_of(wait, struct mem_cgroup_event, wait);
Tejun Heo59b6f872013-11-22 18:20:43 -05003757 struct mem_cgroup *memcg = event->memcg;
Tejun Heo79bd9812013-11-22 18:20:42 -05003758 unsigned long flags = (unsigned long)key;
3759
3760 if (flags & POLLHUP) {
3761 /*
3762 * If the event has been detached at cgroup removal, we
3763 * can simply return knowing the other side will cleanup
3764 * for us.
3765 *
3766 * We can't race against event freeing since the other
3767 * side will require wqh->lock via remove_wait_queue(),
3768 * which we hold.
3769 */
Tejun Heofba94802013-11-22 18:20:43 -05003770 spin_lock(&memcg->event_list_lock);
Tejun Heo79bd9812013-11-22 18:20:42 -05003771 if (!list_empty(&event->list)) {
3772 list_del_init(&event->list);
3773 /*
3774			 * We are in atomic context, but memcg_event_remove()
3775			 * may sleep, so we have to call it via a workqueue.
3776 */
3777 schedule_work(&event->remove);
3778 }
Tejun Heofba94802013-11-22 18:20:43 -05003779 spin_unlock(&memcg->event_list_lock);
Tejun Heo79bd9812013-11-22 18:20:42 -05003780 }
3781
3782 return 0;
3783}
3784
Tejun Heo3bc942f2013-11-22 18:20:44 -05003785static void memcg_event_ptable_queue_proc(struct file *file,
Tejun Heo79bd9812013-11-22 18:20:42 -05003786 wait_queue_head_t *wqh, poll_table *pt)
3787{
Tejun Heo3bc942f2013-11-22 18:20:44 -05003788 struct mem_cgroup_event *event =
3789 container_of(pt, struct mem_cgroup_event, pt);
Tejun Heo79bd9812013-11-22 18:20:42 -05003790
3791 event->wqh = wqh;
3792 add_wait_queue(wqh, &event->wait);
3793}
3794
3795/*
Tejun Heo3bc942f2013-11-22 18:20:44 -05003796 * DO NOT USE IN NEW FILES.
3797 *
Tejun Heo79bd9812013-11-22 18:20:42 -05003798 * Parse input and register new cgroup event handler.
3799 *
3800 * Input must be in format '<event_fd> <control_fd> <args>'.
3801 * Interpretation of args is defined by control file implementation.
3802 */
Tejun Heo451af502014-05-13 12:16:21 -04003803static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
3804 char *buf, size_t nbytes, loff_t off)
Tejun Heo79bd9812013-11-22 18:20:42 -05003805{
Tejun Heo451af502014-05-13 12:16:21 -04003806 struct cgroup_subsys_state *css = of_css(of);
Tejun Heofba94802013-11-22 18:20:43 -05003807 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Tejun Heo3bc942f2013-11-22 18:20:44 -05003808 struct mem_cgroup_event *event;
Tejun Heo79bd9812013-11-22 18:20:42 -05003809 struct cgroup_subsys_state *cfile_css;
3810 unsigned int efd, cfd;
3811 struct fd efile;
3812 struct fd cfile;
Tejun Heofba94802013-11-22 18:20:43 -05003813 const char *name;
Tejun Heo79bd9812013-11-22 18:20:42 -05003814 char *endp;
3815 int ret;
3816
Tejun Heo451af502014-05-13 12:16:21 -04003817 buf = strstrip(buf);
3818
3819 efd = simple_strtoul(buf, &endp, 10);
Tejun Heo79bd9812013-11-22 18:20:42 -05003820 if (*endp != ' ')
3821 return -EINVAL;
Tejun Heo451af502014-05-13 12:16:21 -04003822 buf = endp + 1;
Tejun Heo79bd9812013-11-22 18:20:42 -05003823
Tejun Heo451af502014-05-13 12:16:21 -04003824 cfd = simple_strtoul(buf, &endp, 10);
Tejun Heo79bd9812013-11-22 18:20:42 -05003825 if ((*endp != ' ') && (*endp != '\0'))
3826 return -EINVAL;
Tejun Heo451af502014-05-13 12:16:21 -04003827 buf = endp + 1;
Tejun Heo79bd9812013-11-22 18:20:42 -05003828
3829 event = kzalloc(sizeof(*event), GFP_KERNEL);
3830 if (!event)
3831 return -ENOMEM;
3832
Tejun Heo59b6f872013-11-22 18:20:43 -05003833 event->memcg = memcg;
Tejun Heo79bd9812013-11-22 18:20:42 -05003834 INIT_LIST_HEAD(&event->list);
Tejun Heo3bc942f2013-11-22 18:20:44 -05003835 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
3836 init_waitqueue_func_entry(&event->wait, memcg_event_wake);
3837 INIT_WORK(&event->remove, memcg_event_remove);
Tejun Heo79bd9812013-11-22 18:20:42 -05003838
3839 efile = fdget(efd);
3840 if (!efile.file) {
3841 ret = -EBADF;
3842 goto out_kfree;
3843 }
3844
3845 event->eventfd = eventfd_ctx_fileget(efile.file);
3846 if (IS_ERR(event->eventfd)) {
3847 ret = PTR_ERR(event->eventfd);
3848 goto out_put_efile;
3849 }
3850
3851 cfile = fdget(cfd);
3852 if (!cfile.file) {
3853 ret = -EBADF;
3854 goto out_put_eventfd;
3855 }
3856
3857	/* the process needs read permission on the control file */
3858 /* AV: shouldn't we check that it's been opened for read instead? */
3859 ret = inode_permission(file_inode(cfile.file), MAY_READ);
3860 if (ret < 0)
3861 goto out_put_cfile;
3862
Tejun Heo79bd9812013-11-22 18:20:42 -05003863 /*
Tejun Heofba94802013-11-22 18:20:43 -05003864 * Determine the event callbacks and set them in @event. This used
3865 * to be done via struct cftype but cgroup core no longer knows
3866 * about these events. The following is crude but the whole thing
3867 * is for compatibility anyway.
Tejun Heo3bc942f2013-11-22 18:20:44 -05003868 *
3869 * DO NOT ADD NEW FILES.
Tejun Heofba94802013-11-22 18:20:43 -05003870 */
Al Virob5830432014-10-31 01:22:04 -04003871 name = cfile.file->f_path.dentry->d_name.name;
Tejun Heofba94802013-11-22 18:20:43 -05003872
3873 if (!strcmp(name, "memory.usage_in_bytes")) {
3874 event->register_event = mem_cgroup_usage_register_event;
3875 event->unregister_event = mem_cgroup_usage_unregister_event;
3876 } else if (!strcmp(name, "memory.oom_control")) {
3877 event->register_event = mem_cgroup_oom_register_event;
3878 event->unregister_event = mem_cgroup_oom_unregister_event;
3879 } else if (!strcmp(name, "memory.pressure_level")) {
3880 event->register_event = vmpressure_register_event;
3881 event->unregister_event = vmpressure_unregister_event;
3882 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
Tejun Heo347c4a82013-11-22 18:20:43 -05003883 event->register_event = memsw_cgroup_usage_register_event;
3884 event->unregister_event = memsw_cgroup_usage_unregister_event;
Tejun Heofba94802013-11-22 18:20:43 -05003885 } else {
3886 ret = -EINVAL;
3887 goto out_put_cfile;
3888 }
3889
3890 /*
Tejun Heob5557c42013-11-22 18:20:42 -05003891	 * Verify that @cfile belongs to @css. Also, remaining events are
3892 * automatically removed on cgroup destruction but the removal is
3893 * asynchronous, so take an extra ref on @css.
Tejun Heo79bd9812013-11-22 18:20:42 -05003894 */
Al Virob5830432014-10-31 01:22:04 -04003895 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
Tejun Heoec903c02014-05-13 12:11:01 -04003896 &memory_cgrp_subsys);
Tejun Heo79bd9812013-11-22 18:20:42 -05003897 ret = -EINVAL;
Tejun Heo5a17f542014-02-11 11:52:47 -05003898 if (IS_ERR(cfile_css))
Tejun Heo79bd9812013-11-22 18:20:42 -05003899 goto out_put_cfile;
Tejun Heo5a17f542014-02-11 11:52:47 -05003900 if (cfile_css != css) {
3901 css_put(cfile_css);
3902 goto out_put_cfile;
3903 }
Tejun Heo79bd9812013-11-22 18:20:42 -05003904
Tejun Heo451af502014-05-13 12:16:21 -04003905 ret = event->register_event(memcg, event->eventfd, buf);
Tejun Heo79bd9812013-11-22 18:20:42 -05003906 if (ret)
3907 goto out_put_css;
3908
3909 efile.file->f_op->poll(efile.file, &event->pt);
3910
Tejun Heofba94802013-11-22 18:20:43 -05003911 spin_lock(&memcg->event_list_lock);
3912 list_add(&event->list, &memcg->event_list);
3913 spin_unlock(&memcg->event_list_lock);
Tejun Heo79bd9812013-11-22 18:20:42 -05003914
3915 fdput(cfile);
3916 fdput(efile);
3917
Tejun Heo451af502014-05-13 12:16:21 -04003918 return nbytes;
Tejun Heo79bd9812013-11-22 18:20:42 -05003919
3920out_put_css:
Tejun Heob5557c42013-11-22 18:20:42 -05003921 css_put(css);
Tejun Heo79bd9812013-11-22 18:20:42 -05003922out_put_cfile:
3923 fdput(cfile);
3924out_put_eventfd:
3925 eventfd_ctx_put(event->eventfd);
3926out_put_efile:
3927 fdput(efile);
3928out_kfree:
3929 kfree(event);
3930
3931 return ret;
3932}
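/*
 * Hedged userspace sketch of the legacy registration this handler parses:
 * create an eventfd, open the per-memcg control file, then write
 * "<event_fd> <control_fd> <args>" to cgroup.event_control. Here <args> is
 * a usage threshold in bytes; oom_control and pressure_level events are
 * registered the same way with their own <args>. The cgroup path and the
 * 50M threshold are example values.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/eventfd.h>

int main(void)
{
	char buf[64];
	uint64_t ticks;
	int efd = eventfd(0, 0);
	int cfd = open("/sys/fs/cgroup/memory/test/memory.usage_in_bytes",
		       O_RDONLY);
	int ecfd = open("/sys/fs/cgroup/memory/test/cgroup.event_control",
			O_WRONLY);

	if (efd < 0 || cfd < 0 || ecfd < 0)
		return 1;

	snprintf(buf, sizeof(buf), "%d %d %llu", efd, cfd,
		 (unsigned long long)(50 << 20));
	if (write(ecfd, buf, strlen(buf)) < 0)
		return 1;

	/* blocks until usage crosses the threshold (eventfd counter read) */
	return read(efd, &ticks, sizeof(ticks)) == sizeof(ticks) ? 0 : 1;
}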
3933
Johannes Weiner241994e2015-02-11 15:26:06 -08003934static struct cftype mem_cgroup_legacy_files[] = {
Balbir Singh8cdea7c2008-02-07 00:13:50 -08003935 {
Balbir Singh0eea1032008-02-07 00:13:57 -08003936 .name = "usage_in_bytes",
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003937 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
Tejun Heo791badb2013-12-05 12:28:02 -05003938 .read_u64 = mem_cgroup_read_u64,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08003939 },
3940 {
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07003941 .name = "max_usage_in_bytes",
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003942 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
Tejun Heo6770c642014-05-13 12:16:21 -04003943 .write = mem_cgroup_reset,
Tejun Heo791badb2013-12-05 12:28:02 -05003944 .read_u64 = mem_cgroup_read_u64,
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07003945 },
3946 {
Balbir Singh0eea1032008-02-07 00:13:57 -08003947 .name = "limit_in_bytes",
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003948 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
Tejun Heo451af502014-05-13 12:16:21 -04003949 .write = mem_cgroup_write,
Tejun Heo791badb2013-12-05 12:28:02 -05003950 .read_u64 = mem_cgroup_read_u64,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08003951 },
3952 {
Balbir Singh296c81d2009-09-23 15:56:36 -07003953 .name = "soft_limit_in_bytes",
3954 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
Tejun Heo451af502014-05-13 12:16:21 -04003955 .write = mem_cgroup_write,
Tejun Heo791badb2013-12-05 12:28:02 -05003956 .read_u64 = mem_cgroup_read_u64,
Balbir Singh296c81d2009-09-23 15:56:36 -07003957 },
3958 {
Balbir Singh8cdea7c2008-02-07 00:13:50 -08003959 .name = "failcnt",
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003960 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
Tejun Heo6770c642014-05-13 12:16:21 -04003961 .write = mem_cgroup_reset,
Tejun Heo791badb2013-12-05 12:28:02 -05003962 .read_u64 = mem_cgroup_read_u64,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08003963 },
Balbir Singh8697d332008-02-07 00:13:59 -08003964 {
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08003965 .name = "stat",
Tejun Heo2da8ca82013-12-05 12:28:04 -05003966 .seq_show = memcg_stat_show,
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08003967 },
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003968 {
3969 .name = "force_empty",
Tejun Heo6770c642014-05-13 12:16:21 -04003970 .write = mem_cgroup_force_empty_write,
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003971 },
Balbir Singh18f59ea2009-01-07 18:08:07 -08003972 {
3973 .name = "use_hierarchy",
3974 .write_u64 = mem_cgroup_hierarchy_write,
3975 .read_u64 = mem_cgroup_hierarchy_read,
3976 },
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003977 {
Tejun Heo3bc942f2013-11-22 18:20:44 -05003978 .name = "cgroup.event_control", /* XXX: for compat */
Tejun Heo451af502014-05-13 12:16:21 -04003979 .write = memcg_write_event_control,
Tejun Heo7dbdb192015-09-18 17:54:23 -04003980 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
Tejun Heo79bd9812013-11-22 18:20:42 -05003981 },
3982 {
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003983 .name = "swappiness",
3984 .read_u64 = mem_cgroup_swappiness_read,
3985 .write_u64 = mem_cgroup_swappiness_write,
3986 },
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003987 {
3988 .name = "move_charge_at_immigrate",
3989 .read_u64 = mem_cgroup_move_charge_read,
3990 .write_u64 = mem_cgroup_move_charge_write,
3991 },
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003992 {
3993 .name = "oom_control",
Tejun Heo2da8ca82013-12-05 12:28:04 -05003994 .seq_show = mem_cgroup_oom_control_read,
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003995 .write_u64 = mem_cgroup_oom_control_write,
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003996 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
3997 },
Anton Vorontsov70ddf632013-04-29 15:08:31 -07003998 {
3999 .name = "pressure_level",
Anton Vorontsov70ddf632013-04-29 15:08:31 -07004000 },
Ying Han406eb0c2011-05-26 16:25:37 -07004001#ifdef CONFIG_NUMA
4002 {
4003 .name = "numa_stat",
Tejun Heo2da8ca82013-12-05 12:28:04 -05004004 .seq_show = memcg_numa_stat_show,
Ying Han406eb0c2011-05-26 16:25:37 -07004005 },
4006#endif
Glauber Costa510fc4e2012-12-18 14:21:47 -08004007 {
4008 .name = "kmem.limit_in_bytes",
4009 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
Tejun Heo451af502014-05-13 12:16:21 -04004010 .write = mem_cgroup_write,
Tejun Heo791badb2013-12-05 12:28:02 -05004011 .read_u64 = mem_cgroup_read_u64,
Glauber Costa510fc4e2012-12-18 14:21:47 -08004012 },
4013 {
4014 .name = "kmem.usage_in_bytes",
4015 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
Tejun Heo791badb2013-12-05 12:28:02 -05004016 .read_u64 = mem_cgroup_read_u64,
Glauber Costa510fc4e2012-12-18 14:21:47 -08004017 },
4018 {
4019 .name = "kmem.failcnt",
4020 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
Tejun Heo6770c642014-05-13 12:16:21 -04004021 .write = mem_cgroup_reset,
Tejun Heo791badb2013-12-05 12:28:02 -05004022 .read_u64 = mem_cgroup_read_u64,
Glauber Costa510fc4e2012-12-18 14:21:47 -08004023 },
4024 {
4025 .name = "kmem.max_usage_in_bytes",
4026 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
Tejun Heo6770c642014-05-13 12:16:21 -04004027 .write = mem_cgroup_reset,
Tejun Heo791badb2013-12-05 12:28:02 -05004028 .read_u64 = mem_cgroup_read_u64,
Glauber Costa510fc4e2012-12-18 14:21:47 -08004029 },
Glauber Costa749c5412012-12-18 14:23:01 -08004030#ifdef CONFIG_SLABINFO
4031 {
4032 .name = "kmem.slabinfo",
Vladimir Davydovb0475012014-12-10 15:44:19 -08004033 .seq_start = slab_start,
4034 .seq_next = slab_next,
4035 .seq_stop = slab_stop,
4036 .seq_show = memcg_slab_show,
Glauber Costa749c5412012-12-18 14:23:01 -08004037 },
4038#endif
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08004039 {
4040 .name = "kmem.tcp.limit_in_bytes",
4041 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
4042 .write = mem_cgroup_write,
4043 .read_u64 = mem_cgroup_read_u64,
4044 },
4045 {
4046 .name = "kmem.tcp.usage_in_bytes",
4047 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
4048 .read_u64 = mem_cgroup_read_u64,
4049 },
4050 {
4051 .name = "kmem.tcp.failcnt",
4052 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
4053 .write = mem_cgroup_reset,
4054 .read_u64 = mem_cgroup_read_u64,
4055 },
4056 {
4057 .name = "kmem.tcp.max_usage_in_bytes",
4058 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
4059 .write = mem_cgroup_reset,
4060 .read_u64 = mem_cgroup_read_u64,
4061 },
Tejun Heo6bc10342012-04-01 12:09:55 -07004062 { }, /* terminate */
Tejun Heoaf36f902012-04-01 12:09:55 -07004063};
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004064
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004065static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08004066{
4067 struct mem_cgroup_per_node *pn;
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004068 struct mem_cgroup_per_zone *mz;
KAMEZAWA Hiroyuki41e33552008-04-08 17:41:54 -07004069 int zone, tmp = node;
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004070 /*
4071	 * This routine is called for each possible node.
4072	 * But it's a BUG to call kmalloc() against an offline node.
4073	 *
4074	 * TODO: this routine can waste a lot of memory for nodes which will
4075	 * never be onlined. It's better to use a memory hotplug callback
4076	 * function.
4077 */
KAMEZAWA Hiroyuki41e33552008-04-08 17:41:54 -07004078 if (!node_state(node, N_NORMAL_MEMORY))
4079 tmp = -1;
Jesper Juhl17295c82011-01-13 15:47:42 -08004080 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08004081 if (!pn)
4082 return 1;
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004083
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004084 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4085 mz = &pn->zoneinfo[zone];
Hugh Dickinsbea8c152012-11-16 14:14:54 -08004086 lruvec_init(&mz->lruvec);
Andrew Mortonbb4cc1a2013-09-24 15:27:40 -07004087 mz->usage_in_excess = 0;
4088 mz->on_tree = false;
Hugh Dickinsd79154b2012-03-21 16:34:18 -07004089 mz->memcg = memcg;
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004090 }
Johannes Weiner54f72fe2013-07-08 15:59:49 -07004091 memcg->nodeinfo[node] = pn;
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08004092 return 0;
4093}
4094
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004095static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004096{
Johannes Weiner54f72fe2013-07-08 15:59:49 -07004097 kfree(memcg->nodeinfo[node]);
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004098}
4099
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004100static void mem_cgroup_free(struct mem_cgroup *memcg)
4101{
4102 int node;
4103
4104 memcg_wb_domain_exit(memcg);
4105 for_each_node(node)
4106 free_mem_cgroup_per_zone_info(memcg, node);
4107 free_percpu(memcg->stat);
4108 kfree(memcg);
4109}
4110
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07004111static struct mem_cgroup *mem_cgroup_alloc(void)
4112{
Hugh Dickinsd79154b2012-03-21 16:34:18 -07004113 struct mem_cgroup *memcg;
Vladimir Davydov8ff69e22014-01-23 15:52:52 -08004114 size_t size;
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004115 int node;
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07004116
Vladimir Davydov8ff69e22014-01-23 15:52:52 -08004117 size = sizeof(struct mem_cgroup);
4118 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07004119
Vladimir Davydov8ff69e22014-01-23 15:52:52 -08004120 memcg = kzalloc(size, GFP_KERNEL);
Hugh Dickinsd79154b2012-03-21 16:34:18 -07004121 if (!memcg)
Dan Carpentere7bbcdf2010-03-23 13:35:12 -07004122 return NULL;
4123
Hugh Dickinsd79154b2012-03-21 16:34:18 -07004124 memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
4125 if (!memcg->stat)
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004126 goto fail;
Pavel Emelianov78fb7462008-02-07 00:13:51 -08004127
Bob Liu3ed28fa2012-01-12 17:19:04 -08004128 for_each_node(node)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004129 if (alloc_mem_cgroup_per_zone_info(memcg, node))
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004130 goto fail;
Balbir Singhf64c3f52009-09-23 15:56:37 -07004131
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004132 if (memcg_wb_domain_init(memcg, GFP_KERNEL))
4133 goto fail;
Balbir Singh28dbc4b2009-01-07 18:08:05 -08004134
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08004135 INIT_WORK(&memcg->high_work, high_work_func);
Glauber Costad142e3e2013-02-22 16:34:52 -08004136 memcg->last_scanned_node = MAX_NUMNODES;
4137 INIT_LIST_HEAD(&memcg->oom_notify);
Glauber Costad142e3e2013-02-22 16:34:52 -08004138 mutex_init(&memcg->thresholds_lock);
4139 spin_lock_init(&memcg->move_lock);
Anton Vorontsov70ddf632013-04-29 15:08:31 -07004140 vmpressure_init(&memcg->vmpressure);
Tejun Heofba94802013-11-22 18:20:43 -05004141 INIT_LIST_HEAD(&memcg->event_list);
4142 spin_lock_init(&memcg->event_list_lock);
Johannes Weinerd886f4e2016-01-20 15:02:47 -08004143 memcg->socket_pressure = jiffies;
Johannes Weiner127424c2016-01-20 15:02:32 -08004144#ifndef CONFIG_SLOB
Vladimir Davydov900a38f2014-12-12 16:55:10 -08004145 memcg->kmemcg_id = -1;
Vladimir Davydov900a38f2014-12-12 16:55:10 -08004146#endif
Tejun Heo52ebea72015-05-22 17:13:37 -04004147#ifdef CONFIG_CGROUP_WRITEBACK
4148 INIT_LIST_HEAD(&memcg->cgwb_list);
4149#endif
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004150 return memcg;
4151fail:
4152 mem_cgroup_free(memcg);
4153 return NULL;
Glauber Costad142e3e2013-02-22 16:34:52 -08004154}
4155
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004156static struct cgroup_subsys_state * __ref
4157mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
Glauber Costad142e3e2013-02-22 16:34:52 -08004158{
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004159 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
4160 struct mem_cgroup *memcg;
4161 long error = -ENOMEM;
Glauber Costad142e3e2013-02-22 16:34:52 -08004162
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004163 memcg = mem_cgroup_alloc();
4164 if (!memcg)
4165 return ERR_PTR(error);
Li Zefan4219b2d2013-09-23 16:56:29 +08004166
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004167 memcg->high = PAGE_COUNTER_MAX;
4168 memcg->soft_limit = PAGE_COUNTER_MAX;
4169 if (parent) {
4170 memcg->swappiness = mem_cgroup_swappiness(parent);
4171 memcg->oom_kill_disable = parent->oom_kill_disable;
4172 }
4173 if (parent && parent->use_hierarchy) {
4174 memcg->use_hierarchy = true;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004175 page_counter_init(&memcg->memory, &parent->memory);
Vladimir Davydov37e84352016-01-20 15:02:56 -08004176 page_counter_init(&memcg->swap, &parent->swap);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004177 page_counter_init(&memcg->memsw, &parent->memsw);
4178 page_counter_init(&memcg->kmem, &parent->kmem);
Johannes Weiner0db15292016-01-20 15:02:50 -08004179 page_counter_init(&memcg->tcpmem, &parent->tcpmem);
Balbir Singh18f59ea2009-01-07 18:08:07 -08004180 } else {
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004181 page_counter_init(&memcg->memory, NULL);
Vladimir Davydov37e84352016-01-20 15:02:56 -08004182 page_counter_init(&memcg->swap, NULL);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004183 page_counter_init(&memcg->memsw, NULL);
4184 page_counter_init(&memcg->kmem, NULL);
Johannes Weiner0db15292016-01-20 15:02:50 -08004185 page_counter_init(&memcg->tcpmem, NULL);
Tejun Heo8c7f6ed2012-09-13 12:20:58 -07004186 /*
4187		 * Deeper hierarchy with use_hierarchy == false doesn't make
4188		 * much sense, so let the cgroup subsystem know about this
4189		 * unfortunate state in our controller.
4190 */
Glauber Costad142e3e2013-02-22 16:34:52 -08004191 if (parent != root_mem_cgroup)
Tejun Heo073219e2014-02-08 10:36:58 -05004192 memory_cgrp_subsys.broken_hierarchy = true;
Balbir Singh18f59ea2009-01-07 18:08:07 -08004193 }
Vladimir Davydovd6441632014-01-23 15:53:09 -08004194
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004195 /* The following stuff does not apply to the root */
4196 if (!parent) {
4197 root_mem_cgroup = memcg;
4198 return &memcg->css;
4199 }
4200
4201 error = memcg_propagate_kmem(parent, memcg);
4202 if (error)
4203 goto fail;
Johannes Weiner127424c2016-01-20 15:02:32 -08004204
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08004205 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
Johannes Weineref129472016-01-14 15:21:34 -08004206 static_branch_inc(&memcg_sockets_enabled_key);
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08004207
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004208 return &memcg->css;
4209fail:
4210 mem_cgroup_free(memcg);
4211 return NULL;
4212}
4213
4214static int
4215mem_cgroup_css_online(struct cgroup_subsys_state *css)
4216{
4217 if (css->id > MEM_CGROUP_ID_MAX)
4218 return -ENOSPC;
Johannes Weiner2f7dd7a2014-10-02 16:16:57 -07004219
4220 return 0;
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004221}
4222
Tejun Heoeb954192013-08-08 20:11:23 -04004223static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
KAMEZAWA Hiroyukidf878fb2008-02-07 00:14:28 -08004224{
Tejun Heoeb954192013-08-08 20:11:23 -04004225 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Tejun Heo3bc942f2013-11-22 18:20:44 -05004226 struct mem_cgroup_event *event, *tmp;
Tejun Heo79bd9812013-11-22 18:20:42 -05004227
4228 /*
4229 * Unregister events and notify userspace.
4230 * Notify userspace about cgroup removing only after rmdir of cgroup
4231 * directory to avoid race between userspace and kernelspace.
4232 */
Tejun Heofba94802013-11-22 18:20:43 -05004233 spin_lock(&memcg->event_list_lock);
4234 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
Tejun Heo79bd9812013-11-22 18:20:42 -05004235 list_del_init(&event->list);
4236 schedule_work(&event->remove);
4237 }
Tejun Heofba94802013-11-22 18:20:43 -05004238 spin_unlock(&memcg->event_list_lock);
KAMEZAWA Hiroyukiec64f512009-04-02 16:57:26 -07004239
Johannes Weiner567e9ab2016-01-20 15:02:24 -08004240 memcg_offline_kmem(memcg);
Tejun Heo52ebea72015-05-22 17:13:37 -04004241 wb_memcg_offline(memcg);
KAMEZAWA Hiroyukidf878fb2008-02-07 00:14:28 -08004242}
4243
Vladimir Davydov6df38682015-12-29 14:54:10 -08004244static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
4245{
4246 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4247
4248 invalidate_reclaim_iterators(memcg);
4249}
4250
Tejun Heoeb954192013-08-08 20:11:23 -04004251static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004252{
Tejun Heoeb954192013-08-08 20:11:23 -04004253 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Daisuke Nishimurac268e992009-01-15 13:51:13 -08004254
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08004255 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
Johannes Weineref129472016-01-14 15:21:34 -08004256 static_branch_dec(&memcg_sockets_enabled_key);
Johannes Weiner3893e302016-01-20 15:02:29 -08004257
Johannes Weiner0db15292016-01-20 15:02:50 -08004258 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08004259 static_branch_dec(&memcg_sockets_enabled_key);
Johannes Weiner3893e302016-01-20 15:02:29 -08004260
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004261 vmpressure_cleanup(&memcg->vmpressure);
4262 cancel_work_sync(&memcg->high_work);
4263 mem_cgroup_remove_from_trees(memcg);
Johannes Weinerd886f4e2016-01-20 15:02:47 -08004264 memcg_free_kmem(memcg);
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004265 mem_cgroup_free(memcg);
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004266}
4267
Tejun Heo1ced9532014-07-08 18:02:57 -04004268/**
4269 * mem_cgroup_css_reset - reset the states of a mem_cgroup
4270 * @css: the target css
4271 *
4272 * Reset the states of the mem_cgroup associated with @css. This is
4273 * invoked when the userland requests disabling on the default hierarchy
4274 * but the memcg is pinned through dependency. The memcg should stop
4275 * applying policies and should revert to the vanilla state as it may be
4276 * made visible again.
4277 *
4278 * The current implementation only resets the essential configurations.
4279 * This needs to be expanded to cover all the visible parts.
4280 */
4281static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
4282{
4283 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4284
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004285 mem_cgroup_resize_limit(memcg, PAGE_COUNTER_MAX);
4286 mem_cgroup_resize_memsw_limit(memcg, PAGE_COUNTER_MAX);
4287 memcg_update_kmem_limit(memcg, PAGE_COUNTER_MAX);
Johannes Weiner241994e2015-02-11 15:26:06 -08004288 memcg->low = 0;
4289 memcg->high = PAGE_COUNTER_MAX;
Johannes Weiner24d404d2015-01-08 14:32:35 -08004290 memcg->soft_limit = PAGE_COUNTER_MAX;
Tejun Heo2529bb32015-05-22 18:23:34 -04004291 memcg_wb_domain_size_changed(memcg);
Tejun Heo1ced9532014-07-08 18:02:57 -04004292}
4293
Daisuke Nishimura02491442010-03-10 15:22:17 -08004294#ifdef CONFIG_MMU
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004295/* Handlers for move charge at task migration. */
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004296static int mem_cgroup_do_precharge(unsigned long count)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004297{
Johannes Weiner05b84302014-08-06 16:05:59 -07004298 int ret;
Johannes Weiner9476db92014-08-06 16:05:55 -07004299
Mel Gormand0164ad2015-11-06 16:28:21 -08004300 /* Try a single bulk charge without reclaim first, kswapd may wake */
4301 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
Johannes Weiner9476db92014-08-06 16:05:55 -07004302 if (!ret) {
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004303 mc.precharge += count;
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004304 return ret;
4305 }
Johannes Weiner9476db92014-08-06 16:05:55 -07004306
4307 /* Try charges one by one with reclaim */
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004308 while (count--) {
Johannes Weiner00501b52014-08-08 14:19:20 -07004309 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_NORETRY, 1);
KAMEZAWA Hiroyuki38c5d722012-01-12 17:19:01 -08004310 if (ret)
KAMEZAWA Hiroyuki38c5d722012-01-12 17:19:01 -08004311 return ret;
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004312 mc.precharge++;
Johannes Weiner9476db92014-08-06 16:05:55 -07004313 cond_resched();
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004314 }
Johannes Weiner9476db92014-08-06 16:05:55 -07004315 return 0;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004316}
4317
4318/**
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07004319 * get_mctgt_type - get target type of moving charge
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004320 * @vma: the vma the pte to be checked belongs
4321 * @addr: the address corresponding to the pte to be checked
4322 * @ptent: the pte to be checked
Daisuke Nishimura02491442010-03-10 15:22:17 -08004323 * @target: the pointer the target page or swap ent will be stored(can be NULL)
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004324 *
4325 * Returns
4326 * 0(MC_TARGET_NONE): if the pte is not a target for move charge.
4327 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
4328 *     move charge. If @target is not NULL, the page is stored in target->page
4329 *     with an extra refcount taken (callers should handle it).
Daisuke Nishimura02491442010-03-10 15:22:17 -08004330 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
4331 * target for charge migration. if @target is not NULL, the entry is stored
4332 * in target->ent.
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004333 *
4334 * Called with pte lock held.
4335 */
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004336union mc_target {
4337 struct page *page;
Daisuke Nishimura02491442010-03-10 15:22:17 -08004338 swp_entry_t ent;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004339};
4340
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004341enum mc_target_type {
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07004342 MC_TARGET_NONE = 0,
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004343 MC_TARGET_PAGE,
Daisuke Nishimura02491442010-03-10 15:22:17 -08004344 MC_TARGET_SWAP,
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004345};
4346
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004347static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
4348 unsigned long addr, pte_t ptent)
4349{
4350 struct page *page = vm_normal_page(vma, addr, ptent);
4351
4352 if (!page || !page_mapped(page))
4353 return NULL;
4354 if (PageAnon(page)) {
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08004355 if (!(mc.flags & MOVE_ANON))
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004356 return NULL;
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08004357 } else {
4358 if (!(mc.flags & MOVE_FILE))
4359 return NULL;
4360 }
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004361 if (!get_page_unless_zero(page))
4362 return NULL;
4363
4364 return page;
4365}
4366
KAMEZAWA Hiroyuki4b913552012-05-29 15:06:51 -07004367#ifdef CONFIG_SWAP
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004368static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4369 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4370{
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004371 struct page *page = NULL;
4372 swp_entry_t ent = pte_to_swp_entry(ptent);
4373
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08004374 if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004375 return NULL;
KAMEZAWA Hiroyuki4b913552012-05-29 15:06:51 -07004376 /*
4377 * Because lookup_swap_cache() updates some statistics counter,
4378 * we call find_get_page() with swapper_space directly.
4379 */
Shaohua Li33806f02013-02-22 16:34:37 -08004380 page = find_get_page(swap_address_space(ent), ent.val);
Johannes Weiner7941d212016-01-14 15:21:23 -08004381 if (do_memsw_account())
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004382 entry->val = ent.val;
4383
4384 return page;
4385}
KAMEZAWA Hiroyuki4b913552012-05-29 15:06:51 -07004386#else
4387static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4388 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4389{
4390 return NULL;
4391}
4392#endif
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004393
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004394static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
4395 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4396{
4397 struct page *page = NULL;
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004398 struct address_space *mapping;
4399 pgoff_t pgoff;
4400
4401 if (!vma->vm_file) /* anonymous vma */
4402 return NULL;
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08004403 if (!(mc.flags & MOVE_FILE))
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004404 return NULL;
4405
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004406 mapping = vma->vm_file->f_mapping;
Kirill A. Shutemov0661a332015-02-10 14:10:04 -08004407 pgoff = linear_page_index(vma, addr);
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004408
4409	/* page is moved even if it's not RSS of this task (page-faulted). */
Hugh Dickinsaa3b1892011-08-03 16:21:24 -07004410#ifdef CONFIG_SWAP
4411 /* shmem/tmpfs may report page out on swap: account for that too. */
Johannes Weiner139b6a62014-05-06 12:50:05 -07004412 if (shmem_mapping(mapping)) {
4413 page = find_get_entry(mapping, pgoff);
4414 if (radix_tree_exceptional_entry(page)) {
4415 swp_entry_t swp = radix_to_swp_entry(page);
Johannes Weiner7941d212016-01-14 15:21:23 -08004416 if (do_memsw_account())
Johannes Weiner139b6a62014-05-06 12:50:05 -07004417 *entry = swp;
4418 page = find_get_page(swap_address_space(swp), swp.val);
4419 }
4420 } else
4421 page = find_get_page(mapping, pgoff);
4422#else
4423 page = find_get_page(mapping, pgoff);
Hugh Dickinsaa3b1892011-08-03 16:21:24 -07004424#endif
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004425 return page;
4426}
4427
Chen Gangb1b0dea2015-04-14 15:47:35 -07004428/**
4429 * mem_cgroup_move_account - move account of the page
4430 * @page: the page
4431 * @nr_pages: number of regular pages (>1 for huge pages)
4432 * @from: mem_cgroup which the page is moved from.
4433 * @to: mem_cgroup which the page is moved to. @from != @to.
4434 *
Kirill A. Shutemov3ac808f2016-01-15 16:53:07 -08004435 * The caller must make sure the page is not on LRU (isolate_page() is useful.)
Chen Gangb1b0dea2015-04-14 15:47:35 -07004436 *
4437 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
4438 * from old cgroup.
4439 */
4440static int mem_cgroup_move_account(struct page *page,
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08004441 bool compound,
Chen Gangb1b0dea2015-04-14 15:47:35 -07004442 struct mem_cgroup *from,
4443 struct mem_cgroup *to)
4444{
4445 unsigned long flags;
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08004446 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
Chen Gangb1b0dea2015-04-14 15:47:35 -07004447 int ret;
Greg Thelenc4843a72015-05-22 17:13:16 -04004448 bool anon;
Chen Gangb1b0dea2015-04-14 15:47:35 -07004449
4450 VM_BUG_ON(from == to);
4451 VM_BUG_ON_PAGE(PageLRU(page), page);
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08004452 VM_BUG_ON(compound && !PageTransHuge(page));
Chen Gangb1b0dea2015-04-14 15:47:35 -07004453
4454 /*
Johannes Weiner6a93ca82016-03-15 14:57:19 -07004455 * Prevent mem_cgroup_migrate() from looking at
Hugh Dickins45637ba2015-11-05 18:49:40 -08004456 * page->mem_cgroup of its source page while we change it.
Chen Gangb1b0dea2015-04-14 15:47:35 -07004457 */
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08004458 ret = -EBUSY;
Chen Gangb1b0dea2015-04-14 15:47:35 -07004459 if (!trylock_page(page))
4460 goto out;
4461
4462 ret = -EINVAL;
4463 if (page->mem_cgroup != from)
4464 goto out_unlock;
4465
Greg Thelenc4843a72015-05-22 17:13:16 -04004466 anon = PageAnon(page);
4467
Chen Gangb1b0dea2015-04-14 15:47:35 -07004468 spin_lock_irqsave(&from->move_lock, flags);
4469
Greg Thelenc4843a72015-05-22 17:13:16 -04004470 if (!anon && page_mapped(page)) {
Chen Gangb1b0dea2015-04-14 15:47:35 -07004471 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
4472 nr_pages);
4473 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
4474 nr_pages);
4475 }
4476
Greg Thelenc4843a72015-05-22 17:13:16 -04004477 /*
4478 * move_lock grabbed above and caller set from->moving_account, so
4479 * mem_cgroup_update_page_stat() will serialize updates to PageDirty.
4480 * So mapping should be stable for dirty pages.
4481 */
4482 if (!anon && PageDirty(page)) {
4483 struct address_space *mapping = page_mapping(page);
4484
4485 if (mapping_cap_account_dirty(mapping)) {
4486 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_DIRTY],
4487 nr_pages);
4488 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_DIRTY],
4489 nr_pages);
4490 }
4491 }
4492
Chen Gangb1b0dea2015-04-14 15:47:35 -07004493 if (PageWriteback(page)) {
4494 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK],
4495 nr_pages);
4496 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK],
4497 nr_pages);
4498 }
4499
4500 /*
4501 * It is safe to change page->mem_cgroup here because the page
4502 * is referenced, charged, and isolated - we can't race with
4503 * uncharging, charging, migration, or LRU putback.
4504 */
4505
4506 /* caller should have done css_get */
4507 page->mem_cgroup = to;
4508 spin_unlock_irqrestore(&from->move_lock, flags);
4509
4510 ret = 0;
4511
4512 local_irq_disable();
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08004513 mem_cgroup_charge_statistics(to, page, compound, nr_pages);
Chen Gangb1b0dea2015-04-14 15:47:35 -07004514 memcg_check_events(to, page);
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08004515 mem_cgroup_charge_statistics(from, page, compound, -nr_pages);
Chen Gangb1b0dea2015-04-14 15:47:35 -07004516 memcg_check_events(from, page);
4517 local_irq_enable();
4518out_unlock:
4519 unlock_page(page);
4520out:
4521 return ret;
4522}
4523
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07004524static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004525 unsigned long addr, pte_t ptent, union mc_target *target)
4526{
Daisuke Nishimura02491442010-03-10 15:22:17 -08004527 struct page *page = NULL;
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07004528 enum mc_target_type ret = MC_TARGET_NONE;
Daisuke Nishimura02491442010-03-10 15:22:17 -08004529 swp_entry_t ent = { .val = 0 };
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004530
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004531 if (pte_present(ptent))
4532 page = mc_handle_present_pte(vma, addr, ptent);
4533 else if (is_swap_pte(ptent))
4534 page = mc_handle_swap_pte(vma, addr, ptent, &ent);
Kirill A. Shutemov0661a332015-02-10 14:10:04 -08004535 else if (pte_none(ptent))
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004536 page = mc_handle_file_pte(vma, addr, ptent, &ent);
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004537
4538 if (!page && !ent.val)
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07004539 return ret;
Daisuke Nishimura02491442010-03-10 15:22:17 -08004540 if (page) {
Daisuke Nishimura02491442010-03-10 15:22:17 -08004541 /*
Johannes Weiner0a31bc92014-08-08 14:19:22 -07004542		 * Do only a loose check, without serialization.
Johannes Weiner1306a852014-12-10 15:44:52 -08004543		 * mem_cgroup_move_account() checks whether the page is valid
Johannes Weiner0a31bc92014-08-08 14:19:22 -07004544		 * or not under LRU exclusion.
Daisuke Nishimura02491442010-03-10 15:22:17 -08004545 */
Johannes Weiner1306a852014-12-10 15:44:52 -08004546 if (page->mem_cgroup == mc.from) {
Daisuke Nishimura02491442010-03-10 15:22:17 -08004547 ret = MC_TARGET_PAGE;
4548 if (target)
4549 target->page = page;
4550 }
4551 if (!ret || !target)
4552 put_page(page);
4553 }
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004554 /* There is a swap entry and a page doesn't exist or isn't charged */
4555 if (ent.val && !ret &&
Li Zefan34c00c32013-09-23 16:56:01 +08004556 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
KAMEZAWA Hiroyuki7f0f1542010-05-11 14:06:58 -07004557 ret = MC_TARGET_SWAP;
4558 if (target)
4559 target->ent = ent;
Daisuke Nishimura02491442010-03-10 15:22:17 -08004560 }
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004561 return ret;
4562}
4563
Naoya Horiguchi12724852012-03-21 16:34:28 -07004564#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4565/*
4566 * We don't consider swapping or file-mapped pages because THP does not
4567 * support them for now.
4568 * The caller should make sure that pmd_trans_huge(pmd) is true.
4569 */
4570static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
4571 unsigned long addr, pmd_t pmd, union mc_target *target)
4572{
4573 struct page *page = NULL;
Naoya Horiguchi12724852012-03-21 16:34:28 -07004574 enum mc_target_type ret = MC_TARGET_NONE;
4575
4576 page = pmd_page(pmd);
Sasha Levin309381fea2014-01-23 15:52:54 -08004577 VM_BUG_ON_PAGE(!page || !PageHead(page), page);
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08004578 if (!(mc.flags & MOVE_ANON))
Naoya Horiguchi12724852012-03-21 16:34:28 -07004579 return ret;
Johannes Weiner1306a852014-12-10 15:44:52 -08004580 if (page->mem_cgroup == mc.from) {
Naoya Horiguchi12724852012-03-21 16:34:28 -07004581 ret = MC_TARGET_PAGE;
4582 if (target) {
4583 get_page(page);
4584 target->page = page;
4585 }
4586 }
4587 return ret;
4588}
4589#else
4590static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
4591 unsigned long addr, pmd_t pmd, union mc_target *target)
4592{
4593 return MC_TARGET_NONE;
4594}
4595#endif
4596
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004597static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
4598 unsigned long addr, unsigned long end,
4599 struct mm_walk *walk)
4600{
Naoya Horiguchi26bcd642015-02-11 15:27:57 -08004601 struct vm_area_struct *vma = walk->vma;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004602 pte_t *pte;
4603 spinlock_t *ptl;
4604
Kirill A. Shutemovb6ec57f2016-01-21 16:40:25 -08004605 ptl = pmd_trans_huge_lock(pmd, vma);
4606 if (ptl) {
Naoya Horiguchi12724852012-03-21 16:34:28 -07004607 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
4608 mc.precharge += HPAGE_PMD_NR;
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08004609 spin_unlock(ptl);
Andrea Arcangeli1a5a9902012-03-21 16:33:42 -07004610 return 0;
Naoya Horiguchi12724852012-03-21 16:34:28 -07004611 }
Dave Hansen03319322011-03-22 16:32:56 -07004612
Andrea Arcangeli45f83ce2012-03-28 14:42:40 -07004613 if (pmd_trans_unstable(pmd))
4614 return 0;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004615 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4616 for (; addr != end; pte++, addr += PAGE_SIZE)
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07004617 if (get_mctgt_type(vma, addr, *pte, NULL))
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004618 mc.precharge++; /* increment precharge temporarily */
4619 pte_unmap_unlock(pte - 1, ptl);
4620 cond_resched();
4621
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004622 return 0;
4623}
4624
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004625static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
4626{
4627 unsigned long precharge;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004628
Naoya Horiguchi26bcd642015-02-11 15:27:57 -08004629 struct mm_walk mem_cgroup_count_precharge_walk = {
4630 .pmd_entry = mem_cgroup_count_precharge_pte_range,
4631 .mm = mm,
4632 };
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08004633 down_read(&mm->mmap_sem);
Naoya Horiguchi26bcd642015-02-11 15:27:57 -08004634 walk_page_range(0, ~0UL, &mem_cgroup_count_precharge_walk);
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08004635 up_read(&mm->mmap_sem);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004636
4637 precharge = mc.precharge;
4638 mc.precharge = 0;
4639
4640 return precharge;
4641}
4642
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004643static int mem_cgroup_precharge_mc(struct mm_struct *mm)
4644{
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08004645 unsigned long precharge = mem_cgroup_count_precharge(mm);
4646
4647 VM_BUG_ON(mc.moving_task);
4648 mc.moving_task = current;
4649 return mem_cgroup_do_precharge(precharge);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004650}
4651
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08004652/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
4653static void __mem_cgroup_clear_mc(void)
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004654{
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07004655 struct mem_cgroup *from = mc.from;
4656 struct mem_cgroup *to = mc.to;
4657
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004658 /* we must uncharge all the leftover precharges from mc.to */
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004659 if (mc.precharge) {
Johannes Weiner00501b52014-08-08 14:19:20 -07004660 cancel_charge(mc.to, mc.precharge);
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004661 mc.precharge = 0;
4662 }
4663 /*
4664 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
4665 * we must uncharge here.
4666 */
4667 if (mc.moved_charge) {
Johannes Weiner00501b52014-08-08 14:19:20 -07004668 cancel_charge(mc.from, mc.moved_charge);
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004669 mc.moved_charge = 0;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004670 }
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08004671 /* we must fixup refcnts and charges */
4672 if (mc.moved_swap) {
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08004673 /* uncharge swap account from the old cgroup */
Johannes Weinerce00a962014-09-05 08:43:57 -04004674 if (!mem_cgroup_is_root(mc.from))
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004675 page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08004676
Johannes Weiner05b84302014-08-06 16:05:59 -07004677 /*
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004678 * we charged both to->memory and to->memsw, so we
4679 * should uncharge to->memory.
Johannes Weiner05b84302014-08-06 16:05:59 -07004680 */
Johannes Weinerce00a962014-09-05 08:43:57 -04004681 if (!mem_cgroup_is_root(mc.to))
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004682 page_counter_uncharge(&mc.to->memory, mc.moved_swap);
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08004683
Johannes Weinere8ea14c2014-12-10 15:42:42 -08004684 css_put_many(&mc.from->css, mc.moved_swap);
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08004685
Li Zefan40503772013-07-08 16:00:34 -07004686 /* we've already done css_get(mc.to) */
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08004687 mc.moved_swap = 0;
4688 }
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08004689 memcg_oom_recover(from);
4690 memcg_oom_recover(to);
4691 wake_up_all(&mc.waitq);
4692}
4693
4694static void mem_cgroup_clear_mc(void)
4695{
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08004696 /*
4697 * we must clear moving_task before waking up waiters at the end of
4698 * task migration.
4699 */
4700 mc.moving_task = NULL;
4701 __mem_cgroup_clear_mc();
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07004702 spin_lock(&mc.lock);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004703 mc.from = NULL;
4704 mc.to = NULL;
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07004705 spin_unlock(&mc.lock);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004706}
4707
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05004708static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004709{
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05004710 struct cgroup_subsys_state *css;
Ross Zwislereed67d72015-12-23 14:53:27 -07004711 struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
Tejun Heo9f2115f2015-09-08 15:01:10 -07004712 struct mem_cgroup *from;
Tejun Heo4530edd2015-09-11 15:00:19 -04004713 struct task_struct *leader, *p;
Tejun Heo9f2115f2015-09-08 15:01:10 -07004714 struct mm_struct *mm;
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08004715 unsigned long move_flags;
Tejun Heo9f2115f2015-09-08 15:01:10 -07004716 int ret = 0;
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004717
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05004718 /* charge immigration isn't supported on the default hierarchy */
4719 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
Tejun Heo9f2115f2015-09-08 15:01:10 -07004720 return 0;
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004721
Tejun Heo4530edd2015-09-11 15:00:19 -04004722 /*
4723 * Multi-process migrations only happen on the default hierarchy
4724 * where charge immigration is not used. Perform charge
4725 * immigration if @tset contains a leader and whine if there are
4726 * multiple.
4727 */
4728 p = NULL;
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05004729 cgroup_taskset_for_each_leader(leader, css, tset) {
Tejun Heo4530edd2015-09-11 15:00:19 -04004730 WARN_ON_ONCE(p);
4731 p = leader;
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05004732 memcg = mem_cgroup_from_css(css);
Tejun Heo4530edd2015-09-11 15:00:19 -04004733 }
4734 if (!p)
4735 return 0;
4736
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05004737 /*
4738	 * We are now committed to this value, whatever it is. Changes in this
4739	 * tunable will only affect upcoming migrations, not the current one.
4740	 * So we need to save it, and keep using it for the rest of this migration.
4741 */
4742 move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
4743 if (!move_flags)
4744 return 0;
4745
Tejun Heo9f2115f2015-09-08 15:01:10 -07004746 from = mem_cgroup_from_task(p);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004747
Tejun Heo9f2115f2015-09-08 15:01:10 -07004748 VM_BUG_ON(from == memcg);
Johannes Weiner247b1442014-12-10 15:44:11 -08004749
Tejun Heo9f2115f2015-09-08 15:01:10 -07004750 mm = get_task_mm(p);
4751 if (!mm)
4752 return 0;
4753	/* We move charges only when we move the owner of the mm */
4754 if (mm->owner == p) {
4755 VM_BUG_ON(mc.from);
4756 VM_BUG_ON(mc.to);
4757 VM_BUG_ON(mc.precharge);
4758 VM_BUG_ON(mc.moved_charge);
4759 VM_BUG_ON(mc.moved_swap);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004760
Tejun Heo9f2115f2015-09-08 15:01:10 -07004761 spin_lock(&mc.lock);
4762 mc.from = from;
4763 mc.to = memcg;
4764 mc.flags = move_flags;
4765 spin_unlock(&mc.lock);
4766 /* We set mc.moving_task later */
4767
4768 ret = mem_cgroup_precharge_mc(mm);
4769 if (ret)
4770 mem_cgroup_clear_mc();
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004771 }
Tejun Heo9f2115f2015-09-08 15:01:10 -07004772 mmput(mm);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004773 return ret;
4774}
4775
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05004776static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004777{
Johannes Weiner4e2f2452014-12-10 15:44:08 -08004778 if (mc.to)
4779 mem_cgroup_clear_mc();
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004780}
4781
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004782static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
4783 unsigned long addr, unsigned long end,
4784 struct mm_walk *walk)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004785{
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004786 int ret = 0;
Naoya Horiguchi26bcd642015-02-11 15:27:57 -08004787 struct vm_area_struct *vma = walk->vma;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004788 pte_t *pte;
4789 spinlock_t *ptl;
Naoya Horiguchi12724852012-03-21 16:34:28 -07004790 enum mc_target_type target_type;
4791 union mc_target target;
4792 struct page *page;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004793
Kirill A. Shutemovb6ec57f2016-01-21 16:40:25 -08004794 ptl = pmd_trans_huge_lock(pmd, vma);
4795 if (ptl) {
Hugh Dickins62ade862012-05-18 11:28:34 -07004796 if (mc.precharge < HPAGE_PMD_NR) {
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08004797 spin_unlock(ptl);
Naoya Horiguchi12724852012-03-21 16:34:28 -07004798 return 0;
4799 }
4800 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
4801 if (target_type == MC_TARGET_PAGE) {
4802 page = target.page;
4803 if (!isolate_lru_page(page)) {
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08004804 if (!mem_cgroup_move_account(page, true,
Johannes Weiner1306a852014-12-10 15:44:52 -08004805 mc.from, mc.to)) {
Naoya Horiguchi12724852012-03-21 16:34:28 -07004806 mc.precharge -= HPAGE_PMD_NR;
4807 mc.moved_charge += HPAGE_PMD_NR;
4808 }
4809 putback_lru_page(page);
4810 }
4811 put_page(page);
4812 }
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08004813 spin_unlock(ptl);
Andrea Arcangeli1a5a9902012-03-21 16:33:42 -07004814 return 0;
Naoya Horiguchi12724852012-03-21 16:34:28 -07004815 }
4816
Andrea Arcangeli45f83ce2012-03-28 14:42:40 -07004817 if (pmd_trans_unstable(pmd))
4818 return 0;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004819retry:
4820 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4821 for (; addr != end; addr += PAGE_SIZE) {
4822 pte_t ptent = *(pte++);
Daisuke Nishimura02491442010-03-10 15:22:17 -08004823 swp_entry_t ent;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004824
4825 if (!mc.precharge)
4826 break;
4827
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07004828 switch (get_mctgt_type(vma, addr, ptent, &target)) {
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004829 case MC_TARGET_PAGE:
4830 page = target.page;
Kirill A. Shutemov53f92632016-01-15 16:53:42 -08004831 /*
4832			 * We can have part of a split pmd here. Moving it
4833			 * could be done, but it would be too convoluted, so simply
4834			 * ignore such a partial THP and keep it in its original
4835			 * memcg. There should be somebody mapping the head.
4836 */
4837 if (PageTransCompound(page))
4838 goto put;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004839 if (isolate_lru_page(page))
4840 goto put;
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08004841 if (!mem_cgroup_move_account(page, false,
4842 mc.from, mc.to)) {
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004843 mc.precharge--;
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004844 /* we uncharge from mc.from later. */
4845 mc.moved_charge++;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004846 }
4847 putback_lru_page(page);
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07004848put: /* get_mctgt_type() gets the page */
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004849 put_page(page);
4850 break;
Daisuke Nishimura02491442010-03-10 15:22:17 -08004851 case MC_TARGET_SWAP:
4852 ent = target.ent;
Hugh Dickinse91cbb42012-05-29 15:06:51 -07004853 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
Daisuke Nishimura02491442010-03-10 15:22:17 -08004854 mc.precharge--;
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08004855 /* we fixup refcnts and charges later. */
4856 mc.moved_swap++;
4857 }
Daisuke Nishimura02491442010-03-10 15:22:17 -08004858 break;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004859 default:
4860 break;
4861 }
4862 }
4863 pte_unmap_unlock(pte - 1, ptl);
4864 cond_resched();
4865
4866 if (addr != end) {
4867 /*
4868		 * We have consumed all the precharges we got in can_attach().
4869		 * We try to charge one page at a time, but don't do any additional
4870		 * charges to mc.to if we have already failed to charge once in the
4871		 * attach() phase.
4872 */
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004873 ret = mem_cgroup_do_precharge(1);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004874 if (!ret)
4875 goto retry;
4876 }
4877
4878 return ret;
4879}
4880
4881static void mem_cgroup_move_charge(struct mm_struct *mm)
4882{
Naoya Horiguchi26bcd642015-02-11 15:27:57 -08004883 struct mm_walk mem_cgroup_move_charge_walk = {
4884 .pmd_entry = mem_cgroup_move_charge_pte_range,
4885 .mm = mm,
4886 };
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004887
4888 lru_add_drain_all();
Johannes Weiner312722c2014-12-10 15:44:25 -08004889 /*
Johannes Weiner81f8c3a2016-03-15 14:57:04 -07004890 * Signal lock_page_memcg() to take the memcg's move_lock
4891 * while we're moving its pages to another memcg. Then wait
4892 * for already started RCU-only updates to finish.
Johannes Weiner312722c2014-12-10 15:44:25 -08004893 */
4894 atomic_inc(&mc.from->moving_account);
4895 synchronize_rcu();
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08004896retry:
4897 if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
4898 /*
4899		 * Someone holding the mmap_sem might be waiting on the
4900		 * waitq. So we cancel all extra charges, wake up all waiters,
4901 * and retry. Because we cancel precharges, we might not be able
4902 * to move enough charges, but moving charge is a best-effort
4903 * feature anyway, so it wouldn't be a big problem.
4904 */
4905 __mem_cgroup_clear_mc();
4906 cond_resched();
4907 goto retry;
4908 }
Naoya Horiguchi26bcd642015-02-11 15:27:57 -08004909 /*
4910	 * When we have consumed all precharges and fail to do an
4911	 * additional charge, the page walk just aborts.
4912 */
4913 walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk);
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08004914 up_read(&mm->mmap_sem);
Johannes Weiner312722c2014-12-10 15:44:25 -08004915 atomic_dec(&mc.from->moving_account);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004916}
4917
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05004918static void mem_cgroup_move_task(struct cgroup_taskset *tset)
Balbir Singh67e465a2008-02-07 00:13:54 -08004919{
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05004920 struct cgroup_subsys_state *css;
4921 struct task_struct *p = cgroup_taskset_first(tset, &css);
KOSAKI Motohiroa4336582011-06-15 15:08:13 -07004922 struct mm_struct *mm = get_task_mm(p);
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08004923
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08004924 if (mm) {
KOSAKI Motohiroa4336582011-06-15 15:08:13 -07004925 if (mc.to)
4926 mem_cgroup_move_charge(mm);
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08004927 mmput(mm);
4928 }
KOSAKI Motohiroa4336582011-06-15 15:08:13 -07004929 if (mc.to)
4930 mem_cgroup_clear_mc();
Balbir Singh67e465a2008-02-07 00:13:54 -08004931}
Daisuke Nishimura5cfb80a2010-03-23 13:35:11 -07004932#else /* !CONFIG_MMU */
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05004933static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
Daisuke Nishimura5cfb80a2010-03-23 13:35:11 -07004934{
4935 return 0;
4936}
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05004937static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
Daisuke Nishimura5cfb80a2010-03-23 13:35:11 -07004938{
4939}
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05004940static void mem_cgroup_move_task(struct cgroup_taskset *tset)
Daisuke Nishimura5cfb80a2010-03-23 13:35:11 -07004941{
4942}
4943#endif
Balbir Singh67e465a2008-02-07 00:13:54 -08004944
Tejun Heof00baae2013-04-15 13:41:15 -07004945/*
4946 * Cgroup retains root cgroups across [un]mount cycles, making it necessary
Tejun Heoaa6ec292014-07-09 10:08:08 -04004947 * to verify whether we're attached to the default hierarchy on each mount
4948 * attempt.
Tejun Heof00baae2013-04-15 13:41:15 -07004949 */
Tejun Heoeb954192013-08-08 20:11:23 -04004950static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
Tejun Heof00baae2013-04-15 13:41:15 -07004951{
4952 /*
Tejun Heoaa6ec292014-07-09 10:08:08 -04004953 * use_hierarchy is forced on the default hierarchy. cgroup core
Tejun Heof00baae2013-04-15 13:41:15 -07004954 * guarantees that @root doesn't have any children, so turning it
4955 * on for the root memcg is enough.
4956 */
Tejun Heo9e10a132015-09-18 11:56:28 -04004957 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
Vladimir Davydov7feee592015-03-12 16:26:19 -07004958 root_mem_cgroup->use_hierarchy = true;
4959 else
4960 root_mem_cgroup->use_hierarchy = false;
Tejun Heof00baae2013-04-15 13:41:15 -07004961}
4962
Johannes Weiner241994e2015-02-11 15:26:06 -08004963static u64 memory_current_read(struct cgroup_subsys_state *css,
4964 struct cftype *cft)
4965{
Johannes Weinerf5fc3c5d2015-11-05 18:50:23 -08004966 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4967
4968 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
Johannes Weiner241994e2015-02-11 15:26:06 -08004969}
4970
4971static int memory_low_show(struct seq_file *m, void *v)
4972{
4973 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
Jason Low4db0c3c2015-04-15 16:14:08 -07004974 unsigned long low = READ_ONCE(memcg->low);
Johannes Weiner241994e2015-02-11 15:26:06 -08004975
4976 if (low == PAGE_COUNTER_MAX)
Johannes Weinerd2973692015-02-27 15:52:04 -08004977 seq_puts(m, "max\n");
Johannes Weiner241994e2015-02-11 15:26:06 -08004978 else
4979 seq_printf(m, "%llu\n", (u64)low * PAGE_SIZE);
4980
4981 return 0;
4982}
4983
4984static ssize_t memory_low_write(struct kernfs_open_file *of,
4985 char *buf, size_t nbytes, loff_t off)
4986{
4987 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4988 unsigned long low;
4989 int err;
4990
4991 buf = strstrip(buf);
Johannes Weinerd2973692015-02-27 15:52:04 -08004992 err = page_counter_memparse(buf, "max", &low);
Johannes Weiner241994e2015-02-11 15:26:06 -08004993 if (err)
4994 return err;
4995
4996 memcg->low = low;
4997
4998 return nbytes;
4999}
5000
5001static int memory_high_show(struct seq_file *m, void *v)
5002{
5003 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
Jason Low4db0c3c2015-04-15 16:14:08 -07005004 unsigned long high = READ_ONCE(memcg->high);
Johannes Weiner241994e2015-02-11 15:26:06 -08005005
5006 if (high == PAGE_COUNTER_MAX)
Johannes Weinerd2973692015-02-27 15:52:04 -08005007 seq_puts(m, "max\n");
Johannes Weiner241994e2015-02-11 15:26:06 -08005008 else
5009 seq_printf(m, "%llu\n", (u64)high * PAGE_SIZE);
5010
5011 return 0;
5012}
5013
5014static ssize_t memory_high_write(struct kernfs_open_file *of,
5015 char *buf, size_t nbytes, loff_t off)
5016{
5017 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5018 unsigned long high;
5019 int err;
5020
5021 buf = strstrip(buf);
Johannes Weinerd2973692015-02-27 15:52:04 -08005022 err = page_counter_memparse(buf, "max", &high);
Johannes Weiner241994e2015-02-11 15:26:06 -08005023 if (err)
5024 return err;
5025
5026 memcg->high = high;
5027
Tejun Heo2529bb32015-05-22 18:23:34 -04005028 memcg_wb_domain_size_changed(memcg);
Johannes Weiner241994e2015-02-11 15:26:06 -08005029 return nbytes;
5030}
5031
5032static int memory_max_show(struct seq_file *m, void *v)
5033{
5034 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
Jason Low4db0c3c2015-04-15 16:14:08 -07005035 unsigned long max = READ_ONCE(memcg->memory.limit);
Johannes Weiner241994e2015-02-11 15:26:06 -08005036
5037 if (max == PAGE_COUNTER_MAX)
Johannes Weinerd2973692015-02-27 15:52:04 -08005038 seq_puts(m, "max\n");
Johannes Weiner241994e2015-02-11 15:26:06 -08005039 else
5040 seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);
5041
5042 return 0;
5043}
5044
5045static ssize_t memory_max_write(struct kernfs_open_file *of,
5046 char *buf, size_t nbytes, loff_t off)
5047{
5048 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5049 unsigned long max;
5050 int err;
5051
5052 buf = strstrip(buf);
Johannes Weinerd2973692015-02-27 15:52:04 -08005053 err = page_counter_memparse(buf, "max", &max);
Johannes Weiner241994e2015-02-11 15:26:06 -08005054 if (err)
5055 return err;
5056
5057 err = mem_cgroup_resize_limit(memcg, max);
5058 if (err)
5059 return err;
5060
Tejun Heo2529bb32015-05-22 18:23:34 -04005061 memcg_wb_domain_size_changed(memcg);
Johannes Weiner241994e2015-02-11 15:26:06 -08005062 return nbytes;
5063}
5064
5065static int memory_events_show(struct seq_file *m, void *v)
5066{
5067 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5068
5069 seq_printf(m, "low %lu\n", mem_cgroup_read_events(memcg, MEMCG_LOW));
5070 seq_printf(m, "high %lu\n", mem_cgroup_read_events(memcg, MEMCG_HIGH));
5071 seq_printf(m, "max %lu\n", mem_cgroup_read_events(memcg, MEMCG_MAX));
5072 seq_printf(m, "oom %lu\n", mem_cgroup_read_events(memcg, MEMCG_OOM));
5073
5074 return 0;
5075}
5076
Johannes Weiner587d9f72016-01-20 15:03:19 -08005077static int memory_stat_show(struct seq_file *m, void *v)
5078{
5079 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5080 int i;
5081
5082 /*
5083 * Provide statistics on the state of the memory subsystem as
5084 * well as cumulative event counters that show past behavior.
5085 *
5086 * This list is ordered following a combination of these gradients:
5087 * 1) generic big picture -> specifics and details
5088 * 2) reflecting userspace activity -> reflecting kernel heuristics
5089 *
5090 * Current memory state:
5091 */
5092
5093 seq_printf(m, "anon %llu\n",
5094 (u64)tree_stat(memcg, MEM_CGROUP_STAT_RSS) * PAGE_SIZE);
5095 seq_printf(m, "file %llu\n",
5096 (u64)tree_stat(memcg, MEM_CGROUP_STAT_CACHE) * PAGE_SIZE);
Johannes Weinerb2807f02016-01-20 15:03:22 -08005097 seq_printf(m, "sock %llu\n",
5098 (u64)tree_stat(memcg, MEMCG_SOCK) * PAGE_SIZE);
Johannes Weiner587d9f72016-01-20 15:03:19 -08005099
5100 seq_printf(m, "file_mapped %llu\n",
5101 (u64)tree_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED) *
5102 PAGE_SIZE);
5103 seq_printf(m, "file_dirty %llu\n",
5104 (u64)tree_stat(memcg, MEM_CGROUP_STAT_DIRTY) *
5105 PAGE_SIZE);
5106 seq_printf(m, "file_writeback %llu\n",
5107 (u64)tree_stat(memcg, MEM_CGROUP_STAT_WRITEBACK) *
5108 PAGE_SIZE);
5109
5110 for (i = 0; i < NR_LRU_LISTS; i++) {
5111 struct mem_cgroup *mi;
5112 unsigned long val = 0;
5113
5114 for_each_mem_cgroup_tree(mi, memcg)
5115 val += mem_cgroup_nr_lru_pages(mi, BIT(i));
5116 seq_printf(m, "%s %llu\n",
5117 mem_cgroup_lru_names[i], (u64)val * PAGE_SIZE);
5118 }
5119
5120 /* Accumulated memory events */
5121
5122 seq_printf(m, "pgfault %lu\n",
5123 tree_events(memcg, MEM_CGROUP_EVENTS_PGFAULT));
5124 seq_printf(m, "pgmajfault %lu\n",
5125 tree_events(memcg, MEM_CGROUP_EVENTS_PGMAJFAULT));
5126
5127 return 0;
5128}
5129
Johannes Weiner241994e2015-02-11 15:26:06 -08005130static struct cftype memory_files[] = {
5131 {
5132 .name = "current",
Johannes Weinerf5fc3c5d2015-11-05 18:50:23 -08005133 .flags = CFTYPE_NOT_ON_ROOT,
Johannes Weiner241994e2015-02-11 15:26:06 -08005134 .read_u64 = memory_current_read,
5135 },
5136 {
5137 .name = "low",
5138 .flags = CFTYPE_NOT_ON_ROOT,
5139 .seq_show = memory_low_show,
5140 .write = memory_low_write,
5141 },
5142 {
5143 .name = "high",
5144 .flags = CFTYPE_NOT_ON_ROOT,
5145 .seq_show = memory_high_show,
5146 .write = memory_high_write,
5147 },
5148 {
5149 .name = "max",
5150 .flags = CFTYPE_NOT_ON_ROOT,
5151 .seq_show = memory_max_show,
5152 .write = memory_max_write,
5153 },
5154 {
5155 .name = "events",
5156 .flags = CFTYPE_NOT_ON_ROOT,
Tejun Heo472912a2015-09-18 18:01:59 -04005157 .file_offset = offsetof(struct mem_cgroup, events_file),
Johannes Weiner241994e2015-02-11 15:26:06 -08005158 .seq_show = memory_events_show,
5159 },
Johannes Weiner587d9f72016-01-20 15:03:19 -08005160 {
5161 .name = "stat",
5162 .flags = CFTYPE_NOT_ON_ROOT,
5163 .seq_show = memory_stat_show,
5164 },
Johannes Weiner241994e2015-02-11 15:26:06 -08005165 { } /* terminate */
5166};
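/*
 * Example (illustrative, not part of the original file): on a cgroup2
 * mount, the files declared above show up as memory.current, memory.low,
 * memory.high, memory.max, memory.events and memory.stat in every
 * non-root cgroup directory. The limit files are parsed by
 * page_counter_memparse(), so they accept either a byte value (memparse
 * suffixes such as K/M/G work) or the literal string "max":
 *
 *	# echo 512M > /sys/fs/cgroup/foo/memory.high
 *	# echo max  > /sys/fs/cgroup/foo/memory.max
 *	# cat /sys/fs/cgroup/foo/memory.current
 *
 * The cgroup path above is hypothetical.
 */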
5167
Tejun Heo073219e2014-02-08 10:36:58 -05005168struct cgroup_subsys memory_cgrp_subsys = {
Tejun Heo92fb9742012-11-19 08:13:38 -08005169 .css_alloc = mem_cgroup_css_alloc,
Glauber Costad142e3e2013-02-22 16:34:52 -08005170 .css_online = mem_cgroup_css_online,
Tejun Heo92fb9742012-11-19 08:13:38 -08005171 .css_offline = mem_cgroup_css_offline,
Vladimir Davydov6df38682015-12-29 14:54:10 -08005172 .css_released = mem_cgroup_css_released,
Tejun Heo92fb9742012-11-19 08:13:38 -08005173 .css_free = mem_cgroup_css_free,
Tejun Heo1ced9532014-07-08 18:02:57 -04005174 .css_reset = mem_cgroup_css_reset,
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005175 .can_attach = mem_cgroup_can_attach,
5176 .cancel_attach = mem_cgroup_cancel_attach,
Balbir Singh67e465a2008-02-07 00:13:54 -08005177 .attach = mem_cgroup_move_task,
Tejun Heof00baae2013-04-15 13:41:15 -07005178 .bind = mem_cgroup_bind,
Johannes Weiner241994e2015-02-11 15:26:06 -08005179 .dfl_cftypes = memory_files,
5180 .legacy_cftypes = mem_cgroup_legacy_files,
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08005181 .early_init = 0,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08005182};
KAMEZAWA Hiroyukic0777192009-01-07 18:07:57 -08005183
Johannes Weiner241994e2015-02-11 15:26:06 -08005184/**
Johannes Weiner241994e2015-02-11 15:26:06 -08005185 * mem_cgroup_low - check if memory consumption is below the normal range
5186 * @root: the highest ancestor to consider
5187 * @memcg: the memory cgroup to check
5188 *
5189 * Returns %true if memory consumption of @memcg, and that of all
5190 * configurable ancestors up to @root, is below the normal range.
5191 */
5192bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg)
5193{
5194 if (mem_cgroup_disabled())
5195 return false;
5196
5197 /*
5198 * The toplevel group doesn't have a configurable range, so
5199 * it's never low when looked at directly, and it is not
5200 * considered an ancestor when assessing the hierarchy.
5201 */
5202
5203 if (memcg == root_mem_cgroup)
5204 return false;
5205
Michal Hocko4e54ded2015-02-27 15:51:46 -08005206 if (page_counter_read(&memcg->memory) >= memcg->low)
Johannes Weiner241994e2015-02-11 15:26:06 -08005207 return false;
5208
5209 while (memcg != root) {
5210 memcg = parent_mem_cgroup(memcg);
5211
5212 if (memcg == root_mem_cgroup)
5213 break;
5214
Michal Hocko4e54ded2015-02-27 15:51:46 -08005215 if (page_counter_read(&memcg->memory) >= memcg->low)
Johannes Weiner241994e2015-02-11 15:26:06 -08005216 return false;
5217 }
5218 return true;
5219}
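/*
 * Illustrative sketch (not part of the original file): reclaim code would
 * typically consult mem_cgroup_low() before scanning a memcg and skip it
 * while its usage is still within the protected range, unless reclaim is
 * already desperate. The function name and the "desperate" flag are
 * hypothetical; only mem_cgroup_low() and for_each_mem_cgroup_tree()
 * come from this file.
 */
#if 0
static void example_reclaim_hierarchy(struct mem_cgroup *root, bool desperate)
{
	struct mem_cgroup *memcg;

	for_each_mem_cgroup_tree(memcg, root) {
		if (mem_cgroup_low(root, memcg) && !desperate)
			continue;	/* usage below memory.low, leave it alone */
		/* ... scan the LRU lists of this memcg ... */
	}
}
#endif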
5220
Johannes Weiner00501b52014-08-08 14:19:20 -07005221/**
5222 * mem_cgroup_try_charge - try charging a page
5223 * @page: page to charge
5224 * @mm: mm context of the victim
5225 * @gfp_mask: reclaim mode
5226 * @memcgp: charged memcg return
 * @compound: charge the page as compound or small page
5227 *
5228 * Try to charge @page to the memcg that @mm belongs to, reclaiming
5229 * pages according to @gfp_mask if necessary.
5230 *
5231 * Returns 0 on success, with *@memcgp pointing to the charged memcg.
5232 * Otherwise, an error code is returned.
5233 *
5234 * After page->mapping has been set up, the caller must finalize the
5235 * charge with mem_cgroup_commit_charge(). Or abort the transaction
5236 * with mem_cgroup_cancel_charge() in case page instantiation fails.
5237 */
5238int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08005239 gfp_t gfp_mask, struct mem_cgroup **memcgp,
5240 bool compound)
Johannes Weiner00501b52014-08-08 14:19:20 -07005241{
5242 struct mem_cgroup *memcg = NULL;
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08005243 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
Johannes Weiner00501b52014-08-08 14:19:20 -07005244 int ret = 0;
5245
5246 if (mem_cgroup_disabled())
5247 goto out;
5248
5249 if (PageSwapCache(page)) {
Johannes Weiner00501b52014-08-08 14:19:20 -07005250 /*
5251 * Every swap fault against a single page tries to charge the
5252 * page, bail as early as possible. shmem_unuse() encounters
5253 * already charged pages, too. The USED bit is protected by
5254 * the page lock, which serializes swap cache removal, which
5255 * in turn serializes uncharging.
5256 */
Vladimir Davydove993d902015-09-09 15:35:35 -07005257 VM_BUG_ON_PAGE(!PageLocked(page), page);
Johannes Weiner1306a852014-12-10 15:44:52 -08005258 if (page->mem_cgroup)
Johannes Weiner00501b52014-08-08 14:19:20 -07005259 goto out;
Vladimir Davydove993d902015-09-09 15:35:35 -07005260
Vladimir Davydov37e84352016-01-20 15:02:56 -08005261 if (do_swap_account) {
Vladimir Davydove993d902015-09-09 15:35:35 -07005262 swp_entry_t ent = { .val = page_private(page), };
5263 unsigned short id = lookup_swap_cgroup_id(ent);
5264
5265 rcu_read_lock();
5266 memcg = mem_cgroup_from_id(id);
5267 if (memcg && !css_tryget_online(&memcg->css))
5268 memcg = NULL;
5269 rcu_read_unlock();
5270 }
Johannes Weiner00501b52014-08-08 14:19:20 -07005271 }
5272
Johannes Weiner00501b52014-08-08 14:19:20 -07005273 if (!memcg)
5274 memcg = get_mem_cgroup_from_mm(mm);
5275
5276 ret = try_charge(memcg, gfp_mask, nr_pages);
5277
5278 css_put(&memcg->css);
Johannes Weiner00501b52014-08-08 14:19:20 -07005279out:
5280 *memcgp = memcg;
5281 return ret;
5282}
5283
5284/**
5285 * mem_cgroup_commit_charge - commit a page charge
5286 * @page: page to charge
5287 * @memcg: memcg to charge the page to
5288 * @lrucare: page might be on LRU already
5289 *
5290 * Finalize a charge transaction started by mem_cgroup_try_charge(),
5291 * after page->mapping has been set up. This must happen atomically
5292 * as part of the page instantiation, i.e. under the page table lock
5293 * for anonymous pages, under the page lock for page and swap cache.
5294 *
5295 * In addition, the page must not be on the LRU during the commit, to
5296 * prevent racing with task migration. If it might be, use @lrucare.
5297 *
5298 * Use mem_cgroup_cancel_charge() to cancel the transaction instead.
5299 */
5300void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08005301 bool lrucare, bool compound)
Johannes Weiner00501b52014-08-08 14:19:20 -07005302{
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08005303 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
Johannes Weiner00501b52014-08-08 14:19:20 -07005304
5305 VM_BUG_ON_PAGE(!page->mapping, page);
5306 VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);
5307
5308 if (mem_cgroup_disabled())
5309 return;
5310 /*
5311 * Swap faults will attempt to charge the same page multiple
5312 * times. But reuse_swap_page() might have removed the page
5313 * from swapcache already, so we can't check PageSwapCache().
5314 */
5315 if (!memcg)
5316 return;
5317
Johannes Weiner6abb5a82014-08-08 14:19:33 -07005318 commit_charge(page, memcg, lrucare);
5319
Johannes Weiner6abb5a82014-08-08 14:19:33 -07005320 local_irq_disable();
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08005321 mem_cgroup_charge_statistics(memcg, page, compound, nr_pages);
Johannes Weiner6abb5a82014-08-08 14:19:33 -07005322 memcg_check_events(memcg, page);
5323 local_irq_enable();
Johannes Weiner00501b52014-08-08 14:19:20 -07005324
Johannes Weiner7941d212016-01-14 15:21:23 -08005325 if (do_memsw_account() && PageSwapCache(page)) {
Johannes Weiner00501b52014-08-08 14:19:20 -07005326 swp_entry_t entry = { .val = page_private(page) };
5327 /*
5328		 * The swap entry might not get freed for a long time,
5329		 * so let's not wait for it. The page already received a
5330		 * memory+swap charge; drop the swap entry duplicate.
5331 */
5332 mem_cgroup_uncharge_swap(entry);
5333 }
5334}
5335
5336/**
5337 * mem_cgroup_cancel_charge - cancel a page charge
5338 * @page: page to charge
5339 * @memcg: memcg to charge the page to
5340 *
5341 * Cancel a charge transaction started by mem_cgroup_try_charge().
5342 */
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08005343void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
5344 bool compound)
Johannes Weiner00501b52014-08-08 14:19:20 -07005345{
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08005346 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
Johannes Weiner00501b52014-08-08 14:19:20 -07005347
5348 if (mem_cgroup_disabled())
5349 return;
5350 /*
5351 * Swap faults will attempt to charge the same page multiple
5352 * times. But reuse_swap_page() might have removed the page
5353 * from swapcache already, so we can't check PageSwapCache().
5354 */
5355 if (!memcg)
5356 return;
5357
Johannes Weiner00501b52014-08-08 14:19:20 -07005358 cancel_charge(memcg, nr_pages);
5359}
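/*
 * Illustrative sketch (not part of the original file): the charge protocol
 * described in the comments above, as a page-fault-style caller would use
 * it. The function name and the failure branch are hypothetical; the three
 * mem_cgroup_*_charge() calls match the definitions in this file.
 */
#if 0
static int example_charge_new_page(struct page *page, struct mm_struct *mm,
				   gfp_t gfp_mask)
{
	struct mem_cgroup *memcg;
	int err;

	err = mem_cgroup_try_charge(page, mm, gfp_mask, &memcg, false);
	if (err)
		return err;

	err = 0; /* ... set up page->mapping or install the pte ... */
	if (err) {
		/* instantiation failed: abort the transaction */
		mem_cgroup_cancel_charge(page, memcg, false);
		return err;
	}

	/* page is now reachable: finalize the charge */
	mem_cgroup_commit_charge(page, memcg, false, false);
	return 0;
}
#endif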
5360
Johannes Weiner747db952014-08-08 14:19:24 -07005361static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
Johannes Weiner747db952014-08-08 14:19:24 -07005362 unsigned long nr_anon, unsigned long nr_file,
5363 unsigned long nr_huge, struct page *dummy_page)
5364{
Johannes Weiner18eca2e2014-12-10 15:43:57 -08005365 unsigned long nr_pages = nr_anon + nr_file;
Johannes Weiner747db952014-08-08 14:19:24 -07005366 unsigned long flags;
5367
Johannes Weinerce00a962014-09-05 08:43:57 -04005368 if (!mem_cgroup_is_root(memcg)) {
Johannes Weiner18eca2e2014-12-10 15:43:57 -08005369 page_counter_uncharge(&memcg->memory, nr_pages);
Johannes Weiner7941d212016-01-14 15:21:23 -08005370 if (do_memsw_account())
Johannes Weiner18eca2e2014-12-10 15:43:57 -08005371 page_counter_uncharge(&memcg->memsw, nr_pages);
Johannes Weinerce00a962014-09-05 08:43:57 -04005372 memcg_oom_recover(memcg);
5373 }
Johannes Weiner747db952014-08-08 14:19:24 -07005374
5375 local_irq_save(flags);
5376 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon);
5377 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file);
5378 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge);
5379 __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout);
Johannes Weiner18eca2e2014-12-10 15:43:57 -08005380 __this_cpu_add(memcg->stat->nr_page_events, nr_pages);
Johannes Weiner747db952014-08-08 14:19:24 -07005381 memcg_check_events(memcg, dummy_page);
5382 local_irq_restore(flags);
Johannes Weinere8ea14c2014-12-10 15:42:42 -08005383
5384 if (!mem_cgroup_is_root(memcg))
Johannes Weiner18eca2e2014-12-10 15:43:57 -08005385 css_put_many(&memcg->css, nr_pages);
Johannes Weiner747db952014-08-08 14:19:24 -07005386}
5387
5388static void uncharge_list(struct list_head *page_list)
5389{
5390 struct mem_cgroup *memcg = NULL;
Johannes Weiner747db952014-08-08 14:19:24 -07005391 unsigned long nr_anon = 0;
5392 unsigned long nr_file = 0;
5393 unsigned long nr_huge = 0;
5394 unsigned long pgpgout = 0;
Johannes Weiner747db952014-08-08 14:19:24 -07005395 struct list_head *next;
5396 struct page *page;
5397
5398 next = page_list->next;
5399 do {
5400 unsigned int nr_pages = 1;
Johannes Weiner747db952014-08-08 14:19:24 -07005401
5402 page = list_entry(next, struct page, lru);
5403 next = page->lru.next;
5404
5405 VM_BUG_ON_PAGE(PageLRU(page), page);
5406 VM_BUG_ON_PAGE(page_count(page), page);
5407
Johannes Weiner1306a852014-12-10 15:44:52 -08005408 if (!page->mem_cgroup)
Johannes Weiner747db952014-08-08 14:19:24 -07005409 continue;
5410
5411 /*
5412 * Nobody should be changing or seriously looking at
Johannes Weiner1306a852014-12-10 15:44:52 -08005413		 * page->mem_cgroup at this point; we have fully
Johannes Weiner29833312014-12-10 15:44:02 -08005414 * exclusive access to the page.
Johannes Weiner747db952014-08-08 14:19:24 -07005415 */
5416
Johannes Weiner1306a852014-12-10 15:44:52 -08005417 if (memcg != page->mem_cgroup) {
Johannes Weiner747db952014-08-08 14:19:24 -07005418 if (memcg) {
Johannes Weiner18eca2e2014-12-10 15:43:57 -08005419 uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
5420 nr_huge, page);
5421 pgpgout = nr_anon = nr_file = nr_huge = 0;
Johannes Weiner747db952014-08-08 14:19:24 -07005422 }
Johannes Weiner1306a852014-12-10 15:44:52 -08005423 memcg = page->mem_cgroup;
Johannes Weiner747db952014-08-08 14:19:24 -07005424 }
5425
5426 if (PageTransHuge(page)) {
5427 nr_pages <<= compound_order(page);
5428 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
5429 nr_huge += nr_pages;
5430 }
5431
5432 if (PageAnon(page))
5433 nr_anon += nr_pages;
5434 else
5435 nr_file += nr_pages;
5436
Johannes Weiner1306a852014-12-10 15:44:52 -08005437 page->mem_cgroup = NULL;
Johannes Weiner747db952014-08-08 14:19:24 -07005438
5439 pgpgout++;
5440 } while (next != page_list);
5441
5442 if (memcg)
Johannes Weiner18eca2e2014-12-10 15:43:57 -08005443 uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
5444 nr_huge, page);
Johannes Weiner747db952014-08-08 14:19:24 -07005445}
5446
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005447/**
5448 * mem_cgroup_uncharge - uncharge a page
5449 * @page: page to uncharge
5450 *
5451 * Uncharge a page previously charged with mem_cgroup_try_charge() and
5452 * mem_cgroup_commit_charge().
5453 */
5454void mem_cgroup_uncharge(struct page *page)
5455{
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005456 if (mem_cgroup_disabled())
5457 return;
5458
Johannes Weiner747db952014-08-08 14:19:24 -07005459 /* Don't touch page->lru of any random page, pre-check: */
Johannes Weiner1306a852014-12-10 15:44:52 -08005460 if (!page->mem_cgroup)
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005461 return;
5462
Johannes Weiner747db952014-08-08 14:19:24 -07005463 INIT_LIST_HEAD(&page->lru);
5464 uncharge_list(&page->lru);
5465}
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005466
Johannes Weiner747db952014-08-08 14:19:24 -07005467/**
5468 * mem_cgroup_uncharge_list - uncharge a list of pages
5469 * @page_list: list of pages to uncharge
5470 *
5471 * Uncharge a list of pages previously charged with
5472 * mem_cgroup_try_charge() and mem_cgroup_commit_charge().
5473 */
5474void mem_cgroup_uncharge_list(struct list_head *page_list)
5475{
5476 if (mem_cgroup_disabled())
5477 return;
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005478
Johannes Weiner747db952014-08-08 14:19:24 -07005479 if (!list_empty(page_list))
5480 uncharge_list(page_list);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005481}
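/*
 * Illustrative sketch (not part of the original file): batched freeing
 * paths would uncharge a whole list of pages in one go before handing
 * them back to the page allocator. The function name is hypothetical.
 */
#if 0
static void example_release_page_list(struct list_head *pages_to_free)
{
	/* drops the memcg charge of every charged page on the list */
	mem_cgroup_uncharge_list(pages_to_free);
	/* ... free the pages ... */
}
#endif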
5482
5483/**
Johannes Weiner6a93ca82016-03-15 14:57:19 -07005484 * mem_cgroup_migrate - charge a page's replacement
5485 * @oldpage: currently circulating page
5486 * @newpage: replacement page
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005487 *
Johannes Weiner6a93ca82016-03-15 14:57:19 -07005488 * Charge @newpage as a replacement page for @oldpage. @oldpage will
5489 * be uncharged upon free.
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005490 *
5491 * Both pages must be locked, @newpage->mapping must be set up.
5492 */
Johannes Weiner6a93ca82016-03-15 14:57:19 -07005493void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005494{
Johannes Weiner29833312014-12-10 15:44:02 -08005495 struct mem_cgroup *memcg;
Johannes Weiner44b7a8d2016-01-20 15:03:16 -08005496 unsigned int nr_pages;
5497 bool compound;
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005498
5499 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
5500 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005501 VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
Johannes Weiner6abb5a82014-08-08 14:19:33 -07005502 VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
5503 newpage);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005504
5505 if (mem_cgroup_disabled())
5506 return;
5507
5508 /* Page cache replacement: new page already charged? */
Johannes Weiner1306a852014-12-10 15:44:52 -08005509 if (newpage->mem_cgroup)
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005510 return;
5511
Hugh Dickins45637ba2015-11-05 18:49:40 -08005512 /* Swapcache readahead pages can get replaced before being charged */
Johannes Weiner1306a852014-12-10 15:44:52 -08005513 memcg = oldpage->mem_cgroup;
Johannes Weiner29833312014-12-10 15:44:02 -08005514 if (!memcg)
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005515 return;
5516
Johannes Weiner44b7a8d2016-01-20 15:03:16 -08005517 /* Force-charge the new page. The old one will be freed soon */
5518 compound = PageTransHuge(newpage);
5519 nr_pages = compound ? hpage_nr_pages(newpage) : 1;
5520
5521 page_counter_charge(&memcg->memory, nr_pages);
5522 if (do_memsw_account())
5523 page_counter_charge(&memcg->memsw, nr_pages);
5524 css_get_many(&memcg->css, nr_pages);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005525
Johannes Weiner9cf76662016-03-15 14:57:58 -07005526 commit_charge(newpage, memcg, false);
Johannes Weiner44b7a8d2016-01-20 15:03:16 -08005527
5528 local_irq_disable();
5529 mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
5530 memcg_check_events(memcg, newpage);
5531 local_irq_enable();
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005532}
5533
Johannes Weineref129472016-01-14 15:21:34 -08005534DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
Johannes Weiner11092082016-01-14 15:21:26 -08005535EXPORT_SYMBOL(memcg_sockets_enabled_key);
5536
5537void sock_update_memcg(struct sock *sk)
5538{
5539 struct mem_cgroup *memcg;
5540
5541	/* Socket cloning can throw us here with sk_memcg already
5542	 * filled. It won't, however, necessarily happen from
5543	 * process context. So testing for the root memcg based on
5544	 * the current task's memcg won't help us in this case.
5545 *
5546 * Respecting the original socket's memcg is a better
5547 * decision in this case.
5548 */
5549 if (sk->sk_memcg) {
5550 BUG_ON(mem_cgroup_is_root(sk->sk_memcg));
5551 css_get(&sk->sk_memcg->css);
5552 return;
5553 }
5554
5555 rcu_read_lock();
5556 memcg = mem_cgroup_from_task(current);
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005557 if (memcg == root_mem_cgroup)
5558 goto out;
Johannes Weiner0db15292016-01-20 15:02:50 -08005559 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005560 goto out;
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005561 if (css_tryget_online(&memcg->css))
Johannes Weiner11092082016-01-14 15:21:26 -08005562 sk->sk_memcg = memcg;
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005563out:
Johannes Weiner11092082016-01-14 15:21:26 -08005564 rcu_read_unlock();
5565}
5566EXPORT_SYMBOL(sock_update_memcg);
5567
5568void sock_release_memcg(struct sock *sk)
5569{
5570 WARN_ON(!sk->sk_memcg);
5571 css_put(&sk->sk_memcg->css);
5572}
5573
5574/**
5575 * mem_cgroup_charge_skmem - charge socket memory
5576 * @memcg: memcg to charge
5577 * @nr_pages: number of pages to charge
5578 *
5579 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
5580 * @memcg's configured limit, %false if the charge had to be forced.
5581 */
5582bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
5583{
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005584 gfp_t gfp_mask = GFP_KERNEL;
Johannes Weiner11092082016-01-14 15:21:26 -08005585
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005586 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
Johannes Weiner0db15292016-01-20 15:02:50 -08005587 struct page_counter *fail;
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005588
Johannes Weiner0db15292016-01-20 15:02:50 -08005589 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
5590 memcg->tcpmem_pressure = 0;
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005591 return true;
5592 }
Johannes Weiner0db15292016-01-20 15:02:50 -08005593 page_counter_charge(&memcg->tcpmem, nr_pages);
5594 memcg->tcpmem_pressure = 1;
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005595 return false;
Johannes Weiner11092082016-01-14 15:21:26 -08005596 }
Johannes Weinerd886f4e2016-01-20 15:02:47 -08005597
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005598 /* Don't block in the packet receive path */
5599 if (in_softirq())
5600 gfp_mask = GFP_NOWAIT;
5601
Johannes Weinerb2807f02016-01-20 15:03:22 -08005602 this_cpu_add(memcg->stat->count[MEMCG_SOCK], nr_pages);
5603
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005604 if (try_charge(memcg, gfp_mask, nr_pages) == 0)
5605 return true;
5606
5607 try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages);
Johannes Weiner11092082016-01-14 15:21:26 -08005608 return false;
5609}
5610
5611/**
5612 * mem_cgroup_uncharge_skmem - uncharge socket memory
5613 * @memcg: memcg to uncharge
5614 * @nr_pages: number of pages to uncharge
5615 */
5616void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
5617{
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005618 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
Johannes Weiner0db15292016-01-20 15:02:50 -08005619 page_counter_uncharge(&memcg->tcpmem, nr_pages);
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005620 return;
5621 }
Johannes Weinerd886f4e2016-01-20 15:02:47 -08005622
Johannes Weinerb2807f02016-01-20 15:03:22 -08005623 this_cpu_sub(memcg->stat->count[MEMCG_SOCK], nr_pages);
5624
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005625 page_counter_uncharge(&memcg->memory, nr_pages);
5626 css_put_many(&memcg->css, nr_pages);
Johannes Weiner11092082016-01-14 15:21:26 -08005627}
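/*
 * Illustrative sketch (not part of the original file): a network buffer
 * accounting path would charge sk->sk_memcg when committing memory to a
 * socket and uncharge it on release, treating a %false return from
 * mem_cgroup_charge_skmem() as a memcg pressure signal. The example_*
 * function names are hypothetical.
 */
#if 0
static bool example_sk_account(struct sock *sk, unsigned int nr_pages)
{
	if (!sk->sk_memcg)
		return true;
	/* %false means the charge exceeded the limit and had to be forced */
	return mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages);
}

static void example_sk_unaccount(struct sock *sk, unsigned int nr_pages)
{
	if (sk->sk_memcg)
		mem_cgroup_uncharge_skmem(sk->sk_memcg, nr_pages);
}
#endif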
5628
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005629static int __init cgroup_memory(char *s)
5630{
5631 char *token;
5632
5633 while ((token = strsep(&s, ",")) != NULL) {
5634 if (!*token)
5635 continue;
5636 if (!strcmp(token, "nosocket"))
5637 cgroup_memory_nosocket = true;
Vladimir Davydov04823c82016-01-20 15:02:38 -08005638 if (!strcmp(token, "nokmem"))
5639 cgroup_memory_nokmem = true;
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005640 }
5641 return 0;
5642}
5643__setup("cgroup.memory=", cgroup_memory);
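/*
 * Example (illustrative, not part of the original file): socket and kernel
 * memory accounting can be disabled from the kernel command line with
 *
 *	cgroup.memory=nosocket,nokmem
 *
 * Tokens other than "nosocket" and "nokmem" are silently ignored by the
 * parser above.
 */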
Johannes Weiner11092082016-01-14 15:21:26 -08005644
Michal Hocko2d110852013-02-22 16:34:43 -08005645/*
Michal Hocko10813122013-02-22 16:35:41 -08005646 * subsys_initcall() for memory controller.
5647 *
5648 * Some parts like hotcpu_notifier() have to be initialized from this context
5649 * because of lock dependencies (cgroup_lock -> cpu hotplug) but basically
5650 * everything that doesn't depend on a specific mem_cgroup structure should
5651 * be initialized from here.
Michal Hocko2d110852013-02-22 16:34:43 -08005652 */
5653static int __init mem_cgroup_init(void)
5654{
Johannes Weiner95a045f2015-02-11 15:26:33 -08005655 int cpu, node;
5656
Michal Hocko2d110852013-02-22 16:34:43 -08005657 hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
Johannes Weiner95a045f2015-02-11 15:26:33 -08005658
5659 for_each_possible_cpu(cpu)
5660 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
5661 drain_local_stock);
5662
5663 for_each_node(node) {
5664 struct mem_cgroup_tree_per_node *rtpn;
5665 int zone;
5666
5667 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
5668 node_online(node) ? node : NUMA_NO_NODE);
5669
5670 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
5671 struct mem_cgroup_tree_per_zone *rtpz;
5672
5673 rtpz = &rtpn->rb_tree_per_zone[zone];
5674 rtpz->rb_root = RB_ROOT;
5675 spin_lock_init(&rtpz->lock);
5676 }
5677 soft_limit_tree.rb_tree_per_node[node] = rtpn;
5678 }
5679
Michal Hocko2d110852013-02-22 16:34:43 -08005680 return 0;
5681}
5682subsys_initcall(mem_cgroup_init);
Johannes Weiner21afa382015-02-11 15:26:36 -08005683
5684#ifdef CONFIG_MEMCG_SWAP
5685/**
5686 * mem_cgroup_swapout - transfer a memsw charge to swap
5687 * @page: page whose memsw charge to transfer
5688 * @entry: swap entry to move the charge to
5689 *
5690 * Transfer the memsw charge of @page to @entry.
5691 */
5692void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
5693{
5694 struct mem_cgroup *memcg;
5695 unsigned short oldid;
5696
5697 VM_BUG_ON_PAGE(PageLRU(page), page);
5698 VM_BUG_ON_PAGE(page_count(page), page);
5699
Johannes Weiner7941d212016-01-14 15:21:23 -08005700 if (!do_memsw_account())
Johannes Weiner21afa382015-02-11 15:26:36 -08005701 return;
5702
5703 memcg = page->mem_cgroup;
5704
5705 /* Readahead page, never charged */
5706 if (!memcg)
5707 return;
5708
5709 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
5710 VM_BUG_ON_PAGE(oldid, page);
5711 mem_cgroup_swap_statistics(memcg, true);
5712
5713 page->mem_cgroup = NULL;
5714
5715 if (!mem_cgroup_is_root(memcg))
5716 page_counter_uncharge(&memcg->memory, 1);
5717
Sebastian Andrzej Siewiorce9ce662015-09-04 15:47:50 -07005718 /*
5719 * Interrupts should be disabled here because the caller holds the
5720	 * mapping->tree_lock, which is taken with interrupts disabled. It is
5721	 * important to have interrupts disabled here because that is the
5722	 * only synchronisation we have for updating the per-CPU variables.
5723 */
5724 VM_BUG_ON(!irqs_disabled());
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08005725 mem_cgroup_charge_statistics(memcg, page, false, -1);
Johannes Weiner21afa382015-02-11 15:26:36 -08005726 memcg_check_events(memcg, page);
5727}
5728
Vladimir Davydov37e84352016-01-20 15:02:56 -08005729/**
5730 * mem_cgroup_try_charge_swap - try charging a swap entry
5731 * @page: page being added to swap
5732 * @entry: swap entry to charge
5733 *
5734 * Try to charge @entry to the memcg that @page belongs to.
5735 *
5736 * Returns 0 on success, -ENOMEM on failure.
5737 */
5738int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
5739{
5740 struct mem_cgroup *memcg;
5741 struct page_counter *counter;
5742 unsigned short oldid;
5743
5744 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) || !do_swap_account)
5745 return 0;
5746
5747 memcg = page->mem_cgroup;
5748
5749 /* Readahead page, never charged */
5750 if (!memcg)
5751 return 0;
5752
5753 if (!mem_cgroup_is_root(memcg) &&
5754 !page_counter_try_charge(&memcg->swap, 1, &counter))
5755 return -ENOMEM;
5756
5757 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
5758 VM_BUG_ON_PAGE(oldid, page);
5759 mem_cgroup_swap_statistics(memcg, true);
5760
5761 css_get(&memcg->css);
5762 return 0;
5763}
5764
Johannes Weiner21afa382015-02-11 15:26:36 -08005765/**
5766 * mem_cgroup_uncharge_swap - uncharge a swap entry
5767 * @entry: swap entry to uncharge
5768 *
Vladimir Davydov37e84352016-01-20 15:02:56 -08005769 * Drop the swap charge associated with @entry.
Johannes Weiner21afa382015-02-11 15:26:36 -08005770 */
5771void mem_cgroup_uncharge_swap(swp_entry_t entry)
5772{
5773 struct mem_cgroup *memcg;
5774 unsigned short id;
5775
Vladimir Davydov37e84352016-01-20 15:02:56 -08005776 if (!do_swap_account)
Johannes Weiner21afa382015-02-11 15:26:36 -08005777 return;
5778
5779 id = swap_cgroup_record(entry, 0);
5780 rcu_read_lock();
Vladimir Davydovadbe4272015-04-15 16:13:00 -07005781 memcg = mem_cgroup_from_id(id);
Johannes Weiner21afa382015-02-11 15:26:36 -08005782 if (memcg) {
Vladimir Davydov37e84352016-01-20 15:02:56 -08005783 if (!mem_cgroup_is_root(memcg)) {
5784 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
5785 page_counter_uncharge(&memcg->swap, 1);
5786 else
5787 page_counter_uncharge(&memcg->memsw, 1);
5788 }
Johannes Weiner21afa382015-02-11 15:26:36 -08005789 mem_cgroup_swap_statistics(memcg, false);
5790 css_put(&memcg->css);
5791 }
5792 rcu_read_unlock();
5793}
5794
Vladimir Davydovd8b38432016-01-20 15:03:07 -08005795long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
5796{
5797 long nr_swap_pages = get_nr_swap_pages();
5798
5799 if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
5800 return nr_swap_pages;
5801 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
5802 nr_swap_pages = min_t(long, nr_swap_pages,
5803 READ_ONCE(memcg->swap.limit) -
5804 page_counter_read(&memcg->swap));
5805 return nr_swap_pages;
5806}
5807
Vladimir Davydov5ccc5ab2016-01-20 15:03:10 -08005808bool mem_cgroup_swap_full(struct page *page)
5809{
5810 struct mem_cgroup *memcg;
5811
5812 VM_BUG_ON_PAGE(!PageLocked(page), page);
5813
5814 if (vm_swap_full())
5815 return true;
5816 if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
5817 return false;
5818
5819 memcg = page->mem_cgroup;
5820 if (!memcg)
5821 return false;
5822
5823 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
5824 if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.limit)
5825 return true;
5826
5827 return false;
5828}
5829
Johannes Weiner21afa382015-02-11 15:26:36 -08005830/* for remembering the boot option */
5831#ifdef CONFIG_MEMCG_SWAP_ENABLED
5832static int really_do_swap_account __initdata = 1;
5833#else
5834static int really_do_swap_account __initdata;
5835#endif
5836
5837static int __init enable_swap_account(char *s)
5838{
5839 if (!strcmp(s, "1"))
5840 really_do_swap_account = 1;
5841 else if (!strcmp(s, "0"))
5842 really_do_swap_account = 0;
5843 return 1;
5844}
5845__setup("swapaccount=", enable_swap_account);
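/*
 * Example (illustrative, not part of the original file): swap accounting
 * can be toggled at boot with "swapaccount=1" or "swapaccount=0"; the
 * default comes from CONFIG_MEMCG_SWAP_ENABLED as handled above.
 */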
5846
Vladimir Davydov37e84352016-01-20 15:02:56 -08005847static u64 swap_current_read(struct cgroup_subsys_state *css,
5848 struct cftype *cft)
5849{
5850 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5851
5852 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
5853}
5854
5855static int swap_max_show(struct seq_file *m, void *v)
5856{
5857 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5858 unsigned long max = READ_ONCE(memcg->swap.limit);
5859
5860 if (max == PAGE_COUNTER_MAX)
5861 seq_puts(m, "max\n");
5862 else
5863 seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);
5864
5865 return 0;
5866}
5867
5868static ssize_t swap_max_write(struct kernfs_open_file *of,
5869 char *buf, size_t nbytes, loff_t off)
5870{
5871 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5872 unsigned long max;
5873 int err;
5874
5875 buf = strstrip(buf);
5876 err = page_counter_memparse(buf, "max", &max);
5877 if (err)
5878 return err;
5879
5880 mutex_lock(&memcg_limit_mutex);
5881 err = page_counter_limit(&memcg->swap, max);
5882 mutex_unlock(&memcg_limit_mutex);
5883 if (err)
5884 return err;
5885
5886 return nbytes;
5887}
5888
5889static struct cftype swap_files[] = {
5890 {
5891 .name = "swap.current",
5892 .flags = CFTYPE_NOT_ON_ROOT,
5893 .read_u64 = swap_current_read,
5894 },
5895 {
5896 .name = "swap.max",
5897 .flags = CFTYPE_NOT_ON_ROOT,
5898 .seq_show = swap_max_show,
5899 .write = swap_max_write,
5900 },
5901 { } /* terminate */
5902};
5903
Johannes Weiner21afa382015-02-11 15:26:36 -08005904static struct cftype memsw_cgroup_files[] = {
5905 {
5906 .name = "memsw.usage_in_bytes",
5907 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
5908 .read_u64 = mem_cgroup_read_u64,
5909 },
5910 {
5911 .name = "memsw.max_usage_in_bytes",
5912 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
5913 .write = mem_cgroup_reset,
5914 .read_u64 = mem_cgroup_read_u64,
5915 },
5916 {
5917 .name = "memsw.limit_in_bytes",
5918 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
5919 .write = mem_cgroup_write,
5920 .read_u64 = mem_cgroup_read_u64,
5921 },
5922 {
5923 .name = "memsw.failcnt",
5924 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
5925 .write = mem_cgroup_reset,
5926 .read_u64 = mem_cgroup_read_u64,
5927 },
5928 { }, /* terminate */
5929};
5930
5931static int __init mem_cgroup_swap_init(void)
5932{
5933 if (!mem_cgroup_disabled() && really_do_swap_account) {
5934 do_swap_account = 1;
Vladimir Davydov37e84352016-01-20 15:02:56 -08005935 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys,
5936 swap_files));
Johannes Weiner21afa382015-02-11 15:26:36 -08005937 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
5938 memsw_cgroup_files));
5939 }
5940 return 0;
5941}
5942subsys_initcall(mem_cgroup_swap_init);
5943
5944#endif /* CONFIG_MEMCG_SWAP */