/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * Native page reclaim
 * Charge lifetime sanitation
 * Lockless page tracking & accounting
 * Unified hierarchy configuration model
 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmpressure.h>
#include <linux/mm_inline.h>
#include <linux/swap_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/file.h>
#include <linux/tracehook.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include "slab.h"

#include <linux/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

struct mem_cgroup *root_mem_cgroup __read_mostly;

#define MEM_CGROUP_RECLAIM_RETRIES	5

/* Socket memory accounting disabled? */
static bool cgroup_memory_nosocket;

/* Kernel memory accounting disabled? */
static bool cgroup_memory_nokmem;

/* Whether the swap controller is active */
#ifdef CONFIG_MEMCG_SWAP
int do_swap_account __read_mostly;
#else
#define do_swap_account		0
#endif
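
/*
 * Descriptive note (added comment, not in the original file): when
 * CONFIG_MEMCG_SWAP is enabled, do_swap_account can be flipped at boot time
 * with the "swapaccount=" kernel parameter, whose __setup handler lives
 * further down in this file.
 */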

/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && do_swap_account;
}
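
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * charge/uncharge paths typically consult do_memsw_account() to decide
 * whether the legacy memory+swap counter must be adjusted alongside the
 * plain memory counter. The helper below is a hypothetical example and is
 * not used elsewhere in this file.
 */
static inline void example_uncharge_memsw(struct mem_cgroup *memcg,
					  unsigned long nr_pages)
{
	/* always undo the plain memory charge */
	page_counter_uncharge(&memcg->memory, nr_pages);
	/* the memsw counter is only maintained on the legacy (v1) hierarchy */
	if (do_memsw_account())
		page_counter_uncharge(&memcg->memsw, nr_pages);
}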

static const char * const mem_cgroup_stat_names[] = {
	"cache",
	"rss",
	"rss_huge",
	"mapped_file",
	"dirty",
	"writeback",
	"swap",
};

static const char * const mem_cgroup_events_names[] = {
	"pgpgin",
	"pgpgout",
	"pgfault",
	"pgmajfault",
};

static const char * const mem_cgroup_lru_names[] = {
	"inactive_anon",
	"active_anon",
	"inactive_file",
	"active_file",
	"unevictable",
};
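
/*
 * Descriptive note (added comment, not in the original file): the three name
 * tables above are the userspace-visible strings, indexed by the
 * corresponding stat/event/LRU enums; they are printed by the memory.stat
 * handlers and by mem_cgroup_print_oom_info() below.
 */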
127
Kirill A. Shutemova0db00f2012-05-29 15:06:56 -0700128#define THRESHOLDS_EVENTS_TARGET 128
129#define SOFTLIMIT_EVENTS_TARGET 1024
130#define NUMAINFO_EVENTS_TARGET 1024
Johannes Weinere9f89742011-03-23 16:42:37 -0700131
Andrew Mortonbb4cc1a2013-09-24 15:27:40 -0700132/*
133 * Cgroups above their limits are maintained in a RB-Tree, independent of
134 * their hierarchy representation
135 */
136
Mel Gormanef8f2322016-07-28 15:46:05 -0700137struct mem_cgroup_tree_per_node {
Andrew Mortonbb4cc1a2013-09-24 15:27:40 -0700138 struct rb_root rb_root;
139 spinlock_t lock;
140};
141
Andrew Mortonbb4cc1a2013-09-24 15:27:40 -0700142struct mem_cgroup_tree {
143 struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
144};
145
146static struct mem_cgroup_tree soft_limit_tree __read_mostly;
147
/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

/*
 * cgroup_event represents events which userspace wants to receive.
 */
struct mem_cgroup_event {
	/*
	 * memcg which the event belongs to.
	 */
	struct mem_cgroup *memcg;
	/*
	 * eventfd to signal userspace about the event.
	 */
	struct eventfd_ctx *eventfd;
	/*
	 * Each of these is stored in a list by the cgroup.
	 */
	struct list_head list;
	/*
	 * The register_event() callback is used to add a new userspace
	 * waiter for changes related to this event. Use eventfd_signal()
	 * on the eventfd to send a notification to userspace.
	 */
	int (*register_event)(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args);
	/*
	 * The unregister_event() callback is called when userspace closes
	 * the eventfd or when the cgroup is removed. This callback must be
	 * set if you want to provide notification functionality.
	 */
	void (*unregister_event)(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd);
	/*
	 * All fields below are needed to unregister the event when
	 * userspace closes the eventfd.
	 */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_t wait;
	struct work_struct remove;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);

/* Stuff for moving charges at task migration. */
/*
 * Types of charges to be moved.
 */
#define MOVE_ANON	0x1U
#define MOVE_FILE	0x2U
#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t	  lock; /* for from, to */
	struct mm_struct  *mm;
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long flags;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define	MEM_CGROUP_MAX_RECLAIM_LOOPS 100
#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_ANON,
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
	NR_CHARGE_TYPE,
};

/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_OOM_TYPE,
	_KMEM,
	_TCP,
};

#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
/* Used for OOM notifier */
#define OOM_CONTROL		(0)
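
/*
 * Example (descriptive comment added for clarity, not in the original file):
 * the legacy cgroup files declared later in this file pack a resource type
 * and an attribute into cft->private, e.g. MEMFILE_PRIVATE(_MEM, RES_LIMIT);
 * the read/write handlers then recover the two halves with MEMFILE_TYPE()
 * and MEMFILE_ATTR().
 */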

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
{
	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

#ifndef CONFIG_SLOB
/*
 * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
 * The main reason for not using cgroup id for this:
 *  this works better in sparse environments, where we have a lot of memcgs,
 *  but only a few kmem-limited. Or also, if we have, for instance, 200
 *  memcgs, and none but the 200th is kmem-limited, we'd have to have a
 *  200 entry array for that.
 *
 * The current size of the caches array is stored in memcg_nr_cache_ids. It
 * will double each time we have to increase it.
 */
static DEFINE_IDA(memcg_cache_ida);
int memcg_nr_cache_ids;

/* Protects memcg_nr_cache_ids */
static DECLARE_RWSEM(memcg_cache_ids_sem);

void memcg_get_cache_ids(void)
{
	down_read(&memcg_cache_ids_sem);
}

void memcg_put_cache_ids(void)
{
	up_read(&memcg_cache_ids_sem);
}

/*
 * MIN_SIZE is different from 1, because we would like to avoid going through
 * the alloc/free process all the time. In a small machine, 4 kmem-limited
 * cgroups is a reasonable guess. In the future, it could be a parameter or
 * tunable, but that is strictly not necessary.
 *
 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
 * this constant directly from cgroup, but it is understandable that this is
 * better kept as an internal representation in cgroup.c. In any case, the
 * cgrp_id space is not getting any smaller, and we don't have to necessarily
 * increase ours as well if it increases.
 */
#define MEMCG_CACHES_MIN_SIZE 4
#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX

/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
 * conditional to this static branch, we have to allow modules that do
 * kmem_cache_alloc and the like to see this symbol as well.
 */
DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
EXPORT_SYMBOL(memcg_kmem_enabled_key);

struct workqueue_struct *memcg_kmem_cache_wq;

#endif /* !CONFIG_SLOB */

/**
 * mem_cgroup_css_from_page - css of the memcg associated with a page
 * @page: page of interest
 *
 * If memcg is bound to the default hierarchy, css of the memcg associated
 * with @page is returned.  The returned css remains associated with @page
 * until it is released.
 *
 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 * is returned.
 */
struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
{
	struct mem_cgroup *memcg;

	memcg = page->mem_cgroup;

	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memcg = root_mem_cgroup;

	return &memcg->css;
}

/**
 * page_cgroup_ino - return inode number of the memcg a page is charged to
 * @page: the page
 *
 * Look up the closest online ancestor of the memory cgroup @page is charged to
 * and return its inode number or 0 if @page is not charged to any cgroup. It
 * is safe to call this function without holding a reference to @page.
 *
 * Note, this function is inherently racy, because there is nothing to prevent
 * the cgroup inode from getting torn down and potentially reallocated a moment
 * after page_cgroup_ino() returns, so it only should be used by callers that
 * do not care (such as procfs interfaces).
 */
ino_t page_cgroup_ino(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long ino = 0;

	rcu_read_lock();
	memcg = READ_ONCE(page->mem_cgroup);
	while (memcg && !(memcg->css.flags & CSS_ONLINE))
		memcg = parent_mem_cgroup(memcg);
	if (memcg)
		ino = cgroup_ino(memcg->css.cgroup);
	rcu_read_unlock();
	return ino;
}
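
/*
 * Descriptive note (added comment, not in the original file): the procfs
 * caller alluded to above is /proc/kpagecgroup (fs/proc/page.c), which uses
 * page_cgroup_ino() to report the owning memcg's inode number per PFN.
 */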

static struct mem_cgroup_per_node *
mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
{
	int nid = page_to_nid(page);

	return memcg->nodeinfo[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_node(int nid)
{
	return soft_limit_tree.rb_tree_per_node[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);

	return soft_limit_tree.rb_tree_per_node[nid];
}

static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz,
					 unsigned long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_node *mz_node;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
				   tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess)
			p = &(*p)->rb_left;
		/*
		 * We can't avoid mem cgroups that are over their soft
		 * limit by the same amount
		 */
		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
			p = &(*p)->rb_right;
	}
	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}

static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz)
{
	if (!mz->on_tree)
		return;
	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
				       struct mem_cgroup_tree_per_node *mctz)
{
	unsigned long flags;

	spin_lock_irqsave(&mctz->lock, flags);
	__mem_cgroup_remove_exceeded(mz, mctz);
	spin_unlock_irqrestore(&mctz->lock, flags);
}

static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{
	unsigned long nr_pages = page_counter_read(&memcg->memory);
	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
	unsigned long excess = 0;

	if (nr_pages > soft_limit)
		excess = nr_pages - soft_limit;

	return excess;
}

static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
	unsigned long excess;
	struct mem_cgroup_per_node *mz;
	struct mem_cgroup_tree_per_node *mctz;

	mctz = soft_limit_tree_from_page(page);
	/*
	 * Necessary to update all ancestors when hierarchy is used,
	 * because their event counter is not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = mem_cgroup_page_nodeinfo(memcg, page);
		excess = soft_limit_excess(memcg);
		/*
		 * We have to update the tree if mz is on RB-tree or
		 * mem is over its softlimit.
		 */
		if (excess || mz->on_tree) {
			unsigned long flags;

			spin_lock_irqsave(&mctz->lock, flags);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(mz, mctz, excess);
			spin_unlock_irqrestore(&mctz->lock, flags);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
	struct mem_cgroup_tree_per_node *mctz;
	struct mem_cgroup_per_node *mz;
	int nid;

	for_each_node(nid) {
		mz = mem_cgroup_nodeinfo(memcg, nid);
		mctz = soft_limit_tree_node(nid);
		mem_cgroup_remove_exceeded(mz, mctz);
	}
}

static struct mem_cgroup_per_node *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct rb_node *rightmost = NULL;
	struct mem_cgroup_per_node *mz;

retry:
	mz = NULL;
	rightmost = rb_last(&mctz->rb_root);
	if (!rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(rightmost, struct mem_cgroup_per_node, tree_node);
	/*
	 * Remove the node now, but someone else can add it back; we will
	 * add it back at the end of reclaim to its correct position in
	 * the tree.
	 */
	__mem_cgroup_remove_exceeded(mz, mctz);
	if (!soft_limit_excess(mz->memcg) ||
	    !css_tryget_online(&mz->memcg->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_node *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct mem_cgroup_per_node *mz;

	spin_lock_irq(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock_irq(&mctz->lock);
	return mz;
}

/*
 * Return the page count for a single (non-recursive) @memcg.
 *
 * Implementation note: reading percpu statistics for a memcg.
 *
 * Both vmstat[] and percpu_counter use thresholds and periodic
 * synchronization to implement a "quick" read. There is a trade-off between
 * the cost of reading and the precision of the value, so we could likewise
 * add periodic synchronization to the memcg counters.
 *
 * But this _read() function is currently used for the user interface. Users
 * account memory through the memory cgroup and always require an exact
 * value. Even if we provided a quick-and-fuzzy read, we would still have to
 * visit all online CPUs and compute the sum, so for now the extra
 * synchronization is not implemented (it is only done for CPU hotplug).
 *
 * If kernel-internal users appear that can work with an inexact value, and
 * reading all per-cpu values becomes a performance bottleneck in some common
 * workload, thresholds and synchronization like vmstat[] should be
 * implemented.
 */
static unsigned long
mem_cgroup_read_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx)
{
	long val = 0;
	int cpu;

	/* Per-cpu values can be negative, use a signed accumulator */
	for_each_possible_cpu(cpu)
		val += per_cpu(memcg->stat->count[idx], cpu);
	/*
	 * Summing races with updates, so val may be negative.  Avoid exposing
	 * transient negative values.
	 */
	if (val < 0)
		val = 0;
	return val;
}

static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
					    enum mem_cgroup_events_index idx)
{
	unsigned long val = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		val += per_cpu(memcg->stat->events[idx], cpu);
	return val;
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
					 struct page *page,
					 bool compound, int nr_pages)
{
	/*
	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
	 * counted as CACHE even if it's on ANON LRU.
	 */
	if (PageAnon(page))
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
			       nr_pages);
	else
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
			       nr_pages);

	if (compound) {
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
			       nr_pages);
	}

	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
	else {
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
}

unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
					   int nid, unsigned int lru_mask)
{
	struct lruvec *lruvec = mem_cgroup_lruvec(NODE_DATA(nid), memcg);
	unsigned long nr = 0;
	enum lru_list lru;

	VM_BUG_ON((unsigned)nid >= nr_node_ids);

	for_each_lru(lru) {
		if (!(BIT(lru) & lru_mask))
			continue;
		nr += mem_cgroup_get_lru_size(lruvec, lru);
	}
	return nr;
}

static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
					     unsigned int lru_mask)
{
	unsigned long nr = 0;
	int nid;

	for_each_node_state(nid, N_MEMORY)
		nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
	return nr;
}

static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				       enum mem_cgroup_events_target target)
{
	unsigned long val, next;

	val = __this_cpu_read(memcg->stat->nr_page_events);
	next = __this_cpu_read(memcg->stat->targets[target]);
	/* from time_after() in jiffies.h */
	if ((long)next - (long)val < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_SOFTLIMIT:
			next = val + SOFTLIMIT_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_NUMAINFO:
			next = val + NUMAINFO_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->stat->targets[target], next);
		return true;
	}
	return false;
}

/*
 * Check events in order.
 *
 */
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
{
	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
		bool do_softlimit;
		bool do_numainfo __maybe_unused;

		do_softlimit = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_SOFTLIMIT);
#if MAX_NUMNODES > 1
		do_numainfo = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_NUMAINFO);
#endif
		mem_cgroup_threshold(memcg);
		if (unlikely(do_softlimit))
			mem_cgroup_update_tree(memcg, page);
#if MAX_NUMNODES > 1
		if (unlikely(do_numainfo))
			atomic_inc(&memcg->numainfo_events);
#endif
	}
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
}
EXPORT_SYMBOL(mem_cgroup_from_task);

static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg = NULL;

	rcu_read_lock();
	do {
		/*
		 * Page cache insertions can happen without an
		 * actual mm context, e.g. during disk probing
		 * on boot, loopback IO, acct() writes etc.
		 */
		if (unlikely(!mm))
			memcg = root_mem_cgroup;
		else {
			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
			if (unlikely(!memcg))
				memcg = root_mem_cgroup;
		}
	} while (!css_tryget_online(&memcg->css));
	rcu_read_unlock();
	return memcg;
}

/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a zone and a priority level in @reclaim to
 * divide up the memcgs in the hierarchy among all concurrent
 * reclaimers operating on the same zone and priority.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
	struct cgroup_subsys_state *css = NULL;
	struct mem_cgroup *memcg = NULL;
	struct mem_cgroup *pos = NULL;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	if (prev && !reclaim)
		pos = prev;

	if (!root->use_hierarchy && root != root_mem_cgroup) {
		if (prev)
			goto out;
		return root;
	}

	rcu_read_lock();

	if (reclaim) {
		struct mem_cgroup_per_node *mz;

		mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id);
		iter = &mz->iter[reclaim->priority];

		if (prev && reclaim->generation != iter->generation)
			goto out_unlock;

		while (1) {
			pos = READ_ONCE(iter->position);
			if (!pos || css_tryget(&pos->css))
				break;
			/*
			 * css reference reached zero, so iter->position will
			 * be cleared by ->css_released. However, we should not
			 * rely on this happening soon, because ->css_released
			 * is called from a work queue, and by busy-waiting we
			 * might block it. So we clear iter->position right
			 * away.
			 */
			(void)cmpxchg(&iter->position, pos, NULL);
		}
	}

	if (pos)
		css = &pos->css;

	for (;;) {
		css = css_next_descendant_pre(css, &root->css);
		if (!css) {
			/*
			 * Reclaimers share the hierarchy walk, and a
			 * new one might jump in right at the end of
			 * the hierarchy - make sure they see at least
			 * one group and restart from the beginning.
			 */
			if (!prev)
				continue;
			break;
		}

		/*
		 * Verify the css and acquire a reference. The root
		 * is provided by the caller, so we know it's alive
		 * and kicking, and don't take an extra reference.
		 */
		memcg = mem_cgroup_from_css(css);

		if (css == &root->css)
			break;

		if (css_tryget(css))
			break;

		memcg = NULL;
	}

	if (reclaim) {
		/*
		 * The position could have already been updated by a competing
		 * thread, so check that the value hasn't changed since we read
		 * it to avoid reclaiming from the same cgroup twice.
		 */
		(void)cmpxchg(&iter->position, pos, memcg);

		if (pos)
			css_put(&pos->css);

		if (!memcg)
			iter->generation++;
		else if (!prev)
			reclaim->generation = iter->generation;
	}

out_unlock:
	rcu_read_unlock();
out:
	if (prev && prev != root)
		css_put(&prev->css);

	return memcg;
}

/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}

static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup *memcg = dead_memcg;
	struct mem_cgroup_reclaim_iter *iter;
	struct mem_cgroup_per_node *mz;
	int nid;
	int i;

	while ((memcg = parent_mem_cgroup(memcg))) {
		for_each_node(nid) {
			mz = mem_cgroup_nodeinfo(memcg, nid);
			for (i = 0; i <= DEF_PRIORITY; i++) {
				iter = &mz->iter[i];
				cmpxchg(&iter->position,
					dead_memcg, NULL);
			}
		}
	}
}

/*
 * Iteration constructs for visiting all cgroups (under a tree). If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)			\
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(NULL, iter, NULL))
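
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * a hierarchy walk that bails out early. When the loop is left before
 * mem_cgroup_iter() has returned NULL, mem_cgroup_iter_break() must be
 * called to drop the reference held on the last visited memcg, exactly as
 * the comment above describes. This helper is hypothetical and not used
 * elsewhere in this file.
 */
static inline void example_visit_first_n(struct mem_cgroup *root, int n)
{
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, root) {
		if (n-- <= 0) {
			/* leaving the walk early: drop the reference on @iter */
			mem_cgroup_iter_break(root, iter);
			break;
		}
		/* ... operate on @iter while holding its reference ... */
	}
}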

/**
 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
 * @memcg: hierarchy root
 * @fn: function to call for each task
 * @arg: argument passed to @fn
 *
 * This function iterates over tasks attached to @memcg or to any of its
 * descendants and calls @fn for each task. If @fn returns a non-zero
 * value, the function breaks the iteration loop and returns the value.
 * Otherwise, it will iterate over all tasks and return 0.
 *
 * This function must not be called for the root memory cgroup.
 */
int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
			  int (*fn)(struct task_struct *, void *), void *arg)
{
	struct mem_cgroup *iter;
	int ret = 0;

	BUG_ON(memcg == root_mem_cgroup);

	for_each_mem_cgroup_tree(iter, memcg) {
		struct css_task_iter it;
		struct task_struct *task;

		css_task_iter_start(&iter->css, &it);
		while (!ret && (task = css_task_iter_next(&it)))
			ret = fn(task, arg);
		css_task_iter_end(&it);
		if (ret) {
			mem_cgroup_iter_break(memcg, iter);
			break;
		}
	}
	return ret;
}
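
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * counting the tasks attached to a memcg hierarchy with
 * mem_cgroup_scan_tasks(). Both helpers are hypothetical; the callback
 * returns 0 so the scan visits every task. Assumes @memcg is not the root
 * memory cgroup, as required by the function above.
 */
static int example_count_one_task(struct task_struct *task, void *arg)
{
	(*(unsigned int *)arg)++;
	return 0;	/* keep iterating */
}

static inline unsigned int example_count_tasks(struct mem_cgroup *memcg)
{
	unsigned int nr = 0;

	mem_cgroup_scan_tasks(memcg, example_count_one_task, &nr);
	return nr;
}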

/**
 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
 * @page: the page
 * @pgdat: pglist_data of the page
 *
 * This function is only safe when following the LRU page isolation
 * and putback protocol: the LRU lock must be held, and the page must
 * either be PageLRU() or the caller must have isolated/allocated it.
 */
struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgdat)
{
	struct mem_cgroup_per_node *mz;
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &pgdat->lruvec;
		goto out;
	}

	memcg = page->mem_cgroup;
	/*
	 * Swapcache readahead pages are added to the LRU - and
	 * possibly migrated - before they are charged.
	 */
	if (!memcg)
		memcg = root_mem_cgroup;

	mz = mem_cgroup_page_nodeinfo(memcg, page);
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}
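
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * the isolation protocol described above, modeled loosely on how callers
 * such as isolate_lru_page() use this helper. The hypothetical function
 * below takes the node's LRU lock, looks up the lruvec while the page is
 * still PageLRU(), and only then touches the LRU list.
 */
static inline void example_del_page_from_lru(struct page *page)
{
	struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));
	struct lruvec *lruvec;

	spin_lock_irq(&pgdat->lru_lock);
	lruvec = mem_cgroup_page_lruvec(page, pgdat);
	if (PageLRU(page)) {
		ClearPageLRU(page);
		del_page_from_lru_list(page, lruvec, page_lru(page));
	}
	spin_unlock_irq(&pgdat->lru_lock);
}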

/**
 * mem_cgroup_update_lru_size - account for adding or removing an lru page
 * @lruvec: mem_cgroup per zone lru vector
 * @lru: index of lru list the page is sitting on
 * @zid: zone id of the accounted pages
 * @nr_pages: positive when adding or negative when removing
 *
 * This function must be called under lru_lock, just before a page is added
 * to or just after a page is removed from an lru list (that ordering being
 * so as to allow it to check that lru_size 0 is consistent with list_empty).
 */
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages)
{
	struct mem_cgroup_per_node *mz;
	unsigned long *lru_size;
	long size;

	if (mem_cgroup_disabled())
		return;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	lru_size = &mz->lru_zone_size[zid][lru];

	if (nr_pages < 0)
		*lru_size += nr_pages;

	size = *lru_size;
	if (WARN_ONCE(size < 0,
		"%s(%p, %d, %d): lru_size %ld\n",
		__func__, lruvec, lru, nr_pages, size)) {
		VM_BUG_ON(1);
		*lru_size = 0;
	}

	if (nr_pages > 0)
		*lru_size += nr_pages;
}

bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	struct task_struct *p;
	bool ret;

	p = find_lock_task_mm(task);
	if (p) {
		task_memcg = get_mem_cgroup_from_mm(p->mm);
		task_unlock(p);
	} else {
		/*
		 * All threads may have already detached their mm's, but the oom
		 * killer still needs to detect if they have already been oom
		 * killed to prevent needlessly killing additional tasks.
		 */
		rcu_read_lock();
		task_memcg = mem_cgroup_from_task(task);
		css_get(&task_memcg->css);
		rcu_read_unlock();
	}
	ret = mem_cgroup_is_descendant(task_memcg, memcg);
	css_put(&task_memcg->css);
	return ret;
}

/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @memcg: the memory cgroup
 *
 * Returns the maximum amount of memory @memcg can be charged with, in
 * pages.
 */
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
{
	unsigned long margin = 0;
	unsigned long count;
	unsigned long limit;

	count = page_counter_read(&memcg->memory);
	limit = READ_ONCE(memcg->memory.limit);
	if (count < limit)
		margin = limit - count;

	if (do_memsw_account()) {
		count = page_counter_read(&memcg->memsw);
		limit = READ_ONCE(memcg->memsw.limit);
		if (count <= limit)
			margin = min(margin, limit - count);
		else
			margin = 0;
	}

	return margin;
}
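
/*
 * Descriptive note (added comment, not in the original file): the charge
 * path later in this file uses mem_cgroup_margin() after direct reclaim to
 * decide whether retrying the charge is likely to succeed.
 */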

/*
 * A routine for checking whether "mem" is under move_account() or not.
 *
 * Checks whether a cgroup is mc.from, mc.to, or within the hierarchy of
 * the moving cgroups. This is used for waiting out high memory pressure
 * caused by "move".
 */
static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
{
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	bool ret = false;
	/*
	 * Unlike task_move routines, we access mc.to, mc.from not under
	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
	 */
	spin_lock(&mc.lock);
	from = mc.from;
	to = mc.to;
	if (!from)
		goto unlock;

	ret = mem_cgroup_is_descendant(from, memcg) ||
		mem_cgroup_is_descendant(to, memcg);
unlock:
	spin_unlock(&mc.lock);
	return ret;
}
1127
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001128static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001129{
1130 if (mc.moving_task && current != mc.moving_task) {
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001131 if (mem_cgroup_under_move(memcg)) {
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001132 DEFINE_WAIT(wait);
1133 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1134 /* moving charge context might have finished. */
1135 if (mc.moving_task)
1136 schedule();
1137 finish_wait(&mc.waitq, &wait);
1138 return true;
1139 }
1140 }
1141 return false;
1142}
1143
Sha Zhengju58cf1882013-02-22 16:32:05 -08001144#define K(x) ((x) << (PAGE_SHIFT-10))
Balbir Singhe2224322009-04-02 16:57:39 -07001145/**
Sha Zhengju58cf1882013-02-22 16:32:05 -08001146 * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
Balbir Singhe2224322009-04-02 16:57:39 -07001147 * @memcg: The memory cgroup that went over limit
1148 * @p: Task that is going to be killed
1149 *
1150 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1151 * enabled
1152 */
1153void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1154{
Sha Zhengju58cf1882013-02-22 16:32:05 -08001155 struct mem_cgroup *iter;
1156 unsigned int i;
Balbir Singhe2224322009-04-02 16:57:39 -07001157
Balbir Singhe2224322009-04-02 16:57:39 -07001158 rcu_read_lock();
1159
Balasubramani Vivekanandan2415b9f2015-04-14 15:48:18 -07001160 if (p) {
1161 pr_info("Task in ");
1162 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1163 pr_cont(" killed as a result of limit of ");
1164 } else {
1165 pr_info("Memory limit reached of cgroup ");
1166 }
1167
Tejun Heoe61734c2014-02-12 09:29:50 -05001168 pr_cont_cgroup_path(memcg->css.cgroup);
Greg Thelen0346dad2015-01-26 12:58:38 -08001169 pr_cont("\n");
Balbir Singhe2224322009-04-02 16:57:39 -07001170
Balbir Singhe2224322009-04-02 16:57:39 -07001171 rcu_read_unlock();
1172
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001173 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1174 K((u64)page_counter_read(&memcg->memory)),
1175 K((u64)memcg->memory.limit), memcg->memory.failcnt);
1176 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1177 K((u64)page_counter_read(&memcg->memsw)),
1178 K((u64)memcg->memsw.limit), memcg->memsw.failcnt);
1179 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1180 K((u64)page_counter_read(&memcg->kmem)),
1181 K((u64)memcg->kmem.limit), memcg->kmem.failcnt);
Sha Zhengju58cf1882013-02-22 16:32:05 -08001182
1183 for_each_mem_cgroup_tree(iter, memcg) {
Tejun Heoe61734c2014-02-12 09:29:50 -05001184 pr_info("Memory cgroup stats for ");
1185 pr_cont_cgroup_path(iter->css.cgroup);
Sha Zhengju58cf1882013-02-22 16:32:05 -08001186 pr_cont(":");
1187
1188 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
Vladimir Davydov37e84352016-01-20 15:02:56 -08001189 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
Sha Zhengju58cf1882013-02-22 16:32:05 -08001190 continue;
Greg Thelen484ebb32015-10-01 15:37:05 -07001191 pr_cont(" %s:%luKB", mem_cgroup_stat_names[i],
Sha Zhengju58cf1882013-02-22 16:32:05 -08001192 K(mem_cgroup_read_stat(iter, i)));
1193 }
1194
1195 for (i = 0; i < NR_LRU_LISTS; i++)
1196 pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
1197 K(mem_cgroup_nr_lru_pages(iter, BIT(i))));
1198
1199 pr_cont("\n");
1200 }
Balbir Singhe2224322009-04-02 16:57:39 -07001201}
1202
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07001203/*
1204 * This function returns the number of memcg under hierarchy tree. Returns
1205 * 1(self count) if no children.
1206 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001207static int mem_cgroup_count_children(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07001208{
1209 int num = 0;
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001210 struct mem_cgroup *iter;
1211
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001212 for_each_mem_cgroup_tree(iter, memcg)
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001213 num++;
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07001214 return num;
1215}
1216
Balbir Singh6d61ef42009-01-07 18:08:06 -08001217/*
David Rientjesa63d83f2010-08-09 17:19:46 -07001218 * Return the memory (and swap, if configured) limit for a memcg.
1219 */
Vladimir Davydov7c5f64f2016-10-07 16:57:23 -07001220unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
David Rientjesa63d83f2010-08-09 17:19:46 -07001221{
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001222 unsigned long limit;
David Rientjesa63d83f2010-08-09 17:19:46 -07001223
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001224 limit = memcg->memory.limit;
Michal Hocko9a5a8f12012-11-16 14:14:49 -08001225 if (mem_cgroup_swappiness(memcg)) {
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001226 unsigned long memsw_limit;
Vladimir Davydov37e84352016-01-20 15:02:56 -08001227 unsigned long swap_limit;
Michal Hocko9a5a8f12012-11-16 14:14:49 -08001228
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001229 memsw_limit = memcg->memsw.limit;
Vladimir Davydov37e84352016-01-20 15:02:56 -08001230 swap_limit = memcg->swap.limit;
1231 swap_limit = min(swap_limit, (unsigned long)total_swap_pages);
1232 limit = min(limit + swap_limit, memsw_limit);
Michal Hocko9a5a8f12012-11-16 14:14:49 -08001233 }
Michal Hocko9a5a8f12012-11-16 14:14:49 -08001234 return limit;
David Rientjesa63d83f2010-08-09 17:19:46 -07001235}
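
/*
 * Worked example (hypothetical numbers, for illustration only): with
 * memory.limit = 1000 pages, swap.limit = 400 pages, total_swap_pages = 300
 * and memsw.limit = 1200 pages, a memcg with non-zero swappiness gets
 * min(1000 + min(400, 300), 1200) = min(1300, 1200) = 1200 pages.
 * With swappiness == 0 the result is simply memory.limit = 1000 pages.
 */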
1236
Johannes Weinerb6e6edc2016-03-17 14:20:28 -07001237static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
David Rientjes19965462012-12-11 16:00:26 -08001238 int order)
David Rientjes9cbb78b2012-07-31 16:43:44 -07001239{
David Rientjes6e0fc462015-09-08 15:00:36 -07001240 struct oom_control oc = {
1241 .zonelist = NULL,
1242 .nodemask = NULL,
Vladimir Davydov2a966b72016-07-26 15:22:33 -07001243 .memcg = memcg,
David Rientjes6e0fc462015-09-08 15:00:36 -07001244 .gfp_mask = gfp_mask,
1245 .order = order,
David Rientjes6e0fc462015-09-08 15:00:36 -07001246 };
Vladimir Davydov7c5f64f2016-10-07 16:57:23 -07001247 bool ret;
David Rientjes9cbb78b2012-07-31 16:43:44 -07001248
Johannes Weinerdc564012015-06-24 16:57:19 -07001249 mutex_lock(&oom_lock);
Vladimir Davydov7c5f64f2016-10-07 16:57:23 -07001250 ret = out_of_memory(&oc);
Johannes Weinerdc564012015-06-24 16:57:19 -07001251 mutex_unlock(&oom_lock);
Vladimir Davydov7c5f64f2016-10-07 16:57:23 -07001252 return ret;
David Rientjes9cbb78b2012-07-31 16:43:44 -07001253}
1254
Michele Curtiae6e71d2014-12-12 16:56:35 -08001255#if MAX_NUMNODES > 1
1256
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001257/**
1258 * test_mem_cgroup_node_reclaimable
Wanpeng Lidad75572012-06-20 12:53:01 -07001259 * @memcg: the target memcg
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001260 * @nid: the node ID to be checked.
1261 * @noswap: specify true here if the user wants file-only information.
1262 *
1263 * This function returns whether the specified memcg contains any
1264 * reclaimable pages on a node. Returns true if there are any reclaimable
1265 * pages in the node.
1266 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001267static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001268 int nid, bool noswap)
1269{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001270 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001271 return true;
1272 if (noswap || !total_swap_pages)
1273 return false;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001274 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001275 return true;
1276 return false;
1277
1278}
Ying Han889976d2011-05-26 16:25:33 -07001279
1280/*
1281 * Always updating the nodemask is not very good - even if we have an empty
1282 * list or the wrong list here, we can start from some node and traverse all
1283 * nodes based on the zonelist. So update the list loosely once per 10 secs.
1284 *
1285 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001286static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
Ying Han889976d2011-05-26 16:25:33 -07001287{
1288 int nid;
KAMEZAWA Hiroyuki453a9bf2011-07-08 15:39:43 -07001289 /*
1290 * numainfo_events > 0 means there were at least NUMAINFO_EVENTS_TARGET
1291 * pagein/pageout changes since the last update.
1292 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001293 if (!atomic_read(&memcg->numainfo_events))
KAMEZAWA Hiroyuki453a9bf2011-07-08 15:39:43 -07001294 return;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001295 if (atomic_inc_return(&memcg->numainfo_updating) > 1)
Ying Han889976d2011-05-26 16:25:33 -07001296 return;
1297
Ying Han889976d2011-05-26 16:25:33 -07001298 /* make a nodemask where this memcg uses memory from */
Lai Jiangshan31aaea42012-12-12 13:51:27 -08001299 memcg->scan_nodes = node_states[N_MEMORY];
Ying Han889976d2011-05-26 16:25:33 -07001300
Lai Jiangshan31aaea42012-12-12 13:51:27 -08001301 for_each_node_mask(nid, node_states[N_MEMORY]) {
Ying Han889976d2011-05-26 16:25:33 -07001302
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001303 if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
1304 node_clear(nid, memcg->scan_nodes);
Ying Han889976d2011-05-26 16:25:33 -07001305 }
KAMEZAWA Hiroyuki453a9bf2011-07-08 15:39:43 -07001306
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001307 atomic_set(&memcg->numainfo_events, 0);
1308 atomic_set(&memcg->numainfo_updating, 0);
Ying Han889976d2011-05-26 16:25:33 -07001309}
1310
1311/*
1312 * Selecting a node to start reclaim from. Because all we need is to reduce
1313 * the usage counter, starting from anywhere is OK. Reclaiming from the
1314 * current node has both pros and cons:
1315 *
1316 * Freeing memory from the current node means freeing memory from a node which
1317 * we will use or have used, so it may disturb that node's LRU. And if several
1318 * threads hit their limits, they will all contend on that one node. But freeing
1319 * from a remote node costs more for memory reclaim because of memory latency.
1320 *
1321 * For now, we use round-robin. A better algorithm is welcome.
1322 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001323int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
Ying Han889976d2011-05-26 16:25:33 -07001324{
1325 int node;
1326
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001327 mem_cgroup_may_update_nodemask(memcg);
1328 node = memcg->last_scanned_node;
Ying Han889976d2011-05-26 16:25:33 -07001329
Andrew Morton0edaf862016-05-19 17:10:58 -07001330 node = next_node_in(node, memcg->scan_nodes);
Ying Han889976d2011-05-26 16:25:33 -07001331 /*
Michal Hockofda3d692016-05-19 17:11:34 -07001332 * mem_cgroup_may_update_nodemask might have seen no reclaimable pages
1333 * last time it really checked all the LRUs due to rate limiting.
1334 * Fallback to the current node in that case for simplicity.
Ying Han889976d2011-05-26 16:25:33 -07001335 */
1336 if (unlikely(node == MAX_NUMNODES))
1337 node = numa_node_id();
1338
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001339 memcg->last_scanned_node = node;
Ying Han889976d2011-05-26 16:25:33 -07001340 return node;
1341}
Ying Han889976d2011-05-26 16:25:33 -07001342#else
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001343int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
Ying Han889976d2011-05-26 16:25:33 -07001344{
1345 return 0;
1346}
1347#endif
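
/*
 * Example (hypothetical values, for illustration only): with
 * scan_nodes = {0, 2, 3} and last_scanned_node == 0, successive calls to
 * mem_cgroup_select_victim_node() return 2, then 3, then wrap around to 0,
 * so reclaim work is spread across the nodes instead of always hitting the
 * same one.
 */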
1348
Andrew Morton0608f432013-09-24 15:27:41 -07001349static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
Mel Gormanef8f2322016-07-28 15:46:05 -07001350 pg_data_t *pgdat,
Andrew Morton0608f432013-09-24 15:27:41 -07001351 gfp_t gfp_mask,
1352 unsigned long *total_scanned)
Balbir Singh6d61ef42009-01-07 18:08:06 -08001353{
Andrew Morton0608f432013-09-24 15:27:41 -07001354 struct mem_cgroup *victim = NULL;
1355 int total = 0;
1356 int loop = 0;
1357 unsigned long excess;
1358 unsigned long nr_scanned;
1359 struct mem_cgroup_reclaim_cookie reclaim = {
Mel Gormanef8f2322016-07-28 15:46:05 -07001360 .pgdat = pgdat,
Andrew Morton0608f432013-09-24 15:27:41 -07001361 .priority = 0,
1362 };
Johannes Weiner9d11ea92011-03-23 16:42:21 -07001363
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001364 excess = soft_limit_excess(root_memcg);
Balbir Singh6d61ef42009-01-07 18:08:06 -08001365
Andrew Morton0608f432013-09-24 15:27:41 -07001366 while (1) {
1367 victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1368 if (!victim) {
1369 loop++;
1370 if (loop >= 2) {
1371 /*
1372 * If we have not been able to reclaim
1373 * anything, it might be because there are
1374 * no reclaimable pages under this hierarchy
1375 */
1376 if (!total)
1377 break;
1378 /*
1379 * We want to do more targeted reclaim.
1380 * excess >> 2 is not too excessive, so that we do not
1381 * reclaim too much, nor so little that we keep
1382 * coming back to reclaim from this cgroup
1383 */
1384 if (total >= (excess >> 2) ||
1385 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1386 break;
1387 }
1388 continue;
1389 }
Mel Gormana9dd0a82016-07-28 15:46:02 -07001390 total += mem_cgroup_shrink_node(victim, gfp_mask, false,
Mel Gormanef8f2322016-07-28 15:46:05 -07001391 pgdat, &nr_scanned);
Andrew Morton0608f432013-09-24 15:27:41 -07001392 *total_scanned += nr_scanned;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001393 if (!soft_limit_excess(root_memcg))
Andrew Morton0608f432013-09-24 15:27:41 -07001394 break;
Balbir Singh6d61ef42009-01-07 18:08:06 -08001395 }
Andrew Morton0608f432013-09-24 15:27:41 -07001396 mem_cgroup_iter_break(root_memcg, victim);
1397 return total;
Balbir Singh6d61ef42009-01-07 18:08:06 -08001398}
1399
Johannes Weiner0056f4e2013-10-31 16:34:14 -07001400#ifdef CONFIG_LOCKDEP
1401static struct lockdep_map memcg_oom_lock_dep_map = {
1402 .name = "memcg_oom_lock",
1403};
1404#endif
1405
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001406static DEFINE_SPINLOCK(memcg_oom_lock);
1407
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001408/*
1409 * Check whether the OOM killer is already running under our hierarchy.
1410 * If someone is already running it, return false.
1411 */
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001412static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001413{
Michal Hocko79dfdac2011-07-26 16:08:23 -07001414 struct mem_cgroup *iter, *failed = NULL;
KAMEZAWA Hiroyukia636b322009-01-07 18:08:08 -08001415
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001416 spin_lock(&memcg_oom_lock);
1417
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001418 for_each_mem_cgroup_tree(iter, memcg) {
Johannes Weiner23751be2011-08-25 15:59:16 -07001419 if (iter->oom_lock) {
Michal Hocko79dfdac2011-07-26 16:08:23 -07001420 /*
1421 * this subtree of our hierarchy is already locked
1422 * so we cannot take the lock.
1423 */
Michal Hocko79dfdac2011-07-26 16:08:23 -07001424 failed = iter;
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001425 mem_cgroup_iter_break(memcg, iter);
1426 break;
Johannes Weiner23751be2011-08-25 15:59:16 -07001427 } else
1428 iter->oom_lock = true;
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001429 }
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001430
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001431 if (failed) {
1432 /*
1433 * OK, we failed to lock the whole subtree, so we have
1434 * to clean up what we already set up, up to the failing subtree
1435 */
1436 for_each_mem_cgroup_tree(iter, memcg) {
1437 if (iter == failed) {
1438 mem_cgroup_iter_break(memcg, iter);
1439 break;
1440 }
1441 iter->oom_lock = false;
Michal Hocko79dfdac2011-07-26 16:08:23 -07001442 }
Johannes Weiner0056f4e2013-10-31 16:34:14 -07001443 } else
1444 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001445
1446 spin_unlock(&memcg_oom_lock);
1447
1448 return !failed;
KAMEZAWA Hiroyukia636b322009-01-07 18:08:08 -08001449}
KAMEZAWA Hiroyuki0b7f5692009-04-02 16:57:38 -07001450
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001451static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki0b7f5692009-04-02 16:57:38 -07001452{
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001453 struct mem_cgroup *iter;
1454
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001455 spin_lock(&memcg_oom_lock);
Johannes Weiner0056f4e2013-10-31 16:34:14 -07001456 mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001457 for_each_mem_cgroup_tree(iter, memcg)
Michal Hocko79dfdac2011-07-26 16:08:23 -07001458 iter->oom_lock = false;
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001459 spin_unlock(&memcg_oom_lock);
Michal Hocko79dfdac2011-07-26 16:08:23 -07001460}
1461
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001462static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
Michal Hocko79dfdac2011-07-26 16:08:23 -07001463{
1464 struct mem_cgroup *iter;
1465
Tejun Heoc2b42d32015-06-24 16:58:23 -07001466 spin_lock(&memcg_oom_lock);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001467 for_each_mem_cgroup_tree(iter, memcg)
Tejun Heoc2b42d32015-06-24 16:58:23 -07001468 iter->under_oom++;
1469 spin_unlock(&memcg_oom_lock);
Michal Hocko79dfdac2011-07-26 16:08:23 -07001470}
1471
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001472static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
Michal Hocko79dfdac2011-07-26 16:08:23 -07001473{
1474 struct mem_cgroup *iter;
1475
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001476 /*
1477 * When a new child is created while the hierarchy is under oom,
Tejun Heoc2b42d32015-06-24 16:58:23 -07001478 * mem_cgroup_oom_lock() may not be called. Watch for underflow.
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001479 */
Tejun Heoc2b42d32015-06-24 16:58:23 -07001480 spin_lock(&memcg_oom_lock);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001481 for_each_mem_cgroup_tree(iter, memcg)
Tejun Heoc2b42d32015-06-24 16:58:23 -07001482 if (iter->under_oom > 0)
1483 iter->under_oom--;
1484 spin_unlock(&memcg_oom_lock);
KAMEZAWA Hiroyuki0b7f5692009-04-02 16:57:38 -07001485}
1486
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001487static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1488
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001489struct oom_wait_info {
Hugh Dickinsd79154b2012-03-21 16:34:18 -07001490 struct mem_cgroup *memcg;
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001491 wait_queue_t wait;
1492};
1493
1494static int memcg_oom_wake_function(wait_queue_t *wait,
1495 unsigned mode, int sync, void *arg)
1496{
Hugh Dickinsd79154b2012-03-21 16:34:18 -07001497 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1498 struct mem_cgroup *oom_wait_memcg;
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001499 struct oom_wait_info *oom_wait_info;
1500
1501 oom_wait_info = container_of(wait, struct oom_wait_info, wait);
Hugh Dickinsd79154b2012-03-21 16:34:18 -07001502 oom_wait_memcg = oom_wait_info->memcg;
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001503
Johannes Weiner2314b422014-12-10 15:44:33 -08001504 if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1505 !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001506 return 0;
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001507 return autoremove_wake_function(wait, mode, sync, arg);
1508}
1509
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001510static void memcg_oom_recover(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07001511{
Tejun Heoc2b42d32015-06-24 16:58:23 -07001512 /*
1513 * For the following lockless ->under_oom test, the only required
1514 * guarantee is that it must see the state asserted by an OOM when
1515 * this function is called as a result of userland actions
1516 * triggered by the notification of the OOM. This is trivially
1517 * achieved by invoking mem_cgroup_mark_under_oom() before
1518 * triggering notification.
1519 */
1520 if (memcg && memcg->under_oom)
Tejun Heof4b90b702015-06-24 16:58:21 -07001521 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07001522}
1523
Johannes Weiner3812c8c2013-09-12 15:13:44 -07001524static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001525{
Andrew Mortond0db7af2016-06-08 15:33:47 -07001526 if (!current->memcg_may_oom)
Johannes Weiner3812c8c2013-09-12 15:13:44 -07001527 return;
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001528 /*
Johannes Weiner49426422013-10-16 13:46:59 -07001529 * We are in the middle of the charge context here, so we
1530 * don't want to block when potentially sitting on a callstack
1531 * that holds all kinds of filesystem and mm locks.
1532 *
1533 * Also, the caller may handle a failed allocation gracefully
1534 * (like optional page cache readahead) and so an OOM killer
1535 * invocation might not even be necessary.
1536 *
1537 * That's why we don't do anything here except remember the
1538 * OOM context and then deal with it at the end of the page
1539 * fault when the stack is unwound, the locks are released,
1540 * and when we know whether the fault was overall successful.
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001541 */
Johannes Weiner49426422013-10-16 13:46:59 -07001542 css_get(&memcg->css);
Tejun Heo626ebc42015-11-05 18:46:09 -08001543 current->memcg_in_oom = memcg;
1544 current->memcg_oom_gfp_mask = mask;
1545 current->memcg_oom_order = order;
Johannes Weiner49426422013-10-16 13:46:59 -07001546}
1547
1548/**
1549 * mem_cgroup_oom_synchronize - complete memcg OOM handling
1550 * @handle: actually kill/wait or just clean up the OOM state
1551 *
1552 * This has to be called at the end of a page fault if the memcg OOM
1553 * handler was enabled.
1554 *
1555 * Memcg supports userspace OOM handling where failed allocations must
1556 * sleep on a waitqueue until the userspace task resolves the
1557 * situation. Sleeping directly in the charge context with all kinds
1558 * of locks held is not a good idea, instead we remember an OOM state
1559 * in the task and mem_cgroup_oom_synchronize() has to be called at
1560 * the end of the page fault to complete the OOM handling.
1561 *
1562 * Returns %true if an ongoing memcg OOM situation was detected and
1563 * completed, %false otherwise.
1564 */
1565bool mem_cgroup_oom_synchronize(bool handle)
1566{
Tejun Heo626ebc42015-11-05 18:46:09 -08001567 struct mem_cgroup *memcg = current->memcg_in_oom;
Johannes Weiner49426422013-10-16 13:46:59 -07001568 struct oom_wait_info owait;
1569 bool locked;
1570
1571 /* OOM is global, do not handle */
1572 if (!memcg)
1573 return false;
1574
Vladimir Davydov7c5f64f2016-10-07 16:57:23 -07001575 if (!handle)
Johannes Weiner49426422013-10-16 13:46:59 -07001576 goto cleanup;
1577
1578 owait.memcg = memcg;
1579 owait.wait.flags = 0;
1580 owait.wait.func = memcg_oom_wake_function;
1581 owait.wait.private = current;
1582 INIT_LIST_HEAD(&owait.wait.task_list);
1583
1584 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001585 mem_cgroup_mark_under_oom(memcg);
1586
1587 locked = mem_cgroup_oom_trylock(memcg);
1588
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07001589 if (locked)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001590 mem_cgroup_oom_notify(memcg);
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001591
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001592 if (locked && !memcg->oom_kill_disable) {
1593 mem_cgroup_unmark_under_oom(memcg);
Johannes Weiner49426422013-10-16 13:46:59 -07001594 finish_wait(&memcg_oom_waitq, &owait.wait);
Tejun Heo626ebc42015-11-05 18:46:09 -08001595 mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
1596 current->memcg_oom_order);
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07001597 } else {
Johannes Weiner3812c8c2013-09-12 15:13:44 -07001598 schedule();
Johannes Weiner49426422013-10-16 13:46:59 -07001599 mem_cgroup_unmark_under_oom(memcg);
1600 finish_wait(&memcg_oom_waitq, &owait.wait);
1601 }
1602
1603 if (locked) {
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001604 mem_cgroup_oom_unlock(memcg);
1605 /*
1606 * There is no guarantee that an OOM-lock contender
1607 * sees the wakeups triggered by the OOM kill
1608 * uncharges. Wake any sleepers explicitly.
1609 */
1610 memcg_oom_recover(memcg);
1611 }
Johannes Weiner49426422013-10-16 13:46:59 -07001612cleanup:
Tejun Heo626ebc42015-11-05 18:46:09 -08001613 current->memcg_in_oom = NULL;
Johannes Weiner3812c8c2013-09-12 15:13:44 -07001614 css_put(&memcg->css);
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001615 return true;
KAMEZAWA Hiroyuki0b7f5692009-04-02 16:57:38 -07001616}
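
/*
 * Example (simplified sketch, for illustration only; the function name below
 * is made up): the intended split is that mem_cgroup_oom() above only records
 * the OOM state during the charge, and the page fault exit path resolves it
 * once every lock has been dropped.
 */
#if 0
static void example_page_fault_exit(void)
{
	/* Kill or wait if a memcg OOM was recorded during this fault. */
	if (mem_cgroup_oom_synchronize(true))
		return;
	/* Otherwise any OOM here is global and handled elsewhere. */
}
#endif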
1617
Johannes Weinerd7365e72014-10-29 14:50:48 -07001618/**
Johannes Weiner81f8c3a2016-03-15 14:57:04 -07001619 * lock_page_memcg - lock a page->mem_cgroup binding
1620 * @page: the page
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001621 *
Johannes Weiner81f8c3a2016-03-15 14:57:04 -07001622 * This function protects unlocked LRU pages from being moved to
1623 * another cgroup and stabilizes their page->mem_cgroup binding.
Balbir Singhd69b0422009-06-17 16:26:34 -07001624 */
Johannes Weiner62cccb82016-03-15 14:57:22 -07001625void lock_page_memcg(struct page *page)
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07001626{
1627 struct mem_cgroup *memcg;
Johannes Weiner6de22612015-02-11 15:25:01 -08001628 unsigned long flags;
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07001629
Johannes Weiner6de22612015-02-11 15:25:01 -08001630 /*
1631 * The RCU lock is held throughout the transaction. The fast
1632 * path can get away without acquiring the memcg->move_lock
1633 * because page moving starts with an RCU grace period.
Johannes Weiner6de22612015-02-11 15:25:01 -08001634 */
Johannes Weinerd7365e72014-10-29 14:50:48 -07001635 rcu_read_lock();
1636
1637 if (mem_cgroup_disabled())
Johannes Weiner62cccb82016-03-15 14:57:22 -07001638 return;
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07001639again:
Johannes Weiner1306a852014-12-10 15:44:52 -08001640 memcg = page->mem_cgroup;
Johannes Weiner29833312014-12-10 15:44:02 -08001641 if (unlikely(!memcg))
Johannes Weiner62cccb82016-03-15 14:57:22 -07001642 return;
Johannes Weinerd7365e72014-10-29 14:50:48 -07001643
Qiang Huangbdcbb652014-06-04 16:08:21 -07001644 if (atomic_read(&memcg->moving_account) <= 0)
Johannes Weiner62cccb82016-03-15 14:57:22 -07001645 return;
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07001646
Johannes Weiner6de22612015-02-11 15:25:01 -08001647 spin_lock_irqsave(&memcg->move_lock, flags);
Johannes Weiner1306a852014-12-10 15:44:52 -08001648 if (memcg != page->mem_cgroup) {
Johannes Weiner6de22612015-02-11 15:25:01 -08001649 spin_unlock_irqrestore(&memcg->move_lock, flags);
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07001650 goto again;
1651 }
Johannes Weiner6de22612015-02-11 15:25:01 -08001652
1653 /*
1654 * When charge migration first begins, we can have locked and
1655 * unlocked page stat updates happening concurrently. Track
Johannes Weiner81f8c3a2016-03-15 14:57:04 -07001656 * the task who has the lock for unlock_page_memcg().
Johannes Weiner6de22612015-02-11 15:25:01 -08001657 */
1658 memcg->move_lock_task = current;
1659 memcg->move_lock_flags = flags;
Johannes Weinerd7365e72014-10-29 14:50:48 -07001660
Johannes Weiner62cccb82016-03-15 14:57:22 -07001661 return;
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07001662}
Johannes Weiner81f8c3a2016-03-15 14:57:04 -07001663EXPORT_SYMBOL(lock_page_memcg);
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07001664
Johannes Weinerd7365e72014-10-29 14:50:48 -07001665/**
Johannes Weiner81f8c3a2016-03-15 14:57:04 -07001666 * unlock_page_memcg - unlock a page->mem_cgroup binding
Johannes Weiner62cccb82016-03-15 14:57:22 -07001667 * @page: the page
Johannes Weinerd7365e72014-10-29 14:50:48 -07001668 */
Johannes Weiner62cccb82016-03-15 14:57:22 -07001669void unlock_page_memcg(struct page *page)
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07001670{
Johannes Weiner62cccb82016-03-15 14:57:22 -07001671 struct mem_cgroup *memcg = page->mem_cgroup;
1672
Johannes Weiner6de22612015-02-11 15:25:01 -08001673 if (memcg && memcg->move_lock_task == current) {
1674 unsigned long flags = memcg->move_lock_flags;
1675
1676 memcg->move_lock_task = NULL;
1677 memcg->move_lock_flags = 0;
1678
1679 spin_unlock_irqrestore(&memcg->move_lock, flags);
1680 }
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07001681
Johannes Weinerd7365e72014-10-29 14:50:48 -07001682 rcu_read_unlock();
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07001683}
Johannes Weiner81f8c3a2016-03-15 14:57:04 -07001684EXPORT_SYMBOL(unlock_page_memcg);
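
/*
 * Example (simplified sketch, for illustration only; the function name below
 * is made up): callers bracket updates that must be attributed to a stable
 * page->mem_cgroup with this pair, so a concurrent charge move cannot switch
 * the binding in the middle of the update.
 */
#if 0
static void example_update_memcg_page_state(struct page *page)
{
	lock_page_memcg(page);
	/* page->mem_cgroup is stable here; update memcg-tracked page state. */
	unlock_page_memcg(page);
}
#endif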
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07001685
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08001686/*
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001687 * size of first charge trial. "32" comes from vmscan.c's magic value.
1688 * TODO: it may be necessary to use bigger numbers on big iron.
1689 */
Johannes Weiner7ec99d62011-03-23 16:42:36 -07001690#define CHARGE_BATCH 32U
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001691struct memcg_stock_pcp {
1692 struct mem_cgroup *cached; /* this is never the root cgroup */
Johannes Weiner11c9ea42011-03-23 16:42:34 -07001693 unsigned int nr_pages;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001694 struct work_struct work;
KAMEZAWA Hiroyuki26fe6162011-06-15 15:08:45 -07001695 unsigned long flags;
Kirill A. Shutemova0db00f2012-05-29 15:06:56 -07001696#define FLUSHING_CACHED_CHARGE 0
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001697};
1698static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
Michal Hocko9f50fad2011-08-09 11:56:26 +02001699static DEFINE_MUTEX(percpu_charge_mutex);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001700
Suleiman Souhlala0956d52012-12-18 14:21:36 -08001701/**
1702 * consume_stock: Try to consume stocked charge on this cpu.
1703 * @memcg: memcg to consume from.
1704 * @nr_pages: how many pages to charge.
1705 *
1706 * The charges will only happen if @memcg matches the current cpu's memcg
1707 * stock, and at least @nr_pages are available in that stock. Failure to
1708 * service an allocation will refill the stock.
1709 *
1710 * returns true if successful, false otherwise.
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001711 */
Suleiman Souhlala0956d52012-12-18 14:21:36 -08001712static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001713{
1714 struct memcg_stock_pcp *stock;
Johannes Weinerdb2ba40c2016-09-19 14:44:36 -07001715 unsigned long flags;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001716 bool ret = false;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001717
Suleiman Souhlala0956d52012-12-18 14:21:36 -08001718 if (nr_pages > CHARGE_BATCH)
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001719 return ret;
Suleiman Souhlala0956d52012-12-18 14:21:36 -08001720
Johannes Weinerdb2ba40c2016-09-19 14:44:36 -07001721 local_irq_save(flags);
1722
1723 stock = this_cpu_ptr(&memcg_stock);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001724 if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
Suleiman Souhlala0956d52012-12-18 14:21:36 -08001725 stock->nr_pages -= nr_pages;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001726 ret = true;
1727 }
Johannes Weinerdb2ba40c2016-09-19 14:44:36 -07001728
1729 local_irq_restore(flags);
1730
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001731 return ret;
1732}
1733
1734/*
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001735 * Returns the percpu cached stock to the page counters and resets the cached information.
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001736 */
1737static void drain_stock(struct memcg_stock_pcp *stock)
1738{
1739 struct mem_cgroup *old = stock->cached;
1740
Johannes Weiner11c9ea42011-03-23 16:42:34 -07001741 if (stock->nr_pages) {
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001742 page_counter_uncharge(&old->memory, stock->nr_pages);
Johannes Weiner7941d212016-01-14 15:21:23 -08001743 if (do_memsw_account())
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001744 page_counter_uncharge(&old->memsw, stock->nr_pages);
Johannes Weinere8ea14c2014-12-10 15:42:42 -08001745 css_put_many(&old->css, stock->nr_pages);
Johannes Weiner11c9ea42011-03-23 16:42:34 -07001746 stock->nr_pages = 0;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001747 }
1748 stock->cached = NULL;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001749}
1750
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001751static void drain_local_stock(struct work_struct *dummy)
1752{
Johannes Weinerdb2ba40c2016-09-19 14:44:36 -07001753 struct memcg_stock_pcp *stock;
1754 unsigned long flags;
1755
1756 local_irq_save(flags);
1757
1758 stock = this_cpu_ptr(&memcg_stock);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001759 drain_stock(stock);
KAMEZAWA Hiroyuki26fe6162011-06-15 15:08:45 -07001760 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
Johannes Weinerdb2ba40c2016-09-19 14:44:36 -07001761
1762 local_irq_restore(flags);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001763}
1764
1765/*
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001766 * Cache charges (@nr_pages) in the local per-cpu area.
Greg Thelen320cc512010-03-15 15:27:28 +01001767 * They will be consumed by the consume_stock() function later.
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001768 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001769static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001770{
Johannes Weinerdb2ba40c2016-09-19 14:44:36 -07001771 struct memcg_stock_pcp *stock;
1772 unsigned long flags;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001773
Johannes Weinerdb2ba40c2016-09-19 14:44:36 -07001774 local_irq_save(flags);
1775
1776 stock = this_cpu_ptr(&memcg_stock);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001777 if (stock->cached != memcg) { /* reset if necessary */
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001778 drain_stock(stock);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001779 stock->cached = memcg;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001780 }
Johannes Weiner11c9ea42011-03-23 16:42:34 -07001781 stock->nr_pages += nr_pages;
Johannes Weinerdb2ba40c2016-09-19 14:44:36 -07001782
1783 local_irq_restore(flags);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001784}
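
/*
 * Example (simplified sketch, for illustration only; the function name below
 * is made up): the per-cpu stock is a batching cache in front of the shared
 * page counters.  A charge path first tries the cheap per-cpu path and, when
 * it has to fall back to the counters, charges a whole batch and parks the
 * surplus in the stock, which is the pattern try_charge() uses further below.
 */
#if 0
static void example_charge_with_stock(struct mem_cgroup *memcg,
				      unsigned int nr_pages, unsigned int batch)
{
	if (consume_stock(memcg, nr_pages))
		return;		/* served entirely from the CPU-local stock */

	/* ... charge @batch (>= @nr_pages) against the page counters ... */

	if (batch > nr_pages)
		refill_stock(memcg, batch - nr_pages);	/* keep the surplus cached */
}
#endif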
1785
1786/*
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001787 * Drains all per-CPU charge caches for the given root_memcg and the subtree
Johannes Weiner6d3d6aa2014-12-10 15:42:50 -08001788 * of the hierarchy under it.
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001789 */
Johannes Weiner6d3d6aa2014-12-10 15:42:50 -08001790static void drain_all_stock(struct mem_cgroup *root_memcg)
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001791{
KAMEZAWA Hiroyuki26fe6162011-06-15 15:08:45 -07001792 int cpu, curcpu;
Michal Hockod38144b2011-07-26 16:08:28 -07001793
Johannes Weiner6d3d6aa2014-12-10 15:42:50 -08001794 /* If someone's already draining, avoid running more workers. */
1795 if (!mutex_trylock(&percpu_charge_mutex))
1796 return;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001797 /* Notify other cpus that system-wide "drain" is running */
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001798 get_online_cpus();
Johannes Weiner5af12d02011-08-25 15:59:07 -07001799 curcpu = get_cpu();
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001800 for_each_online_cpu(cpu) {
1801 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001802 struct mem_cgroup *memcg;
KAMEZAWA Hiroyuki26fe6162011-06-15 15:08:45 -07001803
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001804 memcg = stock->cached;
1805 if (!memcg || !stock->nr_pages)
KAMEZAWA Hiroyuki26fe6162011-06-15 15:08:45 -07001806 continue;
Johannes Weiner2314b422014-12-10 15:44:33 -08001807 if (!mem_cgroup_is_descendant(memcg, root_memcg))
Michal Hocko3e920412011-07-26 16:08:29 -07001808 continue;
Michal Hockod1a05b62011-07-26 16:08:27 -07001809 if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
1810 if (cpu == curcpu)
1811 drain_local_stock(&stock->work);
1812 else
1813 schedule_work_on(cpu, &stock->work);
1814 }
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001815 }
Johannes Weiner5af12d02011-08-25 15:59:07 -07001816 put_cpu();
Andrew Mortonf894ffa2013-09-12 15:13:35 -07001817 put_online_cpus();
Michal Hocko9f50fad2011-08-09 11:56:26 +02001818 mutex_unlock(&percpu_charge_mutex);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001819}
1820
Sebastian Andrzej Siewior308167f2016-11-03 15:49:59 +01001821static int memcg_hotplug_cpu_dead(unsigned int cpu)
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001822{
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001823 struct memcg_stock_pcp *stock;
1824
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001825 stock = &per_cpu(memcg_stock, cpu);
1826 drain_stock(stock);
Sebastian Andrzej Siewior308167f2016-11-03 15:49:59 +01001827 return 0;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001828}
1829
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08001830static void reclaim_high(struct mem_cgroup *memcg,
1831 unsigned int nr_pages,
1832 gfp_t gfp_mask)
1833{
1834 do {
1835 if (page_counter_read(&memcg->memory) <= memcg->high)
1836 continue;
1837 mem_cgroup_events(memcg, MEMCG_HIGH, 1);
1838 try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
1839 } while ((memcg = parent_mem_cgroup(memcg)));
1840}
1841
1842static void high_work_func(struct work_struct *work)
1843{
1844 struct mem_cgroup *memcg;
1845
1846 memcg = container_of(work, struct mem_cgroup, high_work);
1847 reclaim_high(memcg, CHARGE_BATCH, GFP_KERNEL);
1848}
1849
Tejun Heob23afb92015-11-05 18:46:11 -08001850/*
1851 * Scheduled by try_charge() to be executed from the userland return path
1852 * and reclaims memory over the high limit.
1853 */
1854void mem_cgroup_handle_over_high(void)
1855{
1856 unsigned int nr_pages = current->memcg_nr_pages_over_high;
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08001857 struct mem_cgroup *memcg;
Tejun Heob23afb92015-11-05 18:46:11 -08001858
1859 if (likely(!nr_pages))
1860 return;
1861
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08001862 memcg = get_mem_cgroup_from_mm(current->mm);
1863 reclaim_high(memcg, nr_pages, GFP_KERNEL);
Tejun Heob23afb92015-11-05 18:46:11 -08001864 css_put(&memcg->css);
1865 current->memcg_nr_pages_over_high = 0;
1866}
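
/*
 * Example (simplified sketch, for illustration only; the function name below
 * is made up): try_charge() only records the overage in
 * current->memcg_nr_pages_over_high and sets a resume notification; the
 * actual reclaim runs from the task's return-to-userspace path, which simply
 * calls this function.
 */
#if 0
static void example_return_to_userspace_hook(void)
{
	/* No-op unless try_charge() noted pages over the high limit. */
	mem_cgroup_handle_over_high();
}
#endif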
1867
Johannes Weiner00501b52014-08-08 14:19:20 -07001868static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
1869 unsigned int nr_pages)
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08001870{
Johannes Weiner7ec99d62011-03-23 16:42:36 -07001871 unsigned int batch = max(CHARGE_BATCH, nr_pages);
Johannes Weiner9b130612014-08-06 16:05:51 -07001872 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
Johannes Weiner6539cc02014-08-06 16:05:42 -07001873 struct mem_cgroup *mem_over_limit;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001874 struct page_counter *counter;
Johannes Weiner6539cc02014-08-06 16:05:42 -07001875 unsigned long nr_reclaimed;
Johannes Weinerb70a2a22014-10-09 15:28:56 -07001876 bool may_swap = true;
1877 bool drained = false;
KAMEZAWA Hiroyukia636b322009-01-07 18:08:08 -08001878
Johannes Weinerce00a962014-09-05 08:43:57 -04001879 if (mem_cgroup_is_root(memcg))
Tejun Heo10d53c72015-11-05 18:46:17 -08001880 return 0;
Johannes Weiner6539cc02014-08-06 16:05:42 -07001881retry:
Michal Hockob6b6cc72014-04-07 15:37:44 -07001882 if (consume_stock(memcg, nr_pages))
Tejun Heo10d53c72015-11-05 18:46:17 -08001883 return 0;
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08001884
Johannes Weiner7941d212016-01-14 15:21:23 -08001885 if (!do_memsw_account() ||
Johannes Weiner6071ca52015-11-05 18:50:26 -08001886 page_counter_try_charge(&memcg->memsw, batch, &counter)) {
1887 if (page_counter_try_charge(&memcg->memory, batch, &counter))
Johannes Weiner6539cc02014-08-06 16:05:42 -07001888 goto done_restock;
Johannes Weiner7941d212016-01-14 15:21:23 -08001889 if (do_memsw_account())
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001890 page_counter_uncharge(&memcg->memsw, batch);
1891 mem_over_limit = mem_cgroup_from_counter(counter, memory);
Johannes Weiner3fbe7242014-10-09 15:28:54 -07001892 } else {
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001893 mem_over_limit = mem_cgroup_from_counter(counter, memsw);
Johannes Weinerb70a2a22014-10-09 15:28:56 -07001894 may_swap = false;
Johannes Weiner3fbe7242014-10-09 15:28:54 -07001895 }
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08001896
Johannes Weiner6539cc02014-08-06 16:05:42 -07001897 if (batch > nr_pages) {
1898 batch = nr_pages;
1899 goto retry;
1900 }
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001901
Johannes Weiner06b078f2014-08-06 16:05:44 -07001902 /*
1903 * Unlike in global OOM situations, memcg is not in a physical
1904 * memory shortage. Allow dying and OOM-killed tasks to
1905 * bypass the last charges so that they can exit quickly and
1906 * free their memory.
1907 */
1908 if (unlikely(test_thread_flag(TIF_MEMDIE) ||
1909 fatal_signal_pending(current) ||
1910 current->flags & PF_EXITING))
Tejun Heo10d53c72015-11-05 18:46:17 -08001911 goto force;
Johannes Weiner06b078f2014-08-06 16:05:44 -07001912
Johannes Weiner89a28482016-10-27 17:46:56 -07001913 /*
1914 * Prevent unbounded recursion when reclaim operations need to
1915 * allocate memory. This might exceed the limits temporarily,
1916 * but we prefer facilitating memory reclaim and getting back
1917 * under the limit over triggering OOM kills in these cases.
1918 */
1919 if (unlikely(current->flags & PF_MEMALLOC))
1920 goto force;
1921
Johannes Weiner06b078f2014-08-06 16:05:44 -07001922 if (unlikely(task_in_memcg_oom(current)))
1923 goto nomem;
1924
Mel Gormand0164ad2015-11-06 16:28:21 -08001925 if (!gfpflags_allow_blocking(gfp_mask))
Johannes Weiner6539cc02014-08-06 16:05:42 -07001926 goto nomem;
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001927
Johannes Weiner241994ed2015-02-11 15:26:06 -08001928 mem_cgroup_events(mem_over_limit, MEMCG_MAX, 1);
1929
Johannes Weinerb70a2a22014-10-09 15:28:56 -07001930 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
1931 gfp_mask, may_swap);
Johannes Weiner6539cc02014-08-06 16:05:42 -07001932
Johannes Weiner61e02c72014-08-06 16:08:16 -07001933 if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
Johannes Weiner6539cc02014-08-06 16:05:42 -07001934 goto retry;
Johannes Weiner28c34c22014-08-06 16:05:47 -07001935
Johannes Weinerb70a2a22014-10-09 15:28:56 -07001936 if (!drained) {
Johannes Weiner6d3d6aa2014-12-10 15:42:50 -08001937 drain_all_stock(mem_over_limit);
Johannes Weinerb70a2a22014-10-09 15:28:56 -07001938 drained = true;
1939 goto retry;
1940 }
1941
Johannes Weiner28c34c22014-08-06 16:05:47 -07001942 if (gfp_mask & __GFP_NORETRY)
1943 goto nomem;
Johannes Weiner6539cc02014-08-06 16:05:42 -07001944 /*
1945 * Even though the limit is exceeded at this point, reclaim
1946 * may have been able to free some pages. Retry the charge
1947 * before killing the task.
1948 *
1949 * Only for regular pages, though: huge pages are rather
1950 * unlikely to succeed so close to the limit, and we fall back
1951 * to regular pages anyway in case of failure.
1952 */
Johannes Weiner61e02c72014-08-06 16:08:16 -07001953 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
Johannes Weiner6539cc02014-08-06 16:05:42 -07001954 goto retry;
1955 /*
1956 * During task move, charges can be doubly counted. So, it's
1957 * better to wait until the end of task_move if one is in progress.
1958 */
1959 if (mem_cgroup_wait_acct_move(mem_over_limit))
1960 goto retry;
1961
Johannes Weiner9b130612014-08-06 16:05:51 -07001962 if (nr_retries--)
1963 goto retry;
1964
Johannes Weiner06b078f2014-08-06 16:05:44 -07001965 if (gfp_mask & __GFP_NOFAIL)
Tejun Heo10d53c72015-11-05 18:46:17 -08001966 goto force;
Johannes Weiner06b078f2014-08-06 16:05:44 -07001967
Johannes Weiner6539cc02014-08-06 16:05:42 -07001968 if (fatal_signal_pending(current))
Tejun Heo10d53c72015-11-05 18:46:17 -08001969 goto force;
Johannes Weiner6539cc02014-08-06 16:05:42 -07001970
Johannes Weiner241994ed2015-02-11 15:26:06 -08001971 mem_cgroup_events(mem_over_limit, MEMCG_OOM, 1);
1972
Jerome Marchand3608de02015-11-05 18:47:29 -08001973 mem_cgroup_oom(mem_over_limit, gfp_mask,
1974 get_order(nr_pages * PAGE_SIZE));
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08001975nomem:
Johannes Weiner6d1fdc42014-04-07 15:37:45 -07001976 if (!(gfp_mask & __GFP_NOFAIL))
Johannes Weiner3168ecb2013-10-31 16:34:13 -07001977 return -ENOMEM;
Tejun Heo10d53c72015-11-05 18:46:17 -08001978force:
1979 /*
1980 * The allocation either can't fail or will lead to more memory
1981 * being freed very soon. Allow memory usage go over the limit
1982 * temporarily by force charging it.
1983 */
1984 page_counter_charge(&memcg->memory, nr_pages);
Johannes Weiner7941d212016-01-14 15:21:23 -08001985 if (do_memsw_account())
Tejun Heo10d53c72015-11-05 18:46:17 -08001986 page_counter_charge(&memcg->memsw, nr_pages);
1987 css_get_many(&memcg->css, nr_pages);
1988
1989 return 0;
Johannes Weiner6539cc02014-08-06 16:05:42 -07001990
1991done_restock:
Johannes Weinere8ea14c2014-12-10 15:42:42 -08001992 css_get_many(&memcg->css, batch);
Johannes Weiner6539cc02014-08-06 16:05:42 -07001993 if (batch > nr_pages)
1994 refill_stock(memcg, batch - nr_pages);
Tejun Heob23afb92015-11-05 18:46:11 -08001995
Johannes Weiner241994ed2015-02-11 15:26:06 -08001996 /*
Tejun Heob23afb92015-11-05 18:46:11 -08001997 * If the hierarchy is above the normal consumption range, schedule
1998 * reclaim on returning to userland. We can perform reclaim here
Mel Gorman71baba42015-11-06 16:28:28 -08001999 * if __GFP_RECLAIM but let's always punt for simplicity and so that
Tejun Heob23afb92015-11-05 18:46:11 -08002000 * GFP_KERNEL can consistently be used during reclaim. @memcg is
2001 * not recorded as it most likely matches current's and won't
2002 * change in the meantime. As high limit is checked again before
2003 * reclaim, the cost of mismatch is negligible.
Johannes Weiner241994ed2015-02-11 15:26:06 -08002004 */
2005 do {
Tejun Heob23afb92015-11-05 18:46:11 -08002006 if (page_counter_read(&memcg->memory) > memcg->high) {
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08002007 /* Don't bother a random interrupted task */
2008 if (in_interrupt()) {
2009 schedule_work(&memcg->high_work);
2010 break;
2011 }
Vladimir Davydov9516a182015-12-11 13:40:24 -08002012 current->memcg_nr_pages_over_high += batch;
Tejun Heob23afb92015-11-05 18:46:11 -08002013 set_notify_resume(current);
2014 break;
2015 }
Johannes Weiner241994ed2015-02-11 15:26:06 -08002016 } while ((memcg = parent_mem_cgroup(memcg)));
Tejun Heo10d53c72015-11-05 18:46:17 -08002017
2018 return 0;
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002019}
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08002020
Johannes Weiner00501b52014-08-08 14:19:20 -07002021static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
Daisuke Nishimuraa3032a22009-12-15 16:47:10 -08002022{
Johannes Weinerce00a962014-09-05 08:43:57 -04002023 if (mem_cgroup_is_root(memcg))
2024 return;
2025
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002026 page_counter_uncharge(&memcg->memory, nr_pages);
Johannes Weiner7941d212016-01-14 15:21:23 -08002027 if (do_memsw_account())
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002028 page_counter_uncharge(&memcg->memsw, nr_pages);
Daisuke Nishimuraa3032a22009-12-15 16:47:10 -08002029
Johannes Weinere8ea14c2014-12-10 15:42:42 -08002030 css_put_many(&memcg->css, nr_pages);
KAMEZAWA Hiroyukid01dd172012-05-29 15:07:03 -07002031}
2032
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002033static void lock_page_lru(struct page *page, int *isolated)
2034{
2035 struct zone *zone = page_zone(page);
2036
Mel Gormana52633d2016-07-28 15:45:28 -07002037 spin_lock_irq(zone_lru_lock(zone));
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002038 if (PageLRU(page)) {
2039 struct lruvec *lruvec;
2040
Mel Gorman599d0c92016-07-28 15:45:31 -07002041 lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002042 ClearPageLRU(page);
2043 del_page_from_lru_list(page, lruvec, page_lru(page));
2044 *isolated = 1;
2045 } else
2046 *isolated = 0;
2047}
2048
2049static void unlock_page_lru(struct page *page, int isolated)
2050{
2051 struct zone *zone = page_zone(page);
2052
2053 if (isolated) {
2054 struct lruvec *lruvec;
2055
Mel Gorman599d0c92016-07-28 15:45:31 -07002056 lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002057 VM_BUG_ON_PAGE(PageLRU(page), page);
2058 SetPageLRU(page);
2059 add_page_to_lru_list(page, lruvec, page_lru(page));
2060 }
Mel Gormana52633d2016-07-28 15:45:28 -07002061 spin_unlock_irq(zone_lru_lock(zone));
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002062}
2063
Johannes Weiner00501b52014-08-08 14:19:20 -07002064static void commit_charge(struct page *page, struct mem_cgroup *memcg,
Johannes Weiner6abb5a82014-08-08 14:19:33 -07002065 bool lrucare)
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002066{
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002067 int isolated;
Hugh Dickins9ce70c02012-03-05 14:59:16 -08002068
Johannes Weiner1306a852014-12-10 15:44:52 -08002069 VM_BUG_ON_PAGE(page->mem_cgroup, page);
Hugh Dickins9ce70c02012-03-05 14:59:16 -08002070
2071 /*
2072 * In some cases, such as SwapCache and FUSE (splice_buf->radixtree), the page
2073 * may already be on some other mem_cgroup's LRU. Take care of it.
2074 */
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002075 if (lrucare)
2076 lock_page_lru(page, &isolated);
Hugh Dickins9ce70c02012-03-05 14:59:16 -08002077
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002078 /*
2079 * Nobody should be changing or seriously looking at
Johannes Weiner1306a852014-12-10 15:44:52 -08002080 * page->mem_cgroup at this point:
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002081 *
2082 * - the page is uncharged
2083 *
2084 * - the page is off-LRU
2085 *
2086 * - an anonymous fault has exclusive page access, except for
2087 * a locked page table
2088 *
2089 * - a page cache insertion, a swapin fault, or a migration
2090 * have the page locked
2091 */
Johannes Weiner1306a852014-12-10 15:44:52 -08002092 page->mem_cgroup = memcg;
Hugh Dickins3be91272008-02-07 00:14:19 -08002093
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002094 if (lrucare)
2095 unlock_page_lru(page, isolated);
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08002096}
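
/*
 * Example (simplified sketch, for illustration only; the function name and
 * the @abort flag are made up): try_charge(), commit_charge() and
 * cancel_charge() form a two-phase protocol.  The counters are charged
 * first, and the page is bound to the memcg only once the caller knows the
 * page will really be used; otherwise the charge is handed back.
 */
#if 0
static int example_charge_page(struct page *page, struct mem_cgroup *memcg,
			       gfp_t gfp_mask, bool lrucare, bool abort)
{
	int ret = try_charge(memcg, gfp_mask, 1);	/* phase 1: charge counters */

	if (ret)
		return ret;
	if (abort) {
		cancel_charge(memcg, 1);		/* undo phase 1 */
		return 0;
	}
	commit_charge(page, memcg, lrucare);		/* phase 2: bind the page */
	return 0;
}
#endif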
2097
Johannes Weiner127424c2016-01-20 15:02:32 -08002098#ifndef CONFIG_SLOB
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002099static int memcg_alloc_cache_id(void)
Glauber Costa55007d82012-12-18 14:22:38 -08002100{
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002101 int id, size;
2102 int err;
Glauber Costa55007d82012-12-18 14:22:38 -08002103
Vladimir Davydovdbcf73e2015-02-12 14:58:57 -08002104 id = ida_simple_get(&memcg_cache_ida,
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002105 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
2106 if (id < 0)
2107 return id;
2108
Vladimir Davydovdbcf73e2015-02-12 14:58:57 -08002109 if (id < memcg_nr_cache_ids)
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002110 return id;
2111
2112 /*
2113 * There's no space for the new id in memcg_caches arrays,
2114 * so we have to grow them.
2115 */
Vladimir Davydov05257a12015-02-12 14:59:01 -08002116 down_write(&memcg_cache_ids_sem);
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002117
2118 size = 2 * (id + 1);
Glauber Costa55007d82012-12-18 14:22:38 -08002119 if (size < MEMCG_CACHES_MIN_SIZE)
2120 size = MEMCG_CACHES_MIN_SIZE;
2121 else if (size > MEMCG_CACHES_MAX_SIZE)
2122 size = MEMCG_CACHES_MAX_SIZE;
2123
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002124 err = memcg_update_all_caches(size);
Vladimir Davydov05257a12015-02-12 14:59:01 -08002125 if (!err)
Vladimir Davydov60d3fd32015-02-12 14:59:10 -08002126 err = memcg_update_all_list_lrus(size);
2127 if (!err)
Vladimir Davydov05257a12015-02-12 14:59:01 -08002128 memcg_nr_cache_ids = size;
2129
2130 up_write(&memcg_cache_ids_sem);
2131
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002132 if (err) {
Vladimir Davydovdbcf73e2015-02-12 14:58:57 -08002133 ida_simple_remove(&memcg_cache_ida, id);
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002134 return err;
2135 }
2136 return id;
2137}
2138
2139static void memcg_free_cache_id(int id)
2140{
Vladimir Davydovdbcf73e2015-02-12 14:58:57 -08002141 ida_simple_remove(&memcg_cache_ida, id);
Glauber Costa55007d82012-12-18 14:22:38 -08002142}
2143
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002144struct memcg_kmem_cache_create_work {
Vladimir Davydov5722d092014-04-07 15:39:24 -07002145 struct mem_cgroup *memcg;
2146 struct kmem_cache *cachep;
2147 struct work_struct work;
2148};
2149
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002150static void memcg_kmem_cache_create_func(struct work_struct *w)
Glauber Costad7f25f82012-12-18 14:22:40 -08002151{
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002152 struct memcg_kmem_cache_create_work *cw =
2153 container_of(w, struct memcg_kmem_cache_create_work, work);
Vladimir Davydov5722d092014-04-07 15:39:24 -07002154 struct mem_cgroup *memcg = cw->memcg;
2155 struct kmem_cache *cachep = cw->cachep;
Glauber Costad7f25f82012-12-18 14:22:40 -08002156
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002157 memcg_create_kmem_cache(memcg, cachep);
Vladimir Davydovbd673142014-06-04 16:07:40 -07002158
Vladimir Davydov5722d092014-04-07 15:39:24 -07002159 css_put(&memcg->css);
Glauber Costad7f25f82012-12-18 14:22:40 -08002160 kfree(cw);
2161}
2162
2163/*
2164 * Enqueue the creation of a per-memcg kmem_cache.
Glauber Costad7f25f82012-12-18 14:22:40 -08002165 */
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002166static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2167 struct kmem_cache *cachep)
Glauber Costad7f25f82012-12-18 14:22:40 -08002168{
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002169 struct memcg_kmem_cache_create_work *cw;
Glauber Costad7f25f82012-12-18 14:22:40 -08002170
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07002171 cw = kmalloc(sizeof(*cw), GFP_NOWAIT);
Vladimir Davydov8135be52014-12-12 16:56:38 -08002172 if (!cw)
Glauber Costad7f25f82012-12-18 14:22:40 -08002173 return;
Vladimir Davydov8135be52014-12-12 16:56:38 -08002174
2175 css_get(&memcg->css);
Glauber Costad7f25f82012-12-18 14:22:40 -08002176
2177 cw->memcg = memcg;
2178 cw->cachep = cachep;
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002179 INIT_WORK(&cw->work, memcg_kmem_cache_create_func);
Glauber Costad7f25f82012-12-18 14:22:40 -08002180
Tejun Heo17cc4df2017-02-22 15:41:36 -08002181 queue_work(memcg_kmem_cache_wq, &cw->work);
Glauber Costad7f25f82012-12-18 14:22:40 -08002182}
2183
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002184static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2185 struct kmem_cache *cachep)
Glauber Costa0e9d92f2012-12-18 14:22:42 -08002186{
2187 /*
2188 * We need to stop accounting when we kmalloc, because if the
2189 * corresponding kmalloc cache is not yet created, the first allocation
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002190 * in __memcg_schedule_kmem_cache_create will recurse.
Glauber Costa0e9d92f2012-12-18 14:22:42 -08002191 *
2192 * However, it is better to enclose the whole function. Depending on
2193 * the debugging options enabled, INIT_WORK(), for instance, can
2194 * trigger an allocation. This too, will make us recurse. Because at
2195 * this point we can't allow ourselves back into memcg_kmem_get_cache,
2196 * the safest choice is to do it like this, wrapping the whole function.
2197 */
Vladimir Davydov6f185c22014-12-12 16:55:15 -08002198 current->memcg_kmem_skip_account = 1;
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002199 __memcg_schedule_kmem_cache_create(memcg, cachep);
Vladimir Davydov6f185c22014-12-12 16:55:15 -08002200 current->memcg_kmem_skip_account = 0;
Glauber Costa0e9d92f2012-12-18 14:22:42 -08002201}
Vladimir Davydovc67a8a62014-06-04 16:07:39 -07002202
Vladimir Davydov45264772016-07-26 15:24:21 -07002203static inline bool memcg_kmem_bypass(void)
2204{
2205 if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD))
2206 return true;
2207 return false;
2208}
2209
2210/**
2211 * memcg_kmem_get_cache: select the correct per-memcg cache for allocation
2212 * @cachep: the original global kmem cache
2213 *
Glauber Costad7f25f82012-12-18 14:22:40 -08002214 * Return the kmem_cache we're supposed to use for a slab allocation.
2215 * We try to use the current memcg's version of the cache.
2216 *
Vladimir Davydov45264772016-07-26 15:24:21 -07002217 * If the cache does not exist yet, and we are the first user of it, we
2218 * create it asynchronously in a workqueue and let the current allocation
2219 * go through with the original cache.
Glauber Costad7f25f82012-12-18 14:22:40 -08002220 *
Vladimir Davydov45264772016-07-26 15:24:21 -07002221 * This function takes a reference to the cache it returns to assure it
2222 * won't get destroyed while we are working with it. Once the caller is
2223 * done with it, memcg_kmem_put_cache() must be called to release the
2224 * reference.
Glauber Costad7f25f82012-12-18 14:22:40 -08002225 */
Vladimir Davydov45264772016-07-26 15:24:21 -07002226struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
Glauber Costad7f25f82012-12-18 14:22:40 -08002227{
2228 struct mem_cgroup *memcg;
Vladimir Davydov959c8962014-01-23 15:52:59 -08002229 struct kmem_cache *memcg_cachep;
Vladimir Davydov2a4db7e2015-02-12 14:59:32 -08002230 int kmemcg_id;
Glauber Costad7f25f82012-12-18 14:22:40 -08002231
Vladimir Davydovf7ce3192015-02-12 14:59:20 -08002232 VM_BUG_ON(!is_root_cache(cachep));
Glauber Costad7f25f82012-12-18 14:22:40 -08002233
Vladimir Davydov45264772016-07-26 15:24:21 -07002234 if (memcg_kmem_bypass())
Vladimir Davydov230e9fc2016-01-14 15:18:15 -08002235 return cachep;
2236
Vladimir Davydov9d100c52014-12-12 16:54:53 -08002237 if (current->memcg_kmem_skip_account)
Glauber Costa0e9d92f2012-12-18 14:22:42 -08002238 return cachep;
2239
Vladimir Davydov8135be52014-12-12 16:56:38 -08002240 memcg = get_mem_cgroup_from_mm(current->mm);
Jason Low4db0c3c2015-04-15 16:14:08 -07002241 kmemcg_id = READ_ONCE(memcg->kmemcg_id);
Vladimir Davydov2a4db7e2015-02-12 14:59:32 -08002242 if (kmemcg_id < 0)
Li Zefanca0dde92013-04-29 15:08:57 -07002243 goto out;
Glauber Costad7f25f82012-12-18 14:22:40 -08002244
Vladimir Davydov2a4db7e2015-02-12 14:59:32 -08002245 memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id);
Vladimir Davydov8135be52014-12-12 16:56:38 -08002246 if (likely(memcg_cachep))
2247 return memcg_cachep;
Li Zefanca0dde92013-04-29 15:08:57 -07002248
2249 /*
2250 * If we are in a safe context (can wait, and not in interrupt
2251	 * context), we could be predictable and return right away.
2252 * This would guarantee that the allocation being performed
2253 * already belongs in the new cache.
2254 *
2255	 * However, there are some clashes that can arise from locking.
2256 * For instance, because we acquire the slab_mutex while doing
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07002257 * memcg_create_kmem_cache, this means no further allocation
2258 * could happen with the slab_mutex held. So it's better to
2259 * defer everything.
Li Zefanca0dde92013-04-29 15:08:57 -07002260 */
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002261 memcg_schedule_kmem_cache_create(memcg, cachep);
Li Zefanca0dde92013-04-29 15:08:57 -07002262out:
Vladimir Davydov8135be52014-12-12 16:56:38 -08002263 css_put(&memcg->css);
Li Zefanca0dde92013-04-29 15:08:57 -07002264 return cachep;
Glauber Costad7f25f82012-12-18 14:22:40 -08002265}
Glauber Costad7f25f82012-12-18 14:22:40 -08002266
Vladimir Davydov45264772016-07-26 15:24:21 -07002267/**
2268 * memcg_kmem_put_cache: drop reference taken by memcg_kmem_get_cache
2269 * @cachep: the cache returned by memcg_kmem_get_cache
2270 */
2271void memcg_kmem_put_cache(struct kmem_cache *cachep)
Vladimir Davydov8135be52014-12-12 16:56:38 -08002272{
2273 if (!is_root_cache(cachep))
Vladimir Davydovf7ce3192015-02-12 14:59:20 -08002274 css_put(&cachep->memcg_params.memcg->css);
Vladimir Davydov8135be52014-12-12 16:56:38 -08002275}
2276
Vladimir Davydov45264772016-07-26 15:24:21 -07002277/**
2278 * memcg_kmem_charge_memcg: charge a kmem page to the given memory cgroup
2279 * @page: page to charge
2280 * @gfp: reclaim mode
2281 * @order: allocation order
2282 * @memcg: memory cgroup to charge
2283 *
2284 * Returns 0 on success, an error code on failure.
2285 */
2286int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
2287 struct mem_cgroup *memcg)
Vladimir Davydovf3ccb2c42015-11-05 18:49:01 -08002288{
2289 unsigned int nr_pages = 1 << order;
2290 struct page_counter *counter;
Johannes Weiner6071ca52015-11-05 18:50:26 -08002291 int ret;
Vladimir Davydovf3ccb2c42015-11-05 18:49:01 -08002292
Vladimir Davydovf3ccb2c42015-11-05 18:49:01 -08002293 ret = try_charge(memcg, gfp, nr_pages);
Johannes Weiner52c29b02016-01-20 15:02:35 -08002294 if (ret)
Vladimir Davydovf3ccb2c42015-11-05 18:49:01 -08002295 return ret;
Johannes Weiner52c29b02016-01-20 15:02:35 -08002296
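	/*
	 * On the legacy hierarchy the dedicated kmem counter is charged as
	 * well, so that kmem.limit_in_bytes keeps working; on the unified
	 * hierarchy only the main memory counter is used.
	 */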
2297 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
2298 !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
2299 cancel_charge(memcg, nr_pages);
2300 return -ENOMEM;
Vladimir Davydovf3ccb2c42015-11-05 18:49:01 -08002301 }
2302
2303 page->mem_cgroup = memcg;
2304
2305 return 0;
2306}
2307
Vladimir Davydov45264772016-07-26 15:24:21 -07002308/**
2309 * memcg_kmem_charge: charge a kmem page to the current memory cgroup
2310 * @page: page to charge
2311 * @gfp: reclaim mode
2312 * @order: allocation order
2313 *
2314 * Returns 0 on success, an error code on failure.
2315 */
2316int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002317{
2318 struct mem_cgroup *memcg;
Vladimir Davydovfcff7d72016-03-17 14:17:29 -07002319 int ret = 0;
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002320
Vladimir Davydov45264772016-07-26 15:24:21 -07002321 if (memcg_kmem_bypass())
2322 return 0;
2323
Johannes Weinerdf381972014-04-07 15:37:43 -07002324 memcg = get_mem_cgroup_from_mm(current->mm);
Vladimir Davydovc4159a72016-08-08 23:03:12 +03002325 if (!mem_cgroup_is_root(memcg)) {
Vladimir Davydov45264772016-07-26 15:24:21 -07002326 ret = memcg_kmem_charge_memcg(page, gfp, order, memcg);
Vladimir Davydovc4159a72016-08-08 23:03:12 +03002327 if (!ret)
2328 __SetPageKmemcg(page);
2329 }
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002330 css_put(&memcg->css);
Vladimir Davydovd05e83a2015-11-05 18:48:59 -08002331 return ret;
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002332}
Vladimir Davydov45264772016-07-26 15:24:21 -07002333/**
2334 * memcg_kmem_uncharge: uncharge a kmem page
2335 * @page: page to uncharge
2336 * @order: allocation order
2337 */
2338void memcg_kmem_uncharge(struct page *page, int order)
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002339{
Johannes Weiner1306a852014-12-10 15:44:52 -08002340 struct mem_cgroup *memcg = page->mem_cgroup;
Vladimir Davydovf3ccb2c42015-11-05 18:49:01 -08002341 unsigned int nr_pages = 1 << order;
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002342
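	/* pages that were never charged (e.g. accounting bypassed) have no memcg */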
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002343 if (!memcg)
2344 return;
2345
Sasha Levin309381fea2014-01-23 15:52:54 -08002346 VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
Johannes Weiner29833312014-12-10 15:44:02 -08002347
Johannes Weiner52c29b02016-01-20 15:02:35 -08002348 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2349 page_counter_uncharge(&memcg->kmem, nr_pages);
2350
Vladimir Davydovf3ccb2c42015-11-05 18:49:01 -08002351 page_counter_uncharge(&memcg->memory, nr_pages);
Johannes Weiner7941d212016-01-14 15:21:23 -08002352 if (do_memsw_account())
Vladimir Davydovf3ccb2c42015-11-05 18:49:01 -08002353 page_counter_uncharge(&memcg->memsw, nr_pages);
2354
Johannes Weiner1306a852014-12-10 15:44:52 -08002355 page->mem_cgroup = NULL;
Vladimir Davydovc4159a72016-08-08 23:03:12 +03002356
2357 /* slab pages do not have PageKmemcg flag set */
2358 if (PageKmemcg(page))
2359 __ClearPageKmemcg(page);
2360
Vladimir Davydovf3ccb2c42015-11-05 18:49:01 -08002361 css_put_many(&memcg->css, nr_pages);
Vladimir Davydov60d3fd32015-02-12 14:59:10 -08002362}
Johannes Weiner127424c2016-01-20 15:02:32 -08002363#endif /* !CONFIG_SLOB */
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002364
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002365#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2366
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002367/*
2368 * Because tail pages are not charged individually, point them at the head
Mel Gormana52633d2016-07-28 15:45:28 -07002369 * page's memcg. We're under zone_lru_lock and migration entries are set up in all page mappings.
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002370 */
KAMEZAWA Hiroyukie94c8a92012-01-12 17:18:20 -08002371void mem_cgroup_split_huge_fixup(struct page *head)
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002372{
KAMEZAWA Hiroyukie94c8a92012-01-12 17:18:20 -08002373 int i;
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002374
KAMEZAWA Hiroyuki3d37c4a2011-01-25 15:07:28 -08002375 if (mem_cgroup_disabled())
2376 return;
David Rientjesb070e652013-05-07 16:18:09 -07002377
Johannes Weiner29833312014-12-10 15:44:02 -08002378 for (i = 1; i < HPAGE_PMD_NR; i++)
Johannes Weiner1306a852014-12-10 15:44:52 -08002379 head[i].mem_cgroup = head->mem_cgroup;
Michal Hockob9982f82014-12-10 15:43:51 -08002380
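	/*
	 * The compound page is about to be split into HPAGE_PMD_NR base
	 * pages, so it no longer counts as huge RSS for this memcg.
	 */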
Johannes Weiner1306a852014-12-10 15:44:52 -08002381 __this_cpu_sub(head->mem_cgroup->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
David Rientjesb070e652013-05-07 16:18:09 -07002382 HPAGE_PMD_NR);
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002383}
Hugh Dickins12d27102012-01-12 17:19:52 -08002384#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002385
Andrew Mortonc255a452012-07-31 16:43:02 -07002386#ifdef CONFIG_MEMCG_SWAP
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002387static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
2388 bool charge)
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002389{
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002390 int val = (charge) ? 1 : -1;
2391 this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002392}
Daisuke Nishimura02491442010-03-10 15:22:17 -08002393
2394/**
2395 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
2396 * @entry: swap entry to be moved
2397 * @from: mem_cgroup which the entry is moved from
2398 * @to: mem_cgroup which the entry is moved to
2399 *
2400 * It succeeds only when the swap_cgroup's record for this entry is the same
2401 * as the mem_cgroup's id of @from.
2402 *
2403 * Returns 0 on success, -EINVAL on failure.
2404 *
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002405 * The caller must have charged to @to, IOW, called page_counter_charge() on
Daisuke Nishimura02491442010-03-10 15:22:17 -08002406 * both the memory and memsw counters, and called css_get().
2407 */
2408static int mem_cgroup_move_swap_account(swp_entry_t entry,
Hugh Dickinse91cbb42012-05-29 15:06:51 -07002409 struct mem_cgroup *from, struct mem_cgroup *to)
Daisuke Nishimura02491442010-03-10 15:22:17 -08002410{
2411 unsigned short old_id, new_id;
2412
Li Zefan34c00c32013-09-23 16:56:01 +08002413 old_id = mem_cgroup_id(from);
2414 new_id = mem_cgroup_id(to);
Daisuke Nishimura02491442010-03-10 15:22:17 -08002415
2416 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
Daisuke Nishimura02491442010-03-10 15:22:17 -08002417 mem_cgroup_swap_statistics(from, false);
Daisuke Nishimura02491442010-03-10 15:22:17 -08002418 mem_cgroup_swap_statistics(to, true);
Daisuke Nishimura02491442010-03-10 15:22:17 -08002419 return 0;
2420 }
2421 return -EINVAL;
2422}
2423#else
2424static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
Hugh Dickinse91cbb42012-05-29 15:06:51 -07002425 struct mem_cgroup *from, struct mem_cgroup *to)
Daisuke Nishimura02491442010-03-10 15:22:17 -08002426{
2427 return -EINVAL;
2428}
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002429#endif
2430
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002431static DEFINE_MUTEX(memcg_limit_mutex);
Daisuke Nishimuraf212ad72011-03-23 16:42:25 -07002432
KOSAKI Motohirod38d2a72009-01-06 14:39:44 -08002433static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002434 unsigned long limit)
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07002435{
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002436 unsigned long curusage;
2437 unsigned long oldusage;
2438 bool enlarge = false;
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07002439 int retry_count;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002440 int ret;
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07002441
2442 /*
2443	 * To keep hierarchical reclaim simple, how long we should retry
2444	 * depends on the caller. We set our retry count to be a function
2445	 * of the number of children we should visit in this loop.
2446 */
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002447 retry_count = MEM_CGROUP_RECLAIM_RETRIES *
2448 mem_cgroup_count_children(memcg);
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07002449
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002450 oldusage = page_counter_read(&memcg->memory);
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07002451
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002452 do {
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07002453 if (signal_pending(current)) {
2454 ret = -EINTR;
2455 break;
2456 }
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002457
2458 mutex_lock(&memcg_limit_mutex);
2459 if (limit > memcg->memsw.limit) {
2460 mutex_unlock(&memcg_limit_mutex);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002461 ret = -EINVAL;
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07002462 break;
2463 }
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002464 if (limit > memcg->memory.limit)
2465 enlarge = true;
2466 ret = page_counter_limit(&memcg->memory, limit);
2467 mutex_unlock(&memcg_limit_mutex);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002468
2469 if (!ret)
2470 break;
2471
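		/*
		 * The new limit is below current usage; reclaim some memory
		 * from this cgroup (swap allowed) before retrying.
		 */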
Johannes Weinerb70a2a22014-10-09 15:28:56 -07002472 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true);
2473
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002474 curusage = page_counter_read(&memcg->memory);
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07002475 /* Usage is reduced ? */
Andrew Mortonf894ffa2013-09-12 15:13:35 -07002476 if (curusage >= oldusage)
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07002477 retry_count--;
2478 else
2479 oldusage = curusage;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002480 } while (retry_count);
2481
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07002482 if (!ret && enlarge)
2483 memcg_oom_recover(memcg);
KOSAKI Motohiro14797e22009-01-07 18:08:18 -08002484
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002485 return ret;
2486}
2487
Li Zefan338c8432009-06-17 16:27:15 -07002488static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002489 unsigned long limit)
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002490{
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002491 unsigned long curusage;
2492 unsigned long oldusage;
2493 bool enlarge = false;
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07002494 int retry_count;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002495 int ret;
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002496
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07002497	/* see mem_cgroup_resize_limit */
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002498 retry_count = MEM_CGROUP_RECLAIM_RETRIES *
2499 mem_cgroup_count_children(memcg);
2500
2501 oldusage = page_counter_read(&memcg->memsw);
2502
2503 do {
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002504 if (signal_pending(current)) {
2505 ret = -EINTR;
2506 break;
2507 }
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002508
2509 mutex_lock(&memcg_limit_mutex);
2510 if (limit < memcg->memory.limit) {
2511 mutex_unlock(&memcg_limit_mutex);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002512 ret = -EINVAL;
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002513 break;
2514 }
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002515 if (limit > memcg->memsw.limit)
2516 enlarge = true;
2517 ret = page_counter_limit(&memcg->memsw, limit);
2518 mutex_unlock(&memcg_limit_mutex);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002519
2520 if (!ret)
2521 break;
2522
Johannes Weinerb70a2a22014-10-09 15:28:56 -07002523 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false);
2524
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002525 curusage = page_counter_read(&memcg->memsw);
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07002526 /* Usage is reduced ? */
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002527 if (curusage >= oldusage)
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07002528 retry_count--;
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07002529 else
2530 oldusage = curusage;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002531 } while (retry_count);
2532
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07002533 if (!ret && enlarge)
2534 memcg_oom_recover(memcg);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002535
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07002536 return ret;
2537}
2538
Mel Gormanef8f2322016-07-28 15:46:05 -07002539unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
Andrew Morton0608f432013-09-24 15:27:41 -07002540 gfp_t gfp_mask,
2541 unsigned long *total_scanned)
2542{
2543 unsigned long nr_reclaimed = 0;
Mel Gormanef8f2322016-07-28 15:46:05 -07002544 struct mem_cgroup_per_node *mz, *next_mz = NULL;
Andrew Morton0608f432013-09-24 15:27:41 -07002545 unsigned long reclaimed;
2546 int loop = 0;
Mel Gormanef8f2322016-07-28 15:46:05 -07002547 struct mem_cgroup_tree_per_node *mctz;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002548 unsigned long excess;
Andrew Morton0608f432013-09-24 15:27:41 -07002549 unsigned long nr_scanned;
2550
2551 if (order > 0)
2552 return 0;
2553
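	/*
	 * Each node has an rbtree of memcgs ordered by how far they exceed
	 * their soft limit; reclaim starts from the worst offender.
	 */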
Mel Gormanef8f2322016-07-28 15:46:05 -07002554 mctz = soft_limit_tree_node(pgdat->node_id);
Michal Hockod6507ff2016-08-02 14:02:37 -07002555
2556 /*
2557 * Do not even bother to check the largest node if the root
2558 * is empty. Do it lockless to prevent lock bouncing. Races
2559 * are acceptable as soft limit is best effort anyway.
2560 */
2561 if (RB_EMPTY_ROOT(&mctz->rb_root))
2562 return 0;
2563
Andrew Morton0608f432013-09-24 15:27:41 -07002564 /*
2565	 * This loop can run for a while, especially if mem_cgroups continuously
2566	 * keep exceeding their soft limit and putting the system under
2567	 * pressure.
2568 */
2569 do {
2570 if (next_mz)
2571 mz = next_mz;
2572 else
2573 mz = mem_cgroup_largest_soft_limit_node(mctz);
2574 if (!mz)
2575 break;
2576
2577 nr_scanned = 0;
Mel Gormanef8f2322016-07-28 15:46:05 -07002578 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
Andrew Morton0608f432013-09-24 15:27:41 -07002579 gfp_mask, &nr_scanned);
2580 nr_reclaimed += reclaimed;
2581 *total_scanned += nr_scanned;
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002582 spin_lock_irq(&mctz->lock);
Vladimir Davydovbc2f2e72014-12-10 15:43:40 -08002583 __mem_cgroup_remove_exceeded(mz, mctz);
Andrew Morton0608f432013-09-24 15:27:41 -07002584
2585 /*
2586 * If we failed to reclaim anything from this memory cgroup
2587 * it is time to move on to the next cgroup
2588 */
2589 next_mz = NULL;
Vladimir Davydovbc2f2e72014-12-10 15:43:40 -08002590 if (!reclaimed)
2591 next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
2592
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002593 excess = soft_limit_excess(mz->memcg);
Andrew Morton0608f432013-09-24 15:27:41 -07002594 /*
2595 * One school of thought says that we should not add
2596 * back the node to the tree if reclaim returns 0.
2597		 * But our reclaim could return 0 simply because, due
2598		 * to priority, we are exposing a smaller subset of
2599		 * memory to reclaim from. Consider this a longer-term
2600		 * TODO.
2601 */
2602 /* If excess == 0, no tree ops */
Johannes Weinercf2c8122014-06-06 14:38:21 -07002603 __mem_cgroup_insert_exceeded(mz, mctz, excess);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002604 spin_unlock_irq(&mctz->lock);
Andrew Morton0608f432013-09-24 15:27:41 -07002605 css_put(&mz->memcg->css);
2606 loop++;
2607 /*
2608 * Could not reclaim anything and there are no more
2609 * mem cgroups to try or we seem to be looping without
2610 * reclaiming anything.
2611 */
2612 if (!nr_reclaimed &&
2613 (next_mz == NULL ||
2614 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
2615 break;
2616 } while (!nr_reclaimed);
2617 if (next_mz)
2618 css_put(&next_mz->memcg->css);
2619 return nr_reclaimed;
2620}
2621
Tejun Heoea280e72014-05-16 13:22:48 -04002622/*
2623 * Test whether @memcg has children, dead or alive. Note that this
2624 * function doesn't care whether @memcg has use_hierarchy enabled and
2625 * returns %true if there are child csses according to the cgroup
2626 * hierarchy. Testing use_hierarchy is the caller's responsibility.
2627 */
Glauber Costab5f99b52013-02-22 16:34:53 -08002628static inline bool memcg_has_children(struct mem_cgroup *memcg)
2629{
Tejun Heoea280e72014-05-16 13:22:48 -04002630 bool ret;
2631
Tejun Heoea280e72014-05-16 13:22:48 -04002632 rcu_read_lock();
2633 ret = css_next_child(NULL, &memcg->css);
2634 rcu_read_unlock();
2635 return ret;
Glauber Costab5f99b52013-02-22 16:34:53 -08002636}
2637
2638/*
Greg Thelen51038172016-05-20 16:58:18 -07002639 * Reclaims as many pages from the given memcg as possible.
Michal Hockoc26251f2012-10-26 13:37:28 +02002640 *
2641 * Caller is responsible for holding css reference for memcg.
2642 */
2643static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
2644{
2645 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
Michal Hockoc26251f2012-10-26 13:37:28 +02002646
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08002647	/* we call try-to-free pages to make this cgroup empty */
2648 lru_add_drain_all();
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002649 /* try to free all pages in this cgroup */
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002650 while (nr_retries && page_counter_read(&memcg->memory)) {
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002651 int progress;
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08002652
Michal Hockoc26251f2012-10-26 13:37:28 +02002653 if (signal_pending(current))
2654 return -EINTR;
2655
Johannes Weinerb70a2a22014-10-09 15:28:56 -07002656 progress = try_to_free_mem_cgroup_pages(memcg, 1,
2657 GFP_KERNEL, true);
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08002658 if (!progress) {
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002659 nr_retries--;
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08002660 /* maybe some writeback is necessary */
Jens Axboe8aa7e842009-07-09 14:52:32 +02002661 congestion_wait(BLK_RW_ASYNC, HZ/10);
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08002662 }
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002663
2664 }
Michal Hockoab5196c2012-10-26 13:37:32 +02002665
2666 return 0;
KAMEZAWA Hiroyukicc847582008-02-07 00:14:16 -08002667}
2668
Tejun Heo6770c642014-05-13 12:16:21 -04002669static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
2670 char *buf, size_t nbytes,
2671 loff_t off)
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08002672{
Tejun Heo6770c642014-05-13 12:16:21 -04002673 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
Michal Hockoc26251f2012-10-26 13:37:28 +02002674
Michal Hockod8423012012-10-26 13:37:29 +02002675 if (mem_cgroup_is_root(memcg))
2676 return -EINVAL;
Tejun Heo6770c642014-05-13 12:16:21 -04002677 return mem_cgroup_force_empty(memcg) ?: nbytes;
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08002678}
2679
Tejun Heo182446d2013-08-08 20:11:24 -04002680static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
2681 struct cftype *cft)
Balbir Singh18f59ea2009-01-07 18:08:07 -08002682{
Tejun Heo182446d2013-08-08 20:11:24 -04002683 return mem_cgroup_from_css(css)->use_hierarchy;
Balbir Singh18f59ea2009-01-07 18:08:07 -08002684}
2685
Tejun Heo182446d2013-08-08 20:11:24 -04002686static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
2687 struct cftype *cft, u64 val)
Balbir Singh18f59ea2009-01-07 18:08:07 -08002688{
2689 int retval = 0;
Tejun Heo182446d2013-08-08 20:11:24 -04002690 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Tejun Heo5c9d5352014-05-16 13:22:48 -04002691 struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);
Balbir Singh18f59ea2009-01-07 18:08:07 -08002692
Glauber Costa567fb432012-07-31 16:43:07 -07002693 if (memcg->use_hierarchy == val)
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08002694 return 0;
Glauber Costa567fb432012-07-31 16:43:07 -07002695
Balbir Singh18f59ea2009-01-07 18:08:07 -08002696 /*
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02002697 * If parent's use_hierarchy is set, we can't make any modifications
Balbir Singh18f59ea2009-01-07 18:08:07 -08002698 * in the child subtrees. If it is unset, then the change can
2699 * occur, provided the current cgroup has no children.
2700 *
2701	 * For the root cgroup, parent_memcg is NULL; we allow the value to
2702	 * be set if there are no children.
2703 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002704 if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
Balbir Singh18f59ea2009-01-07 18:08:07 -08002705 (val == 1 || val == 0)) {
Tejun Heoea280e72014-05-16 13:22:48 -04002706 if (!memcg_has_children(memcg))
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002707 memcg->use_hierarchy = val;
Balbir Singh18f59ea2009-01-07 18:08:07 -08002708 else
2709 retval = -EBUSY;
2710 } else
2711 retval = -EINVAL;
Glauber Costa567fb432012-07-31 16:43:07 -07002712
Balbir Singh18f59ea2009-01-07 18:08:07 -08002713 return retval;
2714}
2715
Vladimir Davydov72b54e72016-03-17 14:17:32 -07002716static void tree_stat(struct mem_cgroup *memcg, unsigned long *stat)
Johannes Weinerce00a962014-09-05 08:43:57 -04002717{
2718 struct mem_cgroup *iter;
Vladimir Davydov72b54e72016-03-17 14:17:32 -07002719 int i;
Johannes Weinerce00a962014-09-05 08:43:57 -04002720
Vladimir Davydov72b54e72016-03-17 14:17:32 -07002721 memset(stat, 0, sizeof(*stat) * MEMCG_NR_STAT);
Johannes Weinerce00a962014-09-05 08:43:57 -04002722
Vladimir Davydov72b54e72016-03-17 14:17:32 -07002723 for_each_mem_cgroup_tree(iter, memcg) {
2724 for (i = 0; i < MEMCG_NR_STAT; i++)
2725 stat[i] += mem_cgroup_read_stat(iter, i);
2726 }
Johannes Weinerce00a962014-09-05 08:43:57 -04002727}
2728
Vladimir Davydov72b54e72016-03-17 14:17:32 -07002729static void tree_events(struct mem_cgroup *memcg, unsigned long *events)
Johannes Weiner587d9f72016-01-20 15:03:19 -08002730{
2731 struct mem_cgroup *iter;
Vladimir Davydov72b54e72016-03-17 14:17:32 -07002732 int i;
Johannes Weiner587d9f72016-01-20 15:03:19 -08002733
Vladimir Davydov72b54e72016-03-17 14:17:32 -07002734 memset(events, 0, sizeof(*events) * MEMCG_NR_EVENTS);
Johannes Weiner587d9f72016-01-20 15:03:19 -08002735
Vladimir Davydov72b54e72016-03-17 14:17:32 -07002736 for_each_mem_cgroup_tree(iter, memcg) {
2737 for (i = 0; i < MEMCG_NR_EVENTS; i++)
2738 events[i] += mem_cgroup_read_events(iter, i);
2739 }
Johannes Weiner587d9f72016-01-20 15:03:19 -08002740}
2741
Andrew Morton6f646152015-11-06 16:28:58 -08002742static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
Johannes Weinerce00a962014-09-05 08:43:57 -04002743{
Vladimir Davydov72b54e72016-03-17 14:17:32 -07002744 unsigned long val = 0;
Johannes Weinerce00a962014-09-05 08:43:57 -04002745
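	/*
	 * The root memcg is not charged through the page counters, so its
	 * usage has to be derived from the statistics of the whole tree.
	 */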
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002746 if (mem_cgroup_is_root(memcg)) {
Vladimir Davydov72b54e72016-03-17 14:17:32 -07002747 struct mem_cgroup *iter;
2748
2749 for_each_mem_cgroup_tree(iter, memcg) {
2750 val += mem_cgroup_read_stat(iter,
2751 MEM_CGROUP_STAT_CACHE);
2752 val += mem_cgroup_read_stat(iter,
2753 MEM_CGROUP_STAT_RSS);
2754 if (swap)
2755 val += mem_cgroup_read_stat(iter,
2756 MEM_CGROUP_STAT_SWAP);
2757 }
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002758 } else {
Johannes Weinerce00a962014-09-05 08:43:57 -04002759 if (!swap)
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002760 val = page_counter_read(&memcg->memory);
Johannes Weinerce00a962014-09-05 08:43:57 -04002761 else
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002762 val = page_counter_read(&memcg->memsw);
Johannes Weinerce00a962014-09-05 08:43:57 -04002763 }
Michal Hockoc12176d2015-11-05 18:50:29 -08002764 return val;
Johannes Weinerce00a962014-09-05 08:43:57 -04002765}
2766
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002767enum {
2768 RES_USAGE,
2769 RES_LIMIT,
2770 RES_MAX_USAGE,
2771 RES_FAILCNT,
2772 RES_SOFT_LIMIT,
2773};
Johannes Weinerce00a962014-09-05 08:43:57 -04002774
Tejun Heo791badb2013-12-05 12:28:02 -05002775static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
Johannes Weiner05b84302014-08-06 16:05:59 -07002776 struct cftype *cft)
Balbir Singh8cdea7c2008-02-07 00:13:50 -08002777{
Tejun Heo182446d2013-08-08 20:11:24 -04002778 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002779 struct page_counter *counter;
Tejun Heoaf36f902012-04-01 12:09:55 -07002780
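	/* cft->private encodes both the counter type and the attribute */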
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002781 switch (MEMFILE_TYPE(cft->private)) {
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002782 case _MEM:
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002783 counter = &memcg->memory;
Glauber Costa510fc4e2012-12-18 14:21:47 -08002784 break;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002785 case _MEMSWAP:
2786 counter = &memcg->memsw;
2787 break;
2788 case _KMEM:
2789 counter = &memcg->kmem;
2790 break;
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08002791 case _TCP:
Johannes Weiner0db15292016-01-20 15:02:50 -08002792 counter = &memcg->tcpmem;
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08002793 break;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002794 default:
2795 BUG();
2796 }
2797
2798 switch (MEMFILE_ATTR(cft->private)) {
2799 case RES_USAGE:
2800 if (counter == &memcg->memory)
Michal Hockoc12176d2015-11-05 18:50:29 -08002801 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002802 if (counter == &memcg->memsw)
Michal Hockoc12176d2015-11-05 18:50:29 -08002803 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002804 return (u64)page_counter_read(counter) * PAGE_SIZE;
2805 case RES_LIMIT:
2806 return (u64)counter->limit * PAGE_SIZE;
2807 case RES_MAX_USAGE:
2808 return (u64)counter->watermark * PAGE_SIZE;
2809 case RES_FAILCNT:
2810 return counter->failcnt;
2811 case RES_SOFT_LIMIT:
2812 return (u64)memcg->soft_limit * PAGE_SIZE;
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002813 default:
2814 BUG();
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002815 }
Balbir Singh8cdea7c2008-02-07 00:13:50 -08002816}
Glauber Costa510fc4e2012-12-18 14:21:47 -08002817
Johannes Weiner127424c2016-01-20 15:02:32 -08002818#ifndef CONFIG_SLOB
Johannes Weiner567e9ab2016-01-20 15:02:24 -08002819static int memcg_online_kmem(struct mem_cgroup *memcg)
Vladimir Davydovd6441632014-01-23 15:53:09 -08002820{
Vladimir Davydovd6441632014-01-23 15:53:09 -08002821 int memcg_id;
2822
Vladimir Davydovb313aee2016-03-17 14:18:27 -07002823 if (cgroup_memory_nokmem)
2824 return 0;
2825
Vladimir Davydov2a4db7e2015-02-12 14:59:32 -08002826 BUG_ON(memcg->kmemcg_id >= 0);
Johannes Weiner567e9ab2016-01-20 15:02:24 -08002827 BUG_ON(memcg->kmem_state);
Vladimir Davydovd6441632014-01-23 15:53:09 -08002828
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002829 memcg_id = memcg_alloc_cache_id();
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08002830 if (memcg_id < 0)
2831 return memcg_id;
Vladimir Davydovd6441632014-01-23 15:53:09 -08002832
Johannes Weineref129472016-01-14 15:21:34 -08002833 static_branch_inc(&memcg_kmem_enabled_key);
Vladimir Davydovd6441632014-01-23 15:53:09 -08002834 /*
Johannes Weiner567e9ab2016-01-20 15:02:24 -08002835 * A memory cgroup is considered kmem-online as soon as it gets
Vladimir Davydov900a38f2014-12-12 16:55:10 -08002836 * kmemcg_id. Setting the id after enabling static branching will
Vladimir Davydovd6441632014-01-23 15:53:09 -08002837 * guarantee no one starts accounting before all call sites are
2838 * patched.
2839 */
Vladimir Davydov900a38f2014-12-12 16:55:10 -08002840 memcg->kmemcg_id = memcg_id;
Johannes Weiner567e9ab2016-01-20 15:02:24 -08002841 memcg->kmem_state = KMEM_ONLINE;
Tejun Heobc2791f2017-02-22 15:41:21 -08002842 INIT_LIST_HEAD(&memcg->kmem_caches);
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08002843
2844 return 0;
Vladimir Davydovd6441632014-01-23 15:53:09 -08002845}
2846
Johannes Weiner8e0a8912016-01-20 15:02:26 -08002847static void memcg_offline_kmem(struct mem_cgroup *memcg)
2848{
2849 struct cgroup_subsys_state *css;
2850 struct mem_cgroup *parent, *child;
2851 int kmemcg_id;
2852
2853 if (memcg->kmem_state != KMEM_ONLINE)
2854 return;
2855 /*
2856 * Clear the online state before clearing memcg_caches array
2857 * entries. The slab_mutex in memcg_deactivate_kmem_caches()
2858 * guarantees that no cache will be created for this cgroup
2859 * after we are done (see memcg_create_kmem_cache()).
2860 */
2861 memcg->kmem_state = KMEM_ALLOCATED;
2862
2863 memcg_deactivate_kmem_caches(memcg);
2864
2865 kmemcg_id = memcg->kmemcg_id;
2866 BUG_ON(kmemcg_id < 0);
2867
2868 parent = parent_mem_cgroup(memcg);
2869 if (!parent)
2870 parent = root_mem_cgroup;
2871
2872 /*
2873 * Change kmemcg_id of this cgroup and all its descendants to the
2874 * parent's id, and then move all entries from this cgroup's list_lrus
2875	 * to those of the parent. After we have finished, all list_lrus
2876 * corresponding to this cgroup are guaranteed to remain empty. The
2877 * ordering is imposed by list_lru_node->lock taken by
2878 * memcg_drain_all_list_lrus().
2879 */
Tejun Heo3a06bb72016-06-03 14:55:44 -07002880 rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
Johannes Weiner8e0a8912016-01-20 15:02:26 -08002881 css_for_each_descendant_pre(css, &memcg->css) {
2882 child = mem_cgroup_from_css(css);
2883 BUG_ON(child->kmemcg_id != kmemcg_id);
2884 child->kmemcg_id = parent->kmemcg_id;
2885 if (!memcg->use_hierarchy)
2886 break;
2887 }
Tejun Heo3a06bb72016-06-03 14:55:44 -07002888 rcu_read_unlock();
2889
Johannes Weiner8e0a8912016-01-20 15:02:26 -08002890 memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);
2891
2892 memcg_free_cache_id(kmemcg_id);
2893}
2894
2895static void memcg_free_kmem(struct mem_cgroup *memcg)
2896{
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08002897 /* css_alloc() failed, offlining didn't happen */
2898 if (unlikely(memcg->kmem_state == KMEM_ONLINE))
2899 memcg_offline_kmem(memcg);
2900
Johannes Weiner8e0a8912016-01-20 15:02:26 -08002901 if (memcg->kmem_state == KMEM_ALLOCATED) {
2902 memcg_destroy_kmem_caches(memcg);
2903 static_branch_dec(&memcg_kmem_enabled_key);
2904 WARN_ON(page_counter_read(&memcg->kmem));
2905 }
Johannes Weiner8e0a8912016-01-20 15:02:26 -08002906}
Vladimir Davydovd6441632014-01-23 15:53:09 -08002907#else
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08002908static int memcg_online_kmem(struct mem_cgroup *memcg)
Johannes Weiner127424c2016-01-20 15:02:32 -08002909{
2910 return 0;
2911}
2912static void memcg_offline_kmem(struct mem_cgroup *memcg)
2913{
2914}
2915static void memcg_free_kmem(struct mem_cgroup *memcg)
2916{
2917}
2918#endif /* !CONFIG_SLOB */
2919
Johannes Weiner127424c2016-01-20 15:02:32 -08002920static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
2921 unsigned long limit)
2922{
Vladimir Davydovb313aee2016-03-17 14:18:27 -07002923 int ret;
Johannes Weiner127424c2016-01-20 15:02:32 -08002924
2925 mutex_lock(&memcg_limit_mutex);
Johannes Weiner127424c2016-01-20 15:02:32 -08002926 ret = page_counter_limit(&memcg->kmem, limit);
Johannes Weiner127424c2016-01-20 15:02:32 -08002927 mutex_unlock(&memcg_limit_mutex);
2928 return ret;
2929}
Glauber Costa510fc4e2012-12-18 14:21:47 -08002930
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08002931static int memcg_update_tcp_limit(struct mem_cgroup *memcg, unsigned long limit)
2932{
2933 int ret;
2934
2935 mutex_lock(&memcg_limit_mutex);
2936
Johannes Weiner0db15292016-01-20 15:02:50 -08002937 ret = page_counter_limit(&memcg->tcpmem, limit);
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08002938 if (ret)
2939 goto out;
2940
Johannes Weiner0db15292016-01-20 15:02:50 -08002941 if (!memcg->tcpmem_active) {
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08002942 /*
2943 * The active flag needs to be written after the static_key
2944 * update. This is what guarantees that the socket activation
Johannes Weiner2d758072016-10-07 17:00:58 -07002945 * function is the last one to run. See mem_cgroup_sk_alloc()
2946 * for details, and note that we don't mark any socket as
2947 * belonging to this memcg until that flag is up.
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08002948 *
2949 * We need to do this, because static_keys will span multiple
2950 * sites, but we can't control their order. If we mark a socket
2951 * as accounted, but the accounting functions are not patched in
2952 * yet, we'll lose accounting.
2953 *
Johannes Weiner2d758072016-10-07 17:00:58 -07002954 * We never race with the readers in mem_cgroup_sk_alloc(),
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08002955 * because when this value change, the code to process it is not
2956 * patched in yet.
2957 */
2958 static_branch_inc(&memcg_sockets_enabled_key);
Johannes Weiner0db15292016-01-20 15:02:50 -08002959 memcg->tcpmem_active = true;
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08002960 }
2961out:
2962 mutex_unlock(&memcg_limit_mutex);
2963 return ret;
2964}
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08002965
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07002966/*
2967 * The user of this function is...
2968 * RES_LIMIT.
2969 */
Tejun Heo451af502014-05-13 12:16:21 -04002970static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
2971 char *buf, size_t nbytes, loff_t off)
Balbir Singh8cdea7c2008-02-07 00:13:50 -08002972{
Tejun Heo451af502014-05-13 12:16:21 -04002973 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002974 unsigned long nr_pages;
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07002975 int ret;
2976
Tejun Heo451af502014-05-13 12:16:21 -04002977 buf = strstrip(buf);
Johannes Weiner650c5e52015-02-11 15:26:03 -08002978 ret = page_counter_memparse(buf, "-1", &nr_pages);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002979 if (ret)
2980 return ret;
Tejun Heoaf36f902012-04-01 12:09:55 -07002981
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002982 switch (MEMFILE_ATTR(of_cft(of)->private)) {
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07002983 case RES_LIMIT:
Balbir Singh4b3bde42009-09-23 15:56:32 -07002984 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
2985 ret = -EINVAL;
2986 break;
2987 }
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002988 switch (MEMFILE_TYPE(of_cft(of)->private)) {
2989 case _MEM:
2990 ret = mem_cgroup_resize_limit(memcg, nr_pages);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002991 break;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002992 case _MEMSWAP:
2993 ret = mem_cgroup_resize_memsw_limit(memcg, nr_pages);
2994 break;
2995 case _KMEM:
2996 ret = memcg_update_kmem_limit(memcg, nr_pages);
2997 break;
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08002998 case _TCP:
2999 ret = memcg_update_tcp_limit(memcg, nr_pages);
3000 break;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003001 }
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003002 break;
Balbir Singh296c81d2009-09-23 15:56:36 -07003003 case RES_SOFT_LIMIT:
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003004 memcg->soft_limit = nr_pages;
3005 ret = 0;
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003006 break;
3007 }
Tejun Heo451af502014-05-13 12:16:21 -04003008 return ret ?: nbytes;
Balbir Singh8cdea7c2008-02-07 00:13:50 -08003009}
3010
Tejun Heo6770c642014-05-13 12:16:21 -04003011static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
3012 size_t nbytes, loff_t off)
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07003013{
Tejun Heo6770c642014-05-13 12:16:21 -04003014 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003015 struct page_counter *counter;
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07003016
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003017 switch (MEMFILE_TYPE(of_cft(of)->private)) {
3018 case _MEM:
3019 counter = &memcg->memory;
3020 break;
3021 case _MEMSWAP:
3022 counter = &memcg->memsw;
3023 break;
3024 case _KMEM:
3025 counter = &memcg->kmem;
3026 break;
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08003027 case _TCP:
Johannes Weiner0db15292016-01-20 15:02:50 -08003028 counter = &memcg->tcpmem;
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08003029 break;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003030 default:
3031 BUG();
3032 }
Tejun Heoaf36f902012-04-01 12:09:55 -07003033
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003034 switch (MEMFILE_ATTR(of_cft(of)->private)) {
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07003035 case RES_MAX_USAGE:
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003036 page_counter_reset_watermark(counter);
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07003037 break;
3038 case RES_FAILCNT:
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003039 counter->failcnt = 0;
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07003040 break;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003041 default:
3042 BUG();
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07003043 }
Balbir Singhf64c3f52009-09-23 15:56:37 -07003044
Tejun Heo6770c642014-05-13 12:16:21 -04003045 return nbytes;
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07003046}
3047
Tejun Heo182446d2013-08-08 20:11:24 -04003048static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003049 struct cftype *cft)
3050{
Tejun Heo182446d2013-08-08 20:11:24 -04003051 return mem_cgroup_from_css(css)->move_charge_at_immigrate;
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003052}
3053
Daisuke Nishimura02491442010-03-10 15:22:17 -08003054#ifdef CONFIG_MMU
Tejun Heo182446d2013-08-08 20:11:24 -04003055static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003056 struct cftype *cft, u64 val)
3057{
Tejun Heo182446d2013-08-08 20:11:24 -04003058 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003059
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08003060 if (val & ~MOVE_MASK)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003061 return -EINVAL;
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003062
Glauber Costaee5e8472013-02-22 16:34:50 -08003063 /*
3064 * No kind of locking is needed in here, because ->can_attach() will
3065 * check this value once in the beginning of the process, and then carry
3066 * on with stale data. This means that changes to this value will only
3067 * affect task migrations starting after the change.
3068 */
3069 memcg->move_charge_at_immigrate = val;
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003070 return 0;
3071}
Daisuke Nishimura02491442010-03-10 15:22:17 -08003072#else
Tejun Heo182446d2013-08-08 20:11:24 -04003073static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
Daisuke Nishimura02491442010-03-10 15:22:17 -08003074 struct cftype *cft, u64 val)
3075{
3076 return -ENOSYS;
3077}
3078#endif
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003079
Ying Han406eb0c2011-05-26 16:25:37 -07003080#ifdef CONFIG_NUMA
Tejun Heo2da8ca82013-12-05 12:28:04 -05003081static int memcg_numa_stat_show(struct seq_file *m, void *v)
Ying Han406eb0c2011-05-26 16:25:37 -07003082{
Greg Thelen25485de2013-11-12 15:07:40 -08003083 struct numa_stat {
3084 const char *name;
3085 unsigned int lru_mask;
3086 };
3087
3088 static const struct numa_stat stats[] = {
3089 { "total", LRU_ALL },
3090 { "file", LRU_ALL_FILE },
3091 { "anon", LRU_ALL_ANON },
3092 { "unevictable", BIT(LRU_UNEVICTABLE) },
3093 };
3094 const struct numa_stat *stat;
Ying Han406eb0c2011-05-26 16:25:37 -07003095 int nid;
Greg Thelen25485de2013-11-12 15:07:40 -08003096 unsigned long nr;
Tejun Heo2da8ca82013-12-05 12:28:04 -05003097 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
Ying Han406eb0c2011-05-26 16:25:37 -07003098
Greg Thelen25485de2013-11-12 15:07:40 -08003099 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3100 nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
3101 seq_printf(m, "%s=%lu", stat->name, nr);
3102 for_each_node_state(nid, N_MEMORY) {
3103 nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
3104 stat->lru_mask);
3105 seq_printf(m, " N%d=%lu", nid, nr);
3106 }
3107 seq_putc(m, '\n');
Ying Han406eb0c2011-05-26 16:25:37 -07003108 }
Ying Han406eb0c2011-05-26 16:25:37 -07003109
Ying Han071aee12013-11-12 15:07:41 -08003110 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3111 struct mem_cgroup *iter;
Ying Han406eb0c2011-05-26 16:25:37 -07003112
Ying Han071aee12013-11-12 15:07:41 -08003113 nr = 0;
3114 for_each_mem_cgroup_tree(iter, memcg)
3115 nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
3116 seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
3117 for_each_node_state(nid, N_MEMORY) {
3118 nr = 0;
3119 for_each_mem_cgroup_tree(iter, memcg)
3120 nr += mem_cgroup_node_nr_lru_pages(
3121 iter, nid, stat->lru_mask);
3122 seq_printf(m, " N%d=%lu", nid, nr);
3123 }
3124 seq_putc(m, '\n');
Ying Han406eb0c2011-05-26 16:25:37 -07003125 }
Ying Han406eb0c2011-05-26 16:25:37 -07003126
Ying Han406eb0c2011-05-26 16:25:37 -07003127 return 0;
3128}
3129#endif /* CONFIG_NUMA */
3130
Tejun Heo2da8ca82013-12-05 12:28:04 -05003131static int memcg_stat_show(struct seq_file *m, void *v)
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08003132{
Tejun Heo2da8ca82013-12-05 12:28:04 -05003133 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003134 unsigned long memory, memsw;
Johannes Weineraf7c4b02012-05-29 15:07:08 -07003135 struct mem_cgroup *mi;
3136 unsigned int i;
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08003137
Greg Thelen0ca44b12015-02-11 15:25:58 -08003138 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_stat_names) !=
3139 MEM_CGROUP_STAT_NSTATS);
3140 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_events_names) !=
3141 MEM_CGROUP_EVENTS_NSTATS);
Rickard Strandqvist70bc0682014-12-12 16:56:41 -08003142 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
3143
Johannes Weineraf7c4b02012-05-29 15:07:08 -07003144 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
Johannes Weiner7941d212016-01-14 15:21:23 -08003145 if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account())
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07003146 continue;
Greg Thelen484ebb32015-10-01 15:37:05 -07003147 seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i],
Johannes Weineraf7c4b02012-05-29 15:07:08 -07003148 mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07003149 }
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08003150
Johannes Weineraf7c4b02012-05-29 15:07:08 -07003151 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
3152 seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
3153 mem_cgroup_read_events(memcg, i));
3154
3155 for (i = 0; i < NR_LRU_LISTS; i++)
3156 seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
3157 mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);
3158
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07003159 /* Hierarchical information */
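	/* the effective limit is the smallest limit along the path to the root */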
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003160 memory = memsw = PAGE_COUNTER_MAX;
3161 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
3162 memory = min(memory, mi->memory.limit);
3163 memsw = min(memsw, mi->memsw.limit);
KAMEZAWA Hiroyukifee7b542009-01-07 18:08:26 -08003164 }
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003165 seq_printf(m, "hierarchical_memory_limit %llu\n",
3166 (u64)memory * PAGE_SIZE);
Johannes Weiner7941d212016-01-14 15:21:23 -08003167 if (do_memsw_account())
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003168 seq_printf(m, "hierarchical_memsw_limit %llu\n",
3169 (u64)memsw * PAGE_SIZE);
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003170
Johannes Weineraf7c4b02012-05-29 15:07:08 -07003171 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
Greg Thelen484ebb32015-10-01 15:37:05 -07003172 unsigned long long val = 0;
Johannes Weineraf7c4b02012-05-29 15:07:08 -07003173
Johannes Weiner7941d212016-01-14 15:21:23 -08003174 if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account())
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07003175 continue;
Johannes Weineraf7c4b02012-05-29 15:07:08 -07003176 for_each_mem_cgroup_tree(mi, memcg)
3177 val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
Greg Thelen484ebb32015-10-01 15:37:05 -07003178 seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val);
Johannes Weineraf7c4b02012-05-29 15:07:08 -07003179 }
3180
3181 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
3182 unsigned long long val = 0;
3183
3184 for_each_mem_cgroup_tree(mi, memcg)
3185 val += mem_cgroup_read_events(mi, i);
3186 seq_printf(m, "total_%s %llu\n",
3187 mem_cgroup_events_names[i], val);
3188 }
3189
3190 for (i = 0; i < NR_LRU_LISTS; i++) {
3191 unsigned long long val = 0;
3192
3193 for_each_mem_cgroup_tree(mi, memcg)
3194 val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
3195 seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07003196 }
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07003197
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003198#ifdef CONFIG_DEBUG_VM
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003199 {
Mel Gormanef8f2322016-07-28 15:46:05 -07003200 pg_data_t *pgdat;
3201 struct mem_cgroup_per_node *mz;
Hugh Dickins89abfab2012-05-29 15:06:53 -07003202 struct zone_reclaim_stat *rstat;
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003203 unsigned long recent_rotated[2] = {0, 0};
3204 unsigned long recent_scanned[2] = {0, 0};
3205
Mel Gormanef8f2322016-07-28 15:46:05 -07003206 for_each_online_pgdat(pgdat) {
3207 mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
3208 rstat = &mz->lruvec.reclaim_stat;
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003209
Mel Gormanef8f2322016-07-28 15:46:05 -07003210 recent_rotated[0] += rstat->recent_rotated[0];
3211 recent_rotated[1] += rstat->recent_rotated[1];
3212 recent_scanned[0] += rstat->recent_scanned[0];
3213 recent_scanned[1] += rstat->recent_scanned[1];
3214 }
Johannes Weiner78ccf5b2012-05-29 15:07:06 -07003215 seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
3216 seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
3217 seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
3218 seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003219 }
3220#endif
3221
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08003222 return 0;
3223}
3224
Tejun Heo182446d2013-08-08 20:11:24 -04003225static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
3226 struct cftype *cft)
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003227{
Tejun Heo182446d2013-08-08 20:11:24 -04003228 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003229
KAMEZAWA Hiroyuki1f4c0252011-07-26 16:08:21 -07003230 return mem_cgroup_swappiness(memcg);
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003231}
3232
Tejun Heo182446d2013-08-08 20:11:24 -04003233static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
3234 struct cftype *cft, u64 val)
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003235{
Tejun Heo182446d2013-08-08 20:11:24 -04003236 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Li Zefan068b38c2009-01-15 13:51:26 -08003237
Johannes Weiner3dae7fe2014-06-04 16:07:01 -07003238 if (val > 100)
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003239 return -EINVAL;
3240
Linus Torvalds14208b02014-06-09 15:03:33 -07003241 if (css->parent)
Johannes Weiner3dae7fe2014-06-04 16:07:01 -07003242 memcg->swappiness = val;
3243 else
3244 vm_swappiness = val;
Li Zefan068b38c2009-01-15 13:51:26 -08003245
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003246 return 0;
3247}
3248
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003249static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
3250{
3251 struct mem_cgroup_threshold_ary *t;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003252 unsigned long usage;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003253 int i;
3254
3255 rcu_read_lock();
3256 if (!swap)
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003257 t = rcu_dereference(memcg->thresholds.primary);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003258 else
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003259 t = rcu_dereference(memcg->memsw_thresholds.primary);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003260
3261 if (!t)
3262 goto unlock;
3263
Johannes Weinerce00a962014-09-05 08:43:57 -04003264 usage = mem_cgroup_usage(memcg, swap);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003265
3266 /*
Sha Zhengju748dad32012-05-29 15:06:57 -07003267	 * current_threshold points to the threshold just below or equal to usage.
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003268	 * If that is no longer true, a threshold was crossed after the last
3269	 * call of __mem_cgroup_threshold().
3270 */
Phil Carmody5407a562010-05-26 14:42:42 -07003271 i = t->current_threshold;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003272
3273 /*
3274 * Iterate backward over array of thresholds starting from
3275 * current_threshold and check if a threshold is crossed.
3276 * If none of thresholds below usage is crossed, we read
3277 * only one element of the array here.
3278 */
3279 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
3280 eventfd_signal(t->entries[i].eventfd, 1);
3281
3282 /* i = current_threshold + 1 */
3283 i++;
3284
3285 /*
3286 * Iterate forward over array of thresholds starting from
3287 * current_threshold+1 and check if a threshold is crossed.
3288 * If none of thresholds above usage is crossed, we read
3289 * only one element of the array here.
3290 */
3291 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
3292 eventfd_signal(t->entries[i].eventfd, 1);
3293
3294 /* Update current_threshold */
Phil Carmody5407a562010-05-26 14:42:42 -07003295 t->current_threshold = i - 1;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003296unlock:
3297 rcu_read_unlock();
3298}
3299
3300static void mem_cgroup_threshold(struct mem_cgroup *memcg)
3301{
Kirill A. Shutemovad4ca5f2010-10-07 12:59:27 -07003302 while (memcg) {
3303 __mem_cgroup_threshold(memcg, false);
Johannes Weiner7941d212016-01-14 15:21:23 -08003304 if (do_memsw_account())
Kirill A. Shutemovad4ca5f2010-10-07 12:59:27 -07003305 __mem_cgroup_threshold(memcg, true);
3306
3307 memcg = parent_mem_cgroup(memcg);
3308 }
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003309}
3310
3311static int compare_thresholds(const void *a, const void *b)
3312{
3313 const struct mem_cgroup_threshold *_a = a;
3314 const struct mem_cgroup_threshold *_b = b;
3315
Greg Thelen2bff24a2013-09-11 14:23:08 -07003316 if (_a->threshold > _b->threshold)
3317 return 1;
3318
3319 if (_a->threshold < _b->threshold)
3320 return -1;
3321
3322 return 0;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003323}
3324
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003325static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003326{
3327 struct mem_cgroup_eventfd_list *ev;
3328
Michal Hocko2bcf2e92014-07-30 16:08:33 -07003329 spin_lock(&memcg_oom_lock);
3330
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003331 list_for_each_entry(ev, &memcg->oom_notify, list)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003332 eventfd_signal(ev->eventfd, 1);
Michal Hocko2bcf2e92014-07-30 16:08:33 -07003333
3334 spin_unlock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003335 return 0;
3336}
3337
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003338static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003339{
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07003340 struct mem_cgroup *iter;
3341
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003342 for_each_mem_cgroup_tree(iter, memcg)
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07003343 mem_cgroup_oom_notify_cb(iter);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003344}
3345
Tejun Heo59b6f872013-11-22 18:20:43 -05003346static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003347 struct eventfd_ctx *eventfd, const char *args, enum res_type type)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003348{
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003349 struct mem_cgroup_thresholds *thresholds;
3350 struct mem_cgroup_threshold_ary *new;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003351 unsigned long threshold;
3352 unsigned long usage;
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003353 int i, size, ret;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003354
Johannes Weiner650c5e52015-02-11 15:26:03 -08003355 ret = page_counter_memparse(args, "-1", &threshold);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003356 if (ret)
3357 return ret;
3358
3359 mutex_lock(&memcg->thresholds_lock);
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003360
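	/*
	 * Thresholds live in a sorted array that readers walk under RCU, so
	 * updates build a new array and publish it with rcu_assign_pointer()
	 * below; the previous primary array becomes the ->spare.
	 */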
Johannes Weiner05b84302014-08-06 16:05:59 -07003361 if (type == _MEM) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003362 thresholds = &memcg->thresholds;
Johannes Weinerce00a962014-09-05 08:43:57 -04003363 usage = mem_cgroup_usage(memcg, false);
Johannes Weiner05b84302014-08-06 16:05:59 -07003364 } else if (type == _MEMSWAP) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003365 thresholds = &memcg->memsw_thresholds;
Johannes Weinerce00a962014-09-05 08:43:57 -04003366 usage = mem_cgroup_usage(memcg, true);
Johannes Weiner05b84302014-08-06 16:05:59 -07003367 } else
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003368 BUG();
3369
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003370 /* Check if a threshold was crossed before adding a new one */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003371 if (thresholds->primary)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003372 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3373
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003374 size = thresholds->primary ? thresholds->primary->size + 1 : 1;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003375
3376 /* Allocate memory for new array of thresholds */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003377 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003378 GFP_KERNEL);
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003379 if (!new) {
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003380 ret = -ENOMEM;
3381 goto unlock;
3382 }
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003383 new->size = size;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003384
3385 /* Copy thresholds (if any) to new array */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003386 if (thresholds->primary) {
3387 memcpy(new->entries, thresholds->primary->entries, (size - 1) *
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003388 sizeof(struct mem_cgroup_threshold));
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003389 }
3390
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003391 /* Add new threshold */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003392 new->entries[size - 1].eventfd = eventfd;
3393 new->entries[size - 1].threshold = threshold;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003394
3395 /* Sort thresholds. Registering a new threshold isn't time-critical */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003396 sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003397 compare_thresholds, NULL);
3398
3399 /* Find current threshold */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003400 new->current_threshold = -1;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003401 for (i = 0; i < size; i++) {
Sha Zhengju748dad32012-05-29 15:06:57 -07003402 if (new->entries[i].threshold <= usage) {
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003403 /*
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003404 * new->current_threshold will not be used until
3405 * rcu_assign_pointer(), so it's safe to increment
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003406 * it here.
3407 */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003408 ++new->current_threshold;
Sha Zhengju748dad32012-05-29 15:06:57 -07003409 } else
3410 break;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003411 }
3412
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003413 /* Free old spare buffer and save old primary buffer as spare */
3414 kfree(thresholds->spare);
3415 thresholds->spare = thresholds->primary;
3416
3417 rcu_assign_pointer(thresholds->primary, new);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003418
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07003419 /* To be sure that nobody uses thresholds */
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003420 synchronize_rcu();
3421
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003422unlock:
3423 mutex_unlock(&memcg->thresholds_lock);
3424
3425 return ret;
3426}
3427
Tejun Heo59b6f872013-11-22 18:20:43 -05003428static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003429 struct eventfd_ctx *eventfd, const char *args)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003430{
Tejun Heo59b6f872013-11-22 18:20:43 -05003431 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
Tejun Heo347c4a82013-11-22 18:20:43 -05003432}
3433
Tejun Heo59b6f872013-11-22 18:20:43 -05003434static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003435 struct eventfd_ctx *eventfd, const char *args)
3436{
Tejun Heo59b6f872013-11-22 18:20:43 -05003437 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
Tejun Heo347c4a82013-11-22 18:20:43 -05003438}
3439
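/*
 * The unregister path reuses the spare buffer saved by the register
 * path, so it needs no allocation and cannot fail: surviving entries
 * are copied over, then the primary and spare arrays are swapped.
 */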
Tejun Heo59b6f872013-11-22 18:20:43 -05003440static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003441 struct eventfd_ctx *eventfd, enum res_type type)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003442{
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003443 struct mem_cgroup_thresholds *thresholds;
3444 struct mem_cgroup_threshold_ary *new;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003445 unsigned long usage;
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003446 int i, j, size;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003447
3448 mutex_lock(&memcg->thresholds_lock);
Johannes Weiner05b84302014-08-06 16:05:59 -07003449
3450 if (type == _MEM) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003451 thresholds = &memcg->thresholds;
Johannes Weinerce00a962014-09-05 08:43:57 -04003452 usage = mem_cgroup_usage(memcg, false);
Johannes Weiner05b84302014-08-06 16:05:59 -07003453 } else if (type == _MEMSWAP) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003454 thresholds = &memcg->memsw_thresholds;
Johannes Weinerce00a962014-09-05 08:43:57 -04003455 usage = mem_cgroup_usage(memcg, true);
Johannes Weiner05b84302014-08-06 16:05:59 -07003456 } else
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003457 BUG();
3458
Anton Vorontsov371528c2012-02-24 05:14:46 +04003459 if (!thresholds->primary)
3460 goto unlock;
3461
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003462 /* Check if a threshold was crossed before removing */
3463 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3464
3465 /* Calculate the new number of thresholds */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003466 size = 0;
3467 for (i = 0; i < thresholds->primary->size; i++) {
3468 if (thresholds->primary->entries[i].eventfd != eventfd)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003469 size++;
3470 }
3471
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003472 new = thresholds->spare;
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07003473
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003474 /* Set thresholds array to NULL if we don't have thresholds */
3475 if (!size) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003476 kfree(new);
3477 new = NULL;
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07003478 goto swap_buffers;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003479 }
3480
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003481 new->size = size;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003482
3483 /* Copy thresholds and find current threshold */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003484 new->current_threshold = -1;
3485 for (i = 0, j = 0; i < thresholds->primary->size; i++) {
3486 if (thresholds->primary->entries[i].eventfd == eventfd)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003487 continue;
3488
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003489 new->entries[j] = thresholds->primary->entries[i];
Sha Zhengju748dad32012-05-29 15:06:57 -07003490 if (new->entries[j].threshold <= usage) {
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003491 /*
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003492 * new->current_threshold will not be used
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003493 * until rcu_assign_pointer(), so it's safe to increment
3494 * it here.
3495 */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003496 ++new->current_threshold;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003497 }
3498 j++;
3499 }
3500
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07003501swap_buffers:
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003502 /* Swap primary and spare array */
3503 thresholds->spare = thresholds->primary;
Sha Zhengju8c757762012-05-10 13:01:45 -07003504
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003505 rcu_assign_pointer(thresholds->primary, new);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003506
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07003507 /* To be sure that nobody uses thresholds */
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003508 synchronize_rcu();
Martijn Coenen6611d8d2016-01-15 16:57:49 -08003509
3510 /* If all events are unregistered, free the spare array */
3511 if (!new) {
3512 kfree(thresholds->spare);
3513 thresholds->spare = NULL;
3514 }
Anton Vorontsov371528c2012-02-24 05:14:46 +04003515unlock:
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003516 mutex_unlock(&memcg->thresholds_lock);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003517}
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003518
Tejun Heo59b6f872013-11-22 18:20:43 -05003519static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003520 struct eventfd_ctx *eventfd)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003521{
Tejun Heo59b6f872013-11-22 18:20:43 -05003522 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
Tejun Heo347c4a82013-11-22 18:20:43 -05003523}
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003524
Tejun Heo59b6f872013-11-22 18:20:43 -05003525static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003526 struct eventfd_ctx *eventfd)
3527{
Tejun Heo59b6f872013-11-22 18:20:43 -05003528 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
Tejun Heo347c4a82013-11-22 18:20:43 -05003529}
3530
Tejun Heo59b6f872013-11-22 18:20:43 -05003531static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003532 struct eventfd_ctx *eventfd, const char *args)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003533{
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003534 struct mem_cgroup_eventfd_list *event;
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003535
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003536 event = kmalloc(sizeof(*event), GFP_KERNEL);
3537 if (!event)
3538 return -ENOMEM;
3539
Michal Hocko1af8efe2011-07-26 16:08:24 -07003540 spin_lock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003541
3542 event->eventfd = eventfd;
3543 list_add(&event->list, &memcg->oom_notify);
3544
3545 /* already in OOM? */
Tejun Heoc2b42d32015-06-24 16:58:23 -07003546 if (memcg->under_oom)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003547 eventfd_signal(eventfd, 1);
Michal Hocko1af8efe2011-07-26 16:08:24 -07003548 spin_unlock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003549
3550 return 0;
3551}
3552
Tejun Heo59b6f872013-11-22 18:20:43 -05003553static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003554 struct eventfd_ctx *eventfd)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003555{
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003556 struct mem_cgroup_eventfd_list *ev, *tmp;
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003557
Michal Hocko1af8efe2011-07-26 16:08:24 -07003558 spin_lock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003559
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003560 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003561 if (ev->eventfd == eventfd) {
3562 list_del(&ev->list);
3563 kfree(ev);
3564 }
3565 }
3566
Michal Hocko1af8efe2011-07-26 16:08:24 -07003567 spin_unlock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003568}
3569
Tejun Heo2da8ca82013-12-05 12:28:04 -05003570static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003571{
Tejun Heo2da8ca82013-12-05 12:28:04 -05003572 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003573
Tejun Heo791badb2013-12-05 12:28:02 -05003574 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
Tejun Heoc2b42d32015-06-24 16:58:23 -07003575 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003576 return 0;
3577}
3578
Tejun Heo182446d2013-08-08 20:11:24 -04003579static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003580 struct cftype *cft, u64 val)
3581{
Tejun Heo182446d2013-08-08 20:11:24 -04003582 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003583
3584 /* cannot set to root cgroup and only 0 and 1 are allowed */
Linus Torvalds14208b02014-06-09 15:03:33 -07003585 if (!css->parent || !((val == 0) || (val == 1)))
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003586 return -EINVAL;
3587
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003588 memcg->oom_kill_disable = val;
KAMEZAWA Hiroyuki4d845eb2010-06-29 15:05:18 -07003589 if (!val)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003590 memcg_oom_recover(memcg);
Johannes Weiner3dae7fe2014-06-04 16:07:01 -07003591
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003592 return 0;
3593}
3594
Tejun Heo52ebea72015-05-22 17:13:37 -04003595#ifdef CONFIG_CGROUP_WRITEBACK
3596
3597struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg)
3598{
3599 return &memcg->cgwb_list;
3600}
3601
Tejun Heo841710a2015-05-22 18:23:33 -04003602static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3603{
3604 return wb_domain_init(&memcg->cgwb_domain, gfp);
3605}
3606
3607static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3608{
3609 wb_domain_exit(&memcg->cgwb_domain);
3610}
3611
Tejun Heo2529bb32015-05-22 18:23:34 -04003612static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3613{
3614 wb_domain_size_changed(&memcg->cgwb_domain);
3615}
3616
Tejun Heo841710a2015-05-22 18:23:33 -04003617struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
3618{
3619 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3620
3621 if (!memcg->css.parent)
3622 return NULL;
3623
3624 return &memcg->cgwb_domain;
3625}
3626
Tejun Heoc2aa7232015-05-22 18:23:35 -04003627/**
3628 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
3629 * @wb: bdi_writeback in question
Tejun Heoc5edf9c2015-09-29 13:04:26 -04003630 * @pfilepages: out parameter for number of file pages
3631 * @pheadroom: out parameter for number of allocatable pages according to memcg
Tejun Heoc2aa7232015-05-22 18:23:35 -04003632 * @pdirty: out parameter for number of dirty pages
3633 * @pwriteback: out parameter for number of pages under writeback
3634 *
Tejun Heoc5edf9c2015-09-29 13:04:26 -04003635 * Determine the numbers of file, headroom, dirty, and writeback pages in
3636 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom
3637 * is a bit more involved.
Tejun Heoc2aa7232015-05-22 18:23:35 -04003638 *
Tejun Heoc5edf9c2015-09-29 13:04:26 -04003639 * A memcg's headroom is "min(max, high) - used". In a hierarchy, the
3640 * effective headroom is the lowest headroom of the memcg itself and
3641 * its ancestors. Note that this doesn't consider the actual amount of
3642 * available memory in the system. The caller should further cap
3643 * *@pheadroom accordingly.
Tejun Heoc2aa7232015-05-22 18:23:35 -04003644 */
Tejun Heoc5edf9c2015-09-29 13:04:26 -04003645void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
3646 unsigned long *pheadroom, unsigned long *pdirty,
3647 unsigned long *pwriteback)
Tejun Heoc2aa7232015-05-22 18:23:35 -04003648{
3649 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3650 struct mem_cgroup *parent;
Tejun Heoc2aa7232015-05-22 18:23:35 -04003651
3652 *pdirty = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_DIRTY);
3653
3654 /* this should eventually include NR_UNSTABLE_NFS */
3655 *pwriteback = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
Tejun Heoc5edf9c2015-09-29 13:04:26 -04003656 *pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
3657 (1 << LRU_ACTIVE_FILE));
3658 *pheadroom = PAGE_COUNTER_MAX;
Tejun Heoc2aa7232015-05-22 18:23:35 -04003659
Tejun Heoc2aa7232015-05-22 18:23:35 -04003660 while ((parent = parent_mem_cgroup(memcg))) {
3661 unsigned long ceiling = min(memcg->memory.limit, memcg->high);
3662 unsigned long used = page_counter_read(&memcg->memory);
3663
Tejun Heoc5edf9c2015-09-29 13:04:26 -04003664 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
Tejun Heoc2aa7232015-05-22 18:23:35 -04003665 memcg = parent;
3666 }
Tejun Heoc2aa7232015-05-22 18:23:35 -04003667}
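/*
 * Worked example (hypothetical numbers): a memcg with a 1G limit, a
 * 512M high setting and 300M in use has 212M of local headroom; if its
 * non-root parent has a 2G limit, no high setting and 1.5G in use
 * (0.5G headroom), the reported *pheadroom is min(212M, 0.5G) = 212M.
 */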
3668
Tejun Heo841710a2015-05-22 18:23:33 -04003669#else /* CONFIG_CGROUP_WRITEBACK */
3670
3671static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3672{
3673 return 0;
3674}
3675
3676static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3677{
3678}
3679
Tejun Heo2529bb32015-05-22 18:23:34 -04003680static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3681{
3682}
3683
Tejun Heo52ebea72015-05-22 17:13:37 -04003684#endif /* CONFIG_CGROUP_WRITEBACK */
3685
Tejun Heo79bd9812013-11-22 18:20:42 -05003686/*
Tejun Heo3bc942f2013-11-22 18:20:44 -05003687 * DO NOT USE IN NEW FILES.
3688 *
3689 * "cgroup.event_control" implementation.
3690 *
3691 * This is way over-engineered. It tries to support fully configurable
3692 * events for each user. Such a level of flexibility is completely
3693 * unnecessary, especially in light of the planned unified hierarchy.
3694 *
3695 * Please deprecate this and replace with something simpler if at all
3696 * possible.
3697 */
3698
3699/*
Tejun Heo79bd9812013-11-22 18:20:42 -05003700 * Unregister event and free resources.
3701 *
3702 * Gets called from workqueue.
3703 */
Tejun Heo3bc942f2013-11-22 18:20:44 -05003704static void memcg_event_remove(struct work_struct *work)
Tejun Heo79bd9812013-11-22 18:20:42 -05003705{
Tejun Heo3bc942f2013-11-22 18:20:44 -05003706 struct mem_cgroup_event *event =
3707 container_of(work, struct mem_cgroup_event, remove);
Tejun Heo59b6f872013-11-22 18:20:43 -05003708 struct mem_cgroup *memcg = event->memcg;
Tejun Heo79bd9812013-11-22 18:20:42 -05003709
3710 remove_wait_queue(event->wqh, &event->wait);
3711
Tejun Heo59b6f872013-11-22 18:20:43 -05003712 event->unregister_event(memcg, event->eventfd);
Tejun Heo79bd9812013-11-22 18:20:42 -05003713
3714 /* Notify userspace the event is going away. */
3715 eventfd_signal(event->eventfd, 1);
3716
3717 eventfd_ctx_put(event->eventfd);
3718 kfree(event);
Tejun Heo59b6f872013-11-22 18:20:43 -05003719 css_put(&memcg->css);
Tejun Heo79bd9812013-11-22 18:20:42 -05003720}
3721
3722/*
3723 * Gets called on POLLHUP on eventfd when user closes it.
3724 *
3725 * Called with wqh->lock held and interrupts disabled.
3726 */
Tejun Heo3bc942f2013-11-22 18:20:44 -05003727static int memcg_event_wake(wait_queue_t *wait, unsigned mode,
3728 int sync, void *key)
Tejun Heo79bd9812013-11-22 18:20:42 -05003729{
Tejun Heo3bc942f2013-11-22 18:20:44 -05003730 struct mem_cgroup_event *event =
3731 container_of(wait, struct mem_cgroup_event, wait);
Tejun Heo59b6f872013-11-22 18:20:43 -05003732 struct mem_cgroup *memcg = event->memcg;
Tejun Heo79bd9812013-11-22 18:20:42 -05003733 unsigned long flags = (unsigned long)key;
3734
3735 if (flags & POLLHUP) {
3736 /*
3737 * If the event has been detached at cgroup removal, we
3738 * can simply return knowing the other side will clean up
3739 * for us.
3740 *
3741 * We can't race against event freeing since the other
3742 * side will require wqh->lock via remove_wait_queue(),
3743 * which we hold.
3744 */
Tejun Heofba94802013-11-22 18:20:43 -05003745 spin_lock(&memcg->event_list_lock);
Tejun Heo79bd9812013-11-22 18:20:42 -05003746 if (!list_empty(&event->list)) {
3747 list_del_init(&event->list);
3748 /*
3749 * We are in atomic context, but cgroup_event_remove()
3750 * We are in atomic context, but memcg_event_remove()
3751 * may sleep, so we have to call it from a workqueue.
3752 schedule_work(&event->remove);
3753 }
Tejun Heofba94802013-11-22 18:20:43 -05003754 spin_unlock(&memcg->event_list_lock);
Tejun Heo79bd9812013-11-22 18:20:42 -05003755 }
3756
3757 return 0;
3758}
3759
Tejun Heo3bc942f2013-11-22 18:20:44 -05003760static void memcg_event_ptable_queue_proc(struct file *file,
Tejun Heo79bd9812013-11-22 18:20:42 -05003761 wait_queue_head_t *wqh, poll_table *pt)
3762{
Tejun Heo3bc942f2013-11-22 18:20:44 -05003763 struct mem_cgroup_event *event =
3764 container_of(pt, struct mem_cgroup_event, pt);
Tejun Heo79bd9812013-11-22 18:20:42 -05003765
3766 event->wqh = wqh;
3767 add_wait_queue(wqh, &event->wait);
3768}
3769
3770/*
Tejun Heo3bc942f2013-11-22 18:20:44 -05003771 * DO NOT USE IN NEW FILES.
3772 *
Tejun Heo79bd9812013-11-22 18:20:42 -05003773 * Parse input and register new cgroup event handler.
3774 *
3775 * Input must be in format '<event_fd> <control_fd> <args>'.
3776 * Interpretation of args is defined by control file implementation.
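 *
 * For example, to be notified when memory usage crosses 50M, userspace
 * creates an eventfd, opens memory.usage_in_bytes, and writes
 * "<event_fd> <usage_in_bytes_fd> 50M" to cgroup.event_control.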
3777 */
Tejun Heo451af502014-05-13 12:16:21 -04003778static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
3779 char *buf, size_t nbytes, loff_t off)
Tejun Heo79bd9812013-11-22 18:20:42 -05003780{
Tejun Heo451af502014-05-13 12:16:21 -04003781 struct cgroup_subsys_state *css = of_css(of);
Tejun Heofba94802013-11-22 18:20:43 -05003782 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Tejun Heo3bc942f2013-11-22 18:20:44 -05003783 struct mem_cgroup_event *event;
Tejun Heo79bd9812013-11-22 18:20:42 -05003784 struct cgroup_subsys_state *cfile_css;
3785 unsigned int efd, cfd;
3786 struct fd efile;
3787 struct fd cfile;
Tejun Heofba94802013-11-22 18:20:43 -05003788 const char *name;
Tejun Heo79bd9812013-11-22 18:20:42 -05003789 char *endp;
3790 int ret;
3791
Tejun Heo451af502014-05-13 12:16:21 -04003792 buf = strstrip(buf);
3793
3794 efd = simple_strtoul(buf, &endp, 10);
Tejun Heo79bd9812013-11-22 18:20:42 -05003795 if (*endp != ' ')
3796 return -EINVAL;
Tejun Heo451af502014-05-13 12:16:21 -04003797 buf = endp + 1;
Tejun Heo79bd9812013-11-22 18:20:42 -05003798
Tejun Heo451af502014-05-13 12:16:21 -04003799 cfd = simple_strtoul(buf, &endp, 10);
Tejun Heo79bd9812013-11-22 18:20:42 -05003800 if ((*endp != ' ') && (*endp != '\0'))
3801 return -EINVAL;
Tejun Heo451af502014-05-13 12:16:21 -04003802 buf = endp + 1;
Tejun Heo79bd9812013-11-22 18:20:42 -05003803
3804 event = kzalloc(sizeof(*event), GFP_KERNEL);
3805 if (!event)
3806 return -ENOMEM;
3807
Tejun Heo59b6f872013-11-22 18:20:43 -05003808 event->memcg = memcg;
Tejun Heo79bd9812013-11-22 18:20:42 -05003809 INIT_LIST_HEAD(&event->list);
Tejun Heo3bc942f2013-11-22 18:20:44 -05003810 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
3811 init_waitqueue_func_entry(&event->wait, memcg_event_wake);
3812 INIT_WORK(&event->remove, memcg_event_remove);
Tejun Heo79bd9812013-11-22 18:20:42 -05003813
3814 efile = fdget(efd);
3815 if (!efile.file) {
3816 ret = -EBADF;
3817 goto out_kfree;
3818 }
3819
3820 event->eventfd = eventfd_ctx_fileget(efile.file);
3821 if (IS_ERR(event->eventfd)) {
3822 ret = PTR_ERR(event->eventfd);
3823 goto out_put_efile;
3824 }
3825
3826 cfile = fdget(cfd);
3827 if (!cfile.file) {
3828 ret = -EBADF;
3829 goto out_put_eventfd;
3830 }
3831
3832 /* the process need read permission on control file */
3833 /* the process needs read permission on the control file */
3834 ret = inode_permission(file_inode(cfile.file), MAY_READ);
3835 if (ret < 0)
3836 goto out_put_cfile;
3837
Tejun Heo79bd9812013-11-22 18:20:42 -05003838 /*
Tejun Heofba94802013-11-22 18:20:43 -05003839 * Determine the event callbacks and set them in @event. This used
3840 * to be done via struct cftype but cgroup core no longer knows
3841 * about these events. The following is crude but the whole thing
3842 * is for compatibility anyway.
Tejun Heo3bc942f2013-11-22 18:20:44 -05003843 *
3844 * DO NOT ADD NEW FILES.
Tejun Heofba94802013-11-22 18:20:43 -05003845 */
Al Virob5830432014-10-31 01:22:04 -04003846 name = cfile.file->f_path.dentry->d_name.name;
Tejun Heofba94802013-11-22 18:20:43 -05003847
3848 if (!strcmp(name, "memory.usage_in_bytes")) {
3849 event->register_event = mem_cgroup_usage_register_event;
3850 event->unregister_event = mem_cgroup_usage_unregister_event;
3851 } else if (!strcmp(name, "memory.oom_control")) {
3852 event->register_event = mem_cgroup_oom_register_event;
3853 event->unregister_event = mem_cgroup_oom_unregister_event;
3854 } else if (!strcmp(name, "memory.pressure_level")) {
3855 event->register_event = vmpressure_register_event;
3856 event->unregister_event = vmpressure_unregister_event;
3857 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
Tejun Heo347c4a82013-11-22 18:20:43 -05003858 event->register_event = memsw_cgroup_usage_register_event;
3859 event->unregister_event = memsw_cgroup_usage_unregister_event;
Tejun Heofba94802013-11-22 18:20:43 -05003860 } else {
3861 ret = -EINVAL;
3862 goto out_put_cfile;
3863 }
3864
3865 /*
Tejun Heob5557c42013-11-22 18:20:42 -05003866 * Verify @cfile should belong to @css. Also, remaining events are
3867 * automatically removed on cgroup destruction but the removal is
3868 * asynchronous, so take an extra ref on @css.
Tejun Heo79bd9812013-11-22 18:20:42 -05003869 */
Al Virob5830432014-10-31 01:22:04 -04003870 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
Tejun Heoec903c02014-05-13 12:11:01 -04003871 &memory_cgrp_subsys);
Tejun Heo79bd9812013-11-22 18:20:42 -05003872 ret = -EINVAL;
Tejun Heo5a17f542014-02-11 11:52:47 -05003873 if (IS_ERR(cfile_css))
Tejun Heo79bd9812013-11-22 18:20:42 -05003874 goto out_put_cfile;
Tejun Heo5a17f542014-02-11 11:52:47 -05003875 if (cfile_css != css) {
3876 css_put(cfile_css);
3877 goto out_put_cfile;
3878 }
Tejun Heo79bd9812013-11-22 18:20:42 -05003879
Tejun Heo451af502014-05-13 12:16:21 -04003880 ret = event->register_event(memcg, event->eventfd, buf);
Tejun Heo79bd9812013-11-22 18:20:42 -05003881 if (ret)
3882 goto out_put_css;
3883
3884 efile.file->f_op->poll(efile.file, &event->pt);
3885
Tejun Heofba94802013-11-22 18:20:43 -05003886 spin_lock(&memcg->event_list_lock);
3887 list_add(&event->list, &memcg->event_list);
3888 spin_unlock(&memcg->event_list_lock);
Tejun Heo79bd9812013-11-22 18:20:42 -05003889
3890 fdput(cfile);
3891 fdput(efile);
3892
Tejun Heo451af502014-05-13 12:16:21 -04003893 return nbytes;
Tejun Heo79bd9812013-11-22 18:20:42 -05003894
3895out_put_css:
Tejun Heob5557c42013-11-22 18:20:42 -05003896 css_put(css);
Tejun Heo79bd9812013-11-22 18:20:42 -05003897out_put_cfile:
3898 fdput(cfile);
3899out_put_eventfd:
3900 eventfd_ctx_put(event->eventfd);
3901out_put_efile:
3902 fdput(efile);
3903out_kfree:
3904 kfree(event);
3905
3906 return ret;
3907}
3908
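/* Control files exposed on the cgroup v1 (legacy) hierarchy. */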
Johannes Weiner241994ed2015-02-11 15:26:06 -08003909static struct cftype mem_cgroup_legacy_files[] = {
Balbir Singh8cdea7c2008-02-07 00:13:50 -08003910 {
Balbir Singh0eea1032008-02-07 00:13:57 -08003911 .name = "usage_in_bytes",
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003912 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
Tejun Heo791badb2013-12-05 12:28:02 -05003913 .read_u64 = mem_cgroup_read_u64,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08003914 },
3915 {
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07003916 .name = "max_usage_in_bytes",
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003917 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
Tejun Heo6770c642014-05-13 12:16:21 -04003918 .write = mem_cgroup_reset,
Tejun Heo791badb2013-12-05 12:28:02 -05003919 .read_u64 = mem_cgroup_read_u64,
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07003920 },
3921 {
Balbir Singh0eea1032008-02-07 00:13:57 -08003922 .name = "limit_in_bytes",
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003923 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
Tejun Heo451af502014-05-13 12:16:21 -04003924 .write = mem_cgroup_write,
Tejun Heo791badb2013-12-05 12:28:02 -05003925 .read_u64 = mem_cgroup_read_u64,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08003926 },
3927 {
Balbir Singh296c81d2009-09-23 15:56:36 -07003928 .name = "soft_limit_in_bytes",
3929 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
Tejun Heo451af502014-05-13 12:16:21 -04003930 .write = mem_cgroup_write,
Tejun Heo791badb2013-12-05 12:28:02 -05003931 .read_u64 = mem_cgroup_read_u64,
Balbir Singh296c81d2009-09-23 15:56:36 -07003932 },
3933 {
Balbir Singh8cdea7c2008-02-07 00:13:50 -08003934 .name = "failcnt",
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003935 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
Tejun Heo6770c642014-05-13 12:16:21 -04003936 .write = mem_cgroup_reset,
Tejun Heo791badb2013-12-05 12:28:02 -05003937 .read_u64 = mem_cgroup_read_u64,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08003938 },
Balbir Singh8697d332008-02-07 00:13:59 -08003939 {
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08003940 .name = "stat",
Tejun Heo2da8ca82013-12-05 12:28:04 -05003941 .seq_show = memcg_stat_show,
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08003942 },
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003943 {
3944 .name = "force_empty",
Tejun Heo6770c642014-05-13 12:16:21 -04003945 .write = mem_cgroup_force_empty_write,
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003946 },
Balbir Singh18f59ea2009-01-07 18:08:07 -08003947 {
3948 .name = "use_hierarchy",
3949 .write_u64 = mem_cgroup_hierarchy_write,
3950 .read_u64 = mem_cgroup_hierarchy_read,
3951 },
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003952 {
Tejun Heo3bc942f2013-11-22 18:20:44 -05003953 .name = "cgroup.event_control", /* XXX: for compat */
Tejun Heo451af502014-05-13 12:16:21 -04003954 .write = memcg_write_event_control,
Tejun Heo7dbdb192015-09-18 17:54:23 -04003955 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
Tejun Heo79bd9812013-11-22 18:20:42 -05003956 },
3957 {
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003958 .name = "swappiness",
3959 .read_u64 = mem_cgroup_swappiness_read,
3960 .write_u64 = mem_cgroup_swappiness_write,
3961 },
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003962 {
3963 .name = "move_charge_at_immigrate",
3964 .read_u64 = mem_cgroup_move_charge_read,
3965 .write_u64 = mem_cgroup_move_charge_write,
3966 },
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003967 {
3968 .name = "oom_control",
Tejun Heo2da8ca82013-12-05 12:28:04 -05003969 .seq_show = mem_cgroup_oom_control_read,
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003970 .write_u64 = mem_cgroup_oom_control_write,
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003971 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
3972 },
Anton Vorontsov70ddf632013-04-29 15:08:31 -07003973 {
3974 .name = "pressure_level",
Anton Vorontsov70ddf632013-04-29 15:08:31 -07003975 },
Ying Han406eb0c2011-05-26 16:25:37 -07003976#ifdef CONFIG_NUMA
3977 {
3978 .name = "numa_stat",
Tejun Heo2da8ca82013-12-05 12:28:04 -05003979 .seq_show = memcg_numa_stat_show,
Ying Han406eb0c2011-05-26 16:25:37 -07003980 },
3981#endif
Glauber Costa510fc4e2012-12-18 14:21:47 -08003982 {
3983 .name = "kmem.limit_in_bytes",
3984 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
Tejun Heo451af502014-05-13 12:16:21 -04003985 .write = mem_cgroup_write,
Tejun Heo791badb2013-12-05 12:28:02 -05003986 .read_u64 = mem_cgroup_read_u64,
Glauber Costa510fc4e2012-12-18 14:21:47 -08003987 },
3988 {
3989 .name = "kmem.usage_in_bytes",
3990 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
Tejun Heo791badb2013-12-05 12:28:02 -05003991 .read_u64 = mem_cgroup_read_u64,
Glauber Costa510fc4e2012-12-18 14:21:47 -08003992 },
3993 {
3994 .name = "kmem.failcnt",
3995 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
Tejun Heo6770c642014-05-13 12:16:21 -04003996 .write = mem_cgroup_reset,
Tejun Heo791badb2013-12-05 12:28:02 -05003997 .read_u64 = mem_cgroup_read_u64,
Glauber Costa510fc4e2012-12-18 14:21:47 -08003998 },
3999 {
4000 .name = "kmem.max_usage_in_bytes",
4001 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
Tejun Heo6770c642014-05-13 12:16:21 -04004002 .write = mem_cgroup_reset,
Tejun Heo791badb2013-12-05 12:28:02 -05004003 .read_u64 = mem_cgroup_read_u64,
Glauber Costa510fc4e2012-12-18 14:21:47 -08004004 },
Glauber Costa749c5412012-12-18 14:23:01 -08004005#ifdef CONFIG_SLABINFO
4006 {
4007 .name = "kmem.slabinfo",
Tejun Heobc2791f2017-02-22 15:41:21 -08004008 .seq_start = memcg_slab_start,
4009 .seq_next = memcg_slab_next,
4010 .seq_stop = memcg_slab_stop,
Vladimir Davydovb0475012014-12-10 15:44:19 -08004011 .seq_show = memcg_slab_show,
Glauber Costa749c5412012-12-18 14:23:01 -08004012 },
4013#endif
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08004014 {
4015 .name = "kmem.tcp.limit_in_bytes",
4016 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
4017 .write = mem_cgroup_write,
4018 .read_u64 = mem_cgroup_read_u64,
4019 },
4020 {
4021 .name = "kmem.tcp.usage_in_bytes",
4022 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
4023 .read_u64 = mem_cgroup_read_u64,
4024 },
4025 {
4026 .name = "kmem.tcp.failcnt",
4027 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
4028 .write = mem_cgroup_reset,
4029 .read_u64 = mem_cgroup_read_u64,
4030 },
4031 {
4032 .name = "kmem.tcp.max_usage_in_bytes",
4033 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
4034 .write = mem_cgroup_reset,
4035 .read_u64 = mem_cgroup_read_u64,
4036 },
Tejun Heo6bc10342012-04-01 12:09:55 -07004037 { }, /* terminate */
Tejun Heoaf36f902012-04-01 12:09:55 -07004038};
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004039
Johannes Weiner73f576c2016-07-20 15:44:57 -07004040/*
4041 * Private memory cgroup IDR
4042 *
4043 * Swap-out records and page cache shadow entries need to store memcg
4044 * references in constrained space, so we maintain an ID space that is
4045 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
4046 * memory-controlled cgroups to 64k.
4047 *
4048 * However, there usually are many references to the offline CSS after
4049 * the cgroup has been destroyed, such as page cache or reclaimable
4050 * slab objects, that don't need to hang on to the ID. We want to keep
4051 * those dead CSS from occupying IDs, or we might quickly exhaust the
4052 * relatively small ID space and prevent the creation of new cgroups
4053 * even when there are much fewer than 64k cgroups - possibly none.
4054 *
4055 * Maintain a private 16-bit ID space for memcg, and allow the ID to
4056 * be freed and recycled when it's no longer needed, which is usually
4057 * when the CSS is offlined.
4058 *
4059 * The only exception to that are records of swapped out tmpfs/shmem
4060 * pages that need to be attributed to live ancestors on swapin. But
4061 * those references are manageable from userspace.
4062 */
4063
4064static DEFINE_IDR(mem_cgroup_idr);
4065
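/*
 * The ID itself is reference counted: css_online takes the initial
 * reference, and code that stores memcg IDs (notably the swap-out
 * records mentioned above) takes extra ones, so the ID (and the CSS it
 * pins) stays valid until the last such record is gone.
 */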
Vladimir Davydov615d66c2016-08-11 15:33:03 -07004066static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n)
Johannes Weiner73f576c2016-07-20 15:44:57 -07004067{
Vladimir Davydov58fa2a52016-10-07 16:57:29 -07004068 VM_BUG_ON(atomic_read(&memcg->id.ref) <= 0);
Vladimir Davydov615d66c2016-08-11 15:33:03 -07004069 atomic_add(n, &memcg->id.ref);
Johannes Weiner73f576c2016-07-20 15:44:57 -07004070}
4071
Vladimir Davydov615d66c2016-08-11 15:33:03 -07004072static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
Johannes Weiner73f576c2016-07-20 15:44:57 -07004073{
Vladimir Davydov58fa2a52016-10-07 16:57:29 -07004074 VM_BUG_ON(atomic_read(&memcg->id.ref) < n);
Vladimir Davydov615d66c2016-08-11 15:33:03 -07004075 if (atomic_sub_and_test(n, &memcg->id.ref)) {
Johannes Weiner73f576c2016-07-20 15:44:57 -07004076 idr_remove(&mem_cgroup_idr, memcg->id.id);
4077 memcg->id.id = 0;
4078
4079 /* Memcg ID pins CSS */
4080 css_put(&memcg->css);
4081 }
4082}
4083
Vladimir Davydov615d66c2016-08-11 15:33:03 -07004084static inline void mem_cgroup_id_get(struct mem_cgroup *memcg)
4085{
4086 mem_cgroup_id_get_many(memcg, 1);
4087}
4088
4089static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
4090{
4091 mem_cgroup_id_put_many(memcg, 1);
4092}
4093
Johannes Weiner73f576c2016-07-20 15:44:57 -07004094/**
4095 * mem_cgroup_from_id - look up a memcg from a memcg id
4096 * @id: the memcg id to look up
4097 *
4098 * Caller must hold rcu_read_lock().
4099 */
4100struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
4101{
4102 WARN_ON_ONCE(!rcu_read_lock_held());
4103 return idr_find(&mem_cgroup_idr, id);
4104}
4105
Mel Gormanef8f2322016-07-28 15:46:05 -07004106static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08004107{
4108 struct mem_cgroup_per_node *pn;
Mel Gormanef8f2322016-07-28 15:46:05 -07004109 int tmp = node;
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004110 /*
4111 * This routine is called against possible nodes.
4112 * But it's a BUG to call kmalloc() against an offline node.
4113 *
4114 * TODO: this routine can waste much memory for nodes which will
4115 * never be onlined. It's better to use memory hotplug callback
4116 * function.
4117 */
KAMEZAWA Hiroyuki41e33552008-04-08 17:41:54 -07004118 if (!node_state(node, N_NORMAL_MEMORY))
4119 tmp = -1;
Jesper Juhl17295c82011-01-13 15:47:42 -08004120 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08004121 if (!pn)
4122 return 1;
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004123
Mel Gormanef8f2322016-07-28 15:46:05 -07004124 lruvec_init(&pn->lruvec);
4125 pn->usage_in_excess = 0;
4126 pn->on_tree = false;
4127 pn->memcg = memcg;
4128
Johannes Weiner54f72fe2013-07-08 15:59:49 -07004129 memcg->nodeinfo[node] = pn;
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08004130 return 0;
4131}
4132
Mel Gormanef8f2322016-07-28 15:46:05 -07004133static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004134{
Johannes Weiner54f72fe2013-07-08 15:59:49 -07004135 kfree(memcg->nodeinfo[node]);
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004136}
4137
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004138static void mem_cgroup_free(struct mem_cgroup *memcg)
4139{
4140 int node;
4141
4142 memcg_wb_domain_exit(memcg);
4143 for_each_node(node)
Mel Gormanef8f2322016-07-28 15:46:05 -07004144 free_mem_cgroup_per_node_info(memcg, node);
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004145 free_percpu(memcg->stat);
4146 kfree(memcg);
4147}
4148
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07004149static struct mem_cgroup *mem_cgroup_alloc(void)
4150{
Hugh Dickinsd79154b2012-03-21 16:34:18 -07004151 struct mem_cgroup *memcg;
Vladimir Davydov8ff69e22014-01-23 15:52:52 -08004152 size_t size;
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004153 int node;
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07004154
Vladimir Davydov8ff69e22014-01-23 15:52:52 -08004155 size = sizeof(struct mem_cgroup);
4156 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07004157
Vladimir Davydov8ff69e22014-01-23 15:52:52 -08004158 memcg = kzalloc(size, GFP_KERNEL);
Hugh Dickinsd79154b2012-03-21 16:34:18 -07004159 if (!memcg)
Dan Carpentere7bbcdf2010-03-23 13:35:12 -07004160 return NULL;
4161
Johannes Weiner73f576c2016-07-20 15:44:57 -07004162 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
4163 1, MEM_CGROUP_ID_MAX,
4164 GFP_KERNEL);
4165 if (memcg->id.id < 0)
4166 goto fail;
4167
Hugh Dickinsd79154b2012-03-21 16:34:18 -07004168 memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
4169 if (!memcg->stat)
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004170 goto fail;
Pavel Emelianov78fb7462008-02-07 00:13:51 -08004171
Bob Liu3ed28fa2012-01-12 17:19:04 -08004172 for_each_node(node)
Mel Gormanef8f2322016-07-28 15:46:05 -07004173 if (alloc_mem_cgroup_per_node_info(memcg, node))
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004174 goto fail;
Balbir Singhf64c3f52009-09-23 15:56:37 -07004175
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004176 if (memcg_wb_domain_init(memcg, GFP_KERNEL))
4177 goto fail;
Balbir Singh28dbc4b2009-01-07 18:08:05 -08004178
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08004179 INIT_WORK(&memcg->high_work, high_work_func);
Glauber Costad142e3e2013-02-22 16:34:52 -08004180 memcg->last_scanned_node = MAX_NUMNODES;
4181 INIT_LIST_HEAD(&memcg->oom_notify);
Glauber Costad142e3e2013-02-22 16:34:52 -08004182 mutex_init(&memcg->thresholds_lock);
4183 spin_lock_init(&memcg->move_lock);
Anton Vorontsov70ddf632013-04-29 15:08:31 -07004184 vmpressure_init(&memcg->vmpressure);
Tejun Heofba94802013-11-22 18:20:43 -05004185 INIT_LIST_HEAD(&memcg->event_list);
4186 spin_lock_init(&memcg->event_list_lock);
Johannes Weinerd886f4e2016-01-20 15:02:47 -08004187 memcg->socket_pressure = jiffies;
Johannes Weiner127424c2016-01-20 15:02:32 -08004188#ifndef CONFIG_SLOB
Vladimir Davydov900a38f2014-12-12 16:55:10 -08004189 memcg->kmemcg_id = -1;
Vladimir Davydov900a38f2014-12-12 16:55:10 -08004190#endif
Tejun Heo52ebea72015-05-22 17:13:37 -04004191#ifdef CONFIG_CGROUP_WRITEBACK
4192 INIT_LIST_HEAD(&memcg->cgwb_list);
4193#endif
Johannes Weiner73f576c2016-07-20 15:44:57 -07004194 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004195 return memcg;
4196fail:
Johannes Weiner73f576c2016-07-20 15:44:57 -07004197 if (memcg->id.id > 0)
4198 idr_remove(&mem_cgroup_idr, memcg->id.id);
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004199 mem_cgroup_free(memcg);
4200 return NULL;
Glauber Costad142e3e2013-02-22 16:34:52 -08004201}
4202
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004203static struct cgroup_subsys_state * __ref
4204mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
Glauber Costad142e3e2013-02-22 16:34:52 -08004205{
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004206 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
4207 struct mem_cgroup *memcg;
4208 long error = -ENOMEM;
Glauber Costad142e3e2013-02-22 16:34:52 -08004209
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004210 memcg = mem_cgroup_alloc();
4211 if (!memcg)
4212 return ERR_PTR(error);
Li Zefan4219b2d2013-09-23 16:56:29 +08004213
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004214 memcg->high = PAGE_COUNTER_MAX;
4215 memcg->soft_limit = PAGE_COUNTER_MAX;
4216 if (parent) {
4217 memcg->swappiness = mem_cgroup_swappiness(parent);
4218 memcg->oom_kill_disable = parent->oom_kill_disable;
4219 }
4220 if (parent && parent->use_hierarchy) {
4221 memcg->use_hierarchy = true;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004222 page_counter_init(&memcg->memory, &parent->memory);
Vladimir Davydov37e84352016-01-20 15:02:56 -08004223 page_counter_init(&memcg->swap, &parent->swap);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004224 page_counter_init(&memcg->memsw, &parent->memsw);
4225 page_counter_init(&memcg->kmem, &parent->kmem);
Johannes Weiner0db15292016-01-20 15:02:50 -08004226 page_counter_init(&memcg->tcpmem, &parent->tcpmem);
Balbir Singh18f59ea2009-01-07 18:08:07 -08004227 } else {
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004228 page_counter_init(&memcg->memory, NULL);
Vladimir Davydov37e84352016-01-20 15:02:56 -08004229 page_counter_init(&memcg->swap, NULL);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004230 page_counter_init(&memcg->memsw, NULL);
4231 page_counter_init(&memcg->kmem, NULL);
Johannes Weiner0db15292016-01-20 15:02:50 -08004232 page_counter_init(&memcg->tcpmem, NULL);
Tejun Heo8c7f6ed2012-09-13 12:20:58 -07004233 /*
4234 * Deeper hierarchy with use_hierarchy == false doesn't make
4235 * much sense so let the cgroup subsystem know about this
4236 * unfortunate state in our controller.
4237 */
Glauber Costad142e3e2013-02-22 16:34:52 -08004238 if (parent != root_mem_cgroup)
Tejun Heo073219e2014-02-08 10:36:58 -05004239 memory_cgrp_subsys.broken_hierarchy = true;
Balbir Singh18f59ea2009-01-07 18:08:07 -08004240 }
Vladimir Davydovd6441632014-01-23 15:53:09 -08004241
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004242 /* The following stuff does not apply to the root */
4243 if (!parent) {
4244 root_mem_cgroup = memcg;
4245 return &memcg->css;
4246 }
4247
Vladimir Davydovb313aee2016-03-17 14:18:27 -07004248 error = memcg_online_kmem(memcg);
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004249 if (error)
4250 goto fail;
Johannes Weiner127424c2016-01-20 15:02:32 -08004251
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08004252 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
Johannes Weineref129472016-01-14 15:21:34 -08004253 static_branch_inc(&memcg_sockets_enabled_key);
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08004254
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004255 return &memcg->css;
4256fail:
4257 mem_cgroup_free(memcg);
Tejun Heoea3a9642016-06-24 14:49:58 -07004258 return ERR_PTR(-ENOMEM);
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004259}
4260
Johannes Weiner73f576c2016-07-20 15:44:57 -07004261static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004262{
Vladimir Davydov58fa2a52016-10-07 16:57:29 -07004263 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4264
Johannes Weiner73f576c2016-07-20 15:44:57 -07004265 /* Online state pins memcg ID, memcg ID pins CSS */
Vladimir Davydov58fa2a52016-10-07 16:57:29 -07004266 atomic_set(&memcg->id.ref, 1);
Johannes Weiner73f576c2016-07-20 15:44:57 -07004267 css_get(css);
Johannes Weiner2f7dd7a2014-10-02 16:16:57 -07004268 return 0;
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004269}
4270
Tejun Heoeb954192013-08-08 20:11:23 -04004271static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
KAMEZAWA Hiroyukidf878fb2008-02-07 00:14:28 -08004272{
Tejun Heoeb954192013-08-08 20:11:23 -04004273 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Tejun Heo3bc942f2013-11-22 18:20:44 -05004274 struct mem_cgroup_event *event, *tmp;
Tejun Heo79bd9812013-11-22 18:20:42 -05004275
4276 /*
4277 * Unregister events and notify userspace.
4278 * Notify userspace about cgroup removing only after rmdir of cgroup
4279 * directory to avoid race between userspace and kernelspace.
4280 */
Tejun Heofba94802013-11-22 18:20:43 -05004281 spin_lock(&memcg->event_list_lock);
4282 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
Tejun Heo79bd9812013-11-22 18:20:42 -05004283 list_del_init(&event->list);
4284 schedule_work(&event->remove);
4285 }
Tejun Heofba94802013-11-22 18:20:43 -05004286 spin_unlock(&memcg->event_list_lock);
KAMEZAWA Hiroyukiec64f512009-04-02 16:57:26 -07004287
Johannes Weiner567e9ab2016-01-20 15:02:24 -08004288 memcg_offline_kmem(memcg);
Tejun Heo52ebea72015-05-22 17:13:37 -04004289 wb_memcg_offline(memcg);
Johannes Weiner73f576c2016-07-20 15:44:57 -07004290
4291 mem_cgroup_id_put(memcg);
KAMEZAWA Hiroyukidf878fb2008-02-07 00:14:28 -08004292}
4293
Vladimir Davydov6df38682015-12-29 14:54:10 -08004294static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
4295{
4296 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4297
4298 invalidate_reclaim_iterators(memcg);
4299}
4300
Tejun Heoeb954192013-08-08 20:11:23 -04004301static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004302{
Tejun Heoeb954192013-08-08 20:11:23 -04004303 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Daisuke Nishimurac268e992009-01-15 13:51:13 -08004304
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08004305 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
Johannes Weineref129472016-01-14 15:21:34 -08004306 static_branch_dec(&memcg_sockets_enabled_key);
Johannes Weiner3893e302016-01-20 15:02:29 -08004307
Johannes Weiner0db15292016-01-20 15:02:50 -08004308 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08004309 static_branch_dec(&memcg_sockets_enabled_key);
Johannes Weiner3893e302016-01-20 15:02:29 -08004310
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004311 vmpressure_cleanup(&memcg->vmpressure);
4312 cancel_work_sync(&memcg->high_work);
4313 mem_cgroup_remove_from_trees(memcg);
Johannes Weinerd886f4e2016-01-20 15:02:47 -08004314 memcg_free_kmem(memcg);
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004315 mem_cgroup_free(memcg);
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004316}
4317
Tejun Heo1ced9532014-07-08 18:02:57 -04004318/**
4319 * mem_cgroup_css_reset - reset the states of a mem_cgroup
4320 * @css: the target css
4321 *
4322 * Reset the states of the mem_cgroup associated with @css. This is
4323 * invoked when the userland requests disabling on the default hierarchy
4324 * but the memcg is pinned through dependency. The memcg should stop
4325 * applying policies and should revert to the vanilla state as it may be
4326 * made visible again.
4327 *
4328 * The current implementation only resets the essential configurations.
4329 * This needs to be expanded to cover all the visible parts.
4330 */
4331static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
4332{
4333 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4334
Vladimir Davydovd334c9bc2016-03-17 14:19:38 -07004335 page_counter_limit(&memcg->memory, PAGE_COUNTER_MAX);
4336 page_counter_limit(&memcg->swap, PAGE_COUNTER_MAX);
4337 page_counter_limit(&memcg->memsw, PAGE_COUNTER_MAX);
4338 page_counter_limit(&memcg->kmem, PAGE_COUNTER_MAX);
4339 page_counter_limit(&memcg->tcpmem, PAGE_COUNTER_MAX);
Johannes Weiner241994ed2015-02-11 15:26:06 -08004340 memcg->low = 0;
4341 memcg->high = PAGE_COUNTER_MAX;
Johannes Weiner24d404d2015-01-08 14:32:35 -08004342 memcg->soft_limit = PAGE_COUNTER_MAX;
Tejun Heo2529bb32015-05-22 18:23:34 -04004343 memcg_wb_domain_size_changed(memcg);
Tejun Heo1ced9532014-07-08 18:02:57 -04004344}
4345
Daisuke Nishimura02491442010-03-10 15:22:17 -08004346#ifdef CONFIG_MMU
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004347/* Handlers for move charge at task migration. */
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004348static int mem_cgroup_do_precharge(unsigned long count)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004349{
Johannes Weiner05b84302014-08-06 16:05:59 -07004350 int ret;
Johannes Weiner9476db92014-08-06 16:05:55 -07004351
Mel Gormand0164ad2015-11-06 16:28:21 -08004352 /* Try a single bulk charge without reclaim first, kswapd may wake */
4353 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
Johannes Weiner9476db92014-08-06 16:05:55 -07004354 if (!ret) {
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004355 mc.precharge += count;
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004356 return ret;
4357 }
Johannes Weiner9476db92014-08-06 16:05:55 -07004358
David Rientjes36745342017-01-24 15:18:10 -08004359 /* Try charges one by one with reclaim, but do not retry */
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004360 while (count--) {
David Rientjes36745342017-01-24 15:18:10 -08004361 ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
KAMEZAWA Hiroyuki38c5d722012-01-12 17:19:01 -08004362 if (ret)
KAMEZAWA Hiroyuki38c5d722012-01-12 17:19:01 -08004363 return ret;
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004364 mc.precharge++;
Johannes Weiner9476db92014-08-06 16:05:55 -07004365 cond_resched();
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004366 }
Johannes Weiner9476db92014-08-06 16:05:55 -07004367 return 0;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004368}
4369
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004370union mc_target {
4371 struct page *page;
Daisuke Nishimura02491442010-03-10 15:22:17 -08004372 swp_entry_t ent;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004373};
4374
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004375enum mc_target_type {
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07004376 MC_TARGET_NONE = 0,
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004377 MC_TARGET_PAGE,
Daisuke Nishimura02491442010-03-10 15:22:17 -08004378 MC_TARGET_SWAP,
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004379};
4380
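/*
 * The mc_handle_*() helpers below resolve what a pte refers to for the
 * move-charge scan: a present page, a swapped-out page, or a page-cache
 * page. They honour the MOVE_ANON/MOVE_FILE flags and return the page
 * with a reference held (or record the swap entry in *entry).
 */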
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004381static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
4382 unsigned long addr, pte_t ptent)
4383{
4384 struct page *page = vm_normal_page(vma, addr, ptent);
4385
4386 if (!page || !page_mapped(page))
4387 return NULL;
4388 if (PageAnon(page)) {
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08004389 if (!(mc.flags & MOVE_ANON))
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004390 return NULL;
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08004391 } else {
4392 if (!(mc.flags & MOVE_FILE))
4393 return NULL;
4394 }
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004395 if (!get_page_unless_zero(page))
4396 return NULL;
4397
4398 return page;
4399}
4400
KAMEZAWA Hiroyuki4b913552012-05-29 15:06:51 -07004401#ifdef CONFIG_SWAP
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004402static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
Li RongQing48406ef2016-07-26 15:22:14 -07004403 pte_t ptent, swp_entry_t *entry)
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004404{
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004405 struct page *page = NULL;
4406 swp_entry_t ent = pte_to_swp_entry(ptent);
4407
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08004408 if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004409 return NULL;
KAMEZAWA Hiroyuki4b913552012-05-29 15:06:51 -07004410 /*
4411 * Because lookup_swap_cache() updates some statistics counter,
4412 * we call find_get_page() with swapper_space directly.
4413 */
Huang Yingf6ab1f72016-10-07 17:00:21 -07004414 page = find_get_page(swap_address_space(ent), swp_offset(ent));
Johannes Weiner7941d212016-01-14 15:21:23 -08004415 if (do_memsw_account())
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004416 entry->val = ent.val;
4417
4418 return page;
4419}
KAMEZAWA Hiroyuki4b913552012-05-29 15:06:51 -07004420#else
4421static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
Li RongQing48406ef2016-07-26 15:22:14 -07004422 pte_t ptent, swp_entry_t *entry)
KAMEZAWA Hiroyuki4b913552012-05-29 15:06:51 -07004423{
4424 return NULL;
4425}
4426#endif
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004427
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004428static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
4429 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4430{
4431 struct page *page = NULL;
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004432 struct address_space *mapping;
4433 pgoff_t pgoff;
4434
4435 if (!vma->vm_file) /* anonymous vma */
4436 return NULL;
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08004437 if (!(mc.flags & MOVE_FILE))
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004438 return NULL;
4439
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004440 mapping = vma->vm_file->f_mapping;
Kirill A. Shutemov0661a332015-02-10 14:10:04 -08004441 pgoff = linear_page_index(vma, addr);
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004442
4443 /* page is moved even if it's not RSS of this task (page-faulted). */
Hugh Dickinsaa3b1892011-08-03 16:21:24 -07004444#ifdef CONFIG_SWAP
4445 /* shmem/tmpfs may report page out on swap: account for that too. */
Johannes Weiner139b6a62014-05-06 12:50:05 -07004446 if (shmem_mapping(mapping)) {
4447 page = find_get_entry(mapping, pgoff);
4448 if (radix_tree_exceptional_entry(page)) {
4449 swp_entry_t swp = radix_to_swp_entry(page);
Johannes Weiner7941d212016-01-14 15:21:23 -08004450 if (do_memsw_account())
Johannes Weiner139b6a62014-05-06 12:50:05 -07004451 *entry = swp;
Huang Yingf6ab1f72016-10-07 17:00:21 -07004452 page = find_get_page(swap_address_space(swp),
4453 swp_offset(swp));
Johannes Weiner139b6a62014-05-06 12:50:05 -07004454 }
4455 } else
4456 page = find_get_page(mapping, pgoff);
4457#else
4458 page = find_get_page(mapping, pgoff);
Hugh Dickinsaa3b1892011-08-03 16:21:24 -07004459#endif
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004460 return page;
4461}
4462
Chen Gangb1b0dea2015-04-14 15:47:35 -07004463/**
4464 * mem_cgroup_move_account - move account of the page
4465 * @page: the page
Li RongQing25843c22016-07-26 15:26:56 -07004466 * @compound: charge the page as compound or small page
Chen Gangb1b0dea2015-04-14 15:47:35 -07004467 * @from: mem_cgroup which the page is moved from.
4468 * @to: mem_cgroup which the page is moved to. @from != @to.
4469 *
Kirill A. Shutemov3ac808f2016-01-15 16:53:07 -08004470 * The caller must make sure the page is not on LRU (isolate_page() is useful.)
Chen Gangb1b0dea2015-04-14 15:47:35 -07004471 *
4472 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
4473 * from old cgroup.
4474 */
4475static int mem_cgroup_move_account(struct page *page,
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08004476 bool compound,
Chen Gangb1b0dea2015-04-14 15:47:35 -07004477 struct mem_cgroup *from,
4478 struct mem_cgroup *to)
4479{
4480 unsigned long flags;
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08004481 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
Chen Gangb1b0dea2015-04-14 15:47:35 -07004482 int ret;
Greg Thelenc4843a72015-05-22 17:13:16 -04004483 bool anon;
Chen Gangb1b0dea2015-04-14 15:47:35 -07004484
4485 VM_BUG_ON(from == to);
4486 VM_BUG_ON_PAGE(PageLRU(page), page);
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08004487 VM_BUG_ON(compound && !PageTransHuge(page));
Chen Gangb1b0dea2015-04-14 15:47:35 -07004488
4489 /*
Johannes Weiner6a93ca82016-03-15 14:57:19 -07004490 * Prevent mem_cgroup_migrate() from looking at
Hugh Dickins45637ba2015-11-05 18:49:40 -08004491 * page->mem_cgroup of its source page while we change it.
Chen Gangb1b0dea2015-04-14 15:47:35 -07004492 */
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08004493 ret = -EBUSY;
Chen Gangb1b0dea2015-04-14 15:47:35 -07004494 if (!trylock_page(page))
4495 goto out;
4496
4497 ret = -EINVAL;
4498 if (page->mem_cgroup != from)
4499 goto out_unlock;
4500
Greg Thelenc4843a72015-05-22 17:13:16 -04004501 anon = PageAnon(page);
4502
Chen Gangb1b0dea2015-04-14 15:47:35 -07004503 spin_lock_irqsave(&from->move_lock, flags);
4504
Greg Thelenc4843a72015-05-22 17:13:16 -04004505 if (!anon && page_mapped(page)) {
Chen Gangb1b0dea2015-04-14 15:47:35 -07004506 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
4507 nr_pages);
4508 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
4509 nr_pages);
4510 }
4511
Greg Thelenc4843a72015-05-22 17:13:16 -04004512 /*
4513	 * move_lock is grabbed above and the caller has set from->moving_account,
4514	 * so mem_cgroup_update_page_stat() will serialize updates to PageDirty.
4515	 * Hence the mapping should be stable for dirty pages.
4516 */
4517 if (!anon && PageDirty(page)) {
4518 struct address_space *mapping = page_mapping(page);
4519
4520 if (mapping_cap_account_dirty(mapping)) {
4521 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_DIRTY],
4522 nr_pages);
4523 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_DIRTY],
4524 nr_pages);
4525 }
4526 }
4527
Chen Gangb1b0dea2015-04-14 15:47:35 -07004528 if (PageWriteback(page)) {
4529 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK],
4530 nr_pages);
4531 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK],
4532 nr_pages);
4533 }
4534
4535 /*
4536 * It is safe to change page->mem_cgroup here because the page
4537 * is referenced, charged, and isolated - we can't race with
4538 * uncharging, charging, migration, or LRU putback.
4539 */
4540
4541 /* caller should have done css_get */
4542 page->mem_cgroup = to;
4543 spin_unlock_irqrestore(&from->move_lock, flags);
4544
4545 ret = 0;
4546
4547 local_irq_disable();
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08004548 mem_cgroup_charge_statistics(to, page, compound, nr_pages);
Chen Gangb1b0dea2015-04-14 15:47:35 -07004549 memcg_check_events(to, page);
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08004550 mem_cgroup_charge_statistics(from, page, compound, -nr_pages);
Chen Gangb1b0dea2015-04-14 15:47:35 -07004551 memcg_check_events(from, page);
4552 local_irq_enable();
4553out_unlock:
4554 unlock_page(page);
4555out:
4556 return ret;
4557}
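/*
 * Illustrative sketch (not compiled): the caller contract described in the
 * comment above -- take the page off the LRU, move the accounting, then put
 * it back.  The real callers in mem_cgroup_move_charge_pte_range() below
 * follow exactly this shape; the helper name here is hypothetical.
 */
#if 0
static int example_move_one_page(struct page *page, bool compound,
				 struct mem_cgroup *from, struct mem_cgroup *to)
{
	int ret;

	if (isolate_lru_page(page))	/* page must not be on the LRU */
		return -EBUSY;
	ret = mem_cgroup_move_account(page, compound, from, to);
	putback_lru_page(page);		/* charges are fixed up by the caller */
	return ret;
}
#endif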
4558
Li RongQing7cf78062016-05-27 14:27:46 -07004559/**
4560 * get_mctgt_type - get target type of moving charge
4561 * @vma: the vma to which the pte to be checked belongs
4562 * @addr: the address corresponding to the pte to be checked
4563 * @ptent: the pte to be checked
4564 * @target: the pointer in which the target page or swap entry is stored (can be NULL)
4565 *
4566 * Returns
4567 *   0 (MC_TARGET_NONE): if the pte is not a target for move charge.
4568 *   1 (MC_TARGET_PAGE): if the page corresponding to this pte is a target for
4569 *     move charge. If @target is not NULL, the page is stored in target->page
4570 *     with an extra refcount taken (callers should handle it).
4571 *   2 (MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
4572 *     target for charge migration. If @target is not NULL, the entry is stored
4573 *     in target->ent.
4574 *
4575 * Called with pte lock held.
4576 */
4577
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07004578static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004579 unsigned long addr, pte_t ptent, union mc_target *target)
4580{
Daisuke Nishimura02491442010-03-10 15:22:17 -08004581 struct page *page = NULL;
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07004582 enum mc_target_type ret = MC_TARGET_NONE;
Daisuke Nishimura02491442010-03-10 15:22:17 -08004583 swp_entry_t ent = { .val = 0 };
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004584
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004585 if (pte_present(ptent))
4586 page = mc_handle_present_pte(vma, addr, ptent);
4587 else if (is_swap_pte(ptent))
Li RongQing48406ef2016-07-26 15:22:14 -07004588 page = mc_handle_swap_pte(vma, ptent, &ent);
Kirill A. Shutemov0661a332015-02-10 14:10:04 -08004589 else if (pte_none(ptent))
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004590 page = mc_handle_file_pte(vma, addr, ptent, &ent);
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004591
4592 if (!page && !ent.val)
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07004593 return ret;
Daisuke Nishimura02491442010-03-10 15:22:17 -08004594 if (page) {
Daisuke Nishimura02491442010-03-10 15:22:17 -08004595 /*
Johannes Weiner0a31bc92014-08-08 14:19:22 -07004596		 * Do only a loose check w/o serialization.
Johannes Weiner1306a852014-12-10 15:44:52 -08004597		 * mem_cgroup_move_account() checks whether the page is valid
Johannes Weiner0a31bc92014-08-08 14:19:22 -07004598		 * under LRU exclusion.
Daisuke Nishimura02491442010-03-10 15:22:17 -08004599 */
Johannes Weiner1306a852014-12-10 15:44:52 -08004600 if (page->mem_cgroup == mc.from) {
Daisuke Nishimura02491442010-03-10 15:22:17 -08004601 ret = MC_TARGET_PAGE;
4602 if (target)
4603 target->page = page;
4604 }
4605 if (!ret || !target)
4606 put_page(page);
4607 }
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004608 /* There is a swap entry and a page doesn't exist or isn't charged */
4609 if (ent.val && !ret &&
Li Zefan34c00c32013-09-23 16:56:01 +08004610 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
KAMEZAWA Hiroyuki7f0f1542010-05-11 14:06:58 -07004611 ret = MC_TARGET_SWAP;
4612 if (target)
4613 target->ent = ent;
Daisuke Nishimura02491442010-03-10 15:22:17 -08004614 }
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004615 return ret;
4616}
4617
Naoya Horiguchi12724852012-03-21 16:34:28 -07004618#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4619/*
4620 * We don't consider swapping or file mapped pages because THP does not
4621 * support them for now.
4622 * Caller should make sure that pmd_trans_huge(pmd) is true.
4623 */
4624static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
4625 unsigned long addr, pmd_t pmd, union mc_target *target)
4626{
4627 struct page *page = NULL;
Naoya Horiguchi12724852012-03-21 16:34:28 -07004628 enum mc_target_type ret = MC_TARGET_NONE;
4629
4630 page = pmd_page(pmd);
Sasha Levin309381fea2014-01-23 15:52:54 -08004631 VM_BUG_ON_PAGE(!page || !PageHead(page), page);
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08004632 if (!(mc.flags & MOVE_ANON))
Naoya Horiguchi12724852012-03-21 16:34:28 -07004633 return ret;
Johannes Weiner1306a852014-12-10 15:44:52 -08004634 if (page->mem_cgroup == mc.from) {
Naoya Horiguchi12724852012-03-21 16:34:28 -07004635 ret = MC_TARGET_PAGE;
4636 if (target) {
4637 get_page(page);
4638 target->page = page;
4639 }
4640 }
4641 return ret;
4642}
4643#else
4644static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
4645 unsigned long addr, pmd_t pmd, union mc_target *target)
4646{
4647 return MC_TARGET_NONE;
4648}
4649#endif
4650
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004651static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
4652 unsigned long addr, unsigned long end,
4653 struct mm_walk *walk)
4654{
Naoya Horiguchi26bcd642015-02-11 15:27:57 -08004655 struct vm_area_struct *vma = walk->vma;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004656 pte_t *pte;
4657 spinlock_t *ptl;
4658
Kirill A. Shutemovb6ec57f2016-01-21 16:40:25 -08004659 ptl = pmd_trans_huge_lock(pmd, vma);
4660 if (ptl) {
Naoya Horiguchi12724852012-03-21 16:34:28 -07004661 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
4662 mc.precharge += HPAGE_PMD_NR;
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08004663 spin_unlock(ptl);
Andrea Arcangeli1a5a9902012-03-21 16:33:42 -07004664 return 0;
Naoya Horiguchi12724852012-03-21 16:34:28 -07004665 }
Dave Hansen03319322011-03-22 16:32:56 -07004666
Andrea Arcangeli45f83ce2012-03-28 14:42:40 -07004667 if (pmd_trans_unstable(pmd))
4668 return 0;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004669 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4670 for (; addr != end; pte++, addr += PAGE_SIZE)
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07004671 if (get_mctgt_type(vma, addr, *pte, NULL))
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004672 mc.precharge++; /* increment precharge temporarily */
4673 pte_unmap_unlock(pte - 1, ptl);
4674 cond_resched();
4675
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004676 return 0;
4677}
4678
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004679static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
4680{
4681 unsigned long precharge;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004682
Naoya Horiguchi26bcd642015-02-11 15:27:57 -08004683 struct mm_walk mem_cgroup_count_precharge_walk = {
4684 .pmd_entry = mem_cgroup_count_precharge_pte_range,
4685 .mm = mm,
4686 };
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08004687 down_read(&mm->mmap_sem);
James Morse0247f3f2016-10-07 17:00:12 -07004688 walk_page_range(0, mm->highest_vm_end,
4689 &mem_cgroup_count_precharge_walk);
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08004690 up_read(&mm->mmap_sem);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004691
4692 precharge = mc.precharge;
4693 mc.precharge = 0;
4694
4695 return precharge;
4696}
4697
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004698static int mem_cgroup_precharge_mc(struct mm_struct *mm)
4699{
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08004700 unsigned long precharge = mem_cgroup_count_precharge(mm);
4701
4702 VM_BUG_ON(mc.moving_task);
4703 mc.moving_task = current;
4704 return mem_cgroup_do_precharge(precharge);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004705}
4706
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08004707/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
4708static void __mem_cgroup_clear_mc(void)
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004709{
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07004710 struct mem_cgroup *from = mc.from;
4711 struct mem_cgroup *to = mc.to;
4712
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004713 /* we must uncharge all the leftover precharges from mc.to */
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004714 if (mc.precharge) {
Johannes Weiner00501b52014-08-08 14:19:20 -07004715 cancel_charge(mc.to, mc.precharge);
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004716 mc.precharge = 0;
4717 }
4718 /*
4719 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
4720 * we must uncharge here.
4721 */
4722 if (mc.moved_charge) {
Johannes Weiner00501b52014-08-08 14:19:20 -07004723 cancel_charge(mc.from, mc.moved_charge);
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004724 mc.moved_charge = 0;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004725 }
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08004726 /* we must fixup refcnts and charges */
4727 if (mc.moved_swap) {
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08004728 /* uncharge swap account from the old cgroup */
Johannes Weinerce00a962014-09-05 08:43:57 -04004729 if (!mem_cgroup_is_root(mc.from))
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004730 page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08004731
Vladimir Davydov615d66c2016-08-11 15:33:03 -07004732 mem_cgroup_id_put_many(mc.from, mc.moved_swap);
4733
Johannes Weiner05b84302014-08-06 16:05:59 -07004734 /*
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004735 * we charged both to->memory and to->memsw, so we
4736 * should uncharge to->memory.
Johannes Weiner05b84302014-08-06 16:05:59 -07004737 */
Johannes Weinerce00a962014-09-05 08:43:57 -04004738 if (!mem_cgroup_is_root(mc.to))
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004739 page_counter_uncharge(&mc.to->memory, mc.moved_swap);
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08004740
Vladimir Davydov615d66c2016-08-11 15:33:03 -07004741 mem_cgroup_id_get_many(mc.to, mc.moved_swap);
4742 css_put_many(&mc.to->css, mc.moved_swap);
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08004743
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08004744 mc.moved_swap = 0;
4745 }
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08004746 memcg_oom_recover(from);
4747 memcg_oom_recover(to);
4748 wake_up_all(&mc.waitq);
4749}
4750
4751static void mem_cgroup_clear_mc(void)
4752{
Tejun Heo264a0ae2016-04-21 19:09:02 -04004753 struct mm_struct *mm = mc.mm;
4754
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08004755 /*
4756 * we must clear moving_task before waking up waiters at the end of
4757 * task migration.
4758 */
4759 mc.moving_task = NULL;
4760 __mem_cgroup_clear_mc();
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07004761 spin_lock(&mc.lock);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004762 mc.from = NULL;
4763 mc.to = NULL;
Tejun Heo264a0ae2016-04-21 19:09:02 -04004764 mc.mm = NULL;
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07004765 spin_unlock(&mc.lock);
Tejun Heo264a0ae2016-04-21 19:09:02 -04004766
4767 mmput(mm);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004768}
4769
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05004770static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004771{
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05004772 struct cgroup_subsys_state *css;
Ross Zwislereed67d72015-12-23 14:53:27 -07004773 struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
Tejun Heo9f2115f2015-09-08 15:01:10 -07004774 struct mem_cgroup *from;
Tejun Heo4530edd2015-09-11 15:00:19 -04004775 struct task_struct *leader, *p;
Tejun Heo9f2115f2015-09-08 15:01:10 -07004776 struct mm_struct *mm;
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08004777 unsigned long move_flags;
Tejun Heo9f2115f2015-09-08 15:01:10 -07004778 int ret = 0;
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004779
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05004780 /* charge immigration isn't supported on the default hierarchy */
4781 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
Tejun Heo9f2115f2015-09-08 15:01:10 -07004782 return 0;
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004783
Tejun Heo4530edd2015-09-11 15:00:19 -04004784 /*
4785 * Multi-process migrations only happen on the default hierarchy
4786 * where charge immigration is not used. Perform charge
4787 * immigration if @tset contains a leader and whine if there are
4788 * multiple.
4789 */
4790 p = NULL;
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05004791 cgroup_taskset_for_each_leader(leader, css, tset) {
Tejun Heo4530edd2015-09-11 15:00:19 -04004792 WARN_ON_ONCE(p);
4793 p = leader;
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05004794 memcg = mem_cgroup_from_css(css);
Tejun Heo4530edd2015-09-11 15:00:19 -04004795 }
4796 if (!p)
4797 return 0;
4798
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05004799 /*
4800	 * We are now committed to this value whatever it is. Changes in this
4801 * tunable will only affect upcoming migrations, not the current one.
4802 * So we need to save it, and keep it going.
4803 */
4804 move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
4805 if (!move_flags)
4806 return 0;
4807
Tejun Heo9f2115f2015-09-08 15:01:10 -07004808 from = mem_cgroup_from_task(p);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004809
Tejun Heo9f2115f2015-09-08 15:01:10 -07004810 VM_BUG_ON(from == memcg);
Johannes Weiner247b1442014-12-10 15:44:11 -08004811
Tejun Heo9f2115f2015-09-08 15:01:10 -07004812 mm = get_task_mm(p);
4813 if (!mm)
4814 return 0;
4815	/* We move charges only when we move an owner of the mm */
4816 if (mm->owner == p) {
4817 VM_BUG_ON(mc.from);
4818 VM_BUG_ON(mc.to);
4819 VM_BUG_ON(mc.precharge);
4820 VM_BUG_ON(mc.moved_charge);
4821 VM_BUG_ON(mc.moved_swap);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004822
Tejun Heo9f2115f2015-09-08 15:01:10 -07004823 spin_lock(&mc.lock);
Tejun Heo264a0ae2016-04-21 19:09:02 -04004824 mc.mm = mm;
Tejun Heo9f2115f2015-09-08 15:01:10 -07004825 mc.from = from;
4826 mc.to = memcg;
4827 mc.flags = move_flags;
4828 spin_unlock(&mc.lock);
4829 /* We set mc.moving_task later */
4830
4831 ret = mem_cgroup_precharge_mc(mm);
4832 if (ret)
4833 mem_cgroup_clear_mc();
Tejun Heo264a0ae2016-04-21 19:09:02 -04004834 } else {
4835 mmput(mm);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004836 }
4837 return ret;
4838}
4839
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05004840static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004841{
Johannes Weiner4e2f2452014-12-10 15:44:08 -08004842 if (mc.to)
4843 mem_cgroup_clear_mc();
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004844}
4845
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004846static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
4847 unsigned long addr, unsigned long end,
4848 struct mm_walk *walk)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004849{
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004850 int ret = 0;
Naoya Horiguchi26bcd642015-02-11 15:27:57 -08004851 struct vm_area_struct *vma = walk->vma;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004852 pte_t *pte;
4853 spinlock_t *ptl;
Naoya Horiguchi12724852012-03-21 16:34:28 -07004854 enum mc_target_type target_type;
4855 union mc_target target;
4856 struct page *page;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004857
Kirill A. Shutemovb6ec57f2016-01-21 16:40:25 -08004858 ptl = pmd_trans_huge_lock(pmd, vma);
4859 if (ptl) {
Hugh Dickins62ade862012-05-18 11:28:34 -07004860 if (mc.precharge < HPAGE_PMD_NR) {
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08004861 spin_unlock(ptl);
Naoya Horiguchi12724852012-03-21 16:34:28 -07004862 return 0;
4863 }
4864 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
4865 if (target_type == MC_TARGET_PAGE) {
4866 page = target.page;
4867 if (!isolate_lru_page(page)) {
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08004868 if (!mem_cgroup_move_account(page, true,
Johannes Weiner1306a852014-12-10 15:44:52 -08004869 mc.from, mc.to)) {
Naoya Horiguchi12724852012-03-21 16:34:28 -07004870 mc.precharge -= HPAGE_PMD_NR;
4871 mc.moved_charge += HPAGE_PMD_NR;
4872 }
4873 putback_lru_page(page);
4874 }
4875 put_page(page);
4876 }
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08004877 spin_unlock(ptl);
Andrea Arcangeli1a5a9902012-03-21 16:33:42 -07004878 return 0;
Naoya Horiguchi12724852012-03-21 16:34:28 -07004879 }
4880
Andrea Arcangeli45f83ce2012-03-28 14:42:40 -07004881 if (pmd_trans_unstable(pmd))
4882 return 0;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004883retry:
4884 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4885 for (; addr != end; addr += PAGE_SIZE) {
4886 pte_t ptent = *(pte++);
Daisuke Nishimura02491442010-03-10 15:22:17 -08004887 swp_entry_t ent;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004888
4889 if (!mc.precharge)
4890 break;
4891
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07004892 switch (get_mctgt_type(vma, addr, ptent, &target)) {
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004893 case MC_TARGET_PAGE:
4894 page = target.page;
Kirill A. Shutemov53f92632016-01-15 16:53:42 -08004895 /*
4896 * We can have a part of the split pmd here. Moving it
4897 * can be done but it would be too convoluted so simply
4898			 * ignore such a partial THP and keep it in the original
4899 * memcg. There should be somebody mapping the head.
4900 */
4901 if (PageTransCompound(page))
4902 goto put;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004903 if (isolate_lru_page(page))
4904 goto put;
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08004905 if (!mem_cgroup_move_account(page, false,
4906 mc.from, mc.to)) {
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004907 mc.precharge--;
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004908 /* we uncharge from mc.from later. */
4909 mc.moved_charge++;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004910 }
4911 putback_lru_page(page);
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07004912put: /* get_mctgt_type() gets the page */
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004913 put_page(page);
4914 break;
Daisuke Nishimura02491442010-03-10 15:22:17 -08004915 case MC_TARGET_SWAP:
4916 ent = target.ent;
Hugh Dickinse91cbb42012-05-29 15:06:51 -07004917 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
Daisuke Nishimura02491442010-03-10 15:22:17 -08004918 mc.precharge--;
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08004919 /* we fixup refcnts and charges later. */
4920 mc.moved_swap++;
4921 }
Daisuke Nishimura02491442010-03-10 15:22:17 -08004922 break;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004923 default:
4924 break;
4925 }
4926 }
4927 pte_unmap_unlock(pte - 1, ptl);
4928 cond_resched();
4929
4930 if (addr != end) {
4931 /*
4932 * We have consumed all precharges we got in can_attach().
4933		 * We try to charge one by one, but don't do any additional
4934		 * charges to mc.to if we have already failed to charge once in
4935		 * the attach() phase.
4936 */
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004937 ret = mem_cgroup_do_precharge(1);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004938 if (!ret)
4939 goto retry;
4940 }
4941
4942 return ret;
4943}
4944
Tejun Heo264a0ae2016-04-21 19:09:02 -04004945static void mem_cgroup_move_charge(void)
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004946{
Naoya Horiguchi26bcd642015-02-11 15:27:57 -08004947 struct mm_walk mem_cgroup_move_charge_walk = {
4948 .pmd_entry = mem_cgroup_move_charge_pte_range,
Tejun Heo264a0ae2016-04-21 19:09:02 -04004949 .mm = mc.mm,
Naoya Horiguchi26bcd642015-02-11 15:27:57 -08004950 };
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004951
4952 lru_add_drain_all();
Johannes Weiner312722c2014-12-10 15:44:25 -08004953 /*
Johannes Weiner81f8c3a2016-03-15 14:57:04 -07004954 * Signal lock_page_memcg() to take the memcg's move_lock
4955 * while we're moving its pages to another memcg. Then wait
4956 * for already started RCU-only updates to finish.
Johannes Weiner312722c2014-12-10 15:44:25 -08004957 */
4958 atomic_inc(&mc.from->moving_account);
4959 synchronize_rcu();
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08004960retry:
Tejun Heo264a0ae2016-04-21 19:09:02 -04004961 if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08004962 /*
4963		 * Someone holding the mmap_sem might be waiting on the
4964		 * waitq. So we cancel all extra charges, wake up all waiters,
4965 * and retry. Because we cancel precharges, we might not be able
4966 * to move enough charges, but moving charge is a best-effort
4967 * feature anyway, so it wouldn't be a big problem.
4968 */
4969 __mem_cgroup_clear_mc();
4970 cond_resched();
4971 goto retry;
4972 }
Naoya Horiguchi26bcd642015-02-11 15:27:57 -08004973 /*
4974	 * When we have consumed all precharges and failed to do any
4975	 * additional charging, the page walk just aborts.
4976 */
James Morse0247f3f2016-10-07 17:00:12 -07004977 walk_page_range(0, mc.mm->highest_vm_end, &mem_cgroup_move_charge_walk);
4978
Tejun Heo264a0ae2016-04-21 19:09:02 -04004979 up_read(&mc.mm->mmap_sem);
Johannes Weiner312722c2014-12-10 15:44:25 -08004980 atomic_dec(&mc.from->moving_account);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004981}
4982
Tejun Heo264a0ae2016-04-21 19:09:02 -04004983static void mem_cgroup_move_task(void)
Balbir Singh67e465a2008-02-07 00:13:54 -08004984{
Tejun Heo264a0ae2016-04-21 19:09:02 -04004985 if (mc.to) {
4986 mem_cgroup_move_charge();
KOSAKI Motohiroa4336582011-06-15 15:08:13 -07004987 mem_cgroup_clear_mc();
Tejun Heo264a0ae2016-04-21 19:09:02 -04004988 }
Balbir Singh67e465a2008-02-07 00:13:54 -08004989}
Daisuke Nishimura5cfb80a2010-03-23 13:35:11 -07004990#else /* !CONFIG_MMU */
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05004991static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
Daisuke Nishimura5cfb80a2010-03-23 13:35:11 -07004992{
4993 return 0;
4994}
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05004995static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
Daisuke Nishimura5cfb80a2010-03-23 13:35:11 -07004996{
4997}
Tejun Heo264a0ae2016-04-21 19:09:02 -04004998static void mem_cgroup_move_task(void)
Daisuke Nishimura5cfb80a2010-03-23 13:35:11 -07004999{
5000}
5001#endif
Balbir Singh67e465a2008-02-07 00:13:54 -08005002
Tejun Heof00baae2013-04-15 13:41:15 -07005003/*
5004 * Cgroup retains root cgroups across [un]mount cycles making it necessary
Tejun Heoaa6ec292014-07-09 10:08:08 -04005005 * to verify whether we're attached to the default hierarchy on each mount
5006 * attempt.
Tejun Heof00baae2013-04-15 13:41:15 -07005007 */
Tejun Heoeb954192013-08-08 20:11:23 -04005008static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
Tejun Heof00baae2013-04-15 13:41:15 -07005009{
5010 /*
Tejun Heoaa6ec292014-07-09 10:08:08 -04005011 * use_hierarchy is forced on the default hierarchy. cgroup core
Tejun Heof00baae2013-04-15 13:41:15 -07005012 * guarantees that @root doesn't have any children, so turning it
5013 * on for the root memcg is enough.
5014 */
Tejun Heo9e10a132015-09-18 11:56:28 -04005015 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
Vladimir Davydov7feee592015-03-12 16:26:19 -07005016 root_mem_cgroup->use_hierarchy = true;
5017 else
5018 root_mem_cgroup->use_hierarchy = false;
Tejun Heof00baae2013-04-15 13:41:15 -07005019}
5020
Johannes Weiner241994ed2015-02-11 15:26:06 -08005021static u64 memory_current_read(struct cgroup_subsys_state *css,
5022 struct cftype *cft)
5023{
Johannes Weinerf5fc3c5d2015-11-05 18:50:23 -08005024 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5025
5026 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
Johannes Weiner241994ed2015-02-11 15:26:06 -08005027}
5028
5029static int memory_low_show(struct seq_file *m, void *v)
5030{
5031 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
Jason Low4db0c3c2015-04-15 16:14:08 -07005032 unsigned long low = READ_ONCE(memcg->low);
Johannes Weiner241994ed2015-02-11 15:26:06 -08005033
5034 if (low == PAGE_COUNTER_MAX)
Johannes Weinerd2973692015-02-27 15:52:04 -08005035 seq_puts(m, "max\n");
Johannes Weiner241994ed2015-02-11 15:26:06 -08005036 else
5037 seq_printf(m, "%llu\n", (u64)low * PAGE_SIZE);
5038
5039 return 0;
5040}
5041
5042static ssize_t memory_low_write(struct kernfs_open_file *of,
5043 char *buf, size_t nbytes, loff_t off)
5044{
5045 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5046 unsigned long low;
5047 int err;
5048
5049 buf = strstrip(buf);
Johannes Weinerd2973692015-02-27 15:52:04 -08005050 err = page_counter_memparse(buf, "max", &low);
Johannes Weiner241994ed2015-02-11 15:26:06 -08005051 if (err)
5052 return err;
5053
5054 memcg->low = low;
5055
5056 return nbytes;
5057}
5058
5059static int memory_high_show(struct seq_file *m, void *v)
5060{
5061 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
Jason Low4db0c3c2015-04-15 16:14:08 -07005062 unsigned long high = READ_ONCE(memcg->high);
Johannes Weiner241994ed2015-02-11 15:26:06 -08005063
5064 if (high == PAGE_COUNTER_MAX)
Johannes Weinerd2973692015-02-27 15:52:04 -08005065 seq_puts(m, "max\n");
Johannes Weiner241994ed2015-02-11 15:26:06 -08005066 else
5067 seq_printf(m, "%llu\n", (u64)high * PAGE_SIZE);
5068
5069 return 0;
5070}
5071
5072static ssize_t memory_high_write(struct kernfs_open_file *of,
5073 char *buf, size_t nbytes, loff_t off)
5074{
5075 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
Johannes Weiner588083b2016-03-17 14:20:25 -07005076 unsigned long nr_pages;
Johannes Weiner241994ed2015-02-11 15:26:06 -08005077 unsigned long high;
5078 int err;
5079
5080 buf = strstrip(buf);
Johannes Weinerd2973692015-02-27 15:52:04 -08005081 err = page_counter_memparse(buf, "max", &high);
Johannes Weiner241994ed2015-02-11 15:26:06 -08005082 if (err)
5083 return err;
5084
5085 memcg->high = high;
5086
Johannes Weiner588083b2016-03-17 14:20:25 -07005087 nr_pages = page_counter_read(&memcg->memory);
5088 if (nr_pages > high)
5089 try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
5090 GFP_KERNEL, true);
5091
Tejun Heo2529bb32015-05-22 18:23:34 -04005092 memcg_wb_domain_size_changed(memcg);
Johannes Weiner241994ed2015-02-11 15:26:06 -08005093 return nbytes;
5094}
5095
5096static int memory_max_show(struct seq_file *m, void *v)
5097{
5098 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
Jason Low4db0c3c2015-04-15 16:14:08 -07005099 unsigned long max = READ_ONCE(memcg->memory.limit);
Johannes Weiner241994ed2015-02-11 15:26:06 -08005100
5101 if (max == PAGE_COUNTER_MAX)
Johannes Weinerd2973692015-02-27 15:52:04 -08005102 seq_puts(m, "max\n");
Johannes Weiner241994ed2015-02-11 15:26:06 -08005103 else
5104 seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);
5105
5106 return 0;
5107}
5108
5109static ssize_t memory_max_write(struct kernfs_open_file *of,
5110 char *buf, size_t nbytes, loff_t off)
5111{
5112 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
Johannes Weinerb6e6edc2016-03-17 14:20:28 -07005113 unsigned int nr_reclaims = MEM_CGROUP_RECLAIM_RETRIES;
5114 bool drained = false;
Johannes Weiner241994ed2015-02-11 15:26:06 -08005115 unsigned long max;
5116 int err;
5117
5118 buf = strstrip(buf);
Johannes Weinerd2973692015-02-27 15:52:04 -08005119 err = page_counter_memparse(buf, "max", &max);
Johannes Weiner241994ed2015-02-11 15:26:06 -08005120 if (err)
5121 return err;
5122
Johannes Weinerb6e6edc2016-03-17 14:20:28 -07005123 xchg(&memcg->memory.limit, max);
5124
5125 for (;;) {
5126 unsigned long nr_pages = page_counter_read(&memcg->memory);
5127
5128 if (nr_pages <= max)
5129 break;
5130
5131 if (signal_pending(current)) {
5132 err = -EINTR;
5133 break;
5134 }
5135
5136 if (!drained) {
5137 drain_all_stock(memcg);
5138 drained = true;
5139 continue;
5140 }
5141
5142 if (nr_reclaims) {
5143 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
5144 GFP_KERNEL, true))
5145 nr_reclaims--;
5146 continue;
5147 }
5148
5149 mem_cgroup_events(memcg, MEMCG_OOM, 1);
5150 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
5151 break;
5152 }
Johannes Weiner241994ed2015-02-11 15:26:06 -08005153
Tejun Heo2529bb32015-05-22 18:23:34 -04005154 memcg_wb_domain_size_changed(memcg);
Johannes Weiner241994ed2015-02-11 15:26:06 -08005155 return nbytes;
5156}
5157
5158static int memory_events_show(struct seq_file *m, void *v)
5159{
5160 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5161
5162 seq_printf(m, "low %lu\n", mem_cgroup_read_events(memcg, MEMCG_LOW));
5163 seq_printf(m, "high %lu\n", mem_cgroup_read_events(memcg, MEMCG_HIGH));
5164 seq_printf(m, "max %lu\n", mem_cgroup_read_events(memcg, MEMCG_MAX));
5165 seq_printf(m, "oom %lu\n", mem_cgroup_read_events(memcg, MEMCG_OOM));
5166
5167 return 0;
5168}
5169
Johannes Weiner587d9f72016-01-20 15:03:19 -08005170static int memory_stat_show(struct seq_file *m, void *v)
5171{
5172 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
Vladimir Davydov72b54e72016-03-17 14:17:32 -07005173 unsigned long stat[MEMCG_NR_STAT];
5174 unsigned long events[MEMCG_NR_EVENTS];
Johannes Weiner587d9f72016-01-20 15:03:19 -08005175 int i;
5176
5177 /*
5178 * Provide statistics on the state of the memory subsystem as
5179 * well as cumulative event counters that show past behavior.
5180 *
5181 * This list is ordered following a combination of these gradients:
5182 * 1) generic big picture -> specifics and details
5183 * 2) reflecting userspace activity -> reflecting kernel heuristics
5184 *
5185 * Current memory state:
5186 */
5187
Vladimir Davydov72b54e72016-03-17 14:17:32 -07005188 tree_stat(memcg, stat);
5189 tree_events(memcg, events);
5190
Johannes Weiner587d9f72016-01-20 15:03:19 -08005191 seq_printf(m, "anon %llu\n",
Vladimir Davydov72b54e72016-03-17 14:17:32 -07005192 (u64)stat[MEM_CGROUP_STAT_RSS] * PAGE_SIZE);
Johannes Weiner587d9f72016-01-20 15:03:19 -08005193 seq_printf(m, "file %llu\n",
Vladimir Davydov72b54e72016-03-17 14:17:32 -07005194 (u64)stat[MEM_CGROUP_STAT_CACHE] * PAGE_SIZE);
Vladimir Davydov12580e42016-03-17 14:17:38 -07005195 seq_printf(m, "kernel_stack %llu\n",
Andy Lutomirskiefdc9492016-07-28 15:48:17 -07005196 (u64)stat[MEMCG_KERNEL_STACK_KB] * 1024);
Vladimir Davydov27ee57c2016-03-17 14:17:35 -07005197 seq_printf(m, "slab %llu\n",
5198 (u64)(stat[MEMCG_SLAB_RECLAIMABLE] +
5199 stat[MEMCG_SLAB_UNRECLAIMABLE]) * PAGE_SIZE);
Johannes Weinerb2807f02016-01-20 15:03:22 -08005200 seq_printf(m, "sock %llu\n",
Vladimir Davydov72b54e72016-03-17 14:17:32 -07005201 (u64)stat[MEMCG_SOCK] * PAGE_SIZE);
Johannes Weiner587d9f72016-01-20 15:03:19 -08005202
5203 seq_printf(m, "file_mapped %llu\n",
Vladimir Davydov72b54e72016-03-17 14:17:32 -07005204 (u64)stat[MEM_CGROUP_STAT_FILE_MAPPED] * PAGE_SIZE);
Johannes Weiner587d9f72016-01-20 15:03:19 -08005205 seq_printf(m, "file_dirty %llu\n",
Vladimir Davydov72b54e72016-03-17 14:17:32 -07005206 (u64)stat[MEM_CGROUP_STAT_DIRTY] * PAGE_SIZE);
Johannes Weiner587d9f72016-01-20 15:03:19 -08005207 seq_printf(m, "file_writeback %llu\n",
Vladimir Davydov72b54e72016-03-17 14:17:32 -07005208 (u64)stat[MEM_CGROUP_STAT_WRITEBACK] * PAGE_SIZE);
Johannes Weiner587d9f72016-01-20 15:03:19 -08005209
5210 for (i = 0; i < NR_LRU_LISTS; i++) {
5211 struct mem_cgroup *mi;
5212 unsigned long val = 0;
5213
5214 for_each_mem_cgroup_tree(mi, memcg)
5215 val += mem_cgroup_nr_lru_pages(mi, BIT(i));
5216 seq_printf(m, "%s %llu\n",
5217 mem_cgroup_lru_names[i], (u64)val * PAGE_SIZE);
5218 }
5219
Vladimir Davydov27ee57c2016-03-17 14:17:35 -07005220 seq_printf(m, "slab_reclaimable %llu\n",
5221 (u64)stat[MEMCG_SLAB_RECLAIMABLE] * PAGE_SIZE);
5222 seq_printf(m, "slab_unreclaimable %llu\n",
5223 (u64)stat[MEMCG_SLAB_UNRECLAIMABLE] * PAGE_SIZE);
5224
Johannes Weiner587d9f72016-01-20 15:03:19 -08005225 /* Accumulated memory events */
5226
5227 seq_printf(m, "pgfault %lu\n",
Vladimir Davydov72b54e72016-03-17 14:17:32 -07005228 events[MEM_CGROUP_EVENTS_PGFAULT]);
Johannes Weiner587d9f72016-01-20 15:03:19 -08005229 seq_printf(m, "pgmajfault %lu\n",
Vladimir Davydov72b54e72016-03-17 14:17:32 -07005230 events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
Johannes Weiner587d9f72016-01-20 15:03:19 -08005231
5232 return 0;
5233}
5234
Johannes Weiner241994ed2015-02-11 15:26:06 -08005235static struct cftype memory_files[] = {
5236 {
5237 .name = "current",
Johannes Weinerf5fc3c5d2015-11-05 18:50:23 -08005238 .flags = CFTYPE_NOT_ON_ROOT,
Johannes Weiner241994ed2015-02-11 15:26:06 -08005239 .read_u64 = memory_current_read,
5240 },
5241 {
5242 .name = "low",
5243 .flags = CFTYPE_NOT_ON_ROOT,
5244 .seq_show = memory_low_show,
5245 .write = memory_low_write,
5246 },
5247 {
5248 .name = "high",
5249 .flags = CFTYPE_NOT_ON_ROOT,
5250 .seq_show = memory_high_show,
5251 .write = memory_high_write,
5252 },
5253 {
5254 .name = "max",
5255 .flags = CFTYPE_NOT_ON_ROOT,
5256 .seq_show = memory_max_show,
5257 .write = memory_max_write,
5258 },
5259 {
5260 .name = "events",
5261 .flags = CFTYPE_NOT_ON_ROOT,
Tejun Heo472912a2015-09-18 18:01:59 -04005262 .file_offset = offsetof(struct mem_cgroup, events_file),
Johannes Weiner241994ed2015-02-11 15:26:06 -08005263 .seq_show = memory_events_show,
5264 },
Johannes Weiner587d9f72016-01-20 15:03:19 -08005265 {
5266 .name = "stat",
5267 .flags = CFTYPE_NOT_ON_ROOT,
5268 .seq_show = memory_stat_show,
5269 },
Johannes Weiner241994ed2015-02-11 15:26:06 -08005270 { } /* terminate */
5271};
5272
Tejun Heo073219e2014-02-08 10:36:58 -05005273struct cgroup_subsys memory_cgrp_subsys = {
Tejun Heo92fb9742012-11-19 08:13:38 -08005274 .css_alloc = mem_cgroup_css_alloc,
Glauber Costad142e3e2013-02-22 16:34:52 -08005275 .css_online = mem_cgroup_css_online,
Tejun Heo92fb9742012-11-19 08:13:38 -08005276 .css_offline = mem_cgroup_css_offline,
Vladimir Davydov6df38682015-12-29 14:54:10 -08005277 .css_released = mem_cgroup_css_released,
Tejun Heo92fb9742012-11-19 08:13:38 -08005278 .css_free = mem_cgroup_css_free,
Tejun Heo1ced9532014-07-08 18:02:57 -04005279 .css_reset = mem_cgroup_css_reset,
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005280 .can_attach = mem_cgroup_can_attach,
5281 .cancel_attach = mem_cgroup_cancel_attach,
Tejun Heo264a0ae2016-04-21 19:09:02 -04005282 .post_attach = mem_cgroup_move_task,
Tejun Heof00baae2013-04-15 13:41:15 -07005283 .bind = mem_cgroup_bind,
Johannes Weiner241994ed2015-02-11 15:26:06 -08005284 .dfl_cftypes = memory_files,
5285 .legacy_cftypes = mem_cgroup_legacy_files,
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08005286 .early_init = 0,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08005287};
KAMEZAWA Hiroyukic0777192009-01-07 18:07:57 -08005288
Johannes Weiner241994ed2015-02-11 15:26:06 -08005289/**
Johannes Weiner241994ed2015-02-11 15:26:06 -08005290 * mem_cgroup_low - check if memory consumption is below the normal range
5291 * @root: the highest ancestor to consider
5292 * @memcg: the memory cgroup to check
5293 *
5294 * Returns %true if memory consumption of @memcg, and that of all
5295 * configurable ancestors up to @root, is below the normal range.
5296 */
5297bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg)
5298{
5299 if (mem_cgroup_disabled())
5300 return false;
5301
5302 /*
5303 * The toplevel group doesn't have a configurable range, so
5304 * it's never low when looked at directly, and it is not
5305 * considered an ancestor when assessing the hierarchy.
5306 */
5307
5308 if (memcg == root_mem_cgroup)
5309 return false;
5310
Michal Hocko4e54ded2015-02-27 15:51:46 -08005311 if (page_counter_read(&memcg->memory) >= memcg->low)
Johannes Weiner241994ed2015-02-11 15:26:06 -08005312 return false;
5313
5314 while (memcg != root) {
5315 memcg = parent_mem_cgroup(memcg);
5316
5317 if (memcg == root_mem_cgroup)
5318 break;
5319
Michal Hocko4e54ded2015-02-27 15:51:46 -08005320 if (page_counter_read(&memcg->memory) >= memcg->low)
Johannes Weiner241994ed2015-02-11 15:26:06 -08005321 return false;
5322 }
5323 return true;
5324}
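/*
 * Illustrative sketch (not compiled): how a reclaim walk might consult
 * mem_cgroup_low() to skip groups that are under their low boundary.  The
 * sc->may_thrash escape hatch is only an assumption here, loosely modelled
 * on the vmscan logic of this era.
 */
#if 0
	/* inside the per-memcg reclaim loop, rooted at @root */
	if (mem_cgroup_low(root, memcg)) {
		if (!sc->may_thrash)
			continue;			/* honor the low boundary */
		mem_cgroup_events(memcg, MEMCG_LOW, 1);	/* note the breach */
	}
	/* ...otherwise proceed to reclaim from @memcg... */
#endif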
5325
Johannes Weiner00501b52014-08-08 14:19:20 -07005326/**
5327 * mem_cgroup_try_charge - try charging a page
5328 * @page: page to charge
5329 * @mm: mm context of the victim
5330 * @gfp_mask: reclaim mode
5331 * @memcgp: charged memcg return
Li RongQing25843c22016-07-26 15:26:56 -07005332 * @compound: charge the page as compound or small page
Johannes Weiner00501b52014-08-08 14:19:20 -07005333 *
5334 * Try to charge @page to the memcg that @mm belongs to, reclaiming
5335 * pages according to @gfp_mask if necessary.
5336 *
5337 * Returns 0 on success, with *@memcgp pointing to the charged memcg.
5338 * Otherwise, an error code is returned.
5339 *
5340 * After page->mapping has been set up, the caller must finalize the
5341 * charge with mem_cgroup_commit_charge(). Or abort the transaction
5342 * with mem_cgroup_cancel_charge() in case page instantiation fails.
5343 */
5344int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08005345 gfp_t gfp_mask, struct mem_cgroup **memcgp,
5346 bool compound)
Johannes Weiner00501b52014-08-08 14:19:20 -07005347{
5348 struct mem_cgroup *memcg = NULL;
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08005349 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
Johannes Weiner00501b52014-08-08 14:19:20 -07005350 int ret = 0;
5351
5352 if (mem_cgroup_disabled())
5353 goto out;
5354
5355 if (PageSwapCache(page)) {
Johannes Weiner00501b52014-08-08 14:19:20 -07005356 /*
5357 * Every swap fault against a single page tries to charge the
5358		 * page, so bail as early as possible. shmem_unuse() encounters
5359 * already charged pages, too. The USED bit is protected by
5360 * the page lock, which serializes swap cache removal, which
5361 * in turn serializes uncharging.
5362 */
Vladimir Davydove993d902015-09-09 15:35:35 -07005363 VM_BUG_ON_PAGE(!PageLocked(page), page);
Johannes Weiner1306a852014-12-10 15:44:52 -08005364 if (page->mem_cgroup)
Johannes Weiner00501b52014-08-08 14:19:20 -07005365 goto out;
Vladimir Davydove993d902015-09-09 15:35:35 -07005366
Vladimir Davydov37e84352016-01-20 15:02:56 -08005367 if (do_swap_account) {
Vladimir Davydove993d902015-09-09 15:35:35 -07005368 swp_entry_t ent = { .val = page_private(page), };
5369 unsigned short id = lookup_swap_cgroup_id(ent);
5370
5371 rcu_read_lock();
5372 memcg = mem_cgroup_from_id(id);
5373 if (memcg && !css_tryget_online(&memcg->css))
5374 memcg = NULL;
5375 rcu_read_unlock();
5376 }
Johannes Weiner00501b52014-08-08 14:19:20 -07005377 }
5378
Johannes Weiner00501b52014-08-08 14:19:20 -07005379 if (!memcg)
5380 memcg = get_mem_cgroup_from_mm(mm);
5381
5382 ret = try_charge(memcg, gfp_mask, nr_pages);
5383
5384 css_put(&memcg->css);
Johannes Weiner00501b52014-08-08 14:19:20 -07005385out:
5386 *memcgp = memcg;
5387 return ret;
5388}
5389
5390/**
5391 * mem_cgroup_commit_charge - commit a page charge
5392 * @page: page to charge
5393 * @memcg: memcg to charge the page to
5394 * @lrucare: page might be on LRU already
Li RongQing25843c22016-07-26 15:26:56 -07005395 * @compound: charge the page as compound or small page
Johannes Weiner00501b52014-08-08 14:19:20 -07005396 *
5397 * Finalize a charge transaction started by mem_cgroup_try_charge(),
5398 * after page->mapping has been set up. This must happen atomically
5399 * as part of the page instantiation, i.e. under the page table lock
5400 * for anonymous pages, under the page lock for page and swap cache.
5401 *
5402 * In addition, the page must not be on the LRU during the commit, to
5403 * prevent racing with task migration. If it might be, use @lrucare.
5404 *
5405 * Use mem_cgroup_cancel_charge() to cancel the transaction instead.
5406 */
5407void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08005408 bool lrucare, bool compound)
Johannes Weiner00501b52014-08-08 14:19:20 -07005409{
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08005410 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
Johannes Weiner00501b52014-08-08 14:19:20 -07005411
5412 VM_BUG_ON_PAGE(!page->mapping, page);
5413 VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);
5414
5415 if (mem_cgroup_disabled())
5416 return;
5417 /*
5418 * Swap faults will attempt to charge the same page multiple
5419 * times. But reuse_swap_page() might have removed the page
5420 * from swapcache already, so we can't check PageSwapCache().
5421 */
5422 if (!memcg)
5423 return;
5424
Johannes Weiner6abb5a82014-08-08 14:19:33 -07005425 commit_charge(page, memcg, lrucare);
5426
Johannes Weiner6abb5a82014-08-08 14:19:33 -07005427 local_irq_disable();
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08005428 mem_cgroup_charge_statistics(memcg, page, compound, nr_pages);
Johannes Weiner6abb5a82014-08-08 14:19:33 -07005429 memcg_check_events(memcg, page);
5430 local_irq_enable();
Johannes Weiner00501b52014-08-08 14:19:20 -07005431
Johannes Weiner7941d212016-01-14 15:21:23 -08005432 if (do_memsw_account() && PageSwapCache(page)) {
Johannes Weiner00501b52014-08-08 14:19:20 -07005433 swp_entry_t entry = { .val = page_private(page) };
5434 /*
5435		 * The swap entry might not get freed for a long time, so
5436		 * let's not wait for it. The page has already received a
5437		 * memory+swap charge, so drop the swap entry duplicate.
5438 */
5439 mem_cgroup_uncharge_swap(entry);
5440 }
5441}
5442
5443/**
5444 * mem_cgroup_cancel_charge - cancel a page charge
5445 * @page: page to charge
5446 * @memcg: memcg to charge the page to
Li RongQing25843c22016-07-26 15:26:56 -07005447 * @compound: charge the page as compound or small page
Johannes Weiner00501b52014-08-08 14:19:20 -07005448 *
5449 * Cancel a charge transaction started by mem_cgroup_try_charge().
5450 */
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08005451void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
5452 bool compound)
Johannes Weiner00501b52014-08-08 14:19:20 -07005453{
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08005454 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
Johannes Weiner00501b52014-08-08 14:19:20 -07005455
5456 if (mem_cgroup_disabled())
5457 return;
5458 /*
5459 * Swap faults will attempt to charge the same page multiple
5460 * times. But reuse_swap_page() might have removed the page
5461 * from swapcache already, so we can't check PageSwapCache().
5462 */
5463 if (!memcg)
5464 return;
5465
Johannes Weiner00501b52014-08-08 14:19:20 -07005466 cancel_charge(memcg, nr_pages);
5467}
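/*
 * Illustrative sketch (not compiled): the try/commit/cancel transaction
 * described in the mem_cgroup_try_charge() comment above, as a new-page
 * charge might look in a fault path.  example_map_page() is a hypothetical
 * stand-in for the real page-table / mapping setup.
 */
#if 0
static int example_charge_new_page(struct page *page, struct mm_struct *mm,
				   struct vm_area_struct *vma)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg, false))
		return -ENOMEM;			/* reclaim could not make room */

	if (example_map_page(vma, page)) {	/* hypothetical setup step */
		mem_cgroup_cancel_charge(page, memcg, false);	/* abort */
		return -EFAULT;
	}

	mem_cgroup_commit_charge(page, memcg, false, false);	/* finalize */
	return 0;
}
#endif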
5468
Johannes Weiner747db952014-08-08 14:19:24 -07005469static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
Johannes Weiner747db952014-08-08 14:19:24 -07005470 unsigned long nr_anon, unsigned long nr_file,
Vladimir Davydov5e8d35f2016-07-26 15:24:27 -07005471 unsigned long nr_huge, unsigned long nr_kmem,
5472 struct page *dummy_page)
Johannes Weiner747db952014-08-08 14:19:24 -07005473{
Vladimir Davydov5e8d35f2016-07-26 15:24:27 -07005474 unsigned long nr_pages = nr_anon + nr_file + nr_kmem;
Johannes Weiner747db952014-08-08 14:19:24 -07005475 unsigned long flags;
5476
Johannes Weinerce00a962014-09-05 08:43:57 -04005477 if (!mem_cgroup_is_root(memcg)) {
Johannes Weiner18eca2e2014-12-10 15:43:57 -08005478 page_counter_uncharge(&memcg->memory, nr_pages);
Johannes Weiner7941d212016-01-14 15:21:23 -08005479 if (do_memsw_account())
Johannes Weiner18eca2e2014-12-10 15:43:57 -08005480 page_counter_uncharge(&memcg->memsw, nr_pages);
Vladimir Davydov5e8d35f2016-07-26 15:24:27 -07005481 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && nr_kmem)
5482 page_counter_uncharge(&memcg->kmem, nr_kmem);
Johannes Weinerce00a962014-09-05 08:43:57 -04005483 memcg_oom_recover(memcg);
5484 }
Johannes Weiner747db952014-08-08 14:19:24 -07005485
5486 local_irq_save(flags);
5487 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon);
5488 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file);
5489 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge);
5490 __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout);
Johannes Weiner18eca2e2014-12-10 15:43:57 -08005491 __this_cpu_add(memcg->stat->nr_page_events, nr_pages);
Johannes Weiner747db952014-08-08 14:19:24 -07005492 memcg_check_events(memcg, dummy_page);
5493 local_irq_restore(flags);
Johannes Weinere8ea14c2014-12-10 15:42:42 -08005494
5495 if (!mem_cgroup_is_root(memcg))
Johannes Weiner18eca2e2014-12-10 15:43:57 -08005496 css_put_many(&memcg->css, nr_pages);
Johannes Weiner747db952014-08-08 14:19:24 -07005497}
5498
5499static void uncharge_list(struct list_head *page_list)
5500{
5501 struct mem_cgroup *memcg = NULL;
Johannes Weiner747db952014-08-08 14:19:24 -07005502 unsigned long nr_anon = 0;
5503 unsigned long nr_file = 0;
5504 unsigned long nr_huge = 0;
Vladimir Davydov5e8d35f2016-07-26 15:24:27 -07005505 unsigned long nr_kmem = 0;
Johannes Weiner747db952014-08-08 14:19:24 -07005506 unsigned long pgpgout = 0;
Johannes Weiner747db952014-08-08 14:19:24 -07005507 struct list_head *next;
5508 struct page *page;
5509
Johannes Weiner8b592652016-03-17 14:20:31 -07005510 /*
5511 * Note that the list can be a single page->lru; hence the
5512 * do-while loop instead of a simple list_for_each_entry().
5513 */
Johannes Weiner747db952014-08-08 14:19:24 -07005514 next = page_list->next;
5515 do {
Johannes Weiner747db952014-08-08 14:19:24 -07005516 page = list_entry(next, struct page, lru);
5517 next = page->lru.next;
5518
5519 VM_BUG_ON_PAGE(PageLRU(page), page);
5520 VM_BUG_ON_PAGE(page_count(page), page);
5521
Johannes Weiner1306a852014-12-10 15:44:52 -08005522 if (!page->mem_cgroup)
Johannes Weiner747db952014-08-08 14:19:24 -07005523 continue;
5524
5525 /*
5526 * Nobody should be changing or seriously looking at
Johannes Weiner1306a852014-12-10 15:44:52 -08005527		 * page->mem_cgroup at this point; we have fully
Johannes Weiner29833312014-12-10 15:44:02 -08005528 * exclusive access to the page.
Johannes Weiner747db952014-08-08 14:19:24 -07005529 */
5530
Johannes Weiner1306a852014-12-10 15:44:52 -08005531 if (memcg != page->mem_cgroup) {
Johannes Weiner747db952014-08-08 14:19:24 -07005532 if (memcg) {
Johannes Weiner18eca2e2014-12-10 15:43:57 -08005533 uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
Vladimir Davydov5e8d35f2016-07-26 15:24:27 -07005534 nr_huge, nr_kmem, page);
5535 pgpgout = nr_anon = nr_file =
5536 nr_huge = nr_kmem = 0;
Johannes Weiner747db952014-08-08 14:19:24 -07005537 }
Johannes Weiner1306a852014-12-10 15:44:52 -08005538 memcg = page->mem_cgroup;
Johannes Weiner747db952014-08-08 14:19:24 -07005539 }
5540
Vladimir Davydov5e8d35f2016-07-26 15:24:27 -07005541 if (!PageKmemcg(page)) {
5542 unsigned int nr_pages = 1;
Johannes Weiner747db952014-08-08 14:19:24 -07005543
Vladimir Davydov5e8d35f2016-07-26 15:24:27 -07005544 if (PageTransHuge(page)) {
5545 nr_pages <<= compound_order(page);
Vladimir Davydov5e8d35f2016-07-26 15:24:27 -07005546 nr_huge += nr_pages;
5547 }
5548 if (PageAnon(page))
5549 nr_anon += nr_pages;
5550 else
5551 nr_file += nr_pages;
5552 pgpgout++;
Vladimir Davydovc4159a72016-08-08 23:03:12 +03005553 } else {
Vladimir Davydov5e8d35f2016-07-26 15:24:27 -07005554 nr_kmem += 1 << compound_order(page);
Vladimir Davydovc4159a72016-08-08 23:03:12 +03005555 __ClearPageKmemcg(page);
5556 }
Johannes Weiner747db952014-08-08 14:19:24 -07005557
Johannes Weiner1306a852014-12-10 15:44:52 -08005558 page->mem_cgroup = NULL;
Johannes Weiner747db952014-08-08 14:19:24 -07005559 } while (next != page_list);
5560
5561 if (memcg)
Johannes Weiner18eca2e2014-12-10 15:43:57 -08005562 uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
Vladimir Davydov5e8d35f2016-07-26 15:24:27 -07005563 nr_huge, nr_kmem, page);
Johannes Weiner747db952014-08-08 14:19:24 -07005564}
5565
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005566/**
5567 * mem_cgroup_uncharge - uncharge a page
5568 * @page: page to uncharge
5569 *
5570 * Uncharge a page previously charged with mem_cgroup_try_charge() and
5571 * mem_cgroup_commit_charge().
5572 */
5573void mem_cgroup_uncharge(struct page *page)
5574{
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005575 if (mem_cgroup_disabled())
5576 return;
5577
Johannes Weiner747db952014-08-08 14:19:24 -07005578 /* Don't touch page->lru of any random page, pre-check: */
Johannes Weiner1306a852014-12-10 15:44:52 -08005579 if (!page->mem_cgroup)
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005580 return;
5581
Johannes Weiner747db952014-08-08 14:19:24 -07005582 INIT_LIST_HEAD(&page->lru);
5583 uncharge_list(&page->lru);
5584}
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005585
Johannes Weiner747db952014-08-08 14:19:24 -07005586/**
5587 * mem_cgroup_uncharge_list - uncharge a list of pages
5588 * @page_list: list of pages to uncharge
5589 *
5590 * Uncharge a list of pages previously charged with
5591 * mem_cgroup_try_charge() and mem_cgroup_commit_charge().
5592 */
5593void mem_cgroup_uncharge_list(struct list_head *page_list)
5594{
5595 if (mem_cgroup_disabled())
5596 return;
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005597
Johannes Weiner747db952014-08-08 14:19:24 -07005598 if (!list_empty(page_list))
5599 uncharge_list(page_list);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005600}
5601
5602/**
Johannes Weiner6a93ca82016-03-15 14:57:19 -07005603 * mem_cgroup_migrate - charge a page's replacement
5604 * @oldpage: currently circulating page
5605 * @newpage: replacement page
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005606 *
Johannes Weiner6a93ca82016-03-15 14:57:19 -07005607 * Charge @newpage as a replacement page for @oldpage. @oldpage will
5608 * be uncharged upon free.
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005609 *
5610 * Both pages must be locked, @newpage->mapping must be set up.
5611 */
Johannes Weiner6a93ca82016-03-15 14:57:19 -07005612void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005613{
Johannes Weiner29833312014-12-10 15:44:02 -08005614 struct mem_cgroup *memcg;
Johannes Weiner44b7a8d2016-01-20 15:03:16 -08005615 unsigned int nr_pages;
5616 bool compound;
Tejun Heod93c4132016-06-24 14:49:54 -07005617 unsigned long flags;
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005618
5619 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
5620 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005621 VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
Johannes Weiner6abb5a82014-08-08 14:19:33 -07005622 VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
5623 newpage);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005624
5625 if (mem_cgroup_disabled())
5626 return;
5627
5628 /* Page cache replacement: new page already charged? */
Johannes Weiner1306a852014-12-10 15:44:52 -08005629 if (newpage->mem_cgroup)
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005630 return;
5631
Hugh Dickins45637ba2015-11-05 18:49:40 -08005632 /* Swapcache readahead pages can get replaced before being charged */
Johannes Weiner1306a852014-12-10 15:44:52 -08005633 memcg = oldpage->mem_cgroup;
Johannes Weiner29833312014-12-10 15:44:02 -08005634 if (!memcg)
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005635 return;
5636
Johannes Weiner44b7a8d2016-01-20 15:03:16 -08005637 /* Force-charge the new page. The old one will be freed soon */
5638 compound = PageTransHuge(newpage);
5639 nr_pages = compound ? hpage_nr_pages(newpage) : 1;
5640
5641 page_counter_charge(&memcg->memory, nr_pages);
5642 if (do_memsw_account())
5643 page_counter_charge(&memcg->memsw, nr_pages);
5644 css_get_many(&memcg->css, nr_pages);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005645
Johannes Weiner9cf76662016-03-15 14:57:58 -07005646 commit_charge(newpage, memcg, false);
Johannes Weiner44b7a8d2016-01-20 15:03:16 -08005647
Tejun Heod93c4132016-06-24 14:49:54 -07005648 local_irq_save(flags);
Johannes Weiner44b7a8d2016-01-20 15:03:16 -08005649 mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
5650 memcg_check_events(memcg, newpage);
Tejun Heod93c4132016-06-24 14:49:54 -07005651 local_irq_restore(flags);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005652}
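/*
 * Illustrative sketch (not compiled): the locking and setup that the comment
 * above requires of mem_cgroup_migrate() callers when replacing a page, e.g.
 * during migration or page-cache replacement.
 */
#if 0
	lock_page(oldpage);
	lock_page(newpage);
	newpage->mapping = oldpage->mapping;	/* @newpage->mapping must be set up */
	newpage->index = oldpage->index;
	mem_cgroup_migrate(oldpage, newpage);	/* @newpage inherits the charge */
	unlock_page(newpage);
	unlock_page(oldpage);
#endif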
5653
Johannes Weineref129472016-01-14 15:21:34 -08005654DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
Johannes Weiner11092082016-01-14 15:21:26 -08005655EXPORT_SYMBOL(memcg_sockets_enabled_key);
5656
Johannes Weiner2d758072016-10-07 17:00:58 -07005657void mem_cgroup_sk_alloc(struct sock *sk)
Johannes Weiner11092082016-01-14 15:21:26 -08005658{
5659 struct mem_cgroup *memcg;
5660
Johannes Weiner2d758072016-10-07 17:00:58 -07005661 if (!mem_cgroup_sockets_enabled)
5662 return;
5663
5664 /*
5665 * Socket cloning can throw us here with sk_memcg already
Johannes Weiner11092082016-01-14 15:21:26 -08005666	 * filled. It won't, however, necessarily happen from
5667 * process context. So the test for root memcg given
5668 * the current task's memcg won't help us in this case.
5669 *
5670 * Respecting the original socket's memcg is a better
5671	 * decision here.
5672 */
5673 if (sk->sk_memcg) {
5674 BUG_ON(mem_cgroup_is_root(sk->sk_memcg));
5675 css_get(&sk->sk_memcg->css);
5676 return;
5677 }
5678
5679 rcu_read_lock();
5680 memcg = mem_cgroup_from_task(current);
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005681 if (memcg == root_mem_cgroup)
5682 goto out;
Johannes Weiner0db15292016-01-20 15:02:50 -08005683 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005684 goto out;
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005685 if (css_tryget_online(&memcg->css))
Johannes Weiner11092082016-01-14 15:21:26 -08005686 sk->sk_memcg = memcg;
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005687out:
Johannes Weiner11092082016-01-14 15:21:26 -08005688 rcu_read_unlock();
5689}
Johannes Weiner11092082016-01-14 15:21:26 -08005690
Johannes Weiner2d758072016-10-07 17:00:58 -07005691void mem_cgroup_sk_free(struct sock *sk)
Johannes Weiner11092082016-01-14 15:21:26 -08005692{
Johannes Weiner2d758072016-10-07 17:00:58 -07005693 if (sk->sk_memcg)
5694 css_put(&sk->sk_memcg->css);
Johannes Weiner11092082016-01-14 15:21:26 -08005695}
5696
5697/**
5698 * mem_cgroup_charge_skmem - charge socket memory
5699 * @memcg: memcg to charge
5700 * @nr_pages: number of pages to charge
5701 *
5702 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
5703 * @memcg's configured limit, %false if the charge had to be forced.
5704 */
5705bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
5706{
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005707 gfp_t gfp_mask = GFP_KERNEL;
Johannes Weiner11092082016-01-14 15:21:26 -08005708
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005709 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
Johannes Weiner0db15292016-01-20 15:02:50 -08005710 struct page_counter *fail;
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005711
Johannes Weiner0db15292016-01-20 15:02:50 -08005712 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
5713 memcg->tcpmem_pressure = 0;
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005714 return true;
5715 }
Johannes Weiner0db15292016-01-20 15:02:50 -08005716 page_counter_charge(&memcg->tcpmem, nr_pages);
5717 memcg->tcpmem_pressure = 1;
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005718 return false;
Johannes Weiner11092082016-01-14 15:21:26 -08005719 }
Johannes Weinerd886f4e2016-01-20 15:02:47 -08005720
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005721 /* Don't block in the packet receive path */
5722 if (in_softirq())
5723 gfp_mask = GFP_NOWAIT;
5724
Johannes Weinerb2807f02016-01-20 15:03:22 -08005725 this_cpu_add(memcg->stat->count[MEMCG_SOCK], nr_pages);
5726
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005727 if (try_charge(memcg, gfp_mask, nr_pages) == 0)
5728 return true;
5729
5730 try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages);
Johannes Weiner11092082016-01-14 15:21:26 -08005731 return false;
5732}
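
/*
 * Hedged usage sketch: a network-side caller is expected to pair this
 * with mem_cgroup_uncharge_skmem() below for the same number of pages.
 * The variable amt and the surrounding guard are assumptions made for
 * illustration, not the exact code in net/core/sock.c:
 *
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *	    !mem_cgroup_charge_skmem(sk->sk_memcg, amt))
 *		...	back off: the charge had to be forced
 *	...
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
 *		mem_cgroup_uncharge_skmem(sk->sk_memcg, amt);
 */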
5733
5734/**
5735 * mem_cgroup_uncharge_skmem - uncharge socket memory
5736 * @memcg: memcg to uncharge
5737 * @nr_pages: number of pages to uncharge
5738 */
5739void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
5740{
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005741 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
Johannes Weiner0db15292016-01-20 15:02:50 -08005742 page_counter_uncharge(&memcg->tcpmem, nr_pages);
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005743 return;
5744 }
Johannes Weinerd886f4e2016-01-20 15:02:47 -08005745
Johannes Weinerb2807f02016-01-20 15:03:22 -08005746 this_cpu_sub(memcg->stat->count[MEMCG_SOCK], nr_pages);
5747
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005748 page_counter_uncharge(&memcg->memory, nr_pages);
5749 css_put_many(&memcg->css, nr_pages);
Johannes Weiner11092082016-01-14 15:21:26 -08005750}
5751
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005752static int __init cgroup_memory(char *s)
5753{
5754 char *token;
5755
5756 while ((token = strsep(&s, ",")) != NULL) {
5757 if (!*token)
5758 continue;
5759 if (!strcmp(token, "nosocket"))
5760 cgroup_memory_nosocket = true;
Vladimir Davydov04823c82016-01-20 15:02:38 -08005761 if (!strcmp(token, "nokmem"))
5762 cgroup_memory_nokmem = true;
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005763 }
5764 return 0;
5765}
5766__setup("cgroup.memory=", cgroup_memory);
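
/*
 * Example (illustrative): booting with
 *
 *	cgroup.memory=nosocket,nokmem
 *
 * sets both cgroup_memory_nosocket and cgroup_memory_nokmem above.
 * Tokens other than these two are silently ignored by the parser.
 */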
Johannes Weiner11092082016-01-14 15:21:26 -08005767
Michal Hocko2d110852013-02-22 16:34:43 -08005768/*
Michal Hocko10813122013-02-22 16:35:41 -08005769 * subsys_initcall() for memory controller.
5770 *
Sebastian Andrzej Siewior308167f2016-11-03 15:49:59 +01005771 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
5772 * context because of lock dependencies (cgroup_lock -> cpu hotplug), but
5773 * basically everything that doesn't depend on a specific mem_cgroup structure
5774 * should be initialized from here.
Michal Hocko2d110852013-02-22 16:34:43 -08005775 */
5776static int __init mem_cgroup_init(void)
5777{
Johannes Weiner95a045f2015-02-11 15:26:33 -08005778 int cpu, node;
5779
Vladimir Davydov13583c32016-12-12 16:41:29 -08005780#ifndef CONFIG_SLOB
5781 /*
5782 * Kmem cache creation is mostly done with the slab_mutex held,
Tejun Heo17cc4df2017-02-22 15:41:36 -08005783 * so use a workqueue with limited concurrency to avoid stalling
5784 * all worker threads in case lots of cgroups are created and
5785 * destroyed simultaneously.
Vladimir Davydov13583c32016-12-12 16:41:29 -08005786 */
Tejun Heo17cc4df2017-02-22 15:41:36 -08005787 memcg_kmem_cache_wq = alloc_workqueue("memcg_kmem_cache", 0, 1);
5788 BUG_ON(!memcg_kmem_cache_wq);
Vladimir Davydov13583c32016-12-12 16:41:29 -08005789#endif
5790
Sebastian Andrzej Siewior308167f2016-11-03 15:49:59 +01005791 cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
5792 memcg_hotplug_cpu_dead);
Johannes Weiner95a045f2015-02-11 15:26:33 -08005793
5794 for_each_possible_cpu(cpu)
5795 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
5796 drain_local_stock);
5797
5798 for_each_node(node) {
5799 struct mem_cgroup_tree_per_node *rtpn;
Johannes Weiner95a045f2015-02-11 15:26:33 -08005800
5801 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
5802 node_online(node) ? node : NUMA_NO_NODE);
5803
Mel Gormanef8f2322016-07-28 15:46:05 -07005804 rtpn->rb_root = RB_ROOT;
5805 spin_lock_init(&rtpn->lock);
Johannes Weiner95a045f2015-02-11 15:26:33 -08005806 soft_limit_tree.rb_tree_per_node[node] = rtpn;
5807 }
5808
Michal Hocko2d110852013-02-22 16:34:43 -08005809 return 0;
5810}
5811subsys_initcall(mem_cgroup_init);
Johannes Weiner21afa382015-02-11 15:26:36 -08005812
5813#ifdef CONFIG_MEMCG_SWAP
Arnd Bergmann358c07f2016-08-25 15:17:08 -07005814static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
5815{
5816 while (!atomic_inc_not_zero(&memcg->id.ref)) {
5817 /*
5818		 * The root cgroup cannot be destroyed, so its refcount must
5819 * always be >= 1.
5820 */
5821 if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
5822 VM_BUG_ON(1);
5823 break;
5824 }
5825 memcg = parent_mem_cgroup(memcg);
5826 if (!memcg)
5827 memcg = root_mem_cgroup;
5828 }
5829 return memcg;
5830}
5831
Johannes Weiner21afa382015-02-11 15:26:36 -08005832/**
5833 * mem_cgroup_swapout - transfer a memsw charge to swap
5834 * @page: page whose memsw charge to transfer
5835 * @entry: swap entry to move the charge to
5836 *
5837 * Transfer the memsw charge of @page to @entry.
5838 */
5839void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
5840{
Vladimir Davydov1f47b612016-08-11 15:33:00 -07005841 struct mem_cgroup *memcg, *swap_memcg;
Johannes Weiner21afa382015-02-11 15:26:36 -08005842 unsigned short oldid;
5843
5844 VM_BUG_ON_PAGE(PageLRU(page), page);
5845 VM_BUG_ON_PAGE(page_count(page), page);
5846
Johannes Weiner7941d212016-01-14 15:21:23 -08005847 if (!do_memsw_account())
Johannes Weiner21afa382015-02-11 15:26:36 -08005848 return;
5849
5850 memcg = page->mem_cgroup;
5851
5852 /* Readahead page, never charged */
5853 if (!memcg)
5854 return;
5855
Vladimir Davydov1f47b612016-08-11 15:33:00 -07005856 /*
5857 * In case the memcg owning these pages has been offlined and doesn't
5858 * have an ID allocated to it anymore, charge the closest online
5859 * ancestor for the swap instead and transfer the memory+swap charge.
5860 */
5861 swap_memcg = mem_cgroup_id_get_online(memcg);
5862 oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg));
Johannes Weiner21afa382015-02-11 15:26:36 -08005863 VM_BUG_ON_PAGE(oldid, page);
Vladimir Davydov1f47b612016-08-11 15:33:00 -07005864 mem_cgroup_swap_statistics(swap_memcg, true);
Johannes Weiner21afa382015-02-11 15:26:36 -08005865
5866 page->mem_cgroup = NULL;
5867
5868 if (!mem_cgroup_is_root(memcg))
5869 page_counter_uncharge(&memcg->memory, 1);
5870
Vladimir Davydov1f47b612016-08-11 15:33:00 -07005871 if (memcg != swap_memcg) {
5872 if (!mem_cgroup_is_root(swap_memcg))
5873 page_counter_charge(&swap_memcg->memsw, 1);
5874 page_counter_uncharge(&memcg->memsw, 1);
5875 }
5876
Sebastian Andrzej Siewiorce9ce662015-09-04 15:47:50 -07005877 /*
5878 * Interrupts should be disabled here because the caller holds the
5879	 * mapping->tree_lock, which is taken with interrupts off. It is
5880	 * important to have interrupts disabled here because that is the
5881	 * only synchronisation we have for updating the per-CPU variables.
5882 */
5883 VM_BUG_ON(!irqs_disabled());
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08005884 mem_cgroup_charge_statistics(memcg, page, false, -1);
Johannes Weiner21afa382015-02-11 15:26:36 -08005885 memcg_check_events(memcg, page);
Johannes Weiner73f576c2016-07-20 15:44:57 -07005886
5887 if (!mem_cgroup_is_root(memcg))
5888 css_put(&memcg->css);
Johannes Weiner21afa382015-02-11 15:26:36 -08005889}
5890
Vladimir Davydov37e84352016-01-20 15:02:56 -08005891/**
5892 * mem_cgroup_try_charge_swap - try charging a swap entry
5893 * @page: page being added to swap
5894 * @entry: swap entry to charge
5895 *
5896 * Try to charge @entry to the memcg that @page belongs to.
5897 *
5898 * Returns 0 on success, -ENOMEM on failure.
5899 */
5900int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
5901{
5902 struct mem_cgroup *memcg;
5903 struct page_counter *counter;
5904 unsigned short oldid;
5905
5906 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) || !do_swap_account)
5907 return 0;
5908
5909 memcg = page->mem_cgroup;
5910
5911 /* Readahead page, never charged */
5912 if (!memcg)
5913 return 0;
5914
Vladimir Davydov1f47b612016-08-11 15:33:00 -07005915 memcg = mem_cgroup_id_get_online(memcg);
Vladimir Davydov37e84352016-01-20 15:02:56 -08005916
Vladimir Davydov1f47b612016-08-11 15:33:00 -07005917 if (!mem_cgroup_is_root(memcg) &&
5918 !page_counter_try_charge(&memcg->swap, 1, &counter)) {
5919 mem_cgroup_id_put(memcg);
5920 return -ENOMEM;
5921 }
5922
Vladimir Davydov37e84352016-01-20 15:02:56 -08005923 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
5924 VM_BUG_ON_PAGE(oldid, page);
5925 mem_cgroup_swap_statistics(memcg, true);
5926
Vladimir Davydov37e84352016-01-20 15:02:56 -08005927 return 0;
5928}
5929
Johannes Weiner21afa382015-02-11 15:26:36 -08005930/**
5931 * mem_cgroup_uncharge_swap - uncharge a swap entry
5932 * @entry: swap entry to uncharge
5933 *
Vladimir Davydov37e84352016-01-20 15:02:56 -08005934 * Drop the swap charge associated with @entry.
Johannes Weiner21afa382015-02-11 15:26:36 -08005935 */
5936void mem_cgroup_uncharge_swap(swp_entry_t entry)
5937{
5938 struct mem_cgroup *memcg;
5939 unsigned short id;
5940
Vladimir Davydov37e84352016-01-20 15:02:56 -08005941 if (!do_swap_account)
Johannes Weiner21afa382015-02-11 15:26:36 -08005942 return;
5943
5944 id = swap_cgroup_record(entry, 0);
5945 rcu_read_lock();
Vladimir Davydovadbe4272015-04-15 16:13:00 -07005946 memcg = mem_cgroup_from_id(id);
Johannes Weiner21afa382015-02-11 15:26:36 -08005947 if (memcg) {
Vladimir Davydov37e84352016-01-20 15:02:56 -08005948 if (!mem_cgroup_is_root(memcg)) {
5949 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
5950 page_counter_uncharge(&memcg->swap, 1);
5951 else
5952 page_counter_uncharge(&memcg->memsw, 1);
5953 }
Johannes Weiner21afa382015-02-11 15:26:36 -08005954 mem_cgroup_swap_statistics(memcg, false);
Johannes Weiner73f576c2016-07-20 15:44:57 -07005955 mem_cgroup_id_put(memcg);
Johannes Weiner21afa382015-02-11 15:26:36 -08005956 }
5957 rcu_read_unlock();
5958}
5959
Vladimir Davydovd8b38432016-01-20 15:03:07 -08005960long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
5961{
5962 long nr_swap_pages = get_nr_swap_pages();
5963
5964 if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
5965 return nr_swap_pages;
5966 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
5967 nr_swap_pages = min_t(long, nr_swap_pages,
5968 READ_ONCE(memcg->swap.limit) -
5969 page_counter_read(&memcg->swap));
5970 return nr_swap_pages;
5971}
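
/*
 * Worked example with made-up numbers: for a memcg A (swap limit 512
 * pages, 100 pages charged) whose non-root parent B has a swap limit of
 * 1024 pages with 900 pages charged, the loop above yields
 *
 *	min(get_nr_swap_pages(), 512 - 100, 1024 - 900) = 124 pages
 *
 * assuming the global swap space has at least that many free pages.
 */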
5972
Vladimir Davydov5ccc5ab2016-01-20 15:03:10 -08005973bool mem_cgroup_swap_full(struct page *page)
5974{
5975 struct mem_cgroup *memcg;
5976
5977 VM_BUG_ON_PAGE(!PageLocked(page), page);
5978
5979 if (vm_swap_full())
5980 return true;
5981 if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
5982 return false;
5983
5984 memcg = page->mem_cgroup;
5985 if (!memcg)
5986 return false;
5987
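	/*
	 * Swap is considered "full" for this page if the page's memcg or
	 * any of its ancestors has used at least half of its swap limit.
	 */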
5988 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
5989 if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.limit)
5990 return true;
5991
5992 return false;
5993}
5994
Johannes Weiner21afa382015-02-11 15:26:36 -08005995/* for remembering the boot option */
5996#ifdef CONFIG_MEMCG_SWAP_ENABLED
5997static int really_do_swap_account __initdata = 1;
5998#else
5999static int really_do_swap_account __initdata;
6000#endif
6001
6002static int __init enable_swap_account(char *s)
6003{
6004 if (!strcmp(s, "1"))
6005 really_do_swap_account = 1;
6006 else if (!strcmp(s, "0"))
6007 really_do_swap_account = 0;
6008 return 1;
6009}
6010__setup("swapaccount=", enable_swap_account);
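
/*
 * Example (illustrative): "swapaccount=0" on the kernel command line
 * clears really_do_swap_account, so mem_cgroup_swap_init() below leaves
 * do_swap_account off even when CONFIG_MEMCG_SWAP_ENABLED makes it the
 * default; "swapaccount=1" enables it on kernels without that default.
 */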
6011
Vladimir Davydov37e84352016-01-20 15:02:56 -08006012static u64 swap_current_read(struct cgroup_subsys_state *css,
6013 struct cftype *cft)
6014{
6015 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6016
6017 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
6018}
6019
6020static int swap_max_show(struct seq_file *m, void *v)
6021{
6022 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
6023 unsigned long max = READ_ONCE(memcg->swap.limit);
6024
6025 if (max == PAGE_COUNTER_MAX)
6026 seq_puts(m, "max\n");
6027 else
6028 seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);
6029
6030 return 0;
6031}
6032
6033static ssize_t swap_max_write(struct kernfs_open_file *of,
6034 char *buf, size_t nbytes, loff_t off)
6035{
6036 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6037 unsigned long max;
6038 int err;
6039
6040 buf = strstrip(buf);
6041 err = page_counter_memparse(buf, "max", &max);
6042 if (err)
6043 return err;
6044
6045 mutex_lock(&memcg_limit_mutex);
6046 err = page_counter_limit(&memcg->swap, max);
6047 mutex_unlock(&memcg_limit_mutex);
6048 if (err)
6049 return err;
6050
6051 return nbytes;
6052}
6053
6054static struct cftype swap_files[] = {
6055 {
6056 .name = "swap.current",
6057 .flags = CFTYPE_NOT_ON_ROOT,
6058 .read_u64 = swap_current_read,
6059 },
6060 {
6061 .name = "swap.max",
6062 .flags = CFTYPE_NOT_ON_ROOT,
6063 .seq_show = swap_max_show,
6064 .write = swap_max_write,
6065 },
6066 { } /* terminate */
6067};
6068
Johannes Weiner21afa382015-02-11 15:26:36 -08006069static struct cftype memsw_cgroup_files[] = {
6070 {
6071 .name = "memsw.usage_in_bytes",
6072 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
6073 .read_u64 = mem_cgroup_read_u64,
6074 },
6075 {
6076 .name = "memsw.max_usage_in_bytes",
6077 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
6078 .write = mem_cgroup_reset,
6079 .read_u64 = mem_cgroup_read_u64,
6080 },
6081 {
6082 .name = "memsw.limit_in_bytes",
6083 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
6084 .write = mem_cgroup_write,
6085 .read_u64 = mem_cgroup_read_u64,
6086 },
6087 {
6088 .name = "memsw.failcnt",
6089 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
6090 .write = mem_cgroup_reset,
6091 .read_u64 = mem_cgroup_read_u64,
6092 },
6093 { }, /* terminate */
6094};
6095
6096static int __init mem_cgroup_swap_init(void)
6097{
6098 if (!mem_cgroup_disabled() && really_do_swap_account) {
6099 do_swap_account = 1;
Vladimir Davydov37e84352016-01-20 15:02:56 -08006100 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys,
6101 swap_files));
Johannes Weiner21afa382015-02-11 15:26:36 -08006102 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
6103 memsw_cgroup_files));
6104 }
6105 return 0;
6106}
6107subsys_initcall(mem_cgroup_swap_init);
6108
6109#endif /* CONFIG_MEMCG_SWAP */