/*
 *  linux/mm/oom_kill.c
 *
 *  Copyright (C)  1998,2000  Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 *  Copyright (C)  2010  Google, Inc.
 *	Rewritten by David Rientjes
 *
 *  The routines in this file are used to kill a process when
 *  we're seriously out of memory. This gets called from __alloc_pages()
 *  in mm/page_alloc.c when we really run out of memory.
 *
 *  Since we won't call these routines often (on a well-configured
 *  machine) this file will double as a 'coding guide' and a signpost
 *  for newbie kernel hackers.  It features several pointers to major
 *  kernel subsystems and hints as to where to find out what things do.
 */

#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/mempolicy.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/freezer.h>
#include <linux/ftrace.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/init.h>

#include <asm/tlb.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/oom.h>

int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks = 1;

DEFINE_MUTEX(oom_lock);

#ifdef CONFIG_NUMA
/**
 * has_intersects_mems_allowed() - check task eligibility for kill
 * @start: task struct of which task to consider
 * @mask: nodemask passed to page allocator for mempolicy ooms
 *
 * Task eligibility is determined by whether or not a candidate task, @start,
 * shares the same mempolicy nodes as current if it is bound by such a policy
 * and whether or not it has the same set of allowed cpuset nodes.
 */
static bool has_intersects_mems_allowed(struct task_struct *start,
                                        const nodemask_t *mask)
{
        struct task_struct *tsk;
        bool ret = false;

        rcu_read_lock();
        for_each_thread(start, tsk) {
                if (mask) {
                        /*
                         * If this is a mempolicy constrained oom, tsk's
                         * cpuset is irrelevant.  Only return true if its
                         * mempolicy intersects current, otherwise it may be
                         * needlessly killed.
                         */
                        ret = mempolicy_nodemask_intersects(tsk, mask);
                } else {
                        /*
                         * This is not a mempolicy constrained oom, so only
                         * check the mems of tsk's cpuset.
                         */
                        ret = cpuset_mems_allowed_intersects(current, tsk);
                }
                if (ret)
                        break;
        }
        rcu_read_unlock();

        return ret;
}
#else
static bool has_intersects_mems_allowed(struct task_struct *tsk,
                                        const nodemask_t *mask)
{
        return true;
}
#endif /* CONFIG_NUMA */

/*
 * The process p may have detached its own ->mm while exiting or through
 * use_mm(), but one or more of its subthreads may still have a valid
 * pointer.  Return p, or any of its subthreads with a valid ->mm, with
 * task_lock() held.
 */
struct task_struct *find_lock_task_mm(struct task_struct *p)
{
        struct task_struct *t;

        rcu_read_lock();

        for_each_thread(p, t) {
                task_lock(t);
                if (likely(t->mm))
                        goto found;
                task_unlock(t);
        }
        t = NULL;
found:
        rcu_read_unlock();

        return t;
}
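
/*
 * Usage note: as in oom_badness() below, the thread returned by
 * find_lock_task_mm() comes back with task_lock() held, so the caller is
 * responsible for dropping the lock once it is done with ->mm:
 *
 *	p = find_lock_task_mm(p);
 *	if (!p)
 *		return 0;
 *	... use p->mm safely ...
 *	task_unlock(p);
 */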

/*
 * order == -1 means the oom kill is required by sysrq; otherwise the order
 * is recorded only for display purposes.
 */
static inline bool is_sysrq_oom(struct oom_control *oc)
{
        return oc->order == -1;
}

/* return true if the task is not adequate as a candidate victim task. */
static bool oom_unkillable_task(struct task_struct *p,
                struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
        if (is_global_init(p))
                return true;
        if (p->flags & PF_KTHREAD)
                return true;

        /* When called for mem_cgroup_out_of_memory() and p is not a member of the group */
        if (memcg && !task_in_mem_cgroup(p, memcg))
                return true;

        /* p may not have freeable memory in nodemask */
        if (!has_intersects_mems_allowed(p, nodemask))
                return true;

        return false;
}

/**
 * oom_badness - heuristic function to determine which candidate task to kill
 * @p: task struct of the task whose badness we should calculate
 * @memcg: memory controller scoping the selection, if any
 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 * @totalpages: total present RAM allowed for page allocation
 *
 * The heuristic for determining which task to kill is made to be as simple and
 * predictable as possible.  The goal is to return the highest value for the
 * task consuming the most memory to avoid subsequent oom failures.
 */
unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
                          const nodemask_t *nodemask, unsigned long totalpages)
{
        long points;
        long adj;

        if (oom_unkillable_task(p, memcg, nodemask))
                return 0;

        p = find_lock_task_mm(p);
        if (!p)
                return 0;

        adj = (long)p->signal->oom_score_adj;
        if (adj == OOM_SCORE_ADJ_MIN) {
                task_unlock(p);
                return 0;
        }

        /*
         * The baseline for the badness score is the proportion of RAM that each
         * task's rss, pagetable and swap space use.
         */
        points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
                atomic_long_read(&p->mm->nr_ptes) + mm_nr_pmds(p->mm);
        task_unlock(p);

        /*
         * Root processes get 3% bonus, just like the __vm_enough_memory()
         * implementation used by LSMs.
         */
        if (has_capability_noaudit(p, CAP_SYS_ADMIN))
                points -= (points * 3) / 100;

        /* Normalize to oom_score_adj units */
        adj *= totalpages / 1000;
        points += adj;

        /*
         * Never return 0 for an eligible task regardless of the root bonus and
         * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here).
         */
        return points > 0 ? points : 1;
}
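
/*
 * Worked example of the scoring above (illustrative numbers only): on a
 * system with totalpages = 1,000,000 pages, a task whose rss + page tables
 * + swap entries add up to 200,000 pages starts at points = 200000.  With
 * oom_score_adj = 500, the normalization adds 500 * (1000000 / 1000) =
 * 500000, for a final score of 700000.  The same task running with
 * CAP_SYS_ADMIN and oom_score_adj = 0 would instead score
 * 200000 - 6000 = 194000 after the 3% bonus.
 */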

/*
 * Determine the type of allocation constraint.
 */
#ifdef CONFIG_NUMA
static enum oom_constraint constrained_alloc(struct oom_control *oc,
                                             unsigned long *totalpages)
{
        struct zone *zone;
        struct zoneref *z;
        enum zone_type high_zoneidx = gfp_zone(oc->gfp_mask);
        bool cpuset_limited = false;
        int nid;

        /* Default to all available memory */
        *totalpages = totalram_pages + total_swap_pages;

        if (!oc->zonelist)
                return CONSTRAINT_NONE;
        /*
         * Reach here only when __GFP_NOFAIL is used.  So we should avoid
         * killing current; we have to kill a random task in this case.
         * Ideally this would be CONSTRAINT_THISNODE, but there is no way
         * to handle that yet.
         */
        if (oc->gfp_mask & __GFP_THISNODE)
                return CONSTRAINT_NONE;

        /*
         * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
         * the page allocator means a mempolicy is in effect.  Cpuset policy
         * is enforced in get_page_from_freelist().
         */
        if (oc->nodemask &&
            !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {
                *totalpages = total_swap_pages;
                for_each_node_mask(nid, *oc->nodemask)
                        *totalpages += node_spanned_pages(nid);
                return CONSTRAINT_MEMORY_POLICY;
        }

        /* Check whether this allocation failure was caused by a cpuset's wall function */
        for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,
                        high_zoneidx, oc->nodemask)
                if (!cpuset_zone_allowed(zone, oc->gfp_mask))
                        cpuset_limited = true;

        if (cpuset_limited) {
                *totalpages = total_swap_pages;
                for_each_node_mask(nid, cpuset_current_mems_allowed)
                        *totalpages += node_spanned_pages(nid);
                return CONSTRAINT_CPUSET;
        }
        return CONSTRAINT_NONE;
}
#else
static enum oom_constraint constrained_alloc(struct oom_control *oc,
                                             unsigned long *totalpages)
{
        *totalpages = totalram_pages + total_swap_pages;
        return CONSTRAINT_NONE;
}
#endif
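
/*
 * Summary of how *totalpages is derived above:
 *   CONSTRAINT_NONE          - all of RAM plus all of swap
 *   CONSTRAINT_MEMORY_POLICY - pages spanned by the mempolicy's nodes, plus swap
 *   CONSTRAINT_CPUSET        - pages spanned by the cpuset's mems, plus swap
 * This total is the denominator that oom_badness() normalizes
 * oom_score_adj against.
 */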

enum oom_scan_t oom_scan_process_thread(struct oom_control *oc,
                        struct task_struct *task, unsigned long totalpages)
{
        if (oom_unkillable_task(task, NULL, oc->nodemask))
                return OOM_SCAN_CONTINUE;

        /*
         * This task already has access to memory reserves and is being killed.
         * Don't allow any other task to have access to the reserves.
         */
        if (test_tsk_thread_flag(task, TIF_MEMDIE)) {
                if (!is_sysrq_oom(oc))
                        return OOM_SCAN_ABORT;
        }
        if (!task->mm)
                return OOM_SCAN_CONTINUE;

        /*
         * If task is allocating a lot of memory and has been marked to be
         * killed first if it triggers an oom, then select it.
         */
        if (oom_task_origin(task))
                return OOM_SCAN_SELECT;

        return OOM_SCAN_OK;
}
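
/*
 * How these return values drive select_bad_process() below:
 *   OOM_SCAN_SELECT   - pick this task unconditionally (its score is
 *                       forced to ULONG_MAX)
 *   OOM_SCAN_CONTINUE - skip this task
 *   OOM_SCAN_ABORT    - abort the whole scan; a chosen victim is already
 *                       on its way out
 *   OOM_SCAN_OK       - score the task with oom_badness()
 */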

/*
 * Simple selection loop. We choose the process with the highest
 * number of 'points'. Returns -1 on scan abort.
 */
static struct task_struct *select_bad_process(struct oom_control *oc,
                unsigned int *ppoints, unsigned long totalpages)
{
        struct task_struct *g, *p;
        struct task_struct *chosen = NULL;
        unsigned long chosen_points = 0;

        rcu_read_lock();
        for_each_process_thread(g, p) {
                unsigned int points;

                switch (oom_scan_process_thread(oc, p, totalpages)) {
                case OOM_SCAN_SELECT:
                        chosen = p;
                        chosen_points = ULONG_MAX;
                        /* fall through */
                case OOM_SCAN_CONTINUE:
                        continue;
                case OOM_SCAN_ABORT:
                        rcu_read_unlock();
                        return (struct task_struct *)(-1UL);
                case OOM_SCAN_OK:
                        break;
                }
                points = oom_badness(p, NULL, oc->nodemask, totalpages);
                if (!points || points < chosen_points)
                        continue;
                /* Prefer thread group leaders for display purposes */
                if (points == chosen_points && thread_group_leader(chosen))
                        continue;

                chosen = p;
                chosen_points = points;
        }
        if (chosen)
                get_task_struct(chosen);
        rcu_read_unlock();

        *ppoints = chosen_points * 1000 / totalpages;
        return chosen;
}
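
/*
 * Note that *ppoints is reported as chosen_points * 1000 / totalpages,
 * i.e. roughly "per mille of allowed memory", matching the scale on
 * which oom_score_adj (range -1000..1000) operates in oom_badness().
 */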

/**
 * dump_tasks - dump current memory state of all system tasks
 * @memcg: current's memory controller, if constrained
 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 *
 * Dumps the current memory state of all eligible tasks.  Tasks not in the same
 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
 * are not shown.
 * State information includes task's pid, uid, tgid, vm size, rss, nr_ptes,
 * swapents, oom_score_adj value, and name.
 */
static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
        struct task_struct *p;
        struct task_struct *task;

        pr_info("[ pid ]   uid  tgid total_vm      rss nr_ptes nr_pmds swapents oom_score_adj name\n");
        rcu_read_lock();
        for_each_process(p) {
                if (oom_unkillable_task(p, memcg, nodemask))
                        continue;

                task = find_lock_task_mm(p);
                if (!task) {
                        /*
                         * This is a kthread or all of p's threads have already
                         * detached their mm's.  There's no need to report
                         * them; they can't be oom killed anyway.
                         */
                        continue;
                }

                pr_info("[%5d] %5d %5d %8lu %8lu %7ld %7ld %8lu         %5hd %s\n",
                        task->pid, from_kuid(&init_user_ns, task_uid(task)),
                        task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
                        atomic_long_read(&task->mm->nr_ptes),
                        mm_nr_pmds(task->mm),
                        get_mm_counter(task->mm, MM_SWAPENTS),
                        task->signal->oom_score_adj, task->comm);
                task_unlock(task);
        }
        rcu_read_unlock();
}

static void dump_header(struct oom_control *oc, struct task_struct *p,
                        struct mem_cgroup *memcg)
{
        pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), order=%d, oom_score_adj=%hd\n",
                current->comm, oc->gfp_mask, &oc->gfp_mask, oc->order,
                current->signal->oom_score_adj);

        cpuset_print_current_mems_allowed();
        dump_stack();
        if (memcg)
                mem_cgroup_print_oom_info(memcg, p);
        else
                show_mem(SHOW_MEM_FILTER_NODES);
        if (sysctl_oom_dump_tasks)
                dump_tasks(memcg, oc->nodemask);
}

/*
 * Number of OOM victims in flight
 */
static atomic_t oom_victims = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait);

bool oom_killer_disabled __read_mostly;

#ifdef CONFIG_MMU
/*
 * OOM Reaper kernel thread which tries to reap the memory used by the OOM
 * victim (if that is possible) to help the OOM killer to move on.
 */
static struct task_struct *oom_reaper_th;
static struct mm_struct *mm_to_reap;
static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);

static bool __oom_reap_vmas(struct mm_struct *mm)
{
        struct mmu_gather tlb;
        struct vm_area_struct *vma;
        struct zap_details details = {.check_swap_entries = true,
                                      .ignore_dirty = true};
        bool ret = true;

        /* We might have raced with exit path */
        if (!atomic_inc_not_zero(&mm->mm_users))
                return true;

        if (!down_read_trylock(&mm->mmap_sem)) {
                ret = false;
                goto out;
        }

        tlb_gather_mmu(&tlb, mm, 0, -1);
        for (vma = mm->mmap ; vma; vma = vma->vm_next) {
                if (is_vm_hugetlb_page(vma))
                        continue;

                /*
                 * mlocked VMAs require explicit munlocking before unmap.
                 * Let's keep it simple here and skip such VMAs.
                 */
                if (vma->vm_flags & VM_LOCKED)
                        continue;

                /*
                 * Only anonymous pages have a good chance to be dropped
                 * without additional steps which we cannot afford as we
                 * are OOM already.
                 *
                 * We do not even care about fs backed pages because all
                 * which are reclaimable have already been reclaimed and
                 * we do not want to block exit_mmap by keeping mm ref
                 * count elevated without a good reason.
                 */
                if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED))
                        unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end,
                                         &details);
        }
        tlb_finish_mmu(&tlb, 0, -1);
        up_read(&mm->mmap_sem);
out:
        mmput(mm);
        return ret;
}
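
/*
 * __oom_reap_vmas() reports false only when mmap_sem could not be taken,
 * which is the one transient failure worth retrying; every other outcome,
 * including racing with the exit path, counts as done.
 */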

static void oom_reap_vmas(struct mm_struct *mm)
{
        int attempts = 0;

        /* Retry the down_read_trylock(mmap_sem) a few times */
        while (attempts++ < 10 && !__oom_reap_vmas(mm))
                schedule_timeout_idle(HZ/10);

        /* Drop a reference taken by wake_oom_reaper */
        mmdrop(mm);
}

static int oom_reaper(void *unused)
{
        while (true) {
                struct mm_struct *mm;

                wait_event_freezable(oom_reaper_wait,
                                     (mm = READ_ONCE(mm_to_reap)));
                oom_reap_vmas(mm);
                WRITE_ONCE(mm_to_reap, NULL);
        }

        return 0;
}

static void wake_oom_reaper(struct mm_struct *mm)
{
        struct mm_struct *old_mm;

        if (!oom_reaper_th)
                return;

        /*
         * Pin the given mm. Use mm_count instead of mm_users because
         * we do not want to delay the address space tear down.
         */
        atomic_inc(&mm->mm_count);

        /*
         * Make sure that only a single mm is ever queued for the reaper
         * because multiple are not necessary and the operation might be
         * disruptive so better reduce it to the bare minimum.
         */
        old_mm = cmpxchg(&mm_to_reap, NULL, mm);
        if (!old_mm)
                wake_up(&oom_reaper_wait);
        else
                mmdrop(mm);
}
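
/*
 * mm_to_reap thus acts as a single-slot queue: the cmpxchg() succeeds for
 * at most one caller at a time, and the reaper clears the slot only after
 * oom_reap_vmas() has finished.  An mm that arrives while the slot is busy
 * is simply not reaped, and its pinning reference is dropped right away.
 */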

static int __init oom_init(void)
{
        oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
        if (IS_ERR(oom_reaper_th)) {
                pr_err("Unable to start OOM reaper %ld. Continuing regardless\n",
                                PTR_ERR(oom_reaper_th));
                oom_reaper_th = NULL;
        }
        return 0;
}
subsys_initcall(oom_init)
#else
static void wake_oom_reaper(struct mm_struct *mm)
{
}
#endif

/**
 * mark_oom_victim - mark the given task as OOM victim
 * @tsk: task to mark
 *
 * Has to be called with oom_lock held and never after
 * the OOM killer has been disabled.
 */
void mark_oom_victim(struct task_struct *tsk)
{
        WARN_ON(oom_killer_disabled);
        /* OOM killer might race with memcg OOM */
        if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
                return;
        /*
         * Make sure that the task is woken up from uninterruptible sleep
         * if it is frozen, because the OOM killer otherwise wouldn't be
         * able to free any memory and would livelock.  freezing_slow_path
         * will tell the freezer that TIF_MEMDIE tasks should be ignored.
         */
        __thaw_task(tsk);
        atomic_inc(&oom_victims);
}

/**
 * exit_oom_victim - note the exit of an OOM victim
 */
void exit_oom_victim(void)
{
        clear_thread_flag(TIF_MEMDIE);

        if (!atomic_dec_return(&oom_victims))
                wake_up_all(&oom_victims_wait);
}

/**
 * oom_killer_disable - disable OOM killer
 *
 * Forces all page allocations to fail rather than trigger OOM killer.
 * Will block and wait until all OOM victims are killed.
 *
 * The function cannot be called when there are runnable user tasks because
 * the userspace would see unexpected allocation failures as a result. Any
 * new usage of this function should be discussed with MM people.
 *
 * Returns true if successful and false if the OOM killer cannot be
 * disabled.
 */
bool oom_killer_disable(void)
{
        /*
         * Make sure to not race with an ongoing OOM killer. Check that the
         * current is not killed (possibly due to sharing the victim's memory).
         */
        if (mutex_lock_killable(&oom_lock))
                return false;
        oom_killer_disabled = true;
        mutex_unlock(&oom_lock);

        wait_event(oom_victims_wait, !atomic_read(&oom_victims));

        return true;
}

/**
 * oom_killer_enable - enable OOM killer
 */
void oom_killer_enable(void)
{
        oom_killer_disabled = false;
}
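
/*
 * oom_killer_disable()/oom_killer_enable() are meant to bracket windows in
 * which no user tasks can run, so that allocations fail cleanly instead of
 * OOM-killing; the power-management freezer is the expected caller here
 * (an assumption consistent with the "runnable user tasks" rule above).
 */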

/*
 * task->mm can be NULL if the task is the exited group leader. So to
 * determine whether the task is using a particular mm, we examine all the
 * task's threads: if one of those is using this mm then this task was also
 * using it.
 */
static bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
{
        struct task_struct *t;

        for_each_thread(p, t) {
                struct mm_struct *t_mm = READ_ONCE(t->mm);
                if (t_mm)
                        return t_mm == mm;
        }
        return false;
}
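
/*
 * Note that the loop above decides based on the first thread that still
 * has a non-NULL ->mm: all threads in one group share a single mm
 * (CLONE_THREAD implies CLONE_VM), so sampling one live thread is enough.
 */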

#define K(x) ((x) << (PAGE_SHIFT-10))
/*
 * Must be called while holding a reference to p, which will be released upon
 * returning.
 */
void oom_kill_process(struct oom_control *oc, struct task_struct *p,
                      unsigned int points, unsigned long totalpages,
                      struct mem_cgroup *memcg, const char *message)
{
        struct task_struct *victim = p;
        struct task_struct *child;
        struct task_struct *t;
        struct mm_struct *mm;
        unsigned int victim_points = 0;
        static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
                                              DEFAULT_RATELIMIT_BURST);
        bool can_oom_reap = true;

        /*
         * If the task is already exiting, don't alarm the sysadmin or kill
         * its children or threads, just set TIF_MEMDIE so it can die quickly
         */
        task_lock(p);
        if (p->mm && task_will_free_mem(p)) {
                mark_oom_victim(p);
                task_unlock(p);
                put_task_struct(p);
                return;
        }
        task_unlock(p);

        if (__ratelimit(&oom_rs))
                dump_header(oc, p, memcg);

        pr_err("%s: Kill process %d (%s) score %u or sacrifice child\n",
                message, task_pid_nr(p), p->comm, points);

        /*
         * If any of p's children has a different mm and is eligible for kill,
         * the one with the highest oom_badness() score is sacrificed for its
         * parent.  This attempts to lose the minimal amount of work done while
         * still freeing memory.
         */
        read_lock(&tasklist_lock);
        for_each_thread(p, t) {
                list_for_each_entry(child, &t->children, sibling) {
                        unsigned int child_points;

                        if (process_shares_mm(child, p->mm))
                                continue;
                        /*
                         * oom_badness() returns 0 if the thread is unkillable
                         */
                        child_points = oom_badness(child, memcg, oc->nodemask,
                                                                totalpages);
                        if (child_points > victim_points) {
                                put_task_struct(victim);
                                victim = child;
                                victim_points = child_points;
                                get_task_struct(victim);
                        }
                }
        }
        read_unlock(&tasklist_lock);

        p = find_lock_task_mm(victim);
        if (!p) {
                put_task_struct(victim);
                return;
        } else if (victim != p) {
                get_task_struct(p);
                put_task_struct(victim);
                victim = p;
        }

        /* Get a reference to safely compare mm after task_unlock(victim) */
        mm = victim->mm;
        atomic_inc(&mm->mm_count);
        /*
         * We should send SIGKILL before setting TIF_MEMDIE in order to prevent
         * the OOM victim from depleting the memory reserves from the user
         * space under its control.
         */
        do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, true);
        mark_oom_victim(victim);
        pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
                task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
                K(get_mm_counter(victim->mm, MM_ANONPAGES)),
                K(get_mm_counter(victim->mm, MM_FILEPAGES)),
                K(get_mm_counter(victim->mm, MM_SHMEMPAGES)));
        task_unlock(victim);

        /*
         * Kill all user processes sharing victim->mm in other thread groups,
         * if any.  They don't get access to memory reserves, though, to avoid
         * depletion of all memory.  This prevents mm->mmap_sem livelock when
         * an oom killed thread cannot exit because it requires the semaphore
         * and it's contended by another thread trying to allocate memory
         * itself.  That thread will now get access to memory reserves since
         * it has a pending fatal signal.
         */
        rcu_read_lock();
        for_each_process(p) {
                if (!process_shares_mm(p, mm))
                        continue;
                if (same_thread_group(p, victim))
                        continue;
                if (unlikely(p->flags & PF_KTHREAD) || is_global_init(p) ||
                    p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) {
                        /*
                         * We cannot use oom_reaper for the mm shared by this
                         * process because it wouldn't get killed and so the
                         * memory might be still used.
                         */
                        can_oom_reap = false;
                        continue;
                }
                do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, true);
        }
        rcu_read_unlock();

        if (can_oom_reap)
                wake_oom_reaper(mm);

        mmdrop(mm);
        put_task_struct(victim);
}
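
/*
 * Sequence recap for oom_kill_process(): (1) bail out early if the victim
 * is already dying, (2) prefer sacrificing a child with a different mm,
 * (3) SIGKILL the victim before marking it with TIF_MEMDIE, (4) SIGKILL
 * every other process sharing the victim's mm, and (5) hand the mm to the
 * oom_reaper unless an unkillable sharer would keep the memory in use.
 */
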
#undef K

/*
 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 */
void check_panic_on_oom(struct oom_control *oc, enum oom_constraint constraint,
                        struct mem_cgroup *memcg)
{
        if (likely(!sysctl_panic_on_oom))
                return;
        if (sysctl_panic_on_oom != 2) {
                /*
                 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
                 * does not panic for cpuset, mempolicy, or memcg allocation
                 * failures.
                 */
                if (constraint != CONSTRAINT_NONE)
                        return;
        }
        /* Do not panic for oom kills triggered by sysrq */
        if (is_sysrq_oom(oc))
                return;
        dump_header(oc, NULL, memcg);
        panic("Out of memory: %s panic_on_oom is enabled\n",
                sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}
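
/*
 * sysctl_panic_on_oom values as handled above: 0 never panics, 1 panics
 * only for unconstrained (CONSTRAINT_NONE) ooms, and 2 panics on any oom
 * ("compulsory"), with sysrq-triggered kills exempt in both panic modes.
 */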

static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);

/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @oc: pointer to struct oom_control
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * or trying to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
bool out_of_memory(struct oom_control *oc)
{
        struct task_struct *p;
        unsigned long totalpages;
        unsigned long freed = 0;
        unsigned int uninitialized_var(points);
        enum oom_constraint constraint = CONSTRAINT_NONE;

        if (oom_killer_disabled)
                return false;

        blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
        if (freed > 0)
                /* Got some memory back in the last second. */
                return true;

        /*
         * If current has a pending SIGKILL or is exiting, then automatically
         * select it.  The goal is to allow it to allocate so that it may
         * quickly exit and free its memory.
         *
         * But don't select if current has already released its mm and cleared
         * TIF_MEMDIE flag at exit_mm(), otherwise an OOM livelock may occur.
         */
        if (current->mm &&
            (fatal_signal_pending(current) || task_will_free_mem(current))) {
                mark_oom_victim(current);
                return true;
        }

        /*
         * Check if there were limitations on the allocation (only relevant for
         * NUMA) that may require different handling.
         */
        constraint = constrained_alloc(oc, &totalpages);
        if (constraint != CONSTRAINT_MEMORY_POLICY)
                oc->nodemask = NULL;
        check_panic_on_oom(oc, constraint, NULL);

        if (sysctl_oom_kill_allocating_task && current->mm &&
            !oom_unkillable_task(current, NULL, oc->nodemask) &&
            current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
                get_task_struct(current);
                oom_kill_process(oc, current, 0, totalpages, NULL,
                                 "Out of memory (oom_kill_allocating_task)");
                return true;
        }

        p = select_bad_process(oc, &points, totalpages);
        /* Found nothing?!?! Either we hang forever, or we panic. */
        if (!p && !is_sysrq_oom(oc)) {
                dump_header(oc, NULL, NULL);
                panic("Out of memory and no killable processes...\n");
        }
        if (p && p != (void *)-1UL) {
                oom_kill_process(oc, p, points, totalpages, NULL,
                                 "Out of memory");
                /*
                 * Give the killed process a good chance to exit before trying
                 * to allocate memory again.
                 */
                schedule_timeout_killable(1);
        }
        return true;
}
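
/*
 * Note on the return value: out_of_memory() reports false only while the
 * OOM killer is disabled.  In every other case it reports true so that the
 * page allocator retries, even when nothing was killed on this invocation
 * (a notifier freed memory, or a previously chosen victim is still exiting).
 */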

/*
 * The pagefault handler calls here because it is out of memory, so kill a
 * memory-hogging task.  If the oom_lock is already held, a parallel oom
 * killing is already in progress so do nothing.
 */
void pagefault_out_of_memory(void)
{
        struct oom_control oc = {
                .zonelist = NULL,
                .nodemask = NULL,
                .gfp_mask = 0,
                .order = 0,
        };

        if (mem_cgroup_oom_synchronize(true))
                return;

        if (!mutex_trylock(&oom_lock))
                return;

        if (!out_of_memory(&oc)) {
                /*
                 * There shouldn't be any user tasks runnable while the
                 * OOM killer is disabled, so the current task has to
                 * be a racing OOM victim that oom_killer_disable()
                 * is waiting for.
                 */
                WARN_ON(test_thread_flag(TIF_MEMDIE));
        }

        mutex_unlock(&oom_lock);
}