/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __INCLUDE_LINUX_OOM_H
#define __INCLUDE_LINUX_OOM_H


#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/nodemask.h>
#include <uapi/linux/oom.h>
#include <linux/sched/coredump.h> /* MMF_* */
#include <linux/mm.h> /* VM_FAULT* */

struct zonelist;
struct notifier_block;
struct mem_cgroup;
struct task_struct;

/*
 * Details of the page allocation that triggered the oom killer; these are
 * used to determine what should be killed.
 */
struct oom_control {
	/* Used to determine cpuset */
	struct zonelist *zonelist;

	/* Used to determine mempolicy */
	nodemask_t *nodemask;

	/* Memory cgroup in which oom is invoked, or NULL for global oom */
	struct mem_cgroup *memcg;

	/* Used to determine cpuset and node locality requirement */
	const gfp_t gfp_mask;

	/*
	 * order == -1 means the oom kill is required by sysrq; otherwise the
	 * order is used only for display purposes.
	 */
	const int order;

	/* Used by oom implementation, do not set */
	unsigned long totalpages;
	struct task_struct *chosen;
	unsigned long chosen_points;
};
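
/*
 * A sketch of how this struct is consumed, modeled on the global OOM path
 * in mm/oom_kill.c (pagefault_out_of_memory()); the field values here are
 * illustrative, not authoritative. A caller fills in the constraint
 * fields, leaves the "do not set" fields zeroed, and hands the whole
 * thing to out_of_memory() under oom_lock:
 *
 *	struct oom_control oc = {
 *		.zonelist = NULL,
 *		.nodemask = NULL,
 *		.memcg = NULL,
 *		.gfp_mask = 0,
 *		.order = 0,
 *	};
 *
 *	if (mutex_trylock(&oom_lock)) {
 *		out_of_memory(&oc);
 *		mutex_unlock(&oom_lock);
 *	}
 */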

extern struct mutex oom_lock;

static inline void set_current_oom_origin(void)
{
	current->signal->oom_flag_origin = true;
}

static inline void clear_current_oom_origin(void)
{
	current->signal->oom_flag_origin = false;
}

static inline bool oom_task_origin(const struct task_struct *p)
{
	return p->signal->oom_flag_origin;
}
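
/*
 * A sketch of the intended usage, modeled on swapoff in mm/swapfile.c,
 * which marks itself as the preferred OOM victim while it forces all
 * swapped-out pages back into memory:
 *
 *	set_current_oom_origin();
 *	err = try_to_unuse(type, false, 0);
 *	clear_current_oom_origin();
 */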

static inline bool tsk_is_oom_victim(struct task_struct *tsk)
{
	return tsk->signal->oom_mm;
}

/*
 * Use this helper if tsk->mm != mm and the victim mm needs special
 * handling. Once set, this is guaranteed to remain true.
 */
static inline bool mm_is_oom_victim(struct mm_struct *mm)
{
	return test_bit(MMF_OOM_VICTIM, &mm->flags);
}
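
/*
 * A sketch of a caller, modeled on exit_mmap() in mm/mmap.c for kernels
 * of this vintage (details elided): when the exiting mm belongs to an OOM
 * victim, the teardown path reaps it directly and synchronizes with the
 * oom reaper before freeing page tables:
 *
 *	if (unlikely(mm_is_oom_victim(mm))) {
 *		(void)__oom_reap_task_mm(mm);
 *		set_bit(MMF_OOM_SKIP, &mm->flags);
 *		down_write(&mm->mmap_sem);
 *		up_write(&mm->mmap_sem);
 *	}
 */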

/*
 * Checks whether a page fault on the given mm is still reliable.
 * This is no longer true if the oom reaper started to reap the
 * address space, which is reflected by the MMF_UNSTABLE flag set in
 * the mm. At that moment any !shared mapping would lose its content
 * and could cause memory corruption (zero pages instead of the
 * original content).
 *
 * Users should call this before establishing a page table entry for
 * a !shared mapping and under the proper page table lock.
 *
 * Return 0 when the PF is safe, VM_FAULT_SIGBUS otherwise.
 */
static inline vm_fault_t check_stable_address_space(struct mm_struct *mm)
{
	if (unlikely(test_bit(MMF_UNSTABLE, &mm->flags)))
		return VM_FAULT_SIGBUS;
	return 0;
}
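
/*
 * A sketch of the call pattern described above, modeled on an anonymous
 * fault handler such as do_anonymous_page() in mm/memory.c (surrounding
 * details elided): check under the page table lock, before the PTE for a
 * private mapping is installed:
 *
 *	vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
 *	if (!pte_none(*vmf->pte))
 *		goto unlock;
 *	ret = check_stable_address_space(mm);
 *	if (ret)
 *		goto unlock;
 *	set_pte_at(mm, vmf->address, vmf->pte, entry);
 */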

bool __oom_reap_task_mm(struct mm_struct *mm);

extern unsigned long oom_badness(struct task_struct *p,
		struct mem_cgroup *memcg, const nodemask_t *nodemask,
		unsigned long totalpages);

extern bool out_of_memory(struct oom_control *oc);

extern void exit_oom_victim(void);

extern int register_oom_notifier(struct notifier_block *nb);
extern int unregister_oom_notifier(struct notifier_block *nb);
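
/*
 * Example of the notifier contract (a hedged sketch; my_oom_notify and
 * my_reclaim_cache are hypothetical): the chain is called at the start of
 * out_of_memory() with a pointer to an unsigned long, and each callback
 * adds the number of pages it freed; if anything was freed, the kill is
 * skipped for that attempt.
 *
 *	static int my_oom_notify(struct notifier_block *nb,
 *				 unsigned long unused, void *parm)
 *	{
 *		unsigned long *freed = parm;
 *
 *		*freed += my_reclaim_cache();
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_oom_nb = {
 *		.notifier_call = my_oom_notify,
 *	};
 *
 *	register_oom_notifier(&my_oom_nb);
 */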

extern bool oom_killer_disable(signed long timeout);
extern void oom_killer_enable(void);

extern struct task_struct *find_lock_task_mm(struct task_struct *p);

/* sysctls */
extern int sysctl_oom_dump_tasks;
extern int sysctl_oom_kill_allocating_task;
extern int sysctl_panic_on_oom;
#endif /* __INCLUDE_LINUX_OOM_H */