Alexey Dobriyan | 8ac773b | 2006-10-19 23:28:32 -0700 | [diff] [blame] | 1 | #ifndef __INCLUDE_LINUX_OOM_H |
| 2 | #define __INCLUDE_LINUX_OOM_H |
| 3 | |
David Rientjes | 5a3135c2 | 2007-10-16 23:25:53 -0700 | [diff] [blame] | 4 | |
David Rientjes | a63d83f | 2010-08-09 17:19:46 -0700 | [diff] [blame] | 5 | #include <linux/sched.h> |
David Rientjes | 172acf6 | 2007-10-16 23:25:59 -0700 | [diff] [blame] | 6 | #include <linux/types.h> |
KAMEZAWA Hiroyuki | 4365a56 | 2009-12-15 16:45:33 -0800 | [diff] [blame] | 7 | #include <linux/nodemask.h> |
David Howells | 607ca46 | 2012-10-13 10:46:48 +0100 | [diff] [blame] | 8 | #include <uapi/linux/oom.h> |
David Rientjes | 172acf6 | 2007-10-16 23:25:59 -0700 | [diff] [blame] | 9 | |
| 10 | struct zonelist; |
| 11 | struct notifier_block; |
Andrew Morton | 74bcbf4 | 2010-08-09 17:19:43 -0700 | [diff] [blame] | 12 | struct mem_cgroup; |
| 13 | struct task_struct; |
David Rientjes | 172acf6 | 2007-10-16 23:25:59 -0700 | [diff] [blame] | 14 | |
/*
 * Details of the page allocation that triggered the oom killer that are used to
 * determine what should be killed.
 */
struct oom_control {
	/* Used to determine cpuset */
	struct zonelist *zonelist;

	/* Used to determine mempolicy */
	nodemask_t *nodemask;

	/*
	 * Used to determine cpuset and node locality requirement.
	 * const: fixed for the lifetime of this oom invocation.
	 */
	const gfp_t gfp_mask;

	/*
	 * order == -1 means the oom kill is required by sysrq, otherwise only
	 * for display purposes.
	 */
	const int order;
};
| 35 | |
/*
 * Types of limitations to the nodes from which allocations may occur
 */
enum oom_constraint {
	CONSTRAINT_NONE,		/* no restriction on usable nodes */
	CONSTRAINT_CPUSET,		/* allocation confined by a cpuset */
	CONSTRAINT_MEMORY_POLICY,	/* allocation confined by a mempolicy nodemask */
	CONSTRAINT_MEMCG,		/* allocation hit a memory cgroup limit */
};
| 45 | |
/*
 * Per-task verdict while scanning for an oom-kill victim; returned by
 * oom_scan_process_thread() (declared below).
 */
enum oom_scan_t {
	OOM_SCAN_OK,		/* scan thread and find its badness */
	OOM_SCAN_CONTINUE,	/* do not consider thread for oom kill */
	OOM_SCAN_ABORT,		/* abort the iteration and return */
	OOM_SCAN_SELECT,	/* always select this thread first */
};
| 52 | |
/* Thread is the potential origin of an oom condition; kill first on oom */
#define OOM_FLAG_ORIGIN ((__force oom_flags_t)0x1)

/*
 * NOTE(review): presumably serializes oom-killer invocations and the
 * disable/enable transitions below — confirm against mm/oom_kill.c.
 */
extern struct mutex oom_lock;
| 57 | |
/*
 * Flag the current task as the likely origin of an oom condition, so that
 * oom_task_origin() reports it and it is killed first on oom (see the
 * OOM_FLAG_ORIGIN definition above).
 */
static inline void set_current_oom_origin(void)
{
	current->signal->oom_flags |= OOM_FLAG_ORIGIN;
}
| 62 | |
/*
 * Clear the oom-origin flag previously set by set_current_oom_origin(),
 * returning the current task to normal oom-victim selection.
 */
static inline void clear_current_oom_origin(void)
{
	current->signal->oom_flags &= ~OOM_FLAG_ORIGIN;
}
| 67 | |
| 68 | static inline bool oom_task_origin(const struct task_struct *p) |
| 69 | { |
| 70 | return !!(p->signal->oom_flags & OOM_FLAG_ORIGIN); |
| 71 | } |
David Rientjes | 72788c3 | 2011-05-24 17:11:40 -0700 | [diff] [blame] | 72 | |
/* Mark @tsk as a chosen oom victim; undone by exit_oom_victim() below. */
extern void mark_oom_victim(struct task_struct *tsk);

/*
 * Compute the oom-kill "badness" score for @p, constrained by @memcg and
 * @nodemask, scaled against @totalpages. Higher scores are killed first —
 * NOTE(review): scale/ordering assumed from usage; confirm in mm/oom_kill.c.
 */
extern unsigned long oom_badness(struct task_struct *p,
		struct mem_cgroup *memcg, const nodemask_t *nodemask,
		unsigned long totalpages);

/* Kill @p (selected with @points badness) for the oom described by @oc. */
extern void oom_kill_process(struct oom_control *oc, struct task_struct *p,
			     unsigned int points, unsigned long totalpages,
			     struct mem_cgroup *memcg, const char *message);

/* Panic instead of killing when sysctl_panic_on_oom policy demands it. */
extern void check_panic_on_oom(struct oom_control *oc,
			       enum oom_constraint constraint,
			       struct mem_cgroup *memcg);

/* Classify @task as an oom-kill candidate; see enum oom_scan_t above. */
extern enum oom_scan_t oom_scan_process_thread(struct oom_control *oc,
		struct task_struct *task, unsigned long totalpages);

/* Main oom-killer entry point; returns whether a victim was dealt with. */
extern bool out_of_memory(struct oom_control *oc);

/* Release @tsk from oom-victim state set by mark_oom_victim(). */
extern void exit_oom_victim(struct task_struct *tsk);

/* (Un)register a callback to be notified when the oom killer runs. */
extern int register_oom_notifier(struct notifier_block *nb);
extern int unregister_oom_notifier(struct notifier_block *nb);

/* Disable/enable the oom killer; oom_killer_disable() reports success. */
extern bool oom_killer_disabled;
extern bool oom_killer_disable(void);
extern void oom_killer_enable(void);

/* Find a thread of @p that has a valid mm and return it with task_lock held. */
extern struct task_struct *find_lock_task_mm(struct task_struct *p);
| 102 | |
Oleg Nesterov | d003f37 | 2014-12-12 16:56:24 -0800 | [diff] [blame] | 103 | static inline bool task_will_free_mem(struct task_struct *task) |
| 104 | { |
| 105 | /* |
| 106 | * A coredumping process may sleep for an extended period in exit_mm(), |
| 107 | * so the oom killer cannot assume that the process will promptly exit |
| 108 | * and release memory. |
| 109 | */ |
| 110 | return (task->flags & PF_EXITING) && |
| 111 | !(task->signal->flags & SIGNAL_GROUP_COREDUMP); |
| 112 | } |
| 113 | |
/* sysctls (writable via /proc/sys/vm/; defined in mm/oom_kill.c — TODO confirm) */
extern int sysctl_oom_dump_tasks;
extern int sysctl_oom_kill_allocating_task;
extern int sysctl_panic_on_oom;
#endif /* __INCLUDE_LINUX_OOM_H */