#ifndef __INCLUDE_LINUX_OOM_H
#define __INCLUDE_LINUX_OOM_H

#include <linux/sched.h>
#include <linux/types.h>
#include <linux/nodemask.h>
#include <uapi/linux/oom.h>

struct zonelist;
struct notifier_block;
struct mem_cgroup;
struct task_struct;

/*
 * Details of the page allocation that triggered the oom killer, used to
 * determine what should be killed.
 */
struct oom_control {
	/* Used to determine cpuset */
	struct zonelist *zonelist;

	/* Used to determine mempolicy */
	nodemask_t *nodemask;

	/* Used to determine cpuset and node locality requirement */
	const gfp_t gfp_mask;

	/*
	 * order == -1 means the oom kill was forced via sysrq; otherwise the
	 * order is used only for display purposes.
	 */
	const int order;
};

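/*
 * Illustrative sketch only (not part of this header): a caller such as the
 * page allocator is expected to fill in an oom_control describing the failed
 * allocation and hand it to out_of_memory(), declared below.  The surrounding
 * context (ac, gfp_mask, order) is assumed here:
 *
 *	struct oom_control oc = {
 *		.zonelist = ac->zonelist,
 *		.nodemask = ac->nodemask,
 *		.gfp_mask = gfp_mask,
 *		.order = order,
 *	};
 *
 *	if (out_of_memory(&oc))
 *		...	(a victim was selected or is already exiting)
 */
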
/*
 * Types of limitations to the nodes from which allocations may occur
 */
enum oom_constraint {
	CONSTRAINT_NONE,
	CONSTRAINT_CPUSET,
	CONSTRAINT_MEMORY_POLICY,
	CONSTRAINT_MEMCG,
};

enum oom_scan_t {
	OOM_SCAN_OK,		/* scan thread and find its badness */
	OOM_SCAN_CONTINUE,	/* do not consider thread for oom kill */
	OOM_SCAN_ABORT,		/* abort the iteration and return */
	OOM_SCAN_SELECT,	/* always select this thread first */
};

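/*
 * Illustrative sketch of a victim-selection loop interpreting these values
 * (loosely modelled on select_bad_process(); the surrounding variables are
 * assumed and not defined in this header):
 *
 *	for_each_process_thread(g, p) {
 *		switch (oom_scan_process_thread(oc, p, totalpages)) {
 *		case OOM_SCAN_SELECT:
 *			chosen = p;
 *			chosen_points = ULONG_MAX;
 *			continue;
 *		case OOM_SCAN_CONTINUE:
 *			continue;
 *		case OOM_SCAN_ABORT:
 *			return;		(abort: a victim is already exiting)
 *		case OOM_SCAN_OK:
 *			break;
 *		}
 *		points = oom_badness(p, NULL, oc->nodemask, totalpages);
 *		if (points > chosen_points) {
 *			chosen = p;
 *			chosen_points = points;
 *		}
 *	}
 */
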
/* Thread is the potential origin of an oom condition; kill first on oom */
#define OOM_FLAG_ORIGIN		((__force oom_flags_t)0x1)

extern struct mutex oom_lock;

static inline void set_current_oom_origin(void)
{
	current->signal->oom_flags |= OOM_FLAG_ORIGIN;
}

static inline void clear_current_oom_origin(void)
{
	current->signal->oom_flags &= ~OOM_FLAG_ORIGIN;
}

static inline bool oom_task_origin(const struct task_struct *p)
{
	return !!(p->signal->oom_flags & OOM_FLAG_ORIGIN);
}

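/*
 * Usage sketch (hypothetical caller): a task about to perform an operation
 * that may consume large amounts of memory on behalf of the system, such as
 * disabling a swap device, can mark itself as the preferred oom victim for
 * the duration of that operation:
 *
 *	set_current_oom_origin();
 *	err = do_memory_hungry_operation();	(assumed helper)
 *	clear_current_oom_origin();
 */
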
extern void mark_oom_victim(struct task_struct *tsk);

extern unsigned long oom_badness(struct task_struct *p,
		struct mem_cgroup *memcg, const nodemask_t *nodemask,
		unsigned long totalpages);

extern int oom_kills_count(void);
extern void note_oom_kill(void);
extern void oom_kill_process(struct oom_control *oc, struct task_struct *p,
			     unsigned int points, unsigned long totalpages,
			     struct mem_cgroup *memcg, const char *message);

extern void check_panic_on_oom(struct oom_control *oc,
			       enum oom_constraint constraint,
			       struct mem_cgroup *memcg);

extern enum oom_scan_t oom_scan_process_thread(struct oom_control *oc,
		struct task_struct *task, unsigned long totalpages);

extern bool out_of_memory(struct oom_control *oc);

extern void exit_oom_victim(void);

extern int register_oom_notifier(struct notifier_block *nb);
extern int unregister_oom_notifier(struct notifier_block *nb);

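/*
 * Sketch of the notifier interface (callback and variable names below are
 * illustrative): the registered chain is invoked before a kill is attempted,
 * and each callback reports how many pages it could free via the pointer
 * argument; if the chain frees memory, the kill can be skipped.
 *
 *	static int my_oom_notify(struct notifier_block *nb,
 *				 unsigned long unused, void *parm)
 *	{
 *		unsigned long *freed = parm;
 *
 *		*freed += shrink_my_private_cache();	(assumed helper)
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_oom_nb = {
 *		.notifier_call = my_oom_notify,
 *	};
 *
 *	register_oom_notifier(&my_oom_nb);
 */
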
extern bool oom_killer_disabled;
extern bool oom_killer_disable(void);
extern void oom_killer_enable(void);

extern struct task_struct *find_lock_task_mm(struct task_struct *p);

static inline bool task_will_free_mem(struct task_struct *task)
{
	/*
	 * A coredumping process may sleep for an extended period in exit_mm(),
	 * so the oom killer cannot assume that the process will promptly exit
	 * and release memory.
	 */
	return (task->flags & PF_EXITING) &&
		!(task->signal->flags & SIGNAL_GROUP_COREDUMP);
}

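/*
 * Illustrative use (sketch): before selecting a victim, the oom killer can
 * check whether the current task is already exiting and will release its
 * memory on its own, in which case killing another task is unnecessary:
 *
 *	if (current->mm && task_will_free_mem(current)) {
 *		mark_oom_victim(current);
 *		return true;		(no need to pick another victim)
 *	}
 */
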
/* sysctls */
extern int sysctl_oom_dump_tasks;
extern int sysctl_oom_kill_allocating_task;
extern int sysctl_panic_on_oom;
#endif /* __INCLUDE_LINUX_OOM_H */