Alexey Dobriyan | 8ac773b | 2006-10-19 23:28:32 -0700 | [diff] [blame] | 1 | #ifndef __INCLUDE_LINUX_OOM_H |
| 2 | #define __INCLUDE_LINUX_OOM_H |
| 3 | |
David Rientjes | 5a3135c2 | 2007-10-16 23:25:53 -0700 | [diff] [blame] | 4 | |
Ingo Molnar | 3f07c01 | 2017-02-08 18:51:30 +0100 | [diff] [blame] | 5 | #include <linux/sched/signal.h> |
David Rientjes | 172acf6 | 2007-10-16 23:25:59 -0700 | [diff] [blame] | 6 | #include <linux/types.h> |
KAMEZAWA Hiroyuki | 4365a56 | 2009-12-15 16:45:33 -0800 | [diff] [blame] | 7 | #include <linux/nodemask.h> |
David Howells | 607ca46 | 2012-10-13 10:46:48 +0100 | [diff] [blame] | 8 | #include <uapi/linux/oom.h> |
David Rientjes | 172acf6 | 2007-10-16 23:25:59 -0700 | [diff] [blame] | 9 | |
| 10 | struct zonelist; |
| 11 | struct notifier_block; |
Andrew Morton | 74bcbf4 | 2010-08-09 17:19:43 -0700 | [diff] [blame] | 12 | struct mem_cgroup; |
| 13 | struct task_struct; |
David Rientjes | 172acf6 | 2007-10-16 23:25:59 -0700 | [diff] [blame] | 14 | |
/*
 * Details of the page allocation that triggered the oom killer that are used to
 * determine what should be killed.
 *
 * The first four fields describe the allocation context and are set by the
 * caller; the trailing fields are owned by the oom implementation.
 */
struct oom_control {
	/* Used to determine cpuset */
	struct zonelist *zonelist;

	/* Used to determine mempolicy */
	nodemask_t *nodemask;

	/* Memory cgroup in which oom is invoked, or NULL for global oom */
	struct mem_cgroup *memcg;

	/* Used to determine cpuset and node locality requirement */
	const gfp_t gfp_mask;

	/*
	 * order == -1 means the oom kill is required by sysrq; any other
	 * value is used only for display purposes.
	 */
	const int order;

	/* Used by oom implementation, do not set */
	unsigned long totalpages;
	struct task_struct *chosen;
	unsigned long chosen_points;
};
| 43 | |
/*
 * NOTE(review): presumably serializes oom-killer invocations — confirm
 * against the definition in mm/oom_kill.c.
 */
extern struct mutex oom_lock;
| 45 | |
/**
 * set_current_oom_origin - flag the current task as an oom origin
 *
 * Sets oom_flag_origin in current's signal struct.  The flag is read back
 * via oom_task_origin(); pair with clear_current_oom_origin() when the
 * marked section ends.
 */
static inline void set_current_oom_origin(void)
{
	current->signal->oom_flag_origin = true;
}
| 50 | |
/**
 * clear_current_oom_origin - clear the oom-origin flag on the current task
 *
 * Resets oom_flag_origin in current's signal struct; undoes
 * set_current_oom_origin().
 */
static inline void clear_current_oom_origin(void)
{
	current->signal->oom_flag_origin = false;
}
| 55 | |
/**
 * oom_task_origin - test whether @p has been flagged as an oom origin
 * @p: task to test
 *
 * Returns the oom_flag_origin flag from @p's signal struct, as set by
 * set_current_oom_origin().
 */
static inline bool oom_task_origin(const struct task_struct *p)
{
	return p->signal->oom_flag_origin;
}
David Rientjes | 72788c3 | 2011-05-24 17:11:40 -0700 | [diff] [blame] | 60 | |
Michal Hocko | 862e307 | 2016-10-07 16:58:57 -0700 | [diff] [blame] | 61 | static inline bool tsk_is_oom_victim(struct task_struct * tsk) |
| 62 | { |
| 63 | return tsk->signal->oom_mm; |
| 64 | } |
| 65 | |
/*
 * NOTE(review): badness score used to rank oom-kill candidates against
 * @totalpages within the given memcg/nodemask scope; presumably higher
 * means a stronger candidate — confirm in mm/oom_kill.c.
 */
extern unsigned long oom_badness(struct task_struct *p,
		struct mem_cgroup *memcg, const nodemask_t *nodemask,
		unsigned long totalpages);

/* Main oom-killer entry point, driven by the oom_control descriptor. */
extern bool out_of_memory(struct oom_control *oc);

/* NOTE(review): name suggests it ends current's oom-victim state — verify. */
extern void exit_oom_victim(void);

/* Notifier chain invoked on oom events (callbacks may reclaim memory). */
extern int register_oom_notifier(struct notifier_block *nb);
extern int unregister_oom_notifier(struct notifier_block *nb);

/*
 * Disable/re-enable the oom killer; @timeout bounds how long disable
 * waits (signed long, matching scheduler timeout conventions).
 */
extern bool oom_killer_disable(signed long timeout);
extern void oom_killer_enable(void);

extern struct task_struct *find_lock_task_mm(struct task_struct *p);

/* sysctls */
extern int sysctl_oom_dump_tasks;
extern int sysctl_oom_kill_allocating_task;
extern int sysctl_panic_on_oom;
#endif /* __INCLUDE_LINUX_OOM_H */