/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_COREDUMP_H
#define _LINUX_SCHED_COREDUMP_H

#include <linux/mm_types.h>

#define SUID_DUMP_DISABLE	0	/* No setuid dumping */
#define SUID_DUMP_USER		1	/* Dump as user of process */
#define SUID_DUMP_ROOT		2	/* Dump as root */

/* mm flags */

/* for SUID_DUMP_* above */
#define MMF_DUMPABLE_BITS 2
#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)

extern void set_dumpable(struct mm_struct *mm, int value);
/*
 * This returns the actual value of the suid_dumpable flag. Callers that
 * use it to check for privilege transitions must test against
 * SUID_DUMP_USER rather than treating the result as a boolean value.
 */
static inline int __get_dumpable(unsigned long mm_flags)
{
	return mm_flags & MMF_DUMPABLE_MASK;
}

static inline int get_dumpable(struct mm_struct *mm)
{
	return __get_dumpable(mm->flags);
}
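
/*
 * Illustrative sketch (editorial, not part of the original header): a caller
 * gating on a possible privilege transition compares the result against
 * SUID_DUMP_USER explicitly rather than truth-testing it, because
 * SUID_DUMP_ROOT (2) is also non-zero. "task" is a hypothetical
 * task_struct pointer:
 *
 *	if (task->mm && get_dumpable(task->mm) != SUID_DUMP_USER) {
 *		... the task has been through a credential change;
 *		    require extra privilege before inspecting it ...
 *	}
 */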

/* coredump filter bits */
#define MMF_DUMP_ANON_PRIVATE	2
#define MMF_DUMP_ANON_SHARED	3
#define MMF_DUMP_MAPPED_PRIVATE	4
#define MMF_DUMP_MAPPED_SHARED	5
#define MMF_DUMP_ELF_HEADERS	6
#define MMF_DUMP_HUGETLB_PRIVATE 7
#define MMF_DUMP_HUGETLB_SHARED  8
#define MMF_DUMP_DAX_PRIVATE	9
#define MMF_DUMP_DAX_SHARED	10

#define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
#define MMF_DUMP_FILTER_BITS	9
#define MMF_DUMP_FILTER_MASK \
	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
#define MMF_DUMP_FILTER_DEFAULT \
	((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\
	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)

#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
# define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
#else
# define MMF_DUMP_MASK_DEFAULT_ELF	0
#endif
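
/*
 * Worked example (editorial sketch, not from the original header): with the
 * definitions above, MMF_DUMP_FILTER_DEFAULT sets mm->flags bits 2, 3 and 7
 * (plus bit 6 when CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is enabled), i.e.
 * 0x8c or 0xcc. A dumper that wants to honour one of these bits can test it
 * directly against mm->flags; "wants_anon_private" is a hypothetical helper:
 *
 *	static inline bool wants_anon_private(struct mm_struct *mm)
 *	{
 *		return mm->flags & (1UL << MMF_DUMP_ANON_PRIVATE);
 *	}
 */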
					/* leave room for more dump flags */
#define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
#define MMF_VM_HUGEPAGE		17	/* set when VM_HUGEPAGE is set on vma */
/*
 * This one-shot flag was dropped because the exe file may need to be
 * changed once again on NFS restore.
 */
//#define MMF_EXE_FILE_CHANGED	18	/* see prctl_set_mm_exe_file() */

#define MMF_HAS_UPROBES		19	/* has uprobes */
#define MMF_RECALC_UPROBES	20	/* MMF_HAS_UPROBES can be wrong */
#define MMF_OOM_SKIP		21	/* mm is of no interest for the OOM killer */
#define MMF_UNSTABLE		22	/* mm is unstable for copy_from_user */
#define MMF_HUGE_ZERO_PAGE	23	/* mm has ever used the global huge zero page */
#define MMF_DISABLE_THP		24	/* disable THP for all VMAs */
#define MMF_OOM_VICTIM		25	/* mm is the oom victim */
#define MMF_DISABLE_THP_MASK	(1 << MMF_DISABLE_THP)
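
/*
 * Illustrative sketch (editorial, not part of the original header): the MMF_*
 * values above are bit numbers in mm->flags, so they are typically
 * manipulated with the atomic bitops. "mm" is a hypothetical mm_struct
 * pointer:
 *
 *	set_bit(MMF_DISABLE_THP, &mm->flags);
 *	if (test_bit(MMF_HAS_UPROBES, &mm->flags))
 *		... the mm may contain uprobes ...
 */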

#define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
				 MMF_DISABLE_THP_MASK)
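
/*
 * Editorial sketch (not from the original header): MMF_INIT_MASK selects the
 * flags that carry over when a new mm is set up for a forked task, roughly
 * along the lines of:
 *
 *	mm->flags = current->mm ? (current->mm->flags & MMF_INIT_MASK)
 *				: MMF_DUMP_FILTER_DEFAULT;
 */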

#endif /* _LINUX_SCHED_COREDUMP_H */