/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMAN_H
#define _LINUX_MMAN_H

#include <linux/mm.h>
#include <linux/percpu_counter.h>

#include <linux/atomic.h>
#include <uapi/linux/mman.h>

/*
 * Arrange for legacy / undefined architecture specific flags to be
 * ignored by mmap handling code.
 */
#ifndef MAP_32BIT
#define MAP_32BIT 0
#endif
#ifndef MAP_HUGE_2MB
#define MAP_HUGE_2MB 0
#endif
#ifndef MAP_HUGE_1GB
#define MAP_HUGE_1GB 0
#endif
#ifndef MAP_UNINITIALIZED
#define MAP_UNINITIALIZED 0
#endif
#ifndef MAP_SYNC
#define MAP_SYNC 0
#endif
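
/*
 * Illustrative sketch -- the helper below is hypothetical and not part of
 * this header.  Because unsupported flags are defined to 0 above, generic
 * code can test or mask them unconditionally: on an architecture without
 * MAP_SYNC the test simply folds to 0 at compile time, no #ifdef needed.
 */
static inline bool example_wants_sync_fault(unsigned long flags)
{
	/* Evaluates to false wherever MAP_SYNC is defined to 0. */
	return (flags & MAP_SYNC) != 0;
}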

/*
 * The historical set of flags that all mmap implementations implicitly
 * support when a ->mmap_validate() op is not provided in file_operations.
 */
#define LEGACY_MAP_MASK (MAP_SHARED \
		| MAP_PRIVATE \
		| MAP_FIXED \
		| MAP_ANONYMOUS \
		| MAP_DENYWRITE \
		| MAP_EXECUTABLE \
		| MAP_UNINITIALIZED \
		| MAP_GROWSDOWN \
		| MAP_LOCKED \
		| MAP_NORESERVE \
		| MAP_POPULATE \
		| MAP_NONBLOCK \
		| MAP_STACK \
		| MAP_HUGETLB \
		| MAP_32BIT \
		| MAP_HUGE_2MB \
		| MAP_HUGE_1GB)
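
/*
 * Illustrative sketch (hypothetical function, not part of this header):
 * an implementation that provides ->mmap_validate() and additionally
 * understands MAP_SYNC could reject any flag outside its supported set
 * along these lines.
 */
static inline int example_mmap_validate_flags(unsigned long flags)
{
	if (flags & ~(unsigned long)(LEGACY_MAP_MASK | MAP_SYNC))
		return -EOPNOTSUPP;	/* caller asked for an unknown flag */
	return 0;
}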

extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern unsigned long sysctl_overcommit_kbytes;
extern struct percpu_counter vm_committed_as;

#ifdef CONFIG_SMP
extern s32 vm_committed_as_batch;
#else
#define vm_committed_as_batch 0
#endif

unsigned long vm_memory_committed(void);
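
/*
 * Illustrative sketch (hypothetical helper): vm_memory_committed() reads
 * the vm_committed_as counter declared above, which is what /proc/meminfo
 * reports as "Committed_AS".  Converting the page count to kilobytes
 * looks roughly like this.
 */
static inline unsigned long example_committed_as_kb(void)
{
	return vm_memory_committed() << (PAGE_SHIFT - 10);
}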

static inline void vm_acct_memory(long pages)
{
	percpu_counter_add_batch(&vm_committed_as, pages, vm_committed_as_batch);
}

static inline void vm_unacct_memory(long pages)
{
	vm_acct_memory(-pages);
}
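
/*
 * Illustrative sketch (hypothetical caller): the two helpers above are
 * used as a pair -- charge pages against the overcommit counter up front
 * and give the charge back if the request is refused.  The limit check
 * here is deliberately simplified; see __vm_enough_memory() for the real
 * policy.
 */
static inline int example_charge_pages(long npages, unsigned long limit_pages)
{
	vm_acct_memory(npages);
	if (vm_memory_committed() > limit_pages) {
		vm_unacct_memory(npages);	/* roll the charge back */
		return -ENOMEM;
	}
	return 0;
}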

/*
 * Allow architectures to handle additional protection bits
 */

#ifndef arch_calc_vm_prot_bits
#define arch_calc_vm_prot_bits(prot, pkey) 0
#endif

#ifndef arch_vm_get_page_prot
#define arch_vm_get_page_prot(vm_flags) __pgprot(0)
#endif

#ifndef arch_validate_prot
/*
 * This is called from mprotect().  PROT_GROWSDOWN and PROT_GROWSUP have
 * already been masked out.
 *
 * Returns true if the prot flags are valid
 */
static inline bool arch_validate_prot(unsigned long prot, unsigned long addr)
{
	return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM)) == 0;
}
#define arch_validate_prot arch_validate_prot
#endif
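
/*
 * Illustrative sketch: an architecture that accepts an extra protection
 * bit supplies its own arch_validate_prot() (typically from asm/mman.h)
 * so that the generic fallback above is never seen; sparc does something
 * similar for its ADI bit.  PROT_ARCH_SPECIAL is a made-up name and the
 * example is kept under "#if 0" so it is never compiled.
 */
#if 0	/* example only */
static inline bool arch_validate_prot(unsigned long prot, unsigned long addr)
{
	return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM |
			 PROT_ARCH_SPECIAL)) == 0;
}
#define arch_validate_prot arch_validate_prot
#endif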

/*
 * Optimisation macro.  It is equivalent to:
 *	(x & bit1) ? bit2 : 0
 * but this version is faster.
 * ("bit1" and "bit2" must be single bits)
 */
#define _calc_vm_trans(x, bit1, bit2) \
	((!(bit1) || !(bit2)) ? 0 : \
	((bit1) <= (bit2) ? ((x) & (bit1)) * ((bit2) / (bit1)) \
	: ((x) & (bit1)) / ((bit1) / (bit2))))
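
/*
 * Worked example (hypothetical helper, for illustration only): when the
 * source and destination bits are equal the scale factor (bit2)/(bit1)
 * is 1 and the macro degenerates to a plain mask; when they differ, one
 * multiply or divide by a power of two moves the tested bit into place.
 * Either way the result equals "(x & bit1) ? bit2 : 0" with no branch.
 */
static inline unsigned long example_prot_read_to_vm_read(unsigned long prot)
{
	/* Same value as "(prot & PROT_READ) ? VM_READ : 0". */
	return _calc_vm_trans(prot, PROT_READ, VM_READ);
}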

/*
 * Combine the mmap "prot" argument into "vm_flags" used internally.
 */
static inline unsigned long
calc_vm_prot_bits(unsigned long prot, unsigned long pkey)
{
	return _calc_vm_trans(prot, PROT_READ,  VM_READ ) |
	       _calc_vm_trans(prot, PROT_WRITE, VM_WRITE) |
	       _calc_vm_trans(prot, PROT_EXEC,  VM_EXEC) |
	       arch_calc_vm_prot_bits(prot, pkey);
}

/*
 * Combine the mmap "flags" argument into "vm_flags" used internally.
 */
static inline unsigned long
calc_vm_flag_bits(unsigned long flags)
{
	return _calc_vm_trans(flags, MAP_GROWSDOWN,  VM_GROWSDOWN ) |
	       _calc_vm_trans(flags, MAP_DENYWRITE,  VM_DENYWRITE ) |
	       _calc_vm_trans(flags, MAP_LOCKED,     VM_LOCKED    ) |
	       _calc_vm_trans(flags, MAP_SYNC,       VM_SYNC      );
}
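
/*
 * Illustrative sketch (simplified, not a verbatim copy of mm/mmap.c):
 * the mmap() path combines the two helpers above with the mm's default
 * flags and the VM_MAY* bits to build the vm_flags of a new mapping,
 * roughly along these lines.
 */
static inline unsigned long
example_build_vm_flags(unsigned long prot, unsigned long flags,
		       unsigned long def_flags)
{
	return calc_vm_prot_bits(prot, 0) | calc_vm_flag_bits(flags) |
	       def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
}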

unsigned long vm_commit_limit(void);
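
/*
 * Rough sketch of the semantics behind vm_commit_limit() -- the
 * authoritative version lives in mm/util.c; this rendering follows the
 * documented overcommit behaviour rather than quoting the source.  The
 * limit is either an absolute figure from sysctl_overcommit_kbytes or a
 * percentage of RAM from sysctl_overcommit_ratio, plus swap.  Kept under
 * "#if 0" so it is never compiled.
 */
#if 0	/* example only */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = totalram_pages * sysctl_overcommit_ratio / 100;

	return allowed + total_swap_pages;
}
#endif
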
#endif /* _LINUX_MMAN_H */