#ifndef _LINUX_MMAN_H
#define _LINUX_MMAN_H

#include <asm/mman.h>

#define MREMAP_MAYMOVE	1
#define MREMAP_FIXED	2

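/*
 * Usage sketch (illustrative, not part of this header): userspace passes
 * these flags to mremap(2).  MREMAP_MAYMOVE lets the kernel relocate the
 * mapping when it cannot be grown in place; MREMAP_FIXED (which requires
 * MREMAP_MAYMOVE) additionally supplies the new address as a fifth
 * argument.
 *
 *	void *p = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	void *q = mremap(p, old_len, new_len, MREMAP_MAYMOVE);
 *	if (q == MAP_FAILED)
 *		perror("mremap");
 */
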
#define OVERCOMMIT_GUESS		0
#define OVERCOMMIT_ALWAYS		1
#define OVERCOMMIT_NEVER		2
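/*
 * Note (descriptive, not in the original source): these are the values
 * the vm.overcommit_memory sysctl stores in sysctl_overcommit_memory
 * below, e.g.
 *
 *	echo 2 > /proc/sys/vm/overcommit_memory	  # OVERCOMMIT_NEVER
 *
 * GUESS applies a heuristic, ALWAYS never refuses an allocation, and
 * NEVER enforces strict accounting against the commit limit derived
 * from sysctl_overcommit_ratio.
 */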

#ifdef __KERNEL__
#include <linux/mm.h>

#include <asm/atomic.h>

extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern atomic_long_t vm_committed_space;

#ifdef CONFIG_SMP
extern void vm_acct_memory(long pages);
#else
static inline void vm_acct_memory(long pages)
{
	atomic_long_add(pages, &vm_committed_space);
}
#endif

static inline void vm_unacct_memory(long pages)
{
	vm_acct_memory(-pages);
}
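/*
 * Sketch of how callers pair these (illustrative only; the real call
 * sites live in mm/mmap.c and friends).  On CONFIG_SMP the out-of-line
 * vm_acct_memory() batches updates per CPU before folding them into
 * vm_committed_space, so the cheap inline version above is only used
 * on uniprocessor builds.
 *
 *	if (security_vm_enough_memory(npages))	// charges on success
 *		return -ENOMEM;
 *	...
 *	if (error)
 *		vm_unacct_memory(npages);	// undo the charge
 */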

/*
 * Allow architectures to handle additional protection bits
 */

#ifndef arch_calc_vm_prot_bits
#define arch_calc_vm_prot_bits(prot) 0
#endif

#ifndef arch_vm_get_page_prot
#define arch_vm_get_page_prot(vm_flags) __pgprot(0)
#endif

#ifndef arch_validate_prot
/*
 * This is called from mprotect().  PROT_GROWSDOWN and PROT_GROWSUP have
 * already been masked out.
 *
 * Returns true if the prot flags are valid
 */
static inline int arch_validate_prot(unsigned long prot)
{
	return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM)) == 0;
}
#define arch_validate_prot arch_validate_prot
#endif
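/*
 * Example override (a sketch of how an architecture would use these
 * hooks; powerpc does something similar for its PROT_SAO bit).  The
 * arch's <asm/mman.h> defines the macros before this header tests them,
 * so the #ifndef guards above keep the generic no-ops out of the build:
 *
 *	#define arch_calc_vm_prot_bits(prot) \
 *		(((prot) & PROT_SAO) ? VM_SAO : 0)
 *	#define arch_vm_get_page_prot(vm_flags) \
 *		__pgprot(((vm_flags) & VM_SAO) ? _PAGE_SAO : 0)
 */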

/*
 * Optimisation macro.  It is equivalent to:
 *	(x & bit1) ? bit2 : 0
 * but this version is faster.
 * ("bit1" and "bit2" must be single bits)
 */
#define _calc_vm_trans(x, bit1, bit2) \
	((bit1) <= (bit2) ? ((x) & (bit1)) * ((bit2) / (bit1)) \
	 : ((x) & (bit1)) / ((bit1) / (bit2)))
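/*
 * Worked example (descriptive comment, not in the original source):
 * both operands of the multiply/divide are compile-time constants, so
 * the ?: above folds away and no branch is emitted.  With bit1 = 0x4
 * and bit2 = 0x1 we have bit1 > bit2, so the macro expands to
 * ((x) & 0x4) / (0x4 / 0x1) == ((x) & 0x4) / 4, which yields 0x1 when
 * the 0x4 bit is set and 0 otherwise, exactly (x & bit1) ? bit2 : 0.
 */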

/*
 * Combine the mmap "prot" argument into "vm_flags" used internally.
 */
static inline unsigned long
calc_vm_prot_bits(unsigned long prot)
{
	return _calc_vm_trans(prot, PROT_READ,  VM_READ ) |
	       _calc_vm_trans(prot, PROT_WRITE, VM_WRITE) |
	       _calc_vm_trans(prot, PROT_EXEC,  VM_EXEC ) |
	       arch_calc_vm_prot_bits(prot);
}
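/*
 * For instance (illustrative): calc_vm_prot_bits(PROT_READ | PROT_WRITE)
 * returns VM_READ | VM_WRITE.  On most architectures the PROT_* and VM_*
 * values coincide for these three bits, so the translation compiles down
 * to a plain mask, but the macro keeps it correct where they differ.
 */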

/*
 * Combine the mmap "flags" argument into "vm_flags" used internally.
 */
static inline unsigned long
calc_vm_flag_bits(unsigned long flags)
{
	return _calc_vm_trans(flags, MAP_GROWSDOWN,  VM_GROWSDOWN ) |
	       _calc_vm_trans(flags, MAP_DENYWRITE,  VM_DENYWRITE ) |
	       _calc_vm_trans(flags, MAP_EXECUTABLE, VM_EXECUTABLE) |
	       _calc_vm_trans(flags, MAP_LOCKED,     VM_LOCKED    );
}
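/*
 * Together these feed mmap(): the core (see do_mmap_pgoff() in
 * mm/mmap.c) builds the vma flags roughly as
 *
 *	vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
 *		   mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
 *
 * (a paraphrase of the call site, not a verbatim copy).
 */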
#endif /* __KERNEL__ */
#endif /* _LINUX_MMAN_H */