Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | #ifndef _LINUX_MMAN_H |
| 2 | #define _LINUX_MMAN_H |
| 3 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4 | #include <asm/mman.h> |
| 5 | |
| 6 | #define MREMAP_MAYMOVE 1 |
| 7 | #define MREMAP_FIXED 2 |
| 8 | |
| 9 | #define OVERCOMMIT_GUESS 0 |
| 10 | #define OVERCOMMIT_ALWAYS 1 |
| 11 | #define OVERCOMMIT_NEVER 2 |
David Woodhouse | 9cdcb56 | 2006-04-25 14:18:07 +0100 | [diff] [blame] | 12 | |
| 13 | #ifdef __KERNEL__ |
David Woodhouse | 9cdcb56 | 2006-04-25 14:18:07 +0100 | [diff] [blame] | 14 | #include <linux/mm.h> |
KOSAKI Motohiro | 00a62ce | 2009-04-30 15:08:51 -0700 | [diff] [blame] | 15 | #include <linux/percpu_counter.h> |
David Woodhouse | 9cdcb56 | 2006-04-25 14:18:07 +0100 | [diff] [blame] | 16 | |
| 17 | #include <asm/atomic.h> |
| 18 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 19 | extern int sysctl_overcommit_memory; |
| 20 | extern int sysctl_overcommit_ratio; |
KOSAKI Motohiro | 00a62ce | 2009-04-30 15:08:51 -0700 | [diff] [blame] | 21 | extern struct percpu_counter vm_committed_as; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 22 | |
/*
 * Account @pages (may be negative) against the system-wide committed-VM
 * counter.  Uses a per-cpu counter, so frequent small updates are cheap.
 */
static inline void vm_acct_memory(long pages)
{
	percpu_counter_add(&vm_committed_as, pages);
}
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 27 | |
/*
 * Undo a previous vm_acct_memory() charge of @pages: simply accounts
 * the negated value.
 */
static inline void vm_unacct_memory(long pages)
{
	vm_acct_memory(-pages);
}
| 32 | |
| 33 | /* |
Dave Kleikamp | b845f31 | 2008-07-08 00:28:51 +1000 | [diff] [blame] | 34 | * Allow architectures to handle additional protection bits |
| 35 | */ |
| 36 | |
| 37 | #ifndef arch_calc_vm_prot_bits |
| 38 | #define arch_calc_vm_prot_bits(prot) 0 |
| 39 | #endif |
| 40 | |
| 41 | #ifndef arch_vm_get_page_prot |
| 42 | #define arch_vm_get_page_prot(vm_flags) __pgprot(0) |
| 43 | #endif |
| 44 | |
#ifndef arch_validate_prot
/*
 * Arch-overridable validation of mprotect() protection bits: an
 * architecture that accepts extra PROT_* flags supplies its own
 * arch_validate_prot and defines the macro to suppress this default.
 *
 * This is called from mprotect().  PROT_GROWSDOWN and PROT_GROWSUP have
 * already been masked out.
 *
 * Returns true (non-zero) if the prot flags are valid.
 */
static inline int arch_validate_prot(unsigned long prot)
{
	/* Generic default: reject anything outside read/write/exec/sem. */
	return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM)) == 0;
}
#define arch_validate_prot arch_validate_prot
#endif
| 58 | |
/*
 * Optimisation macro.  It is equivalent to:
 *	(x & bit1) ? bit2 : 0
 * but this version is faster.
 * ("bit1" and "bit2" must be single bits)
 *
 * NOTE(review): with constant bit1/bit2 the <=, * and / all fold at
 * compile time, leaving just a mask (and possibly a shift) — presumably
 * why this form was chosen over the conditional; confirm on your arch.
 */
#define _calc_vm_trans(x, bit1, bit2) \
	((bit1) <= (bit2) ? ((x) & (bit1)) * ((bit2) / (bit1)) \
	 : ((x) & (bit1)) / ((bit1) / (bit2)))
| 68 | |
| 69 | /* |
| 70 | * Combine the mmap "prot" argument into "vm_flags" used internally. |
| 71 | */ |
| 72 | static inline unsigned long |
| 73 | calc_vm_prot_bits(unsigned long prot) |
| 74 | { |
| 75 | return _calc_vm_trans(prot, PROT_READ, VM_READ ) | |
| 76 | _calc_vm_trans(prot, PROT_WRITE, VM_WRITE) | |
Dave Kleikamp | b845f31 | 2008-07-08 00:28:51 +1000 | [diff] [blame] | 77 | _calc_vm_trans(prot, PROT_EXEC, VM_EXEC) | |
| 78 | arch_calc_vm_prot_bits(prot); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 79 | } |
| 80 | |
| 81 | /* |
| 82 | * Combine the mmap "flags" argument into "vm_flags" used internally. |
| 83 | */ |
| 84 | static inline unsigned long |
| 85 | calc_vm_flag_bits(unsigned long flags) |
| 86 | { |
| 87 | return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) | |
| 88 | _calc_vm_trans(flags, MAP_DENYWRITE, VM_DENYWRITE ) | |
| 89 | _calc_vm_trans(flags, MAP_EXECUTABLE, VM_EXECUTABLE) | |
| 90 | _calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED ); |
| 91 | } |
David Woodhouse | 9cdcb56 | 2006-04-25 14:18:07 +0100 | [diff] [blame] | 92 | #endif /* __KERNEL__ */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 93 | #endif /* _LINUX_MMAN_H */ |