Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | #ifndef _LINUX_MMAN_H |
| 2 | #define _LINUX_MMAN_H |
| 3 | |
David Woodhouse | 9cdcb56 | 2006-04-25 14:18:07 +0100 | [diff] [blame] | 4 | #include <linux/mm.h> |
KOSAKI Motohiro | 00a62ce | 2009-04-30 15:08:51 -0700 | [diff] [blame] | 5 | #include <linux/percpu_counter.h> |
David Woodhouse | 9cdcb56 | 2006-04-25 14:18:07 +0100 | [diff] [blame] | 6 | |
Arun Sharma | 60063497 | 2011-07-26 16:09:06 -0700 | [diff] [blame] | 7 | #include <linux/atomic.h> |
David Howells | 607ca46 | 2012-10-13 10:46:48 +0100 | [diff] [blame] | 8 | #include <uapi/linux/mman.h> |
David Woodhouse | 9cdcb56 | 2006-04-25 14:18:07 +0100 | [diff] [blame] | 9 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 10 | extern int sysctl_overcommit_memory; |
| 11 | extern int sysctl_overcommit_ratio; |
KOSAKI Motohiro | 00a62ce | 2009-04-30 15:08:51 -0700 | [diff] [blame] | 12 | extern struct percpu_counter vm_committed_as; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13 | |
/*
 * Charge @pages (may be negative) against the global "committed
 * address space" total, kept in the vm_committed_as percpu counter
 * declared above.
 */
static inline void vm_acct_memory(long pages)
{
	percpu_counter_add(&vm_committed_as, pages);
}
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 18 | |
/* Undo a previous vm_acct_memory() charge of @pages pages. */
static inline void vm_unacct_memory(long pages)
{
	vm_acct_memory(-pages);
}
| 23 | |
/*
 * Allow architectures to handle additional protection bits.
 *
 * Architectures that translate extra mmap/mprotect PROT_* bits into
 * extra VM_* flags, or that encode extra protection state in the page
 * protection value, override these before including this header.
 */

/* Default: no architecture-specific VM_* bits derived from prot. */
#ifndef arch_calc_vm_prot_bits
#define arch_calc_vm_prot_bits(prot) 0
#endif

/* Default: no architecture-specific bits added to vm_get_page_prot(). */
#ifndef arch_vm_get_page_prot
#define arch_vm_get_page_prot(vm_flags) __pgprot(0)
#endif
| 35 | |
| 36 | #ifndef arch_validate_prot |
| 37 | /* |
| 38 | * This is called from mprotect(). PROT_GROWSDOWN and PROT_GROWSUP have |
| 39 | * already been masked out. |
| 40 | * |
| 41 | * Returns true if the prot flags are valid |
| 42 | */ |
| 43 | static inline int arch_validate_prot(unsigned long prot) |
| 44 | { |
| 45 | return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM)) == 0; |
| 46 | } |
| 47 | #define arch_validate_prot arch_validate_prot |
| 48 | #endif |
| 49 | |
/*
 * Optimisation macro.  It is equivalent to:
 *	(x & bit1) ? bit2 : 0
 * but this version is faster.
 * ("bit1" and "bit2" must be single bits)
 *
 * Because bit1 and bit2 are single (power-of-two) bits, the ratio
 * between them is a power of two as well, so the multiply/divide
 * folds to a compile-time shift: the bit masked out of x is shifted
 * up or down into bit2's position instead of being tested with a
 * branch.
 */
#define _calc_vm_trans(x, bit1, bit2) \
  ((bit1) <= (bit2) ? ((x) & (bit1)) * ((bit2) / (bit1)) \
   : ((x) & (bit1)) / ((bit1) / (bit2)))
| 59 | |
| 60 | /* |
| 61 | * Combine the mmap "prot" argument into "vm_flags" used internally. |
| 62 | */ |
| 63 | static inline unsigned long |
| 64 | calc_vm_prot_bits(unsigned long prot) |
| 65 | { |
| 66 | return _calc_vm_trans(prot, PROT_READ, VM_READ ) | |
| 67 | _calc_vm_trans(prot, PROT_WRITE, VM_WRITE) | |
Dave Kleikamp | b845f31 | 2008-07-08 00:28:51 +1000 | [diff] [blame] | 68 | _calc_vm_trans(prot, PROT_EXEC, VM_EXEC) | |
| 69 | arch_calc_vm_prot_bits(prot); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 70 | } |
| 71 | |
| 72 | /* |
| 73 | * Combine the mmap "flags" argument into "vm_flags" used internally. |
| 74 | */ |
| 75 | static inline unsigned long |
| 76 | calc_vm_flag_bits(unsigned long flags) |
| 77 | { |
| 78 | return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) | |
| 79 | _calc_vm_trans(flags, MAP_DENYWRITE, VM_DENYWRITE ) | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 80 | _calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED ); |
| 81 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 82 | #endif /* _LINUX_MMAN_H */ |