#ifndef _LINUX_MMAN_H
#define _LINUX_MMAN_H

#include <asm/mman.h>

#define MREMAP_MAYMOVE	1
#define MREMAP_FIXED	2

#define OVERCOMMIT_GUESS		0
#define OVERCOMMIT_ALWAYS		1
#define OVERCOMMIT_NEVER		2
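/*
 * The overcommit modes above select the policy behind the
 * vm.overcommit_memory sysctl (sysctl_overcommit_memory below):
 * GUESS applies a heuristic check and refuses only obvious overcommits,
 * ALWAYS never refuses an allocation, and NEVER enforces strict
 * accounting against swap plus overcommit_ratio percent of physical RAM.
 */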

#ifdef __KERNEL__
#include <linux/mm.h>

#include <asm/atomic.h>

extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern atomic_long_t vm_committed_space;

#ifdef CONFIG_SMP
extern void vm_acct_memory(long pages);
#else
static inline void vm_acct_memory(long pages)
{
	atomic_long_add(pages, &vm_committed_space);
}
#endif

static inline void vm_unacct_memory(long pages)
{
	vm_acct_memory(-pages);
}

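/*
 * Illustrative sketch only (not part of this header): a caller typically
 * charges pages through the overcommit accounting and undoes the charge
 * with vm_unacct_memory() if a later step fails, roughly:
 *
 *	if (security_vm_enough_memory(len >> PAGE_SHIFT))
 *		return -ENOMEM;			charge refused, nothing accounted
 *	...
 *	if (setup_fails) {
 *		vm_unacct_memory(len >> PAGE_SHIFT);	undo the charge
 *		return -ENOMEM;
 *	}
 */
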
/*
 * Allow architectures to handle additional protection bits
 */

#ifndef arch_calc_vm_prot_bits
#define arch_calc_vm_prot_bits(prot) 0
#endif

#ifndef arch_vm_get_page_prot
#define arch_vm_get_page_prot(vm_flags) __pgprot(0)
#endif

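/*
 * Example (for illustration only; the architecture's asm/mman.h is the
 * authoritative source): powerpc overrides these hooks so that its
 * PROT_SAO bit is carried into vm_flags and back into the page
 * protection, roughly:
 *
 *	#define arch_calc_vm_prot_bits(prot)	((prot) & PROT_SAO ? VM_SAO : 0)
 *	#define arch_vm_get_page_prot(vm_flags)	\
 *		__pgprot((vm_flags) & VM_SAO ? _PAGE_SAO : 0)
 */
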
#ifndef arch_validate_prot
/*
 * This is called from mprotect().  PROT_GROWSDOWN and PROT_GROWSUP have
 * already been masked out.
 *
 * Returns true if the prot flags are valid
 */
static inline int arch_validate_prot(unsigned long prot)
{
	return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM)) == 0;
}
#define arch_validate_prot arch_validate_prot
#endif

/*
 * Optimisation macro.  It is equivalent to:
 *	(x & bit1) ? bit2 : 0
 * but this version is faster.
 * ("bit1" and "bit2" must be single bits)
 */
#define _calc_vm_trans(x, bit1, bit2) \
	((bit1) <= (bit2) ? ((x) & (bit1)) * ((bit2) / (bit1)) \
	 : ((x) & (bit1)) / ((bit1) / (bit2)))

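/*
 * Worked example (illustrative): with bit1 == 0x1 and bit2 == 0x8,
 *	_calc_vm_trans(x, 0x1, 0x8) == ((x) & 0x1) * 0x8
 * which yields 0x8 when the 0x1 bit is set in x and 0 otherwise.  With
 * the bits the other way round (bit1 == 0x8, bit2 == 0x1) the division
 * branch is taken instead:
 *	_calc_vm_trans(x, 0x8, 0x1) == ((x) & 0x8) / 0x8
 * Either way the source bit is moved into the destination position
 * without a conditional, since both operands are single bits.
 */
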
/*
 * Combine the mmap "prot" argument into "vm_flags" used internally.
 */
static inline unsigned long
calc_vm_prot_bits(unsigned long prot)
{
	return _calc_vm_trans(prot, PROT_READ,  VM_READ ) |
	       _calc_vm_trans(prot, PROT_WRITE, VM_WRITE) |
	       _calc_vm_trans(prot, PROT_EXEC,  VM_EXEC) |
	       arch_calc_vm_prot_bits(prot);
}

/*
 * Combine the mmap "flags" argument into "vm_flags" used internally.
 */
static inline unsigned long
calc_vm_flag_bits(unsigned long flags)
{
	return _calc_vm_trans(flags, MAP_GROWSDOWN,  VM_GROWSDOWN ) |
	       _calc_vm_trans(flags, MAP_DENYWRITE,  VM_DENYWRITE ) |
	       _calc_vm_trans(flags, MAP_EXECUTABLE, VM_EXECUTABLE) |
	       _calc_vm_trans(flags, MAP_LOCKED,     VM_LOCKED    );
}
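
/*
 * The two helpers above are combined on the mmap path; a sketch of the
 * call site in mm/mmap.c (do_mmap_pgoff), shown here for illustration
 * only:
 *
 *	vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
 *			mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
 */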
#endif /* __KERNEL__ */
#endif /* _LINUX_MMAN_H */