blob: 8b74e9b1d0ad53911cd15e31891be149bc7932af [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001#ifndef _LINUX_MMAN_H
2#define _LINUX_MMAN_H
3
Linus Torvalds1da177e2005-04-16 15:20:36 -07004#include <asm/mman.h>
5
6#define MREMAP_MAYMOVE 1
7#define MREMAP_FIXED 2
8
9#define OVERCOMMIT_GUESS 0
10#define OVERCOMMIT_ALWAYS 1
11#define OVERCOMMIT_NEVER 2
David Woodhouse9cdcb562006-04-25 14:18:07 +010012
13#ifdef __KERNEL__
David Woodhouse9cdcb562006-04-25 14:18:07 +010014#include <linux/mm.h>
KOSAKI Motohiro00a62ce2009-04-30 15:08:51 -070015#include <linux/percpu_counter.h>
David Woodhouse9cdcb562006-04-25 14:18:07 +010016
Arun Sharma60063492011-07-26 16:09:06 -070017#include <linux/atomic.h>
David Woodhouse9cdcb562006-04-25 14:18:07 +010018
Linus Torvalds1da177e2005-04-16 15:20:36 -070019extern int sysctl_overcommit_memory;
20extern int sysctl_overcommit_ratio;
KOSAKI Motohiro00a62ce2009-04-30 15:08:51 -070021extern struct percpu_counter vm_committed_as;
Linus Torvalds1da177e2005-04-16 15:20:36 -070022
/*
 * Account @pages towards the system-wide committed-VM total
 * (vm_committed_as).  A negative @pages releases previously
 * accounted pages — see vm_unacct_memory() below.
 */
static inline void vm_acct_memory(long pages)
{
	percpu_counter_add(&vm_committed_as, pages);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -070027
/*
 * Undo a prior vm_acct_memory() charge: subtract @pages from the
 * system-wide committed-VM total.
 */
static inline void vm_unacct_memory(long pages)
{
	long delta = -pages;

	vm_acct_memory(delta);
}
32
/*
 * Allow architectures to handle additional protection bits
 */

#ifndef arch_calc_vm_prot_bits
/* Default: the architecture adds no extra VM_* bits for mmap prot flags. */
#define arch_calc_vm_prot_bits(prot) 0
#endif

#ifndef arch_vm_get_page_prot
/* Default: the architecture contributes no extra page-protection bits. */
#define arch_vm_get_page_prot(vm_flags) __pgprot(0)
#endif
44
45#ifndef arch_validate_prot
46/*
47 * This is called from mprotect(). PROT_GROWSDOWN and PROT_GROWSUP have
48 * already been masked out.
49 *
50 * Returns true if the prot flags are valid
51 */
52static inline int arch_validate_prot(unsigned long prot)
53{
54 return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM)) == 0;
55}
56#define arch_validate_prot arch_validate_prot
57#endif
58
/*
 * Optimisation macro.  It is equivalent to:
 *	(x & bit1) ? bit2 : 0
 * but this version is faster.
 * ("bit1" and "bit2" must be single bits)
 *
 * It works by isolating bit1 in x and then shifting it to bit2's
 * position via a multiply (when moving up) or a divide (when moving
 * down) by the exact power-of-two ratio between the two bits; the
 * comparison is resolved at compile time for constant arguments.
 */
#define _calc_vm_trans(x, bit1, bit2) \
  ((bit1) <= (bit2) ? ((x) & (bit1)) * ((bit2) / (bit1)) \
   : ((x) & (bit1)) / ((bit1) / (bit2)))
68
69/*
70 * Combine the mmap "prot" argument into "vm_flags" used internally.
71 */
72static inline unsigned long
73calc_vm_prot_bits(unsigned long prot)
74{
75 return _calc_vm_trans(prot, PROT_READ, VM_READ ) |
76 _calc_vm_trans(prot, PROT_WRITE, VM_WRITE) |
Dave Kleikampb845f312008-07-08 00:28:51 +100077 _calc_vm_trans(prot, PROT_EXEC, VM_EXEC) |
78 arch_calc_vm_prot_bits(prot);
Linus Torvalds1da177e2005-04-16 15:20:36 -070079}
80
81/*
82 * Combine the mmap "flags" argument into "vm_flags" used internally.
83 */
84static inline unsigned long
85calc_vm_flag_bits(unsigned long flags)
86{
87 return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) |
88 _calc_vm_trans(flags, MAP_DENYWRITE, VM_DENYWRITE ) |
89 _calc_vm_trans(flags, MAP_EXECUTABLE, VM_EXECUTABLE) |
90 _calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED );
91}
David Woodhouse9cdcb562006-04-25 14:18:07 +010092#endif /* __KERNEL__ */
Linus Torvalds1da177e2005-04-16 15:20:36 -070093#endif /* _LINUX_MMAN_H */