blob: d09dde1e57fb43e2f5be170d31e8fceb8c41a389 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001#ifndef _LINUX_MMAN_H
2#define _LINUX_MMAN_H
3
David Woodhouse9cdcb562006-04-25 14:18:07 +01004#include <linux/mm.h>
KOSAKI Motohiro00a62ce2009-04-30 15:08:51 -07005#include <linux/percpu_counter.h>
David Woodhouse9cdcb562006-04-25 14:18:07 +01006
Arun Sharma600634972011-07-26 16:09:06 -07007#include <linux/atomic.h>
David Howells607ca462012-10-13 10:46:48 +01008#include <uapi/linux/mman.h>
David Woodhouse9cdcb562006-04-25 14:18:07 +01009
Linus Torvalds1da177e2005-04-16 15:20:36 -070010extern int sysctl_overcommit_memory;
11extern int sysctl_overcommit_ratio;
KOSAKI Motohiro00a62ce2009-04-30 15:08:51 -070012extern struct percpu_counter vm_committed_as;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013
/*
 * Charge "pages" pages against the system-wide committed-VM counter
 * (vm_committed_as, declared above).  Positive values account new
 * commitments; negative values release previously accounted pages.
 */
static inline void vm_acct_memory(long pages)
{
	percpu_counter_add(&vm_committed_as, pages);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -070018
/*
 * Undo a previous vm_acct_memory() charge by accounting the negated
 * page count against vm_committed_as.
 */
static inline void vm_unacct_memory(long pages)
{
	vm_acct_memory(-pages);
}
23
/*
 * Allow architectures to handle additional protection bits
 */

#ifndef arch_calc_vm_prot_bits
/* Default: the architecture defines no extra PROT_* bits to translate. */
#define arch_calc_vm_prot_bits(prot) 0
#endif

#ifndef arch_vm_get_page_prot
/* Default: no architecture-specific page-protection bits for vm_flags. */
#define arch_vm_get_page_prot(vm_flags) __pgprot(0)
#endif
35
#ifndef arch_validate_prot
/*
 * Called from mprotect(); PROT_GROWSDOWN and PROT_GROWSUP have already
 * been masked out by the caller.
 *
 * Returns true (non-zero) if the prot flags are valid, i.e. contain
 * nothing beyond the generic read/write/exec/sem bits.
 */
static inline int arch_validate_prot(unsigned long prot)
{
	unsigned long valid = PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM;

	return !(prot & ~valid);
}
#define arch_validate_prot arch_validate_prot
#endif
49
/*
 * Optimisation macro. It is equivalent to:
 * (x & bit1) ? bit2 : 0
 * but this version is faster.
 * ("bit1" and "bit2" must be single bits)
 *
 * Since both bits are powers of two, the mapping reduces to a
 * branch-free multiply (or divide) by their power-of-two ratio,
 * chosen at compile time by the constant bit1 <= bit2 comparison.
 */
#define _calc_vm_trans(x, bit1, bit2) \
	((bit1) <= (bit2) ? ((x) & (bit1)) * ((bit2) / (bit1)) \
	 : ((x) & (bit1)) / ((bit1) / (bit2)))
59
60/*
61 * Combine the mmap "prot" argument into "vm_flags" used internally.
62 */
63static inline unsigned long
64calc_vm_prot_bits(unsigned long prot)
65{
66 return _calc_vm_trans(prot, PROT_READ, VM_READ ) |
67 _calc_vm_trans(prot, PROT_WRITE, VM_WRITE) |
Dave Kleikampb845f312008-07-08 00:28:51 +100068 _calc_vm_trans(prot, PROT_EXEC, VM_EXEC) |
69 arch_calc_vm_prot_bits(prot);
Linus Torvalds1da177e2005-04-16 15:20:36 -070070}
71
72/*
73 * Combine the mmap "flags" argument into "vm_flags" used internally.
74 */
75static inline unsigned long
76calc_vm_flag_bits(unsigned long flags)
77{
78 return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) |
79 _calc_vm_trans(flags, MAP_DENYWRITE, VM_DENYWRITE ) |
Linus Torvalds1da177e2005-04-16 15:20:36 -070080 _calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED );
81}
Linus Torvalds1da177e2005-04-16 15:20:36 -070082#endif /* _LINUX_MMAN_H */