#ifndef _LINUX_MMAN_H
#define _LINUX_MMAN_H

#include <linux/mm.h>
#include <linux/percpu_counter.h>

#include <linux/atomic.h>
#include <uapi/linux/mman.h>

extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern struct percpu_counter vm_committed_as;

unsigned long vm_memory_committed(void);

static inline void vm_acct_memory(long pages)
{
	percpu_counter_add(&vm_committed_as, pages);
}

static inline void vm_unacct_memory(long pages)
{
	vm_acct_memory(-pages);
}
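
/*
 * vm_acct_memory()/vm_unacct_memory() adjust the global "committed VM"
 * counter used for overcommit accounting, which is policed according to
 * sysctl_overcommit_memory and sysctl_overcommit_ratio.  Illustrative
 * use only: a caller charging "pages" pages of address space pairs the
 * calls like this:
 *
 *	vm_acct_memory(pages);
 *	...
 *	vm_unacct_memory(pages);
 */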

/*
 * Allow architectures to handle additional protection bits
 */

#ifndef arch_calc_vm_prot_bits
#define arch_calc_vm_prot_bits(prot) 0
#endif
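
/*
 * Illustrative sketch of an architecture override: an arch can supply
 * its own arch_calc_vm_prot_bits() (typically in its asm/mman.h) so the
 * fallback above is not used, mapping an arch-specific protection bit
 * to a VM_* flag, e.g.
 *
 *	#define arch_calc_vm_prot_bits(prot) \
 *		(((prot) & PROT_FOO) ? VM_FOO : 0)
 *
 * PROT_FOO and VM_FOO are hypothetical names used only for illustration.
 */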

#ifndef arch_vm_get_page_prot
#define arch_vm_get_page_prot(vm_flags) __pgprot(0)
#endif

#ifndef arch_validate_prot
/*
 * This is called from mprotect(). PROT_GROWSDOWN and PROT_GROWSUP have
 * already been masked out.
 *
 * Returns true if the prot flags are valid
 */
static inline int arch_validate_prot(unsigned long prot)
{
	return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM)) == 0;
}
#define arch_validate_prot arch_validate_prot
#endif

/*
 * Optimisation macro. It is equivalent to:
 *	(x & bit1) ? bit2 : 0
 * but this version is faster.
 * ("bit1" and "bit2" must be single bits)
 */
#define _calc_vm_trans(x, bit1, bit2) \
  ((bit1) <= (bit2) ? ((x) & (bit1)) * ((bit2) / (bit1)) \
   : ((x) & (bit1)) / ((bit1) / (bit2)))
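
/*
 * Worked example: with bit1 == 0x4 and bit2 == 0x10 the first branch is
 * taken and the macro expands to ((x) & 0x4) * (0x10 / 0x4), i.e.
 * ((x) & 0x4) * 4, which is 0x10 when the 0x4 bit is set in x and 0
 * otherwise.  With the bits swapped (bit1 == 0x10, bit2 == 0x4) the
 * second branch divides instead: ((x) & 0x10) / 4.  Either way the
 * result matches (x & bit1) ? bit2 : 0, without a conditional branch.
 */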

/*
 * Combine the mmap "prot" argument into "vm_flags" used internally.
 */
static inline unsigned long
calc_vm_prot_bits(unsigned long prot)
{
	return _calc_vm_trans(prot, PROT_READ,  VM_READ ) |
	       _calc_vm_trans(prot, PROT_WRITE, VM_WRITE) |
	       _calc_vm_trans(prot, PROT_EXEC,  VM_EXEC) |
	       arch_calc_vm_prot_bits(prot);
}
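
/*
 * For example, an mmap() prot of PROT_READ | PROT_WRITE comes out of
 * calc_vm_prot_bits() as VM_READ | VM_WRITE, plus whatever the
 * architecture adds through arch_calc_vm_prot_bits().
 */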

/*
 * Combine the mmap "flags" argument into "vm_flags" used internally.
 */
static inline unsigned long
calc_vm_flag_bits(unsigned long flags)
{
	return _calc_vm_trans(flags, MAP_GROWSDOWN,  VM_GROWSDOWN ) |
	       _calc_vm_trans(flags, MAP_DENYWRITE,  VM_DENYWRITE ) |
	       ((flags & MAP_LOCKED) ? (VM_LOCKED | VM_POPULATE) : 0) |
	       (((flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE) ?
		VM_POPULATE : 0);
}
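
/*
 * Note that MAP_LOCKED implies both VM_LOCKED and VM_POPULATE, while
 * MAP_POPULATE only turns into VM_POPULATE when MAP_NONBLOCK is not
 * also set.
 */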
#endif /* _LINUX_MMAN_H */