blob: 634c4c51fe3adaee4b65d9a977e954f42ecf6131 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001#ifndef _LINUX_MMAN_H
2#define _LINUX_MMAN_H
3
David Woodhouse9cdcb562006-04-25 14:18:07 +01004#include <linux/mm.h>
KOSAKI Motohiro00a62ce2009-04-30 15:08:51 -07005#include <linux/percpu_counter.h>
David Woodhouse9cdcb562006-04-25 14:18:07 +01006
Arun Sharma600634972011-07-26 16:09:06 -07007#include <linux/atomic.h>
David Howells607ca462012-10-13 10:46:48 +01008#include <uapi/linux/mman.h>
David Woodhouse9cdcb562006-04-25 14:18:07 +01009
/* sysctl knobs controlling the memory overcommit policy. */
extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern unsigned long sysctl_overcommit_kbytes;
/* System-wide counter of committed (accounted) memory, in pages. */
extern struct percpu_counter vm_committed_as;

#ifdef CONFIG_SMP
/* Batch size used when folding per-CPU deltas into vm_committed_as. */
extern s32 vm_committed_as_batch;
#else
/* UP build: no per-CPU batching, updates hit the counter directly. */
#define vm_committed_as_batch 0
#endif

unsigned long vm_memory_committed(void);
22
/*
 * Charge (pages > 0) or uncharge (pages < 0) @pages against the global
 * committed-memory counter.  Goes through the per-CPU batched counter
 * update so the common case stays cheap under SMP.
 */
static inline void vm_acct_memory(long pages)
{
	__percpu_counter_add(&vm_committed_as, pages, vm_committed_as_batch);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -070027
/* Undo a previous vm_acct_memory() charge of @pages. */
static inline void vm_unacct_memory(long pages)
{
	vm_acct_memory(-pages);
}
32
/*
 * Allow architectures to handle additional protection bits
 */

#ifndef arch_calc_vm_prot_bits
/* Default: the architecture translates no extra prot bits (pkey unused). */
#define arch_calc_vm_prot_bits(prot, pkey) 0
#endif

#ifndef arch_vm_get_page_prot
/* Default: the architecture contributes no extra page-protection bits. */
#define arch_vm_get_page_prot(vm_flags) __pgprot(0)
#endif
44
45#ifndef arch_validate_prot
46/*
47 * This is called from mprotect(). PROT_GROWSDOWN and PROT_GROWSUP have
48 * already been masked out.
49 *
50 * Returns true if the prot flags are valid
51 */
Chen Gang949bed22016-08-02 14:03:42 -070052static inline bool arch_validate_prot(unsigned long prot)
Dave Kleikampb845f312008-07-08 00:28:51 +100053{
54 return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM)) == 0;
55}
56#define arch_validate_prot arch_validate_prot
57#endif
58
/*
 * Optimisation macro.  It is equivalent to:
 *   (x & bit1) ? bit2 : 0
 * but this version is faster.
 * ("bit1" and "bit2" must be single bits)
 *
 * Because both bits are single, compile-time-constant bits, one of them
 * evenly divides the other; multiplying or dividing the masked value by
 * that ratio moves the bit into place with no conditional branch.
 */
#define _calc_vm_trans(x, bit1, bit2) \
	((bit1) <= (bit2) ? ((x) & (bit1)) * ((bit2) / (bit1)) \
	 : ((x) & (bit1)) / ((bit1) / (bit2)))
68
69/*
70 * Combine the mmap "prot" argument into "vm_flags" used internally.
71 */
72static inline unsigned long
Dave Hansene6bfb702016-02-12 13:02:31 -080073calc_vm_prot_bits(unsigned long prot, unsigned long pkey)
Linus Torvalds1da177e2005-04-16 15:20:36 -070074{
75 return _calc_vm_trans(prot, PROT_READ, VM_READ ) |
76 _calc_vm_trans(prot, PROT_WRITE, VM_WRITE) |
Dave Kleikampb845f312008-07-08 00:28:51 +100077 _calc_vm_trans(prot, PROT_EXEC, VM_EXEC) |
Dave Hansene6bfb702016-02-12 13:02:31 -080078 arch_calc_vm_prot_bits(prot, pkey);
Linus Torvalds1da177e2005-04-16 15:20:36 -070079}
80
81/*
82 * Combine the mmap "flags" argument into "vm_flags" used internally.
83 */
84static inline unsigned long
85calc_vm_flag_bits(unsigned long flags)
86{
87 return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) |
88 _calc_vm_trans(flags, MAP_DENYWRITE, VM_DENYWRITE ) |
Michel Lespinasse09a9f1d2013-03-28 16:26:23 -070089 _calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED );
Linus Torvalds1da177e2005-04-16 15:20:36 -070090}

/* Overall commit limit derived from the sysctl knobs above; defined elsewhere. */
unsigned long vm_commit_limit(void);
Linus Torvalds1da177e2005-04-16 15:20:36 -070093#endif /* _LINUX_MMAN_H */