#ifndef __LINUX_GFP_H
#define __LINUX_GFP_H

#include <linux/mmdebug.h>
#include <linux/mmzone.h>
#include <linux/stddef.h>
#include <linux/linkage.h>
#include <linux/topology.h>

struct vm_area_struct;

/*
 * In case of changes, please don't forget to update
 * include/trace/events/mmflags.h and tools/perf/builtin-kmem.c
 */

/* Plain integer GFP bitmasks. Do not use this directly. */
#define ___GFP_DMA		0x01u
#define ___GFP_HIGHMEM		0x02u
#define ___GFP_DMA32		0x04u
#define ___GFP_MOVABLE		0x08u
#define ___GFP_RECLAIMABLE	0x10u
#define ___GFP_HIGH		0x20u
#define ___GFP_IO		0x40u
#define ___GFP_FS		0x80u
#define ___GFP_COLD		0x100u
#define ___GFP_NOWARN		0x200u
#define ___GFP_REPEAT		0x400u
#define ___GFP_NOFAIL		0x800u
#define ___GFP_NORETRY		0x1000u
#define ___GFP_MEMALLOC		0x2000u
#define ___GFP_COMP		0x4000u
#define ___GFP_ZERO		0x8000u
#define ___GFP_NOMEMALLOC	0x10000u
#define ___GFP_HARDWALL		0x20000u
#define ___GFP_THISNODE		0x40000u
#define ___GFP_ATOMIC		0x80000u
#define ___GFP_ACCOUNT		0x100000u
#define ___GFP_NOTRACK		0x200000u
#define ___GFP_DIRECT_RECLAIM	0x400000u
#define ___GFP_OTHER_NODE	0x800000u
#define ___GFP_WRITE		0x1000000u
#define ___GFP_KSWAPD_RECLAIM	0x2000000u
#define ___GFP_CMA		0x4000000u
/* If the above are modified, __GFP_BITS_SHIFT may need updating */

/*
 * Physical address zone modifiers (see linux/mmzone.h - low four bits)
 *
 * Do not put any conditional on these. If necessary modify the definitions
 * without the underscores and use them consistently. The definitions here may
 * be used in bit comparisons.
 */
#define __GFP_DMA	((__force gfp_t)___GFP_DMA)
#define __GFP_HIGHMEM	((__force gfp_t)___GFP_HIGHMEM)
#define __GFP_DMA32	((__force gfp_t)___GFP_DMA32)
#define __GFP_MOVABLE	((__force gfp_t)___GFP_MOVABLE)  /* ZONE_MOVABLE allowed */
#define __GFP_CMA	((__force gfp_t)___GFP_CMA)
#define GFP_ZONEMASK	(__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE| \
			 __GFP_CMA)
/*
 * Page mobility and placement hints
 *
 * These flags provide hints about how mobile the page is. Pages with similar
 * mobility are placed within the same pageblocks to minimise problems due
 * to external fragmentation.
 *
 * __GFP_MOVABLE (also a zone modifier) indicates that the page can be
 * moved by page migration during memory compaction or can be reclaimed.
 *
 * __GFP_RECLAIMABLE is used for slab allocations that specify
 * SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers.
 *
 * __GFP_WRITE indicates the caller intends to dirty the page. Where possible,
 * these pages will be spread between local zones to avoid all the dirty
 * pages being in one zone (fair zone allocation policy).
 *
 * __GFP_HARDWALL enforces the cpuset memory allocation policy.
 *
 * __GFP_THISNODE forces the allocation to be satisfied from the requested
 * node with no fallbacks or placement policy enforcements.
 *
 * __GFP_ACCOUNT causes the allocation to be accounted to kmemcg.
 */
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
#define __GFP_WRITE	((__force gfp_t)___GFP_WRITE)
#define __GFP_HARDWALL	((__force gfp_t)___GFP_HARDWALL)
#define __GFP_THISNODE	((__force gfp_t)___GFP_THISNODE)
#define __GFP_ACCOUNT	((__force gfp_t)___GFP_ACCOUNT)
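
/*
 * Illustrative sketch (assumed typical usage, not a definition from this
 * header): a caller that must keep an allocation on one specific,
 * already-online node combines its base flags with __GFP_THISNODE:
 *
 *	page = alloc_pages_node(nid, GFP_KERNEL | __GFP_THISNODE, 0);
 *
 * which fails (returns NULL) rather than falling back to other nodes.
 */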

/*
 * Watermark modifiers -- control access to emergency reserves
 *
 * __GFP_HIGH indicates that the caller is high-priority and that granting
 * the request is necessary before the system can make forward progress.
 * For example, creating an IO context to clean pages.
 *
 * __GFP_ATOMIC indicates that the caller cannot reclaim or sleep and is
 * high priority. Users are typically interrupt handlers. This may be
 * used in conjunction with __GFP_HIGH.
 *
 * __GFP_MEMALLOC allows access to all memory. This should only be used when
 * the caller guarantees the allocation will allow more memory to be freed
 * very shortly, e.g. process exiting or swapping. Users should either be
 * the MM or be co-ordinating closely with the VM (e.g. swap over NFS).
 *
 * __GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves.
 * This takes precedence over the __GFP_MEMALLOC flag if both are set.
 */
#define __GFP_ATOMIC	((__force gfp_t)___GFP_ATOMIC)
#define __GFP_HIGH	((__force gfp_t)___GFP_HIGH)
#define __GFP_MEMALLOC	((__force gfp_t)___GFP_MEMALLOC)
#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC)

/*
 * Reclaim modifiers
 *
 * __GFP_IO can start physical IO.
 *
 * __GFP_FS can call down to the low-level FS. Clearing the flag avoids the
 * allocator recursing into the filesystem which might already be holding
 * locks.
 *
 * __GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim.
 * This flag can be cleared to avoid unnecessary delays when a fallback
 * option is available.
 *
 * __GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when
 * the low watermark is reached and have it reclaim pages until the high
 * watermark is reached. A caller may wish to clear this flag when fallback
 * options are available and the reclaim is likely to disrupt the system. The
 * canonical example is THP allocation where a fallback is cheap but
 * reclaim/compaction may cause indirect stalls.
 *
 * __GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim.
 *
 * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt
 * _might_ fail. This depends upon the particular VM implementation.
 *
 * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
 * cannot handle allocation failures. New users should be evaluated carefully
 * (and the flag should be used only when there is no reasonable failure
 * policy), but it is definitely preferable to use the flag rather than
 * open-code an endless loop around the allocator.
 *
 * __GFP_NORETRY: The VM implementation must not retry indefinitely and will
 * return NULL when direct reclaim and memory compaction have failed to allow
 * the allocation to succeed. The OOM killer is not called with the current
 * implementation.
 */
#define __GFP_IO	((__force gfp_t)___GFP_IO)
#define __GFP_FS	((__force gfp_t)___GFP_FS)
#define __GFP_DIRECT_RECLAIM	((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */
#define __GFP_KSWAPD_RECLAIM	((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */
#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
#define __GFP_REPEAT	((__force gfp_t)___GFP_REPEAT)
#define __GFP_NOFAIL	((__force gfp_t)___GFP_NOFAIL)
#define __GFP_NORETRY	((__force gfp_t)___GFP_NORETRY)
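
/*
 * Illustrative sketch (assumed typical usage): an opportunistic high-order
 * allocation with a cheap fallback combines __GFP_NORETRY and __GFP_NOWARN
 * so that failure is quick and silent:
 *
 *	page = alloc_pages(GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN, order);
 *	if (!page)
 *		page = alloc_page(GFP_KERNEL);
 *
 * where the second call falls back to a single order-0 page.
 */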

/*
 * Action modifiers
 *
 * __GFP_COLD indicates that the caller does not expect the page to be used
 * in the near future. Where possible, a cache-cold page will be returned.
 *
 * __GFP_NOWARN suppresses allocation failure reports.
 *
 * __GFP_COMP causes compound page metadata to be set up for higher-order
 * allocations.
 *
 * __GFP_ZERO returns a zeroed page on success.
 *
 * __GFP_NOTRACK avoids tracking with kmemcheck.
 *
 * __GFP_NOTRACK_FALSE_POSITIVE is an alias of __GFP_NOTRACK. It's a means of
 * distinguishing in the source between false positives and allocations that
 * cannot be supported (e.g. page tables).
 *
 * __GFP_OTHER_NODE is for allocations that are on a remote node but that
 * should not be accounted for as a remote allocation in vmstat. A
 * typical user would be khugepaged collapsing a huge page on a remote
 * node.
 */
#define __GFP_COLD	((__force gfp_t)___GFP_COLD)
#define __GFP_NOWARN	((__force gfp_t)___GFP_NOWARN)
#define __GFP_COMP	((__force gfp_t)___GFP_COMP)
#define __GFP_ZERO	((__force gfp_t)___GFP_ZERO)
#define __GFP_NOTRACK	((__force gfp_t)___GFP_NOTRACK)
#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE)
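
/*
 * Illustrative sketch (assumed typical usage): action modifiers are OR'ed
 * into the base flags, e.g. a caller wanting a pre-zeroed page and no
 * failure warning might use
 *
 *	page = alloc_page(GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN);
 */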

/* Room for 27 __GFP_FOO bits */
#define __GFP_BITS_SHIFT 27
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))

/*
 * Useful GFP flag combinations that are commonly used. It is recommended
 * that subsystems start with one of these combinations and then set/clear
 * __GFP_FOO flags as necessary.
 *
 * GFP_ATOMIC users cannot sleep and need the allocation to succeed. A lower
 * watermark is applied to allow access to "atomic reserves".
 *
 * GFP_KERNEL is typical for kernel-internal allocations. The caller requires
 * ZONE_NORMAL or a lower zone for direct access but can direct reclaim.
 *
 * GFP_KERNEL_ACCOUNT is the same as GFP_KERNEL, except the allocation is
 * accounted to kmemcg.
 *
 * GFP_NOWAIT is for kernel allocations that should not stall for direct
 * reclaim, start physical IO or use any filesystem callback.
 *
 * GFP_NOIO will use direct reclaim to discard clean pages or slab pages
 * that do not require the starting of any physical IO.
 *
 * GFP_NOFS will use direct reclaim but will not use any filesystem interfaces.
 *
 * GFP_USER is for userspace allocations that also need to be directly
 * accessible by the kernel or hardware. It is typically used by hardware
 * for buffers that are mapped to userspace (e.g. graphics) that hardware
 * still must DMA to. cpuset limits are enforced for these allocations.
 *
 * GFP_DMA exists for historical reasons and should be avoided where possible.
 * The flag indicates that the caller requires that the lowest zone be
 * used (ZONE_DMA or 16M on x86-64). Ideally, this would be removed but
 * it would require careful auditing as some users really require it and
 * others use the flag to avoid lowmem reserves in ZONE_DMA and treat the
 * lowest zone as a type of emergency reserve.
 *
 * GFP_DMA32 is similar to GFP_DMA except that the caller requires a 32-bit
 * address.
 *
 * GFP_HIGHUSER is for userspace allocations that may be mapped to userspace,
 * do not need to be directly accessible by the kernel but that cannot
 * move once in use. An example may be a hardware allocation that maps
 * data directly into userspace but has no addressing limitations.
 *
 * GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not
 * need direct access to but can use kmap() when access is required. They
 * are expected to be movable via page reclaim or page migration. Typically,
 * pages on the LRU would also be allocated with GFP_HIGHUSER_MOVABLE.
 *
 * GFP_TRANSHUGE and GFP_TRANSHUGE_LIGHT are used for THP allocations. They are
 * compound allocations that will generally fail quickly if memory is not
 * available and will not wake kswapd/kcompactd on failure. The _LIGHT
 * version does not attempt reclaim/compaction at all and is by default used
 * in the page fault path, while the non-light version is used by khugepaged.
 */
#define GFP_ATOMIC	(__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
#define GFP_KERNEL	(__GFP_RECLAIM | __GFP_IO | __GFP_FS)
#define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT)
#define GFP_NOWAIT	(__GFP_KSWAPD_RECLAIM)
#define GFP_NOIO	(__GFP_RECLAIM)
#define GFP_NOFS	(__GFP_RECLAIM | __GFP_IO)
#define GFP_TEMPORARY	(__GFP_RECLAIM | __GFP_IO | __GFP_FS | \
			 __GFP_RECLAIMABLE)
#define GFP_USER	(__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
#define GFP_DMA		__GFP_DMA
#define GFP_DMA32	__GFP_DMA32
#define GFP_HIGHUSER	(GFP_USER | __GFP_HIGHMEM)
#define GFP_HIGHUSER_MOVABLE	(GFP_HIGHUSER | __GFP_MOVABLE)
#define GFP_TRANSHUGE_LIGHT	((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
			 __GFP_NOMEMALLOC | __GFP_NOWARN) & ~__GFP_RECLAIM)
#define GFP_TRANSHUGE	(GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM)
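
/*
 * Illustrative sketch (assumed typical usage, not definitions from this
 * header): process context that may sleep uses GFP_KERNEL, while code that
 * must not sleep (e.g. an interrupt handler) uses GFP_ATOMIC:
 *
 *	page = alloc_page(GFP_KERNEL);	for sleepable process context
 *	page = alloc_page(GFP_ATOMIC);	for atomic context, may tap reserves
 *
 * A filesystem that could deadlock if reclaim re-entered it would clear
 * __GFP_FS by using GFP_NOFS instead.
 */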

/* Convert GFP flags to their corresponding migrate type */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
#define GFP_MOVABLE_SHIFT 3

static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
{
	VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
	BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE);
	BUILD_BUG_ON((___GFP_MOVABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_MOVABLE);

	if (unlikely(page_group_by_mobility_disabled))
		return MIGRATE_UNMOVABLE;

	/* Group based on mobility */
#ifndef CONFIG_CMA
	return (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
#else
	return ((gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT) |
	       ((gfp_flags & __GFP_CMA) != 0);
#endif
}
#undef GFP_MOVABLE_MASK
#undef GFP_MOVABLE_SHIFT
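
/*
 * Illustrative sketch of the resulting grouping (assuming the default
 * migratetype layout and page_group_by_mobility_disabled == 0):
 *
 *	gfpflags_to_migratetype(GFP_KERNEL)			== MIGRATE_UNMOVABLE
 *	gfpflags_to_migratetype(GFP_HIGHUSER_MOVABLE)		== MIGRATE_MOVABLE
 *	gfpflags_to_migratetype(GFP_NOFS | __GFP_RECLAIMABLE)	== MIGRATE_RECLAIMABLE
 */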

static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{
	return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
}

/**
 * gfpflags_normal_context - is gfp_flags a normal sleepable context?
 * @gfp_flags: gfp_flags to test
 *
 * Test whether @gfp_flags indicates that the allocation is from the
 * %current context and allowed to sleep.
 *
 * An allocation being allowed to block doesn't mean it owns the %current
 * context. When the direct reclaim path tries to allocate memory, the
 * allocation context is nested inside whatever %current was doing at the
 * time of the original allocation. The nested allocation may be allowed
 * to block but modifying anything %current owns can corrupt the outer
 * context's expectations.
 *
 * A %true result from this function indicates that the allocation context
 * can sleep and use anything that's associated with %current.
 */
static inline bool gfpflags_normal_context(const gfp_t gfp_flags)
{
	return (gfp_flags & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC)) ==
		__GFP_DIRECT_RECLAIM;
}
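
/*
 * Illustrative sketch (an assumed usage pattern; the helper names below are
 * hypothetical and only for illustration): callers can use these predicates
 * to defer optional work when the GFP context cannot block, e.g.
 *
 *	if (gfpflags_allow_blocking(gfp))
 *		do_expensive_setup();
 *	else
 *		defer_setup_to_workqueue();
 */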

#ifdef CONFIG_HIGHMEM
#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
#else
#define OPT_ZONE_HIGHMEM ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA
#define OPT_ZONE_DMA ZONE_DMA
#else
#define OPT_ZONE_DMA ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA32
#define OPT_ZONE_DMA32 ZONE_DMA32
#else
#define OPT_ZONE_DMA32 ZONE_NORMAL
#endif

/*
 * GFP_ZONE_TABLE is a word size bitstring that is used for looking up the
 * zone to use given the lowest 4 bits of gfp_t. Entries are GFP_ZONES_SHIFT
 * long and there are 16 of them to cover all possible combinations of
 * __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM.
 *
 * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA.
 * But GFP_MOVABLE is not only a zone specifier but also an allocation
 * policy. Therefore __GFP_MOVABLE plus another zone selector is valid.
 * Only 1 bit of the lowest 3 bits (DMA,DMA32,HIGHMEM) can be set to "1".
 *
 *       bit       result
 *       =================
 *       0x0    => NORMAL
 *       0x1    => DMA or NORMAL
 *       0x2    => HIGHMEM or NORMAL
 *       0x3    => BAD (DMA+HIGHMEM)
 *       0x4    => DMA32 or DMA or NORMAL
 *       0x5    => BAD (DMA+DMA32)
 *       0x6    => BAD (HIGHMEM+DMA32)
 *       0x7    => BAD (HIGHMEM+DMA32+DMA)
 *       0x8    => NORMAL (MOVABLE+0)
 *       0x9    => DMA or NORMAL (MOVABLE+DMA)
 *       0xa    => MOVABLE (Movable is valid only if HIGHMEM is set too)
 *       0xb    => BAD (MOVABLE+HIGHMEM+DMA)
 *       0xc    => DMA32 (MOVABLE+DMA32)
 *       0xd    => BAD (MOVABLE+DMA32+DMA)
 *       0xe    => BAD (MOVABLE+DMA32+HIGHMEM)
 *       0xf    => BAD (MOVABLE+DMA32+HIGHMEM+DMA)
 *
 * GFP_ZONES_SHIFT must be <= 2 on 32 bit platforms.
 */

#if defined(CONFIG_ZONE_DEVICE) && (MAX_NR_ZONES-1) <= 4
/* ZONE_DEVICE is not a valid GFP zone specifier */
#define GFP_ZONES_SHIFT 2
#else
#define GFP_ZONES_SHIFT ZONES_SHIFT
#endif

#if 16 * GFP_ZONES_SHIFT > BITS_PER_LONG
#error GFP_ZONES_SHIFT too large to create GFP_ZONE_TABLE integer
#endif

#define GFP_ZONE_TABLE ( \
	(ZONE_NORMAL << 0 * GFP_ZONES_SHIFT)				       \
	| (OPT_ZONE_DMA << ___GFP_DMA * GFP_ZONES_SHIFT)		       \
	| (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * GFP_ZONES_SHIFT)	       \
	| (OPT_ZONE_DMA32 << ___GFP_DMA32 * GFP_ZONES_SHIFT)		       \
	| (ZONE_NORMAL << ___GFP_MOVABLE * GFP_ZONES_SHIFT)		       \
	| (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * GFP_ZONES_SHIFT)   \
	| (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * GFP_ZONES_SHIFT)\
	| (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * GFP_ZONES_SHIFT)\
)

/*
 * GFP_ZONE_BAD is a bitmap for all combinations of __GFP_DMA, __GFP_DMA32,
 * __GFP_HIGHMEM and __GFP_MOVABLE that are not permitted. One flag per
 * entry starting with bit 0. Bit is set if the combination is not
 * allowed.
 */
#define GFP_ZONE_BAD ( \
	1 << (___GFP_DMA | ___GFP_HIGHMEM)				      \
	| 1 << (___GFP_DMA | ___GFP_DMA32)				      \
	| 1 << (___GFP_DMA32 | ___GFP_HIGHMEM)				      \
	| 1 << (___GFP_DMA | ___GFP_DMA32 | ___GFP_HIGHMEM)		      \
	| 1 << (___GFP_MOVABLE | ___GFP_HIGHMEM | ___GFP_DMA)		      \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA)		      \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_HIGHMEM)	      \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA | ___GFP_HIGHMEM)  \
)

static inline enum zone_type gfp_zone(gfp_t flags)
{
	enum zone_type z;
	int bit = (__force int) (flags & GFP_ZONEMASK);

	z = (GFP_ZONE_TABLE >> (bit * GFP_ZONES_SHIFT)) &
					 ((1 << GFP_ZONES_SHIFT) - 1);
	VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
	return z;
}
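
/*
 * Illustrative sketch of what the lookup yields (assuming a configuration
 * with CONFIG_HIGHMEM and CONFIG_ZONE_DMA enabled):
 *
 *	gfp_zone(GFP_KERNEL)		== ZONE_NORMAL
 *	gfp_zone(GFP_DMA)		== ZONE_DMA
 *	gfp_zone(GFP_HIGHUSER)		== ZONE_HIGHMEM
 *	gfp_zone(GFP_HIGHUSER_MOVABLE)	== ZONE_MOVABLE
 */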

/*
 * There is only one page-allocator function, and two main namespaces to
 * it. The alloc_page*() variants return 'struct page *' and as such
 * can allocate highmem pages; the *get*page*() variants return
 * virtual kernel addresses to the allocated page(s).
 */
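
/*
 * Illustrative sketch of the two namespaces (assumed typical usage):
 *
 *	struct page *page = alloc_page(GFP_KERNEL);
 *	...
 *	__free_page(page);
 *
 *	unsigned long addr = __get_free_page(GFP_KERNEL);
 *	...
 *	free_page(addr);
 *
 * The address-returning variants must not be passed __GFP_HIGHMEM, since a
 * highmem page may have no kernel virtual address.
 */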

static inline int gfp_zonelist(gfp_t flags)
{
#ifdef CONFIG_NUMA
	if (unlikely(flags & __GFP_THISNODE))
		return ZONELIST_NOFALLBACK;
#endif
	return ZONELIST_FALLBACK;
}

/*
 * We get the zone list from the current node and the gfp_mask.
 * This zone list contains a maximum of MAX_NUMNODES*MAX_NR_ZONES zones.
 * There are two zonelists per node, one for all zones with memory and
 * one containing just zones from the node the zonelist belongs to.
 *
 * For the normal case of non-DISCONTIGMEM systems, the NODE_DATA() gets
 * optimized to &contig_page_data at compile-time.
 */
static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
{
	return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags);
}

#ifndef HAVE_ARCH_FREE_PAGE
static inline void arch_free_page(struct page *page, int order) { }
#endif
#ifndef HAVE_ARCH_ALLOC_PAGE
static inline void arch_alloc_page(struct page *page, int order) { }
#endif

struct page *
__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
		       struct zonelist *zonelist, nodemask_t *nodemask);

static inline struct page *
__alloc_pages(gfp_t gfp_mask, unsigned int order,
		struct zonelist *zonelist)
{
	return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL);
}

/*
 * Allocate pages, preferring the node given as nid. The node must be valid and
 * online. For a more general interface, see alloc_pages_node().
 */
static inline struct page *
__alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
{
	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
	VM_WARN_ON(!node_online(nid));

	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}

/*
 * Allocate pages, preferring the node given as nid. When nid == NUMA_NO_NODE,
 * prefer the current CPU's closest node. Otherwise node must be valid and
 * online.
 */
static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
						unsigned int order)
{
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

	return __alloc_pages_node(nid, gfp_mask, order);
}
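
/*
 * Illustrative sketch (assumed typical usage): most callers pass
 * NUMA_NO_NODE and let the allocator pick the nearest node with memory:
 *
 *	page = alloc_pages_node(NUMA_NO_NODE, GFP_KERNEL, 0);
 *
 * Callers that already know the target node (e.g. for a per-node data
 * structure) pass that nid directly instead.
 */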

#ifdef CONFIG_NUMA
extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);

static inline struct page *
alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	return alloc_pages_current(gfp_mask, order);
}
extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
			struct vm_area_struct *vma, unsigned long addr,
			int node, bool hugepage);
#define alloc_hugepage_vma(gfp_mask, vma, addr, order)	\
	alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
#else
#define alloc_pages(gfp_mask, order) \
		alloc_pages_node(numa_node_id(), gfp_mask, order)
#define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\
	alloc_pages(gfp_mask, order)
#define alloc_hugepage_vma(gfp_mask, vma, addr, order)	\
	alloc_pages(gfp_mask, order)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
#define alloc_page_vma(gfp_mask, vma, addr)			\
	alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false)
#define alloc_page_vma_node(gfp_mask, vma, addr, node)		\
	alloc_pages_vma(gfp_mask, 0, vma, addr, node, false)

extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);

void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
void free_pages_exact(void *virt, size_t size);
void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
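
/*
 * Illustrative sketch (assumed typical usage): alloc_pages_exact() returns a
 * physically contiguous region sized to the request (rounded up to whole
 * pages) and releases the tail pages that a power-of-two allocation would
 * otherwise waste:
 *
 *	buf = alloc_pages_exact(3 * PAGE_SIZE, GFP_KERNEL | __GFP_ZERO);
 *	...
 *	free_pages_exact(buf, 3 * PAGE_SIZE);
 */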

#define __get_free_page(gfp_mask) \
		__get_free_pages((gfp_mask), 0)

#define __get_dma_pages(gfp_mask, order) \
		__get_free_pages((gfp_mask) | GFP_DMA, (order))

extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
extern void free_hot_cold_page(struct page *page, bool cold);
extern void free_hot_cold_page_list(struct list_head *list, bool cold);

struct page_frag_cache;
extern void *__alloc_page_frag(struct page_frag_cache *nc,
			       unsigned int fragsz, gfp_t gfp_mask);
extern void __free_page_frag(void *addr);

#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)

void page_alloc_init(void);
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(struct zone *zone);
void drain_local_pages(void *zone);

void page_alloc_init_late(void);

/*
 * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what
 * GFP flags are used before interrupts are enabled. Once interrupts are
 * enabled, it is set to __GFP_BITS_MASK while the system is running. During
 * hibernation, it is used by PM to avoid I/O during memory allocation while
 * devices are suspended.
 */
extern gfp_t gfp_allowed_mask;

/* Returns true if the gfp_mask allows use of ALLOC_NO_WATERMARK */
bool gfp_pfmemalloc_allowed(gfp_t gfp_mask);

extern void pm_restrict_gfp_mask(void);
extern void pm_restore_gfp_mask(void);

#ifdef CONFIG_PM_SLEEP
extern bool pm_suspended_storage(void);
#else
static inline bool pm_suspended_storage(void)
{
	return false;
}
#endif /* CONFIG_PM_SLEEP */

#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
/* The below functions must be run on a range from a single zone. */
extern int alloc_contig_range(unsigned long start, unsigned long end,
			      unsigned migratetype);
extern void free_contig_range(unsigned long pfn, unsigned nr_pages);
#endif

#ifdef CONFIG_CMA
/* CMA stuff */
extern void init_cma_reserved_pageblock(struct page *page);
#endif

#endif /* __LINUX_GFP_H */