#ifndef __LINUX_GFP_H
#define __LINUX_GFP_H

#include <linux/mmzone.h>
#include <linux/stddef.h>
#include <linux/linkage.h>
#include <linux/config.h>

struct vm_area_struct;

/*
 * GFP bitmasks..
 */
/* Zone modifiers in GFP_ZONEMASK (see linux/mmzone.h - low two bits) */
#define __GFP_DMA	0x01
#define __GFP_HIGHMEM	0x02

/*
 * Action modifiers - do not change the zoning
 *
 * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt
 * _might_ fail. This depends upon the particular VM implementation.
 *
 * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
 * cannot handle allocation failures.
 *
 * __GFP_NORETRY: The VM implementation must not retry indefinitely.
 */
#define __GFP_WAIT	0x10u	/* Can wait and reschedule? */
#define __GFP_HIGH	0x20u	/* Should access emergency pools? */
#define __GFP_IO	0x40u	/* Can start physical IO? */
#define __GFP_FS	0x80u	/* Can call down to low-level FS? */
#define __GFP_COLD	0x100u	/* Cache-cold page required */
#define __GFP_NOWARN	0x200u	/* Suppress page allocation failure warning */
#define __GFP_REPEAT	0x400u	/* Retry the allocation. Might fail */
#define __GFP_NOFAIL	0x800u	/* Retry for ever. Cannot fail */
#define __GFP_NORETRY	0x1000u	/* Do not retry. Might fail */
#define __GFP_NO_GROW	0x2000u	/* Slab internal usage */
#define __GFP_COMP	0x4000u	/* Add compound page metadata */
#define __GFP_ZERO	0x8000u	/* Return zeroed page on success */
#define __GFP_NOMEMALLOC 0x10000u /* Don't use emergency reserves */

#define __GFP_BITS_SHIFT 20	/* Room for 20 __GFP_FOO bits */
#define __GFP_BITS_MASK ((1 << __GFP_BITS_SHIFT) - 1)
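
/*
 * Illustrative sketch, not part of the original header: split a gfp mask
 * into its zone-modifier bits (the low GFP_ZONEMASK bits from
 * linux/mmzone.h) and its action-modifier bits, mirroring the layout
 * described above.  The helper name is hypothetical.
 */
static inline unsigned int gfp_example_action_bits(unsigned int gfp_mask)
{
	/* Everything inside the __GFP bit space except the zone modifiers. */
	return (gfp_mask & __GFP_BITS_MASK) & ~GFP_ZONEMASK;
}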

/* if you forget to add the bitmask here, the kernel will crash, period */
#define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \
			__GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \
			__GFP_NOFAIL|__GFP_NORETRY|__GFP_NO_GROW|__GFP_COMP| \
			__GFP_NOMEMALLOC)

#define GFP_ATOMIC	(__GFP_HIGH)
#define GFP_NOIO	(__GFP_WAIT)
#define GFP_NOFS	(__GFP_WAIT | __GFP_IO)
#define GFP_KERNEL	(__GFP_WAIT | __GFP_IO | __GFP_FS)
#define GFP_USER	(__GFP_WAIT | __GFP_IO | __GFP_FS)
#define GFP_HIGHUSER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HIGHMEM)

/* Flag - indicates that the buffer will be suitable for DMA.  Ignored on some
   platforms, used as appropriate on others */

#define GFP_DMA		__GFP_DMA

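/*
 * Illustrative sketch, not part of the original header: compose a base
 * GFP_* set with a zone modifier and action modifiers for an opportunistic,
 * DMA-capable allocation that fails fast and quietly instead of retrying.
 * The helper name is hypothetical; the flags are the ones defined above.
 */
static inline unsigned int gfp_example_dma_nowait_mask(void)
{
	/* GFP_KERNEL may sleep; NORETRY/NOWARN make failure cheap and silent. */
	return GFP_KERNEL | GFP_DMA | __GFP_NORETRY | __GFP_NOWARN;
}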

/*
 * There is only one page-allocator function, and two main namespaces to
 * it. The alloc_page*() variants return 'struct page *' and as such
 * can allocate highmem pages; the *get*page*() variants return
 * virtual kernel addresses to the allocated page(s).
 */

/*
 * We get the zone list from the current node and the gfp_mask.
 * This zone list contains a maximum of MAXNODES*MAX_NR_ZONES zones.
 *
 * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
 * optimized to &contig_page_data at compile-time.
 */

#ifndef HAVE_ARCH_FREE_PAGE
static inline void arch_free_page(struct page *page, int order) { }
#endif

extern struct page *
FASTCALL(__alloc_pages(unsigned int, unsigned int, struct zonelist *));

static inline struct page *alloc_pages_node(int nid, unsigned int __nocast gfp_mask,
						unsigned int order)
{
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	return __alloc_pages(gfp_mask, order,
		NODE_DATA(nid)->node_zonelists + (gfp_mask & GFP_ZONEMASK));
}
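
/*
 * Illustrative sketch, not part of the original header: allocate one zeroed
 * page from a specific node with alloc_pages_node().  Taking nid from the
 * caller and the helper name itself are assumptions made for the example;
 * the call uses only declarations from this header.
 */
static inline struct page *gfp_example_alloc_node_page(int nid)
{
	/* Order 0 means a single page; __GFP_ZERO asks for zeroed memory. */
	return alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO, 0);
}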

#ifdef CONFIG_NUMA
extern struct page *alloc_pages_current(unsigned int __nocast gfp_mask, unsigned order);

static inline struct page *
alloc_pages(unsigned int __nocast gfp_mask, unsigned int order)
{
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	return alloc_pages_current(gfp_mask, order);
}
extern struct page *alloc_page_vma(unsigned __nocast gfp_mask,
			struct vm_area_struct *vma, unsigned long addr);
#else
#define alloc_pages(gfp_mask, order) \
		alloc_pages_node(numa_node_id(), gfp_mask, order)
#define alloc_page_vma(gfp_mask, vma, addr) alloc_pages(gfp_mask, 0)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
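
/*
 * Illustrative sketch, not part of the original header: allocate a zeroed,
 * possibly-highmem page for a user mapping, roughly as a fault handler
 * would.  The vma and faulting address are assumed to come from the caller;
 * the helper name is hypothetical.
 */
static inline struct page *gfp_example_anon_page(struct vm_area_struct *vma,
						unsigned long addr)
{
	/* GFP_HIGHUSER: may sleep, may do IO/FS, highmem pages are allowed. */
	return alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, addr);
}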

extern unsigned long FASTCALL(__get_free_pages(unsigned int __nocast gfp_mask, unsigned int order));
extern unsigned long FASTCALL(get_zeroed_page(unsigned int __nocast gfp_mask));

#define __get_free_page(gfp_mask) \
		__get_free_pages((gfp_mask),0)

#define __get_dma_pages(gfp_mask, order) \
		__get_free_pages((gfp_mask) | GFP_DMA,(order))

extern void FASTCALL(__free_pages(struct page *page, unsigned int order));
extern void FASTCALL(free_pages(unsigned long addr, unsigned int order));
extern void FASTCALL(free_hot_page(struct page *page));
extern void FASTCALL(free_cold_page(struct page *page));

#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr),0)
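
/*
 * Illustrative sketch, not part of the original header: exercise the two
 * namespaces described above.  alloc_page()/__free_page() deal in
 * struct page pointers (and so may hand back highmem pages), while
 * __get_free_page()/free_page() deal in kernel virtual addresses and
 * therefore cannot return highmem.  The helper name is hypothetical.
 */
static inline int gfp_example_both_namespaces(void)
{
	struct page *page;
	unsigned long addr;

	/* struct page * namespace */
	page = alloc_page(GFP_HIGHUSER);
	if (!page)
		return -1;
	__free_page(page);

	/* virtual-address namespace */
	addr = __get_free_page(GFP_KERNEL);
	if (!addr)
		return -1;
	free_page(addr);

	return 0;
}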

void page_alloc_init(void);

#endif /* __LINUX_GFP_H */