#ifndef __LINUX_GFP_H
#define __LINUX_GFP_H

#include <linux/mmzone.h>
#include <linux/stddef.h>
#include <linux/linkage.h>
#include <linux/config.h>

struct vm_area_struct;

/*
 * GFP bitmasks..
 */
/* Zone modifiers in GFP_ZONEMASK (see linux/mmzone.h - low two bits) */
#define __GFP_DMA	0x01u
#define __GFP_HIGHMEM	0x02u

/*
 * Action modifiers - these don't change the zoning.
 *
 * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt
 * _might_ fail.  This depends upon the particular VM implementation.
 *
 * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
 * cannot handle allocation failures.
 *
 * __GFP_NORETRY: The VM implementation must not retry indefinitely.
 */
#define __GFP_WAIT	0x10u	/* Can wait and reschedule? */
#define __GFP_HIGH	0x20u	/* Should access emergency pools? */
#define __GFP_IO	0x40u	/* Can start physical IO? */
#define __GFP_FS	0x80u	/* Can call down to low-level FS? */
#define __GFP_COLD	0x100u	/* Cache-cold page required */
#define __GFP_NOWARN	0x200u	/* Suppress page allocation failure warning */
#define __GFP_REPEAT	0x400u	/* Retry the allocation.  Might fail */
#define __GFP_NOFAIL	0x800u	/* Retry for ever.  Cannot fail */
#define __GFP_NORETRY	0x1000u	/* Do not retry.  Might fail */
#define __GFP_NO_GROW	0x2000u	/* Slab internal usage */
#define __GFP_COMP	0x4000u	/* Add compound page metadata */
#define __GFP_ZERO	0x8000u	/* Return zeroed page on success */
#define __GFP_NOMEMALLOC 0x10000u /* Don't use emergency reserves */
#define __GFP_NORECLAIM	0x20000u /* No zone reclaim during allocation */

#define __GFP_BITS_SHIFT 20	/* Room for 20 __GFP_FOO bits */
#define __GFP_BITS_MASK ((1 << __GFP_BITS_SHIFT) - 1)

/* If you forget to add the bitmask here the kernel will crash, period */
#define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \
			__GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \
			__GFP_NOFAIL|__GFP_NORETRY|__GFP_NO_GROW|__GFP_COMP| \
			__GFP_NOMEMALLOC|__GFP_NORECLAIM)

#define GFP_ATOMIC	(__GFP_HIGH)
#define GFP_NOIO	(__GFP_WAIT)
#define GFP_NOFS	(__GFP_WAIT | __GFP_IO)
#define GFP_KERNEL	(__GFP_WAIT | __GFP_IO | __GFP_FS)
#define GFP_USER	(__GFP_WAIT | __GFP_IO | __GFP_FS)
#define GFP_HIGHUSER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HIGHMEM)
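
/*
 * Illustrative sketch, not part of the original header: picking one of the
 * composite masks above based on context.  The helper name and the extra
 * __GFP_NOWARN modifier are hypothetical.  GFP_KERNEL implies __GFP_WAIT
 * and so may sleep; it is only valid in process context.  GFP_ATOMIC never
 * sleeps and may dip into the emergency pools, so it is the usual choice
 * for interrupt and other atomic contexts.
 */
static inline unsigned int example_gfp_for_context(int can_sleep)
{
	if (can_sleep)
		/* Process context: reclaim, IO and FS callbacks are all allowed. */
		return GFP_KERNEL | __GFP_NOWARN;
	/* Atomic context: no __GFP_WAIT, emergency pools permitted. */
	return GFP_ATOMIC;
}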

/* Flag - indicates that the buffer will be suitable for DMA.  Ignored on some
   platforms, used as appropriate on others */

#define GFP_DMA		__GFP_DMA

/*
 * There is only one page-allocator function, and two main namespaces to
 * it.  The alloc_page*() variants return 'struct page *' and as such can
 * allocate highmem pages, while the *get*page*() variants return virtual
 * kernel addresses to the allocated page(s).
 */

/*
 * We get the zone list from the current node and the gfp_mask.
 * This zone list contains a maximum of MAX_NUMNODES*MAX_NR_ZONES zones.
 *
 * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
 * optimized to &contig_page_data at compile-time.
 */

#ifndef HAVE_ARCH_FREE_PAGE
static inline void arch_free_page(struct page *page, int order) { }
#endif

extern struct page *
FASTCALL(__alloc_pages(unsigned int, unsigned int, struct zonelist *));

static inline struct page *alloc_pages_node(int nid, unsigned int __nocast gfp_mask,
						unsigned int order)
{
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	return __alloc_pages(gfp_mask, order,
		NODE_DATA(nid)->node_zonelists + (gfp_mask & GFP_ZONEMASK));
}
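
/*
 * Illustrative sketch, not part of the original header: a node-local,
 * multi-page allocation through alloc_pages_node().  The helper name and
 * the order-2 size (1 << 2 = 4 contiguous pages) are hypothetical.
 * __GFP_NORETRY makes the allocator give up and return NULL rather than
 * retry hard when the node is short on contiguous memory.
 */
static inline struct page *example_alloc_on_node(int nid)
{
	return alloc_pages_node(nid, GFP_KERNEL | __GFP_NORETRY, 2);
}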

#ifdef CONFIG_NUMA
extern struct page *alloc_pages_current(unsigned int __nocast gfp_mask, unsigned order);

static inline struct page *
alloc_pages(unsigned int __nocast gfp_mask, unsigned int order)
{
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	return alloc_pages_current(gfp_mask, order);
}
extern struct page *alloc_page_vma(unsigned __nocast gfp_mask,
			struct vm_area_struct *vma, unsigned long addr);
#else
#define alloc_pages(gfp_mask, order) \
		alloc_pages_node(numa_node_id(), gfp_mask, order)
#define alloc_page_vma(gfp_mask, vma, addr) alloc_pages(gfp_mask, 0)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
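
/*
 * Illustrative sketch, not part of the original header: allocating an
 * anonymous page for a user mapping via alloc_page_vma().  The helper name
 * is hypothetical.  GFP_HIGHUSER is appropriate because the page is mapped
 * into userspace and needs no permanent kernel mapping; __GFP_ZERO hands
 * back an already-cleared page.
 */
static inline struct page *example_alloc_anon_page(struct vm_area_struct *vma,
						   unsigned long addr)
{
	return alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, addr);
}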

extern unsigned long FASTCALL(__get_free_pages(unsigned int __nocast gfp_mask, unsigned int order));
extern unsigned long FASTCALL(get_zeroed_page(unsigned int __nocast gfp_mask));

#define __get_free_page(gfp_mask) \
		__get_free_pages((gfp_mask), 0)

#define __get_dma_pages(gfp_mask, order) \
		__get_free_pages((gfp_mask) | GFP_DMA, (order))

extern void FASTCALL(__free_pages(struct page *page, unsigned int order));
extern void FASTCALL(free_pages(unsigned long addr, unsigned int order));
extern void FASTCALL(free_hot_page(struct page *page));
extern void FASTCALL(free_cold_page(struct page *page));

#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)
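
/*
 * Illustrative sketch, not part of the original header: the two namespaces
 * described above, each paired with its matching free routine.  The helper
 * name is hypothetical.  alloc_page()/__free_page() deal in struct page and
 * may hand out highmem; get_zeroed_page()/free_page() deal in kernel
 * virtual addresses, so highmem must not be requested there.
 */
static inline int example_two_namespaces(void)
{
	struct page *page;
	unsigned long addr;

	/* struct page namespace: may be highmem, no kernel mapping implied. */
	page = alloc_page(GFP_HIGHUSER);
	if (!page)
		return -1;
	__free_page(page);

	/* Virtual-address namespace: always low memory, returned zeroed. */
	addr = get_zeroed_page(GFP_KERNEL);
	if (!addr)
		return -1;
	free_page(addr);

	return 0;
}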

void page_alloc_init(void);
#ifdef CONFIG_NUMA
void drain_remote_pages(void);
#else
static inline void drain_remote_pages(void) { }
#endif

#endif /* __LINUX_GFP_H */