#ifndef __LINUX_GFP_H
#define __LINUX_GFP_H

#include <linux/mmzone.h>
#include <linux/stddef.h>
#include <linux/linkage.h>
#include <linux/topology.h>
#include <linux/mmdebug.h>

struct vm_area_struct;

/*
 * GFP bitmasks.
 *
 * Zone modifiers (see linux/mmzone.h - low three bits)
 *
 * Do not put any conditional on these. If necessary modify the definitions
 * without the underscores and use them consistently. The definitions here may
 * be used in bit comparisons.
 */
#define __GFP_DMA	((__force gfp_t)0x01u)
#define __GFP_HIGHMEM	((__force gfp_t)0x02u)
#define __GFP_DMA32	((__force gfp_t)0x04u)

/*
 * Action modifiers - these do not change the zoning.
 *
 * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt
 * _might_ fail.  This depends upon the particular VM implementation.
 *
 * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
 * cannot handle allocation failures.
 *
 * __GFP_NORETRY: The VM implementation must not retry indefinitely.
 *
 * __GFP_MOVABLE: Flag that this page will be movable by the page migration
 * mechanism or reclaimed.
 */
#define __GFP_WAIT	((__force gfp_t)0x10u)	/* Can wait and reschedule? */
#define __GFP_HIGH	((__force gfp_t)0x20u)	/* Should access emergency pools? */
#define __GFP_IO	((__force gfp_t)0x40u)	/* Can start physical IO? */
#define __GFP_FS	((__force gfp_t)0x80u)	/* Can call down to low-level FS? */
#define __GFP_COLD	((__force gfp_t)0x100u)	/* Cache-cold page required */
#define __GFP_NOWARN	((__force gfp_t)0x200u)	/* Suppress page allocation failure warning */
#define __GFP_REPEAT	((__force gfp_t)0x400u)	/* See above */
#define __GFP_NOFAIL	((__force gfp_t)0x800u)	/* See above */
#define __GFP_NORETRY	((__force gfp_t)0x1000u)	/* See above */
#define __GFP_COMP	((__force gfp_t)0x4000u)	/* Add compound page metadata */
#define __GFP_ZERO	((__force gfp_t)0x8000u)	/* Return zeroed page on success */
#define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */
#define __GFP_HARDWALL   ((__force gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */
#define __GFP_THISNODE	((__force gfp_t)0x40000u) /* No fallback, no policies */
#define __GFP_RECLAIMABLE ((__force gfp_t)0x80000u) /* Page is reclaimable */
#define __GFP_MOVABLE	((__force gfp_t)0x100000u)  /* Page is movable */
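
/*
 * Illustrative example (not part of this header's API): action modifiers
 * are OR-ed into a base GFP_* flag at the call site, e.g. an allocation
 * that may fail quietly might use:
 *
 *	page = alloc_page(GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
 */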

#define __GFP_BITS_SHIFT 21	/* Room for 21 __GFP_FOO bits */
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))

/* This equals 0, but use constants in case they ever change */
#define GFP_NOWAIT	(GFP_ATOMIC & ~__GFP_HIGH)
/* GFP_ATOMIC means both !wait (__GFP_WAIT not set) and use emergency pool */
#define GFP_ATOMIC	(__GFP_HIGH)
#define GFP_NOIO	(__GFP_WAIT)
#define GFP_NOFS	(__GFP_WAIT | __GFP_IO)
#define GFP_KERNEL	(__GFP_WAIT | __GFP_IO | __GFP_FS)
#define GFP_TEMPORARY	(__GFP_WAIT | __GFP_IO | __GFP_FS | \
			 __GFP_RECLAIMABLE)
#define GFP_USER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
#define GFP_HIGHUSER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \
			 __GFP_HIGHMEM)
#define GFP_HIGHUSER_MOVABLE	(__GFP_WAIT | __GFP_IO | __GFP_FS | \
				 __GFP_HARDWALL | __GFP_HIGHMEM | \
				 __GFP_MOVABLE)
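
/*
 * Illustrative choice between the composites above: GFP_KERNEL may sleep
 * and suits process context, while GFP_ATOMIC never sleeps and is the
 * only safe choice in interrupt context, e.g.
 *
 *	buf = kmalloc(len, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
 */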

#ifdef CONFIG_NUMA
#define GFP_THISNODE	(__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
#else
#define GFP_THISNODE	((__force gfp_t)0)
#endif

/* This mask makes up all the page movable related flags */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)

/* Control page allocator reclaim behavior */
#define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_NOMEMALLOC)

/* Control slab gfp mask during early boot */
#define SLAB_GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS))

/* Control allocation constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)

/* Flag - indicates that the buffer will be suitable for DMA.  Ignored on some
   platforms, used as appropriate on others */

#define GFP_DMA		__GFP_DMA

/* 4GB DMA on some platforms */
#define GFP_DMA32	__GFP_DMA32

/* Convert GFP flags to their corresponding migrate type */
static inline int allocflags_to_migratetype(gfp_t gfp_flags)
{
	WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);

	if (unlikely(page_group_by_mobility_disabled))
		return MIGRATE_UNMOVABLE;

	/* Group based on mobility */
	return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
		((gfp_flags & __GFP_RECLAIMABLE) != 0);
}
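
/*
 * Illustrative mapping for the helper above, assuming the MIGRATE_*
 * ordering in linux/mmzone.h and page grouping by mobility enabled:
 *
 *	allocflags_to_migratetype(GFP_KERNEL)           == MIGRATE_UNMOVABLE
 *	allocflags_to_migratetype(GFP_TEMPORARY)        == MIGRATE_RECLAIMABLE
 *	allocflags_to_migratetype(GFP_HIGHUSER_MOVABLE) == MIGRATE_MOVABLE
 */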

static inline enum zone_type gfp_zone(gfp_t flags)
{
#ifdef CONFIG_ZONE_DMA
	if (flags & __GFP_DMA)
		return ZONE_DMA;
#endif
#ifdef CONFIG_ZONE_DMA32
	if (flags & __GFP_DMA32)
		return ZONE_DMA32;
#endif
	if ((flags & (__GFP_HIGHMEM | __GFP_MOVABLE)) ==
			(__GFP_HIGHMEM | __GFP_MOVABLE))
		return ZONE_MOVABLE;
#ifdef CONFIG_HIGHMEM
	if (flags & __GFP_HIGHMEM)
		return ZONE_HIGHMEM;
#endif
	return ZONE_NORMAL;
}
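
/*
 * For example, with all zones configured in, gfp_zone(GFP_KERNEL) is
 * ZONE_NORMAL, gfp_zone(GFP_KERNEL | __GFP_DMA) is ZONE_DMA, and
 * gfp_zone(GFP_HIGHUSER_MOVABLE) is ZONE_MOVABLE.
 */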

/*
 * There is only one page-allocator function, and two main namespaces to
 * it.  The alloc_page*() variants return 'struct page *' and as such
 * can allocate highmem pages, while the *get*page*() variants return
 * virtual kernel addresses to the allocated page(s).
 */

static inline int gfp_zonelist(gfp_t flags)
{
	if (NUMA_BUILD && unlikely(flags & __GFP_THISNODE))
		return 1;

	return 0;
}

/*
 * We get the zone list from the current node and the gfp_mask.
 * This zone list contains a maximum of MAX_NUMNODES*MAX_NR_ZONES zones.
 * There are two zonelists per node, one for all zones with memory and
 * one containing just zones from the node the zonelist belongs to.
 *
 * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
 * optimized to &contig_page_data at compile-time.
 */
static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
{
	return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags);
}
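
/*
 * Illustrative use of the helper above; this is effectively what
 * alloc_pages_node() below does:
 *
 *	struct zonelist *zl = node_zonelist(numa_node_id(), gfp_mask);
 *	page = __alloc_pages(gfp_mask, order, zl);
 */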

#ifndef HAVE_ARCH_FREE_PAGE
static inline void arch_free_page(struct page *page, int order) { }
#endif
#ifndef HAVE_ARCH_ALLOC_PAGE
static inline void arch_alloc_page(struct page *page, int order) { }
#endif

struct page *
__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
		       struct zonelist *zonelist, nodemask_t *nodemask);

static inline struct page *
__alloc_pages(gfp_t gfp_mask, unsigned int order,
		struct zonelist *zonelist)
{
	return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL);
}

static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
						unsigned int order)
{
	/* Unknown node is current node */
	if (nid < 0)
		nid = numa_node_id();

	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}

static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask,
						unsigned int order)
{
	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);

	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}
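
/*
 * Note: unlike alloc_pages_node(), alloc_pages_exact_node() does not
 * fall back to the current node for a negative nid; callers must pass
 * a valid node id, e.g. one obtained from cpu_to_node().
 */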

#ifdef CONFIG_NUMA
extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);

static inline struct page *
alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	return alloc_pages_current(gfp_mask, order);
}
extern struct page *alloc_page_vma(gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr);
#else
#define alloc_pages(gfp_mask, order) \
		alloc_pages_node(numa_node_id(), gfp_mask, order)
#define alloc_page_vma(gfp_mask, vma, addr) alloc_pages(gfp_mask, 0)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)

extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);

void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
void free_pages_exact(void *virt, size_t size);
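
/*
 * Illustrative pairing for the exact-size helpers, which trim the unused
 * tail pages of a power-of-two allocation:
 *
 *	void *buf = alloc_pages_exact(8192, GFP_KERNEL);
 *	if (buf) {
 *		...
 *		free_pages_exact(buf, 8192);
 *	}
 */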

#define __get_free_page(gfp_mask) \
		__get_free_pages((gfp_mask), 0)

#define __get_dma_pages(gfp_mask, order) \
		__get_free_pages((gfp_mask) | GFP_DMA, (order))

extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
extern void free_hot_page(struct page *page);
extern void free_cold_page(struct page *page);

#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)
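
/*
 * Example lifetime using the address-based interface:
 *
 *	unsigned long addr = get_zeroed_page(GFP_KERNEL);
 *	if (addr) {
 *		...
 *		free_page(addr);
 *	}
 */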

void page_alloc_init(void);
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(void);
void drain_local_pages(void *dummy);

extern bool oom_killer_disabled;

static inline void oom_killer_disable(void)
{
	oom_killer_disabled = true;
}

static inline void oom_killer_enable(void)
{
	oom_killer_disabled = false;
}
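
/*
 * Illustrative bracketing of a section that must not trigger the OOM
 * killer (e.g. while userspace tasks are frozen):
 *
 *	oom_killer_disable();
 *	...
 *	oom_killer_enable();
 */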

#endif /* __LINUX_GFP_H */