/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/mm.h>

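/* in mm/memory.c: */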
void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);

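/*
 * Set the page's reference count directly.  No checking is done here, so
 * callers are expected to own the page outright (e.g. the page allocator
 * while preparing or freeing a page).
 */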
static inline void set_page_count(struct page *page, int v)
{
	atomic_set(&page->_count, v);
}

/*
 * Turn a non-refcounted page (->_count == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON(PageTail(page));
	VM_BUG_ON(atomic_read(&page->_count));
	set_page_count(page, 1);
}

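/*
 * Drop a reference without any of put_page()'s compound-page handling or
 * freeing; the caller is responsible for the page if the count hits zero.
 */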
static inline void __put_page(struct page *page)
{
	atomic_dec(&page->_count);
}

static inline void __get_page_tail_foll(struct page *page,
					bool get_page_head)
{
	/*
	 * If we're getting a tail page, the elevated page->_count is
	 * required only on the head page, so we elevate the head
	 * page->_count and the tail page->_mapcount.
	 *
	 * We elevate page_tail->_mapcount for tail pages to force
	 * page_tail->_count to be zero at all times to avoid getting
	 * false positives from get_page_unless_zero() with
	 * speculative page access (like in
	 * page_cache_get_speculative()) on tail pages.
	 */
	VM_BUG_ON(atomic_read(&page->first_page->_count) <= 0);
	VM_BUG_ON(atomic_read(&page->_count) != 0);
	VM_BUG_ON(page_mapcount(page) < 0);
	if (get_page_head)
		atomic_inc(&page->first_page->_count);
	atomic_inc(&page->_mapcount);
}

/*
 * This is meant to be called as the FOLL_GET operation of
 * follow_page() and it must be called while holding the proper PT
 * lock while the pte (or pmd_trans_huge) is still mapping the page.
 */
static inline void get_page_foll(struct page *page)
{
	if (unlikely(PageTail(page)))
		/*
		 * This is safe only because
		 * __split_huge_page_refcount() can't run under
		 * get_page_foll() because we hold the proper PT lock.
		 */
		__get_page_tail_foll(page, true);
	else {
		/*
		 * Getting a normal page or the head of a compound page
		 * requires an already elevated page->_count.
		 */
		VM_BUG_ON(atomic_read(&page->_count) <= 0);
		atomic_inc(&page->_count);
	}
}

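/*
 * Highest pfn for which a memmap entry exists; recorded while the memmap
 * is initialised and used for pfn sanity checks (see vm_normal_page()).
 */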
extern unsigned long highest_memmap_pfn;

/*
 * in mm/vmscan.c:
 */
extern int isolate_lru_page(struct page *page);
extern void putback_lru_page(struct page *page);
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern bool zone_reclaimable(struct zone *zone);

/*
 * in mm/page_alloc.c
 */
extern void __free_pages_bootmem(struct page *page, unsigned int order);
extern void prep_compound_page(struct page *page, unsigned long order);
#ifdef CONFIG_MEMORY_FAILURE
extern bool is_free_buddy_page(struct page *page);
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

/*
 * in mm/compaction.c
 */
/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn
 */
struct compact_control {
	struct list_head freepages;	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned long nr_freepages;	/* Number of isolated free pages */
	unsigned long nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */
	unsigned long migrate_pfn;	/* isolate_migratepages search base */
	bool sync;			/* Synchronous migration */
	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
	bool finished_update_free;	/* True when the zone cached pfns are
					 * no longer being updated
					 */
	bool finished_update_migrate;

	int order;			/* order a direct compactor needs */
	int migratetype;		/* MOVABLE, RECLAIMABLE etc */
	struct zone *zone;
	bool contended;			/* True if a lock was contended */
};

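/*
 * Page-range isolation helpers shared by memory compaction and CMA
 * (alloc_contig_range()).
 */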
unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn);
unsigned long
isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
	unsigned long low_pfn, unsigned long end_pfn, bool unevictable);

#endif

/*
 * Function for dealing with a page's order in the buddy system.
 * zone->lock is already acquired when we use these, so we don't
 * need atomic page->flags operations here.
 */
static inline unsigned long page_order(struct page *page)
{
	/* PageBuddy() must be checked by the caller */
	return page_private(page);
}

/* mm/util.c */
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent);

#ifdef CONFIG_MMU
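/* in mm/mlock.c: */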
extern long mlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
{
	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
}

/*
 * Called only in the fault path, via page_evictable(), for a new page
 * to determine whether it is being mapped into a VM_LOCKED vma.
 * If so, mark the page as mlocked.
 */
static inline int is_mlocked_vma(struct vm_area_struct *vma, struct page *page)
{
	VM_BUG_ON(PageLRU(page));

	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
		return 0;

	if (!TestSetPageMlocked(page)) {
		inc_zone_page_state(page, NR_MLOCK);
		count_vm_event(UNEVICTABLE_PGMLOCKED);
	}
	return 1;
}

/*
 * Must be called with the vma's mmap_sem held for read or write, and
 * with the page locked.
 */
extern void mlock_vma_page(struct page *page);
extern void munlock_vma_page(struct page *page);

/*
 * Clear the page's PageMlocked().  This can be useful in a situation where
 * we want to unconditionally remove a page from the pagecache -- e.g.,
 * on truncation or freeing.
 *
 * It is legal to call this function for any page, mlocked or not.
 * If called for a page that is still mapped by mlocked vmas, all we do
 * is revert to lazy LRU behaviour -- semantics are not broken.
 */
extern void __clear_page_mlock(struct page *page);
static inline void clear_page_mlock(struct page *page)
{
	if (unlikely(TestClearPageMlocked(page)))
		__clear_page_mlock(page);
}

/*
 * mlock_migrate_page - called only from migrate_page_copy() to
 * migrate the Mlocked page flag; update statistics.
 */
static inline void mlock_migrate_page(struct page *newpage, struct page *page)
{
	if (TestClearPageMlocked(page)) {
		unsigned long flags;

		local_irq_save(flags);
		__dec_zone_page_state(page, NR_MLOCK);
		SetPageMlocked(newpage);
		__inc_zone_page_state(newpage, NR_MLOCK);
		local_irq_restore(flags);
	}
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
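/* in mm/rmap.c: */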
extern unsigned long vma_address(struct page *page,
				 struct vm_area_struct *vma);
#endif
#else /* !CONFIG_MMU */
static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
{
	return 0;
}
static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
static inline void mlock_migrate_page(struct page *new, struct page *old) { }

#endif /* !CONFIG_MMU */

/*
 * Return the mem_map entry representing the 'offset' subpage within
 * the maximally aligned gigantic page 'base'.  Handle any discontiguity
 * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
 */
static inline struct page *mem_map_offset(struct page *base, int offset)
{
	if (unlikely(offset >= MAX_ORDER_NR_PAGES))
		return pfn_to_page(page_to_pfn(base) + offset);
	return base + offset;
}

/*
 * Iterator over all subpages within the maximally aligned gigantic
 * page 'base'.  Handle any discontiguity in the mem_map.
 */
static inline struct page *mem_map_next(struct page *iter,
					struct page *base, int offset)
{
	if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
		unsigned long pfn = page_to_pfn(base) + offset;
		if (!pfn_valid(pfn))
			return NULL;
		return pfn_to_page(pfn);
	}
	return iter + 1;
}

/*
 * FLATMEM and DISCONTIGMEM configurations use alloc_bootmem_node,
 * so all functions starting at paging_init should be marked __init
 * in those cases. SPARSEMEM, however, allows for memory hotplug,
 * and alloc_bootmem_node is not used.
 */
#ifdef CONFIG_SPARSEMEM
#define __paginginit __meminit
#else
#define __paginginit __init
#endif

/* Memory initialisation debug and verification */
enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		printk(level <= MMINIT_WARNING ? KERN_WARNING : KERN_DEBUG); \
		printk(KERN_CONT "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)

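/*
 * Example use (hypothetical arguments):
 *
 *	mminit_dprintk(MMINIT_TRACE, "memmap_init",
 *			"initialising node %d zone %lu\n", nid, zone_id);
 *
 * The message is emitted only when 'level' is below mminit_loglevel.
 */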
extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_page_links(struct page *page,
		enum zone_type zone, unsigned long nid, unsigned long pfn);
extern void mminit_verify_zonelist(void);

#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_page_links(struct page *page,
		enum zone_type zone, unsigned long nid, unsigned long pfn)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
#if defined(CONFIG_SPARSEMEM)
extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn);
#else
static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn)
{
}
#endif /* CONFIG_SPARSEMEM */

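/* Return values of zone_reclaim() in mm/vmscan.c */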
#define ZONE_RECLAIM_NOSCAN	-2
#define ZONE_RECLAIM_FULL	-1
#define ZONE_RECLAIM_SOME	0
#define ZONE_RECLAIM_SUCCESS	1
#endif /* __MM_INTERNAL_H */

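/*
 * in mm/memory-failure.c: hwpoison filtering, used by the hwpoison
 * fault-injection code to restrict which pages are poisoned.
 */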
extern int hwpoison_filter(struct page *p);

extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;

/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)

#define ALLOC_HARDER		0x10 /* try to alloc harder */
#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
#define ALLOC_CMA		0x80 /* allow allocations from CMA areas */
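/*
 * These flags are combined into the alloc_flags value that mm/page_alloc.c
 * passes around (e.g. to get_page_from_freelist()); the low-order bits,
 * extracted with ALLOC_WMARK_MASK, index zone->watermark[].
 */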

unsigned long reclaim_clean_pages_from_list(struct zone *zone,
					    struct list_head *page_list);