/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/mm.h>

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);

static inline void set_page_count(struct page *page, int v)
{
	atomic_set(&page->_count, v);
}

/*
 * Turn a non-refcounted page (->_count == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON(PageTail(page));
	VM_BUG_ON(atomic_read(&page->_count));
	set_page_count(page, 1);
}

static inline void __put_page(struct page *page)
{
	atomic_dec(&page->_count);
}

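/*
 * Example (illustrative sketch, not code from this file): a page that
 * is still on the buddy lists has _count == 0, and the allocator makes
 * it refcounted before handing it out.  __put_page() only drops a
 * reference; any freeing is up to the caller:
 *
 *	set_page_refcounted(page);	page->_count: 0 -> 1
 *	get_page(page);			page->_count: 1 -> 2
 *	__put_page(page);		page->_count: 2 -> 1
 */
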
static inline void __get_page_tail_foll(struct page *page,
					bool get_page_head)
{
	/*
	 * If we're getting a tail page, the elevated page->_count is
	 * required only in the head page and we will elevate the head
	 * page->_count and tail page->_mapcount.
	 *
	 * We elevate page_tail->_mapcount for tail pages to force
	 * page_tail->_count to be zero at all times to avoid getting
	 * false positives from get_page_unless_zero() with
	 * speculative page access (like in
	 * page_cache_get_speculative()) on tail pages.
	 */
	VM_BUG_ON(atomic_read(&page->first_page->_count) <= 0);
	VM_BUG_ON(atomic_read(&page->_count) != 0);
	VM_BUG_ON(page_mapcount(page) < 0);
	if (get_page_head)
		atomic_inc(&page->first_page->_count);
	atomic_inc(&page->_mapcount);
}

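/*
 * Worked example of the invariants above (hypothetical THP tail page,
 * PT lock held).  Before __get_page_tail_foll(tail, true), the head
 * carries N references (N > 0) and the tail carries M pins; after it:
 *
 *	head->_count:    N -> N + 1	the reference lives on the head
 *	tail->_count:    0 (always)	keeps get_page_unless_zero()
 *					from pinning a bare tail page
 *	tail->_mapcount: M -> M + 1	records the tail-page pin
 */
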
/*
 * This is meant to be called as the FOLL_GET operation of
 * follow_page() and it must be called while holding the proper PT
 * lock while the pte (or pmd_trans_huge) is still mapping the page.
 */
static inline void get_page_foll(struct page *page)
{
	if (unlikely(PageTail(page)))
		/*
		 * This is safe only because
		 * __split_huge_page_refcount() can't run under
		 * get_page_foll() because we hold the proper PT lock.
		 */
		__get_page_tail_foll(page, true);
	else {
		/*
		 * Getting a normal page or the head of a compound page
		 * requires an already elevated page->_count.
		 */
		VM_BUG_ON(atomic_read(&page->_count) <= 0);
		atomic_inc(&page->_count);
	}
}

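/*
 * Usage sketch (modelled on the follow_page() path; simplified, not
 * code from this file).  The PT lock pins the mapping for the whole
 * window, which is what makes get_page_foll() safe against
 * __split_huge_page_refcount():
 *
 *	ptl = pte_lockptr(mm, pmd);
 *	spin_lock(ptl);
 *	page = vm_normal_page(vma, address, pte);
 *	if (page && (flags & FOLL_GET))
 *		get_page_foll(page);
 *	spin_unlock(ptl);
 */
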
extern unsigned long highest_memmap_pfn;

/*
 * in mm/vmscan.c:
 */
extern int isolate_lru_page(struct page *page);
extern void putback_lru_page(struct page *page);

/*
 * in mm/page_alloc.c
 */
extern void __free_pages_bootmem(struct page *page, unsigned int order);
extern void prep_compound_page(struct page *page, unsigned long order);
#ifdef CONFIG_MEMORY_FAILURE
extern bool is_free_buddy_page(struct page *page);
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

/*
 * in mm/compaction.c
 */
/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn.
 */
struct compact_control {
	struct list_head freepages;	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned long nr_freepages;	/* Number of isolated free pages */
	unsigned long nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */
	unsigned long migrate_pfn;	/* isolate_migratepages search base */
	bool sync;			/* Synchronous migration */
	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
	bool finished_update_free;	/* True when the zone cached pfns are
					 * no longer being updated
					 */
	bool finished_update_migrate;

	int order;			/* order a direct compactor needs */
	int migratetype;		/* MOVABLE, RECLAIMABLE etc */
	struct zone *zone;
	bool contended;			/* True if a lock was contended */
	struct page **page;		/* Page captured of requested size */
};

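/*
 * Sketch of how the two scanners converge (simplified from the logic
 * in mm/compaction.c; the real loop also handles deferral, skip hints
 * and lock contention):
 *
 *	while (cc->free_pfn > cc->migrate_pfn) {
 *		isolate pages around migrate_pfn, moving it up the zone;
 *		isolate free pages around free_pfn, moving it down;
 *		migrate_pages(&cc->migratepages, ...);
 *	}
 */
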
unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn);
unsigned long
isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
	unsigned long low_pfn, unsigned long end_pfn, bool unevictable);

#endif

/*
 * Functions for dealing with a page's order in the buddy system.
 * zone->lock is already acquired when we use these.
 * So, we don't need atomic page->flags operations here.
 */
static inline unsigned long page_order(struct page *page)
{
	/* PageBuddy() must be checked by the caller */
	return page_private(page);
}

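/*
 * Example of safe use (sketch; both the check and the read must sit
 * under zone->lock, since PageBuddy and the stored order can change
 * as buddies are merged and split):
 *
 *	spin_lock_irqsave(&zone->lock, flags);
 *	if (PageBuddy(page))
 *		order = page_order(page);
 *	spin_unlock_irqrestore(&zone->lock, flags);
 */
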
/* mm/util.c */
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent);

#ifdef CONFIG_MMU
extern long mlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
{
	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
}

/*
 * Called only in the fault path, to determine if a new page is being
 * mapped into a LOCKED vma.  If it is, mark the page as mlocked.
 */
static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
				      struct page *page)
{
	VM_BUG_ON(PageLRU(page));

	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
		return 0;

	if (!TestSetPageMlocked(page)) {
		mod_zone_page_state(page_zone(page), NR_MLOCK,
				    hpage_nr_pages(page));
		count_vm_event(UNEVICTABLE_PGMLOCKED);
	}
	return 1;
}

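/*
 * Sketch of the caller side (modelled on page_add_new_anon_rmap() in
 * mm/rmap.c): a freshly faulted page either goes onto the active LRU
 * or straight to the unevictable list:
 *
 *	if (!mlocked_vma_newpage(vma, page))
 *		lru_cache_add_lru(page, LRU_ACTIVE_ANON);
 *	else
 *		add_page_to_unevictable_list(page);
 */
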
/*
 * must be called with vma's mmap_sem held for read or write, and page locked.
 */
extern void mlock_vma_page(struct page *page);
extern void munlock_vma_page(struct page *page);

/*
 * Clear the page's PageMlocked().  This can be useful in a situation where
 * we want to unconditionally remove a page from the pagecache -- e.g.,
 * on truncation or freeing.
 *
 * It is legal to call this function for any page, mlocked or not.
 * If called for a page that is still mapped by mlocked vmas, all we do
 * is revert to lazy LRU behaviour -- semantics are not broken.
 */
extern void clear_page_mlock(struct page *page);

/*
 * mlock_migrate_page - called only from migrate_page_copy() to
 * migrate the Mlocked page flag; update statistics.
 */
static inline void mlock_migrate_page(struct page *newpage, struct page *page)
{
	if (TestClearPageMlocked(page)) {
		unsigned long flags;

		local_irq_save(flags);
		__dec_zone_page_state(page, NR_MLOCK);
		SetPageMlocked(newpage);
		__inc_zone_page_state(newpage, NR_MLOCK);
		local_irq_restore(flags);
	}
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern unsigned long vma_address(struct page *page,
				 struct vm_area_struct *vma);
#endif
#else /* !CONFIG_MMU */
static inline int mlocked_vma_newpage(struct vm_area_struct *v, struct page *p)
{
	return 0;
}
static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
static inline void mlock_migrate_page(struct page *new, struct page *old) { }

#endif /* !CONFIG_MMU */

/*
 * Return the mem_map entry representing the 'offset' subpage within
 * the maximally aligned gigantic page 'base'.  Handle any discontiguity
 * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
 */
static inline struct page *mem_map_offset(struct page *base, int offset)
{
	if (unlikely(offset >= MAX_ORDER_NR_PAGES))
		return pfn_to_page(page_to_pfn(base) + offset);
	return base + offset;
}

/*
 * Iterator over all subpages within the maximally aligned gigantic
 * page 'base'.  Handle any discontiguity in the mem_map.
 */
static inline struct page *mem_map_next(struct page *iter,
					struct page *base, int offset)
{
	if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
		unsigned long pfn = page_to_pfn(base) + offset;
		if (!pfn_valid(pfn))
			return NULL;
		return pfn_to_page(pfn);
	}
	return iter + 1;
}

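/*
 * Typical traversal (sketch modelled on the gigantic-page copy loops
 * in mm/hugetlb.c; process() stands in for whatever per-subpage work
 * the caller does):
 *
 *	struct page *p = mem_map_offset(base, 0);
 *	for (i = 0; i < nr_pages; ) {
 *		process(p);
 *		i++;
 *		p = mem_map_next(p, base, i);
 *	}
 */
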
/*
 * FLATMEM and DISCONTIGMEM configurations use alloc_bootmem_node,
 * so all functions starting at paging_init should be marked __init
 * in those cases. SPARSEMEM, however, allows for memory hotplug,
 * and alloc_bootmem_node is not used.
 */
#ifdef CONFIG_SPARSEMEM
#define __paginginit __meminit
#else
#define __paginginit __init
#endif

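/*
 * Usage (illustrative): zone/node setup helpers that must survive past
 * boot on hotplug-capable configs are annotated with __paginginit, e.g.
 *
 *	static void __paginginit example_zone_init(struct zone *zone);
 *
 * where example_zone_init() is a hypothetical helper, not one declared
 * in this header.
 */
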
/* Memory initialisation debug and verification */
enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		printk(level <= MMINIT_WARNING ? KERN_WARNING : KERN_DEBUG); \
		printk(KERN_CONT "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)

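/*
 * Example call (sketch with hypothetical variable names; the real
 * callers live in mm/mm_init.c and mm/page_alloc.c):
 *
 *	mminit_dprintk(MMINIT_TRACE, "memmap_init",
 *			"initialising map node %d zone %lu pfns %lu -> %lu\n",
 *			nid, zone_id, start_pfn, end_pfn);
 */
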
extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_page_links(struct page *page,
		enum zone_type zone, unsigned long nid, unsigned long pfn);
extern void mminit_verify_zonelist(void);

#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_page_links(struct page *page,
		enum zone_type zone, unsigned long nid, unsigned long pfn)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
#if defined(CONFIG_SPARSEMEM)
extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn);
#else
static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn)
{
}
#endif /* CONFIG_SPARSEMEM */

#define ZONE_RECLAIM_NOSCAN	-2
#define ZONE_RECLAIM_FULL	-1
#define ZONE_RECLAIM_SOME	0
#define ZONE_RECLAIM_SUCCESS	1

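/*
 * How a caller interprets these return codes (sketch modelled on the
 * zone_reclaim() handling in mm/page_alloc.c):
 *
 *	ret = zone_reclaim(zone, gfp_mask, order);
 *	if (ret == ZONE_RECLAIM_NOSCAN)		nothing was scanned,
 *		try the next zone;		so skip this one
 *	else if (ret == ZONE_RECLAIM_FULL)	scanned, nothing freed
 *		mark this zone full;
 *	else
 *		recheck the watermark;
 */
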
extern int hwpoison_filter(struct page *p);

extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;

extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
        unsigned long, unsigned long,
        unsigned long, unsigned long);

extern void set_pageblock_order(void);
unsigned long reclaim_clean_pages_from_list(struct zone *zone,
					    struct list_head *page_list);

/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)

#define ALLOC_HARDER		0x10 /* try to alloc harder */
#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
#define ALLOC_CMA		0x80 /* allow allocations from CMA areas */

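/*
 * Example of the watermark-index encoding (sketch modelled on
 * get_page_from_freelist() in mm/page_alloc.c): the low bits select
 * which zone->watermark[] entry to test, the higher bits are
 * behaviour flags:
 *
 *	mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
 *	if (!zone_watermark_ok(zone, order, mark,
 *			       classzone_idx, alloc_flags))
 *		move on to the next zone;
 */
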
#endif	/* __MM_INTERNAL_H */