/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/tracepoint-defs.h>

/*
 * The set of flags that only affect watermark checking and reclaim
 * behaviour. This is used by the MM to obey the caller constraints
 * about IO, FS and watermark checking while ignoring placement
 * hints such as HIGHMEM usage.
 */
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
			__GFP_ATOMIC)

/* The GFP flags allowed during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))

/* Control allocation cpuset and node placement constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
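
/*
 * Editor's illustrative sketch (not part of the original header; the helper
 * name is hypothetical and nothing in mm/ calls it): a caller-supplied gfp
 * mask is reduced to its reclaim-relevant bits, so placement hints such as
 * __GFP_HIGHMEM are ignored while constraints like __GFP_IO and __GFP_FS
 * are preserved.
 */
static inline gfp_t gfp_reclaim_bits_sketch(gfp_t gfp_mask)
{
	return gfp_mask & GFP_RECLAIM_MASK;
}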

int do_swap_page(struct fault_env *fe, pte_t orig_pte);

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);

void unmap_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end,
			     struct zap_details *details);

extern int __do_page_cache_readahead(struct address_space *mapping,
		struct file *filp, pgoff_t offset, unsigned long nr_to_read,
		unsigned long lookahead_size);

/*
 * Submit IO for the read-ahead request in file_ra_state.
 */
static inline unsigned long ra_submit(struct file_ra_state *ra,
		struct address_space *mapping, struct file *filp)
{
	return __do_page_cache_readahead(mapping, filp,
					ra->start, ra->size, ra->async_size);
}
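
/*
 * Editor's sketch of a hypothetical caller (the helper name and the window
 * sizing below are illustrative only, not taken from mm/readahead.c): the
 * readahead window is filled in and then handed to ra_submit().
 */
static inline unsigned long ra_submit_sketch(struct file_ra_state *ra,
		struct address_space *mapping, struct file *filp,
		pgoff_t index, unsigned long req_size)
{
	ra->start = index;		/* first page of the window */
	ra->size = req_size;		/* pages to read now */
	ra->async_size = req_size / 2;	/* start next readahead early */
	return ra_submit(ra, mapping, filp);
}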

/*
 * Turn a non-refcounted page (->_refcount == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_ref_count(page), page);
	set_page_count(page, 1);
}

extern unsigned long highest_memmap_pfn;

/*
 * in mm/vmscan.c:
 */
extern int isolate_lru_page(struct page *page);
extern void putback_lru_page(struct page *page);
extern bool pgdat_reclaimable(struct pglist_data *pgdat);

/*
 * in mm/rmap.c:
 */
extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);

/*
 * in mm/page_alloc.c
 */

/*
 * Structure for holding the mostly immutable allocation parameters passed
 * between functions involved in allocations, including the alloc_pages*
 * family of functions.
 *
 * nodemask, migratetype and high_zoneidx are initialized only once in
 * __alloc_pages_nodemask() and then never change.
 *
 * zonelist, preferred_zone and classzone_idx are set first in
 * __alloc_pages_nodemask() for the fast path, and might be later changed
 * in __alloc_pages_slowpath(). All other functions pass the whole structure
 * by a const pointer.
 */
struct alloc_context {
	struct zonelist *zonelist;
	nodemask_t *nodemask;
	struct zoneref *preferred_zoneref;
	int migratetype;
	enum zone_type high_zoneidx;
	bool spread_dirty_pages;
};

#define ac_classzone_idx(ac) zonelist_zone_idx(ac->preferred_zoneref)

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline unsigned long
__find_buddy_index(unsigned long page_idx, unsigned int order)
{
	return page_idx ^ (1 << order);
}
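
/*
 * Editor's sketch illustrating rule 2) above (the helper name is
 * hypothetical; nothing in this file defines it): when two order-O buddies
 * merge, the combined order O+1 page starts at the lower of the two indexes,
 * which is the bitwise AND of a page index and its buddy index.
 */
static inline unsigned long
__find_combined_index_sketch(unsigned long page_idx, unsigned int order)
{
	return page_idx & __find_buddy_index(page_idx, order);
}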

extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone);

static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone)
{
	if (zone->contiguous)
		return pfn_to_page(start_pfn);

	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
}
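
/*
 * Editor's note (illustrative): zone->contiguous is only set once the zone's
 * memmap has been verified to be hole-free and entirely within the zone, so
 * the fast path above may skip the pfn_valid() and zone-ownership checks
 * performed by __pageblock_pfn_to_page().
 */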

extern int __isolate_free_page(struct page *page, unsigned int order);
extern void __free_pages_bootmem(struct page *page, unsigned long pfn,
					unsigned int order);
extern void prep_compound_page(struct page *page, unsigned int order);
extern void post_alloc_hook(struct page *page, unsigned int order,
					gfp_t gfp_flags);
extern int user_min_free_kbytes;

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

/*
 * in mm/compaction.c
 */
/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn
 */
struct compact_control {
	struct list_head freepages;	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned long nr_freepages;	/* Number of isolated free pages */
	unsigned long nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */
	unsigned long migrate_pfn;	/* isolate_migratepages search base */
	unsigned long last_migrated_pfn;/* Not yet flushed page being freed */
	enum migrate_mode mode;		/* Async or sync migration mode */
	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
	bool direct_compaction;		/* False from kcompactd or /proc/... */
	bool whole_zone;		/* Whole zone should/has been scanned */
	int order;			/* order a direct compactor needs */
	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
	const int classzone_idx;	/* zone index of a direct compactor */
	struct zone *zone;
	bool contended;			/* Signal lock or sched contention */
};

unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn);
unsigned long
isolate_migratepages_range(struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn);
int find_suitable_fallback(struct free_area *area, unsigned int order,
			int migratetype, bool only_stealable, bool *can_steal);

#endif

/*
 * This function returns the order of a free page in the buddy system. In
 * general, page_zone(page)->lock must be held by the caller to prevent the
 * page from being allocated in parallel and returning garbage as the order.
 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
 * page cannot be allocated or merged in parallel. Alternatively, it must
 * handle invalid values gracefully, and use page_order_unsafe() below.
 */
static inline unsigned int page_order(struct page *page)
{
	/* PageBuddy() must be checked by the caller */
	return page_private(page);
}

/*
 * Like page_order(), but for callers who cannot afford to hold the zone lock.
 * PageBuddy() should be checked first by the caller to minimize race window,
 * and invalid values must be handled gracefully.
 *
 * READ_ONCE is used so that if the caller assigns the result into a local
 * variable and e.g. tests it for valid range before using, the compiler cannot
 * decide to remove the variable and inline the page_private(page) multiple
 * times, potentially observing different values in the tests and the actual
 * use of the result.
 */
#define page_order_unsafe(page)		READ_ONCE(page_private(page))
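
/*
 * Editor's sketch of the intended calling pattern (modelled on compaction's
 * use; the helper name is hypothetical): read the order once, then
 * range-check it before acting on it, since the page may be allocated or
 * merged concurrently.
 */
static inline unsigned long page_order_unsafe_sketch(struct page *page)
{
	unsigned long freepage_order = page_order_unsafe(page);

	/* Trust the value only if it is within the valid order range */
	if (freepage_order > 0 && freepage_order < MAX_ORDER)
		return (1UL << freepage_order) - 1;	/* extra pfns to skip */
	return 0;
}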

static inline bool is_cow_mapping(vm_flags_t flags)
{
	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}
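
/*
 * Editor's note (illustrative): a mapping is copy-on-write when it is not
 * shared but may become writable - i.e. a MAP_PRIVATE mapping with
 * PROT_WRITE either set now or obtainable later via mprotect().
 */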

/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area - automatically grows in one direction
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return (flags & VM_STACK) == VM_STACK;
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}

/* mm/util.c */
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent);

#ifdef CONFIG_MMU
extern long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *nonblocking);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
{
	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
}

/*
 * must be called with vma's mmap_sem held for read or write, and page locked.
 */
extern void mlock_vma_page(struct page *page);
extern unsigned int munlock_vma_page(struct page *page);

/*
 * Clear the page's PageMlocked(). This can be useful in a situation where
 * we want to unconditionally remove a page from the pagecache -- e.g.,
 * on truncation or freeing.
 *
 * It is legal to call this function for any page, mlocked or not.
 * If called for a page that is still mapped by mlocked vmas, all we do
 * is revert to lazy LRU behaviour -- semantics are not broken.
 */
extern void clear_page_mlock(struct page *page);

/*
 * mlock_migrate_page - called only from migrate_misplaced_transhuge_page()
 * (because that does not go through the full procedure of migration ptes):
 * to migrate the Mlocked page flag; update statistics.
 */
static inline void mlock_migrate_page(struct page *newpage, struct page *page)
{
	if (TestClearPageMlocked(page)) {
		int nr_pages = hpage_nr_pages(page);

		/* Holding pmd lock, no change in irq context: __mod is safe */
		__mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
		SetPageMlocked(newpage);
		__mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages);
	}
}

extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);

/*
 * At what user virtual address is page expected in @vma?
 */
static inline unsigned long
__vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page_to_pgoff(page);
	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
}

static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	unsigned long address = __vma_address(page, vma);

	/* page should be within @vma mapping range */
	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);

	return address;
}
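
/*
 * Editor's note (illustrative): __vma_address() is effectively the inverse
 * of linear_page_index() - it maps a page's offset within the file back to
 * the user virtual address at which @vma would map it.
 */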

#else /* !CONFIG_MMU */
static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
static inline void mlock_migrate_page(struct page *new, struct page *old) { }

#endif /* !CONFIG_MMU */

/*
 * Return the mem_map entry representing the 'offset' subpage within
 * the maximally aligned gigantic page 'base'. Handle any discontiguity
 * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
 */
static inline struct page *mem_map_offset(struct page *base, int offset)
{
	if (unlikely(offset >= MAX_ORDER_NR_PAGES))
		return nth_page(base, offset);
	return base + offset;
}

/*
 * Iterator over all subpages within the maximally aligned gigantic
 * page 'base'. Handle any discontiguity in the mem_map.
 */
static inline struct page *mem_map_next(struct page *iter,
						struct page *base, int offset)
{
	if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
		unsigned long pfn = page_to_pfn(base) + offset;
		if (!pfn_valid(pfn))
			return NULL;
		return pfn_to_page(pfn);
	}
	return iter + 1;
}
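
/*
 * Editor's sketch of the intended iteration pattern (illustrative only; the
 * loop body is hypothetical): visit every subpage of a gigantic page without
 * assuming the mem_map is contiguous across MAX_ORDER_NR_PAGES boundaries.
 *
 *	struct page *p = base;
 *	int i;
 *
 *	for (i = 0; i < nr_subpages; i++, p = mem_map_next(p, base, i)) {
 *		cond_resched();
 *		clear_user_highpage(p, addr + i * PAGE_SIZE);
 *	}
 */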

/*
 * FLATMEM and DISCONTIGMEM configurations use alloc_bootmem_node,
 * so all functions starting at paging_init should be marked __init
 * in those cases. SPARSEMEM, however, allows for memory hotplug,
 * and alloc_bootmem_node is not used.
 */
#ifdef CONFIG_SPARSEMEM
#define __paginginit __meminit
#else
#define __paginginit __init
#endif

/* Memory initialisation debug and verification */
enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		if (level <= MMINIT_WARNING) \
			pr_warn("mminit::" prefix " " fmt, ##arg); \
		else \
			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)
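
/*
 * Editor's example of intended usage (illustrative; modelled on the callers
 * in the mm init code, not quoted from them):
 *
 *	mminit_dprintk(MMINIT_VERIFY, "zonelist",
 *		       "general %d:%s = ", nid, zone->name);
 */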

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_zonelist(void);
#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
#if defined(CONFIG_SPARSEMEM)
extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn);
#else
static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn)
{
}
#endif /* CONFIG_SPARSEMEM */

#define NODE_RECLAIM_NOSCAN	-2
#define NODE_RECLAIM_FULL	-1
#define NODE_RECLAIM_SOME	0
#define NODE_RECLAIM_SUCCESS	1

extern int hwpoison_filter(struct page *p);

extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;

extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long,
		unsigned long, unsigned long,
		unsigned long, unsigned long);

extern void set_pageblock_order(void);
unsigned long reclaim_clean_pages_from_list(struct zone *zone,
					    struct list_head *page_list);
/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)

#define ALLOC_HARDER		0x10 /* try to alloc harder */
#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
#define ALLOC_CMA		0x80 /* allow allocations from CMA areas */
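
/*
 * Editor's note (illustrative): the watermark-selection bits double as an
 * index into zone->watermark[], so allocator code can pick the watermark to
 * test with, e.g.:
 *
 *	unsigned long mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
 */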

enum ttu_flags;
struct tlbflush_unmap_batch;

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
#else
static inline void try_to_unmap_flush(void)
{
}
static inline void try_to_unmap_flush_dirty(void)
{
}

#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

extern const struct trace_print_flags pageflag_names[];
extern const struct trace_print_flags vmaflag_names[];
extern const struct trace_print_flags gfpflag_names[];

#endif /* __MM_INTERNAL_H */