/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include "internal.h"

int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages)
{
	struct zone *zone;
	unsigned long flags, pfn;
	struct memory_isolate_notify arg;
	int notifier_ret;
	int ret = -EBUSY;

	zone = page_zone(page);

	spin_lock_irqsave(&zone->lock, flags);

	pfn = page_to_pfn(page);
	arg.start_pfn = pfn;
	arg.nr_pages = pageblock_nr_pages;
	arg.pages_found = 0;

	/*
	 * It may be possible to isolate a pageblock even if its
	 * migratetype is not MIGRATE_MOVABLE. The memory isolation
	 * notifier chain lets balloon drivers report how many pages in
	 * the range they hold for shrinking memory. If every page is
	 * accounted for by a balloon, is free, or is on the LRU,
	 * isolation can continue. Later, when e.g. the memory hotplug
	 * notifier runs, the pages reported here as "can be isolated"
	 * should actually be isolated (freed) by the balloon driver via
	 * the memory notifier chain. (A sketch of such a callback
	 * follows below.)
	 */
	notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
	notifier_ret = notifier_to_errno(notifier_ret);
	if (notifier_ret)
		goto out;
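	/*
	 * A MEM_ISOLATE_COUNT callback might look roughly like the
	 * hypothetical sketch below (count_balloon_pages() is made up
	 * for illustration; this is not code from an in-tree driver):
	 *
	 *	static int balloon_isolate_notify(struct notifier_block *nb,
	 *					  unsigned long action, void *data)
	 *	{
	 *		struct memory_isolate_notify *arg = data;
	 *
	 *		if (action == MEM_ISOLATE_COUNT)
	 *			arg->pages_found +=
	 *				count_balloon_pages(arg->start_pfn,
	 *						    arg->nr_pages);
	 *		return NOTIFY_OK;
	 *	}
	 */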
	/*
	 * FIXME: memory hotplug does not yet call shrink_slab() by
	 * itself, so for now we only check MOVABLE pages.
	 */
	if (!has_unmovable_pages(zone, page, arg.pages_found,
				 skip_hwpoisoned_pages))
		ret = 0;

	/*
	 * Here, "immobile" means pages that are not on the LRU. If there
	 * are more immobile pages than removable-by-driver pages reported
	 * by the notifier, we will fail.
	 */

out:
	if (!ret) {
		unsigned long nr_pages;
		int migratetype = get_pageblock_migratetype(page);

		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		zone->nr_isolate_pageblock++;
		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);

		__mod_zone_freepage_state(zone, -nr_pages, migratetype);
	}

	spin_unlock_irqrestore(&zone->lock, flags);
	if (!ret)
		drain_all_pages(zone);
	return ret;
}

void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
	struct zone *zone;
	unsigned long flags, nr_pages;
	struct page *isolated_page = NULL;
	unsigned int order;
	unsigned long page_idx, buddy_idx;
	struct page *buddy;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
		goto out;

	/*
	 * Because a free page with order >= pageblock_order on an
	 * isolated pageblock is prevented from merging (to keep the
	 * freepage counts correct), a free buddy page may exist here.
	 * move_freepages_block() does not handle merging, so we need a
	 * different approach: isolating the page and freeing it again
	 * lets the buddy allocator merge it.
	 */
	if (PageBuddy(page)) {
		order = page_order(page);
		if (order >= pageblock_order) {
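			/*
			 * The buddy at this order lives at index
			 * page_idx ^ (1 << order) (what
			 * __find_buddy_index() computes): the two halves
			 * of an order-(order + 1) block differ only in
			 * that one bit.
			 */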
			page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
			buddy_idx = __find_buddy_index(page_idx, order);
			buddy = page + (buddy_idx - page_idx);

			if (!is_migrate_isolate_page(buddy)) {
				__isolate_free_page(page, order);
				kernel_map_pages(page, (1 << order), 1);
				set_page_refcounted(page);
				isolated_page = page;
			}
		}
	}

	/*
	 * If we isolated a free page of order >= pageblock_order, there
	 * should be no other free pages in the range, so we can skip the
	 * costly pageblock scan for free pages to move.
	 */
	if (!isolated_page) {
		nr_pages = move_freepages_block(zone, page, migratetype);
		__mod_zone_freepage_state(zone, nr_pages, migratetype);
	}
	set_pageblock_migratetype(page, migratetype);
	zone->nr_isolate_pageblock--;
out:
	spin_unlock_irqrestore(&zone->lock, flags);
	if (isolated_page)
		__free_pages(isolated_page, order);
}

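/*
 * Return the first valid page in [pfn, pfn + nr_pages), or NULL if the
 * whole range lies in a memory hole.
 */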
static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
	int i;
	for (i = 0; i < nr_pages; i++)
		if (pfn_valid_within(pfn + i))
			break;
	if (unlikely(i == nr_pages))
		return NULL;
	return pfn_to_page(pfn + i);
}

/*
 * start_isolate_page_range() -- set the migratetype of a range of pages
 * to MIGRATE_ISOLATE.
 * @start_pfn: The lower PFN of the range to be isolated.
 * @end_pfn: The upper PFN of the range to be isolated.
 * @migratetype: migratetype to restore on error recovery.
 *
 * Setting the migratetype to MIGRATE_ISOLATE means that free pages in
 * the range will never be allocated: neither the pages that are free
 * now nor those freed in the future.
 *
 * start_pfn/end_pfn must be aligned to pageblock_nr_pages.
 * Returns 0 on success and -EBUSY if any part of the range cannot be
 * isolated. (A typical call sequence is sketched after
 * undo_isolate_page_range() below.)
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype, bool skip_hwpoisoned_pages)
{
	unsigned long pfn;
	unsigned long undo_pfn;
	struct page *page;

	BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
	BUG_ON((end_pfn) & (pageblock_nr_pages - 1));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page &&
		    set_migratetype_isolate(page, skip_hwpoisoned_pages)) {
			undo_pfn = pfn;
			goto undo;
		}
	}
	return 0;
undo:
	for (pfn = start_pfn;
	     pfn < undo_pfn;
	     pfn += pageblock_nr_pages)
		unset_migratetype_isolate(pfn_to_page(pfn), migratetype);

	return -EBUSY;
}

/*
 * Make isolated pages available again.
 */
int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			    unsigned migratetype)
{
	unsigned long pfn;
	struct page *page;
	BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
	BUG_ON((end_pfn) & (pageblock_nr_pages - 1));
	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (!page || get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
			continue;
		unset_migratetype_isolate(page, migratetype);
	}
	return 0;
}
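
/*
 * Typical call sequence (a sketch modeled on callers such as
 * alloc_contig_range() and memory hot-remove; error handling and the
 * migration step are elided):
 *
 *	ret = start_isolate_page_range(start_pfn, end_pfn,
 *				       MIGRATE_MOVABLE, false);
 *	if (ret)
 *		return ret;
 *	... migrate the in-use pages out of the range ...
 *	if (!test_pages_isolated(start_pfn, end_pfn, false))
 *		... every page in the range is now free and isolated ...
 *	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
 */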
/*
 * Test whether all pages in the range are free (i.e. isolated).
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns 1 if all pages in the range are isolated.
 */
static int
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
				  bool skip_hwpoisoned_pages)
{
	struct page *page;

	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		if (PageBuddy(page)) {
			/*
			 * If a race between isolation and allocation
			 * happens, some free pages could be on a
			 * MIGRATE_MOVABLE list even though the
			 * pageblock's migratetype is MIGRATE_ISOLATE.
			 * Catch them and move them to the
			 * MIGRATE_ISOLATE list.
			 */
			if (get_freepage_migratetype(page) != MIGRATE_ISOLATE) {
				struct page *end_page;

				end_page = page + (1 << page_order(page)) - 1;
				move_freepages(page_zone(page), page, end_page,
					       MIGRATE_ISOLATE);
			}
			pfn += 1 << page_order(page);
		}
		else if (page_count(page) == 0 &&
			 get_freepage_migratetype(page) == MIGRATE_ISOLATE)
			pfn += 1;
		else if (skip_hwpoisoned_pages && PageHWPoison(page)) {
			/*
			 * An HWPoisoned page may not be in the buddy
			 * system, and its page_count() may be nonzero.
			 */
			pfn++;
			continue;
		}
		else
			break;
	}
	if (pfn < end_pfn)
		return 0;
	return 1;
}

int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			bool skip_hwpoisoned_pages)
{
	unsigned long pfn, flags;
	struct page *page;
	struct zone *zone;
	int ret;

	/*
	 * Note: pageblock_nr_pages != MAX_ORDER, so chunks of free pages
	 * are not necessarily aligned to pageblock_nr_pages. So we just
	 * check the migratetype of each pageblock first; the per-page
	 * check is done under zone->lock below.
	 */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
			break;
	}
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	if ((pfn < end_pfn) || !page)
		return -EBUSY;
	/* Check all pages are free or marked as ISOLATED */
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
						skip_hwpoisoned_pages);
	spin_unlock_irqrestore(&zone->lock, flags);
	return ret ? 0 : -EBUSY;
}

struct page *alloc_migrate_target(struct page *page, unsigned long private,
				  int **resultp)
{
	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;

	/*
	 * TODO: allocate a destination hugepage from the nearest neighbor
	 * node, in accordance with the memory policy of the user process,
	 * if possible. For now, as a simple workaround, we use the next
	 * node as the destination.
	 */
	if (PageHuge(page)) {
		nodemask_t src = nodemask_of_node(page_to_nid(page));
		nodemask_t dst;
		nodes_complement(dst, src);
		return alloc_huge_page_node(page_hstate(compound_head(page)),
					    next_node(page_to_nid(page), dst));
	}

	if (PageHighMem(page))
		gfp_mask |= __GFP_HIGHMEM;

	return alloc_page(gfp_mask);
}
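
/*
 * alloc_migrate_target() is meant to be passed to migrate_pages() as its
 * new-page allocator. A sketch of such a call (modeled on the memory
 * hot-remove path; the exact arguments depend on the caller):
 *
 *	migrate_pages(&source_list, alloc_migrate_target, NULL, 0,
 *		      MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
 */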