/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include <linux/page_owner.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/page_isolation.h>
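
/*
 * set_migratetype_isolate() marks the pageblock containing @page as
 * MIGRATE_ISOLATE, provided the memory isolation notifier and the
 * unmovable-page check agree that the block can be isolated. On success
 * the block's free pages are moved to the MIGRATE_ISOLATE freelist, the
 * zone's isolated-pageblock count is bumped, and the per-CPU pagesets
 * are drained. Returns 0 on success and -EBUSY otherwise.
 */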
static int set_migratetype_isolate(struct page *page,
				bool skip_hwpoisoned_pages)
{
	struct zone *zone;
	unsigned long flags, pfn;
	struct memory_isolate_notify arg;
	int notifier_ret;
	int ret = -EBUSY;

	zone = page_zone(page);

	spin_lock_irqsave(&zone->lock, flags);

	pfn = page_to_pfn(page);
	arg.start_pfn = pfn;
	arg.nr_pages = pageblock_nr_pages;
	arg.pages_found = 0;

	/*
	 * It may be possible to isolate a pageblock even if its
	 * migratetype is not MIGRATE_MOVABLE. The memory isolation
	 * notifier chain is used by balloon drivers to report the
	 * number of pages in a range that are held by the balloon
	 * driver in order to shrink memory. If every page is accounted
	 * for by a balloon, is free, or is on the LRU, isolation can
	 * continue. Later, for example, when the memory hotplug
	 * notifier runs, the pages reported as "can be isolated" are
	 * expected to be isolated (freed) by the balloon driver
	 * through the memory notifier chain.
	 */
	notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
	notifier_ret = notifier_to_errno(notifier_ret);
	if (notifier_ret)
		goto out;
	/*
	 * FIXME: memory hotplug does not yet call shrink_slab() by
	 * itself, so we only check for movable pages here.
	 */
	if (!has_unmovable_pages(zone, page, arg.pages_found,
				 skip_hwpoisoned_pages))
		ret = 0;

	/*
	 * Here, "immobile" means pages that are not on the LRU. If there
	 * are more immobile pages than removable-by-driver pages reported
	 * by the notifier, we will fail.
	 */

out:
	if (!ret) {
		unsigned long nr_pages;
		int migratetype = get_pageblock_migratetype(page);

		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		zone->nr_isolate_pageblock++;
		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);

		__mod_zone_freepage_state(zone, -nr_pages, migratetype);
	}

	spin_unlock_irqrestore(&zone->lock, flags);
	if (!ret)
		drain_all_pages(zone);
	return ret;
}
 | 79 |  | 
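
/*
 * unset_migratetype_isolate() reverses set_migratetype_isolate(): it
 * restores @migratetype on the pageblock containing @page and moves the
 * block's free pages back to the @migratetype freelist. As a special
 * case, a free page of order >= pageblock_order is pulled out of the
 * buddy allocator and re-freed outside the zone lock, so that it can
 * merge with its (no longer isolated) buddy.
 */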
static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
	struct zone *zone;
	unsigned long flags, nr_pages;
	bool isolated_page = false;
	unsigned int order;
	unsigned long pfn, buddy_pfn;
	struct page *buddy;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
		goto out;

	/*
	 * Because a free page larger than pageblock_order on an isolated
	 * pageblock is restricted from merging due to the freepage
	 * counting problem, it is possible that a free buddy page exists
	 * here. move_freepages_block() does not handle merging, so we
	 * need another approach to merge such pages: isolating and then
	 * freeing the page causes it to be merged.
	 */
	if (PageBuddy(page)) {
		order = page_order(page);
		if (order >= pageblock_order) {
			pfn = page_to_pfn(page);
			buddy_pfn = __find_buddy_pfn(pfn, order);
			buddy = page + (buddy_pfn - pfn);

			if (pfn_valid_within(buddy_pfn) &&
			    !is_migrate_isolate_page(buddy)) {
				__isolate_free_page(page, order);
				isolated_page = true;
			}
		}
	}

	/*
	 * If we isolated a free page larger than pageblock_order, there
	 * can be no other free pages in the range, so we can skip the
	 * costly pageblock scan that move_freepages_block() would do.
	 */
	if (!isolated_page) {
		nr_pages = move_freepages_block(zone, page, migratetype);
		__mod_zone_freepage_state(zone, nr_pages, migratetype);
	}
	set_pageblock_migratetype(page, migratetype);
	zone->nr_isolate_pageblock--;
out:
	spin_unlock_irqrestore(&zone->lock, flags);
	if (isolated_page) {
		post_alloc_hook(page, order, __GFP_MOVABLE);
		__free_pages(page, order);
	}
}
 | 135 |  | 
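
/*
 * __first_valid_page() returns the first page in [pfn, pfn + nr_pages)
 * whose pfn is valid, or NULL if no pfn in the range is valid.
 */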
static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++)
		if (pfn_valid_within(pfn + i))
			break;
	if (unlikely(i == nr_pages))
		return NULL;
	return pfn_to_page(pfn + i);
}

/*
 * start_isolate_page_range() -- make the page allocation type of a range
 * of pages MIGRATE_ISOLATE.
 * @start_pfn: The lower PFN of the range to be isolated.
 * @end_pfn: The upper PFN of the range to be isolated.
 * @migratetype: migrate type to set in error recovery.
 * @skip_hwpoisoned_pages: If true, hwpoisoned pages are considered
 * isolatable.
 *
 * Making the page allocation type of the range MIGRATE_ISOLATE means that
 * free pages in the range will never be allocated. Any free pages, and any
 * pages freed in the future, will not be allocated again.
 *
 * start_pfn/end_pfn must be aligned to pageblock_nr_pages.
 * Returns 0 on success and -EBUSY if any part of the range cannot be
 * isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype, bool skip_hwpoisoned_pages)
{
	unsigned long pfn;
	unsigned long undo_pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page &&
		    set_migratetype_isolate(page, skip_hwpoisoned_pages)) {
			undo_pfn = pfn;
			goto undo;
		}
	}
	return 0;
undo:
	for (pfn = start_pfn; pfn < undo_pfn; pfn += pageblock_nr_pages)
		unset_migratetype_isolate(pfn_to_page(pfn), migratetype);

	return -EBUSY;
}

/*
 * Make isolated pages available again.
 */
int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			    unsigned migratetype)
{
	unsigned long pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (!page || get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
			continue;
		unset_migratetype_isolate(page, migratetype);
	}
	return 0;
}
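
/*
 * Typical usage of the isolation API (a sketch; it mirrors how
 * alloc_contig_range() in mm/page_alloc.c drives these helpers, with the
 * migration of in-use pages elided):
 *
 *	ret = start_isolate_page_range(start_pfn, end_pfn,
 *				       MIGRATE_MOVABLE, false);
 *	if (ret)
 *		return ret;
 *	... migrate any in-use pages out of the range ...
 *	if (test_pages_isolated(start_pfn, end_pfn, false))
 *		ret = -EBUSY;
 *	... on success, the free pages in the range can be taken ...
 *	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
 */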
/*
 * Test whether all pages in the range are free (i.e. isolated) or not.
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns the last tested pfn.
 */
static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
				  bool skip_hwpoisoned_pages)
{
	struct page *page;

	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		if (PageBuddy(page))
			/*
			 * If the page is on a free list, it has to be on
			 * the correct MIGRATE_ISOLATE freelist. There is no
			 * simple way to verify that as VM_BUG_ON(), though.
			 */
			pfn += 1 << page_order(page);
		else if (skip_hwpoisoned_pages && PageHWPoison(page))
			/* A HWPoisoned page cannot also be PageBuddy */
			pfn++;
		else
			break;
	}

	return pfn;
}

/* Caller should ensure that the requested range is in a single zone */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			bool skip_hwpoisoned_pages)
{
	unsigned long pfn, flags;
	struct page *page;
	struct zone *zone;

	/*
	 * Note: pageblock_nr_pages != MAX_ORDER_NR_PAGES, so chunks of
	 * free pages are not necessarily aligned to pageblock_nr_pages.
	 * We therefore check the migratetype of each pageblock first.
	 */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
			break;
	}
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	if ((pfn < end_pfn) || !page)
		return -EBUSY;
	/* Check that all pages are free or marked as ISOLATED */
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
						skip_hwpoisoned_pages);
	spin_unlock_irqrestore(&zone->lock, flags);

	trace_test_pages_isolated(start_pfn, end_pfn, pfn);

	return pfn < end_pfn ? -EBUSY : 0;
}
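
/*
 * alloc_migrate_target() is a migration callback (it matches the
 * new_page_t signature used by migrate_pages()): it allocates a
 * destination page for @page, handling hugetlb and highmem pages
 * specially. @private and @resultp are unused here.
 */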
struct page *alloc_migrate_target(struct page *page, unsigned long private,
				  int **resultp)
{
	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;

	/*
	 * TODO: allocate a destination hugepage from the nearest neighbor
	 * node, in accordance with the memory policy of the user process,
	 * if possible. For now, as a simple workaround, we use the next
	 * node as the destination.
	 */
	if (PageHuge(page))
		return alloc_huge_page_node(page_hstate(compound_head(page)),
					    next_node_in(page_to_nid(page),
							 node_online_map));

	if (PageHighMem(page))
		gfp_mask |= __GFP_HIGHMEM;

	return alloc_page(gfp_mask);
}