/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/page_isolation.h>

static int set_migratetype_isolate(struct page *page,
				bool skip_hwpoisoned_pages)
{
	struct zone *zone;
	unsigned long flags, pfn;
	struct memory_isolate_notify arg;
	int notifier_ret;
	int ret = -EBUSY;

	zone = page_zone(page);

	spin_lock_irqsave(&zone->lock, flags);

	pfn = page_to_pfn(page);
	arg.start_pfn = pfn;
	arg.nr_pages = pageblock_nr_pages;
	arg.pages_found = 0;

	/*
	 * It may be possible to isolate a pageblock even if the
	 * migratetype is not MIGRATE_MOVABLE. The memory isolation
	 * notifier chain is used by balloon drivers to return the
	 * number of pages in a range that are held by the balloon
	 * driver to shrink memory. If all the pages are accounted for
	 * by balloons, are free, or on the LRU, isolation can continue.
	 * Later, for example, when the memory hotplug notifier runs,
	 * the pages reported as "can be isolated" should be isolated
	 * (freed) by the balloon driver through the memory notifier
	 * chain.
	 */
	notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
	notifier_ret = notifier_to_errno(notifier_ret);
	if (notifier_ret)
		goto out;
	/*
	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
	 * We just check MOVABLE pages.
	 */
	if (!has_unmovable_pages(zone, page, arg.pages_found,
				 skip_hwpoisoned_pages))
		ret = 0;

	/*
	 * Here, "immobile" means "not-on-LRU" pages. If there are more
	 * immobile pages than removable-by-driver pages reported by the
	 * notifier, we'll fail.
	 */

out:
	if (!ret) {
		unsigned long nr_pages;
		int migratetype = get_pageblock_migratetype(page);

		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		zone->nr_isolate_pageblock++;
		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);

		__mod_zone_freepage_state(zone, -nr_pages, migratetype);
	}

	spin_unlock_irqrestore(&zone->lock, flags);
	if (!ret)
		drain_all_pages(zone);
	return ret;
}
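
/*
 * Illustrative sketch (not part of this file): a balloon driver can take
 * part in the MEM_ISOLATE_COUNT round-trip used above by registering a
 * memory-isolate notifier and reporting how many pages of the queried
 * range it holds. Only register_memory_isolate_notifier(), the notifier
 * callback signature and the memory_isolate_notify fields are taken from
 * the kernel; balloon_pages_in_range() is a made-up helper.
 *
 *	static int balloon_isolate_notify(struct notifier_block *nb,
 *					  unsigned long action, void *data)
 *	{
 *		struct memory_isolate_notify *arg = data;
 *
 *		if (action == MEM_ISOLATE_COUNT)
 *			arg->pages_found += balloon_pages_in_range(arg->start_pfn,
 *								   arg->nr_pages);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block balloon_isolate_nb = {
 *		.notifier_call	= balloon_isolate_notify,
 *	};
 *	...
 *	register_memory_isolate_notifier(&balloon_isolate_nb);
 */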

static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
	struct zone *zone;
	unsigned long flags, nr_pages;
	struct page *isolated_page = NULL;
	unsigned int order;
	unsigned long page_idx, buddy_idx;
	struct page *buddy;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
		goto out;

	/*
	 * Because a free page of pageblock_order or above on an isolated
	 * pageblock is restricted from merging (to keep the freepage
	 * counting correct), it is possible that an unmerged free buddy
	 * page exists. move_freepages_block() does not handle merging,
	 * so we need another approach: isolating the page and freeing it
	 * again will make the buddies merge.
	 */
	if (PageBuddy(page)) {
		order = page_order(page);
		if (order >= pageblock_order) {
			page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
			buddy_idx = __find_buddy_index(page_idx, order);
			buddy = page + (buddy_idx - page_idx);

			if (pfn_valid_within(page_to_pfn(buddy)) &&
			    !is_migrate_isolate_page(buddy)) {
				__isolate_free_page(page, order);
				kernel_map_pages(page, (1 << order), 1);
				set_page_refcounted(page);
				isolated_page = page;
			}
		}
	}

	/*
	 * If we isolated a free page of pageblock_order or above, there
	 * should be no other free page left in the pageblock, so we can
	 * skip the costly pageblock scan done by move_freepages_block()
	 * below.
	 */
	if (!isolated_page) {
		nr_pages = move_freepages_block(zone, page, migratetype);
		__mod_zone_freepage_state(zone, nr_pages, migratetype);
	}
	set_pageblock_migratetype(page, migratetype);
	zone->nr_isolate_pageblock--;
out:
	spin_unlock_irqrestore(&zone->lock, flags);
	if (isolated_page)
		__free_pages(isolated_page, order);
}
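
/*
 * Illustrative note (not part of this file): __find_buddy_index(), used
 * above, locates a page's buddy purely by flipping the order bit of its
 * index, so no scanning of the free lists is needed:
 *
 *	buddy_idx = page_idx ^ (1 << order);
 *
 * e.g. for order 3, index 0 and index 8 are buddies, as are 16 and 24.
 */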

static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
	int i;
	for (i = 0; i < nr_pages; i++)
		if (pfn_valid_within(pfn + i))
			break;
	if (unlikely(i == nr_pages))
		return NULL;
	return pfn_to_page(pfn + i);
}

/*
 * start_isolate_page_range() -- make the page-allocation-type of a range
 * of pages MIGRATE_ISOLATE.
 * @start_pfn: The lower PFN of the range to be isolated.
 * @end_pfn: The upper PFN of the range to be isolated.
 * @migratetype: migrate type to set in error recovery.
 *
 * Making the page-allocation-type MIGRATE_ISOLATE means that free pages in
 * the range will not be allocated. Any page freed into the range later will
 * not be handed out again either.
 *
 * start_pfn/end_pfn must be aligned to pageblock_nr_pages.
 * Returns 0 on success and -EBUSY if any part of the range cannot be
 * isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype, bool skip_hwpoisoned_pages)
{
	unsigned long pfn;
	unsigned long undo_pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page &&
		    set_migratetype_isolate(page, skip_hwpoisoned_pages)) {
			undo_pfn = pfn;
			goto undo;
		}
	}
	return 0;
undo:
	for (pfn = start_pfn;
	     pfn < undo_pfn;
	     pfn += pageblock_nr_pages)
		unset_migratetype_isolate(pfn_to_page(pfn), migratetype);

	return -EBUSY;
}
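
/*
 * Illustrative sketch (not part of this file): a caller that only knows an
 * arbitrary pfn has to widen it to pageblock boundaries before calling
 * start_isolate_page_range(), since the BUG_ON()s above require alignment
 * to pageblock_nr_pages. round_down()/round_up() are the generic kernel
 * helpers; the variable names are made up.
 *
 *	unsigned long start = round_down(pfn, pageblock_nr_pages);
 *	unsigned long end = round_up(pfn + 1, pageblock_nr_pages);
 *
 *	ret = start_isolate_page_range(start, end, MIGRATE_MOVABLE, true);
 */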

/*
 * Make isolated pages available again.
 */
int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			    unsigned migratetype)
{
	unsigned long pfn;
	struct page *page;
	BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
	BUG_ON((end_pfn) & (pageblock_nr_pages - 1));
	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (!page || get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
			continue;
		unset_migratetype_isolate(page, migratetype);
	}
	return 0;
}

/*
 * Test whether all pages in the range are free (i.e. isolated).
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns the last checked pfn: it equals end_pfn only if every page in
 * the range is free or, when skip_hwpoisoned_pages is set, hwpoisoned.
 */
static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
				  bool skip_hwpoisoned_pages)
{
	struct page *page;

	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		if (PageBuddy(page))
			/*
			 * If the page is on a free list, it has to be on
			 * the correct MIGRATE_ISOLATE freelist. There is no
			 * simple way to verify that with a VM_BUG_ON(),
			 * though.
			 */
			pfn += 1 << page_order(page);
		else if (skip_hwpoisoned_pages && PageHWPoison(page))
			/* A HWPoisoned page cannot also be PageBuddy */
			pfn++;
		else
			break;
	}

	return pfn;
}

int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			bool skip_hwpoisoned_pages)
{
	unsigned long pfn, flags;
	struct page *page;
	struct zone *zone;

	/*
	 * Note: pageblock_nr_pages != MAX_ORDER, so chunks of free pages
	 * are not necessarily aligned to pageblock_nr_pages.
	 * We therefore check the pageblock migratetype first.
	 */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
			break;
	}
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	if ((pfn < end_pfn) || !page)
		return -EBUSY;
	/* Check all pages are free or marked as ISOLATED */
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
						skip_hwpoisoned_pages);
	spin_unlock_irqrestore(&zone->lock, flags);

	trace_test_pages_isolated(start_pfn, end_pfn, pfn);

	return pfn < end_pfn ? -EBUSY : 0;
}
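
/*
 * Illustrative sketch (not part of this file): the typical caller pairs the
 * isolate/test/undo entry points above, roughly the way alloc_contig_range()
 * in mm/page_alloc.c does. Error handling and the migration step are
 * compressed here to a single comment line.
 *
 *	ret = start_isolate_page_range(start, end, MIGRATE_MOVABLE, false);
 *	if (ret)
 *		return ret;
 *
 *	... migrate the in-use pages out of [start, end) ...
 *
 *	if (test_pages_isolated(start, end, false))
 *		ret = -EBUSY;
 *	...
 *	undo_isolate_page_range(start, end, MIGRATE_MOVABLE);
 */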

struct page *alloc_migrate_target(struct page *page, unsigned long private,
				  int **resultp)
{
	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;

	/*
	 * TODO: allocate a destination hugepage from the nearest neighbor
	 * node, in accordance with the memory policy of the user process
	 * if possible. For now, as a simple work-around, we use the next
	 * node for the destination.
	 */
	if (PageHuge(page)) {
		nodemask_t src = nodemask_of_node(page_to_nid(page));
		nodemask_t dst;
		nodes_complement(dst, src);
		return alloc_huge_page_node(page_hstate(compound_head(page)),
					    next_node(page_to_nid(page), dst));
	}

	if (PageHighMem(page))
		gfp_mask |= __GFP_HIGHMEM;

	return alloc_page(gfp_mask);
}
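
/*
 * Illustrative sketch (not part of this file): alloc_migrate_target() is
 * meant to be handed to migrate_pages() as the new_page_t callback, roughly
 * the way memory hotplug's do_migrate_range() uses it; "source" is a list of
 * isolated pages built by the caller.
 *
 *	ret = migrate_pages(&source, alloc_migrate_target, NULL, 0,
 *			    MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
 */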