/*
 * linux/mm/page_alloc.c
 *
 * Manages the free list, the system allocates free pages here.
 * Note that kmalloc() lives in slab.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 * Swap reorganised 29.12.95, Stephen Tweedie
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/kasan.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>
#include <linux/mempolicy.h>
#include <linux/memremap.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_ext.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <linux/prefetch.h>
#include <linux/mm_inline.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
#include <linux/page_owner.h>
#include <linux/kthread.h>
#include <linux/memcontrol.h>

#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
static DEFINE_MUTEX(pcp_batch_high_lock);
#define MIN_PERCPU_PAGELIST_FRACTION	(8)

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);	/* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
int _node_numa_mem_[MAX_NUMNODES];
#endif

#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
volatile unsigned long latent_entropy __latent_entropy;
EXPORT_SYMBOL(latent_entropy);
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
#ifdef CONFIG_MOVABLE_NODE
	[N_MEMORY] = { { [0] = 1UL } },
#endif
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

/* Protect totalram_pages and zone->managed_pages */
static DEFINE_SPINLOCK(managed_page_count_lock);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
unsigned long totalcma_pages __read_mostly;

int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

/*
 * A cached value of the page's pageblock's migratetype, used when the page is
 * put on a pcplist. Used to avoid the pageblock migratetype lookup when
 * freeing from pcplists in most cases, at the cost of possibly becoming stale.
 * Also the migratetype set in the page does not necessarily match the pcplist
 * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
 * other index - this ensures that it will be put on the correct CMA freelist.
 */
static inline int get_pcppage_migratetype(struct page *page)
{
	return page->index;
}

static inline void set_pcppage_migratetype(struct page *page, int migratetype)
{
	page->index = migratetype;
}

#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to temporarily
 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 * while devices are suspended. To avoid races with the suspend/hibernate code,
 * they should always be called with pm_mutex held (gfp_allowed_mask also should
 * only be modified with pm_mutex held, unless the suspend/hibernate code is
 * guaranteed not to run in parallel with that modification).
 */

static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	if (saved_gfp_mask) {
		gfp_allowed_mask = saved_gfp_mask;
		saved_gfp_mask = 0;
	}
}

void pm_restrict_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	WARN_ON(saved_gfp_mask);
	saved_gfp_mask = gfp_allowed_mask;
	gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
}

bool pm_suspended_storage(void)
{
	if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
		return false;
	return true;
}
#endif /* CONFIG_PM_SLEEP */
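
/*
 * Illustrative call pattern (a sketch, not code from this file): the
 * hibernation core is expected to bracket the device-suspend phase roughly as
 *
 *	lock_system_sleep();		(takes pm_mutex)
 *	pm_restrict_gfp_mask();		(allocations may no longer do I/O)
 *	... suspend devices, create the memory snapshot ...
 *	pm_restore_gfp_mask();
 *	unlock_system_sleep();
 *
 * so that any allocation made while devices are quiesced cannot recurse into
 * the block layer or filesystems. lock_system_sleep() is assumed here to be
 * the helper that takes pm_mutex; the actual call sites live in kernel/power/.
 */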

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
unsigned int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
	 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	 256,
#endif
#ifdef CONFIG_HIGHMEM
	 32,
#endif
	 32,
};
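
/*
 * Worked example of the ratios above (illustrative only): on the 1G machine
 * from the comment, a ratio of 256 means a ZONE_NORMAL allocation must leave
 * roughly 784M/256 ~= 3M of ZONE_DMA untouched, and a ratio of 32 means a
 * ZONE_HIGHMEM allocation must leave 224M/32 = 7M of ZONE_NORMAL untouched;
 * a highmem allocation likewise leaves (224M+784M)/256 ~= 4M of ZONE_DMA.
 * The per-zone reserves themselves are recomputed from these ratios by
 * setup_per_zone_lowmem_reserve().
 */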

EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	 "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem",
#endif
	 "Movable",
#ifdef CONFIG_ZONE_DEVICE
	 "Device",
#endif
};

char * const migratetype_names[MIGRATE_TYPES] = {
	"Unmovable",
	"Movable",
	"Reclaimable",
#ifdef CONFIG_CMA
	"CMA",
#endif
	"HighAtomic",
#ifdef CONFIG_MEMORY_ISOLATION
	"Isolate",
#endif
};

compound_page_dtor * const compound_page_dtors[] = {
	NULL,
	free_compound_page,
#ifdef CONFIG_HUGETLB_PAGE
	free_huge_page,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	free_transhuge_page,
#endif
};

int min_free_kbytes = 1024;
int user_min_free_kbytes = -1;
int watermark_scale_factor = 10;

static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
static unsigned long __initdata required_kernelcore;
static unsigned long __initdata required_movablecore;
static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
static bool mirrored_kernelcore;

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

int page_group_by_mobility_disabled __read_mostly;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static inline void reset_deferred_meminit(pg_data_t *pgdat)
{
	unsigned long max_initialise;
	unsigned long reserved_lowmem;

	/*
	 * Initialise at least 2G of a node, but also take into account the
	 * two large system hashes that can take up 1GB for 0.25TB/node.
	 */
	max_initialise = max(2UL << (30 - PAGE_SHIFT),
		(pgdat->node_spanned_pages >> 8));

	/*
	 * Compensate for all the memblock reservations (e.g. crash kernel)
	 * from the initial estimation to make sure we will initialize enough
	 * memory to boot.
	 */
	reserved_lowmem = memblock_reserved_memory_within(pgdat->node_start_pfn,
			pgdat->node_start_pfn + max_initialise);
	max_initialise += reserved_lowmem;

	pgdat->static_init_size = min(max_initialise, pgdat->node_spanned_pages);
	pgdat->first_deferred_pfn = ULONG_MAX;
}

/* Returns true if the struct page for the pfn is uninitialised */
static inline bool __meminit early_page_uninitialised(unsigned long pfn)
{
	int nid = early_pfn_to_nid(pfn);

	if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
		return true;

	return false;
}

/*
 * Returns false when the remaining initialisation should be deferred until
 * later in the boot cycle when it can be parallelised.
 */
static inline bool update_defer_init(pg_data_t *pgdat,
				unsigned long pfn, unsigned long zone_end,
				unsigned long *nr_initialised)
{
	/* Always populate low zones for address-constrained allocations */
	if (zone_end < pgdat_end_pfn(pgdat))
		return true;
	(*nr_initialised)++;
	if ((*nr_initialised > pgdat->static_init_size) &&
	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
		pgdat->first_deferred_pfn = pfn;
		return false;
	}

	return true;
}
#else
static inline void reset_deferred_meminit(pg_data_t *pgdat)
{
}

static inline bool early_page_uninitialised(unsigned long pfn)
{
	return false;
}

static inline bool update_defer_init(pg_data_t *pgdat,
				unsigned long pfn, unsigned long zone_end,
				unsigned long *nr_initialised)
{
	return true;
}
#endif

/* Return a pointer to the bitmap storing bits affecting a block of pages */
static inline unsigned long *get_pageblock_bitmap(struct page *page,
							unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	return __pfn_to_section(pfn)->pageblock_flags;
#else
	return page_zone(page)->pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
}

static inline int pfn_to_bitidx(struct page *page, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	pfn &= (PAGES_PER_SECTION-1);
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#else
	pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#endif /* CONFIG_SPARSEMEM */
}
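
/*
 * Illustrative arithmetic for the helpers above (assuming NR_PAGEBLOCK_BITS
 * is 4, as the BUILD_BUG_ON below enforces, and a typical pageblock_order of
 * 9): with SPARSEMEM, a pfn whose offset within its section is 0x2400 lies in
 * pageblock 0x2400 >> 9 = 18, so its flags start at bit 18 * 4 = 72 of that
 * section's pageblock_flags bitmap.
 */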

/**
 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @pfn: The target page frame number
 * @end_bitidx: The last bit of interest to retrieve
 * @mask: mask of bits that the caller is interested in
 *
 * Return: pageblock_bits flags
 */
static __always_inline unsigned long __get_pfnblock_flags_mask(struct page *page,
					unsigned long pfn,
					unsigned long end_bitidx,
					unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long word;

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);

	word = bitmap[word_bitidx];
	bitidx += end_bitidx;
	return (word >> (BITS_PER_LONG - bitidx - 1)) & mask;
}

unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
					unsigned long end_bitidx,
					unsigned long mask)
{
	return __get_pfnblock_flags_mask(page, pfn, end_bitidx, mask);
}

static __always_inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn)
{
	return __get_pfnblock_flags_mask(page, pfn, PB_migrate_end, MIGRATETYPE_MASK);
}

/**
 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @flags: The flags to set
 * @pfn: The target page frame number
 * @end_bitidx: The last bit of interest
 * @mask: mask of bits that the caller is interested in
 */
void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
					unsigned long pfn,
					unsigned long end_bitidx,
					unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long old_word, word;

	BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);

	VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);

	bitidx += end_bitidx;
	mask <<= (BITS_PER_LONG - bitidx - 1);
	flags <<= (BITS_PER_LONG - bitidx - 1);

	word = READ_ONCE(bitmap[word_bitidx]);
	for (;;) {
		old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
		if (word == old_word)
			break;
		word = old_word;
	}
}

void set_pageblock_migratetype(struct page *page, int migratetype)
{
	if (unlikely(page_group_by_mobility_disabled &&
		     migratetype < MIGRATE_PCPTYPES))
		migratetype = MIGRATE_UNMOVABLE;

	set_pageblock_flags_group(page, (unsigned long)migratetype,
					PB_migrate, PB_migrate_end);
}

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);
	unsigned long sp, start_pfn;

	do {
		seq = zone_span_seqbegin(zone);
		start_pfn = zone->zone_start_pfn;
		sp = zone->spanned_pages;
		if (!zone_spans_pfn(zone, pfn))
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	if (ret)
		pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
			pfn, zone_to_nid(zone), zone->name,
			start_pfn, start_pfn + sp);

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
	if (!pfn_valid_within(page_to_pfn(page)))
		return 0;
	if (zone != page_zone(page))
		return 0;

	return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page, const char *reason,
		unsigned long bad_flags)
{
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			pr_alert(
			      "BUG: Bad page state: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	pr_alert("BUG: Bad page state in process %s pfn:%05lx\n",
		current->comm, page_to_pfn(page));
	__dump_page(page, reason);
	bad_flags &= page->flags;
	if (bad_flags)
		pr_alert("bad because of flags: %#lx(%pGp)\n",
						bad_flags, &bad_flags);
	dump_page_owner(page);

	print_modules();
	dump_stack();
out:
	/* Leave bad fields for debug, except PageBuddy could make trouble */
	page_mapcount_reset(page); /* remove PageBuddy */
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

/*
 * Higher-order pages are called "compound pages". They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page" and has PG_head set.
 *
 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
 * in bit 0 of page->compound_head. The rest of the bits form a pointer to the
 * head page.
 *
 * The first tail page's ->compound_dtor holds the offset into the array of
 * compound page destructors. See compound_page_dtors.
 *
 * The first tail page's ->compound_order holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */
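
/*
 * Illustrative layout (a sketch, not normative): an order-2 compound page
 * spans four struct pages. page[0] is the head page with PG_head set;
 * page[1], page[2] and page[3] are tail pages whose compound_head is the
 * address of page[0] with bit 0 set. page[1] additionally carries
 * compound_dtor and compound_order (2 in this example), as set up by
 * prep_compound_page() below.
 */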

void free_compound_page(struct page *page)
{
	__free_pages_ok(page, compound_order(page));
}

void prep_compound_page(struct page *page, unsigned int order)
{
	int i;
	int nr_pages = 1 << order;

	set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;
		set_page_count(p, 0);
		p->mapping = TAIL_MAPPING;
		set_compound_head(p, page);
	}
	atomic_set(compound_mapcount_ptr(page), -1);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
unsigned int _debug_guardpage_minorder;
bool _debug_pagealloc_enabled __read_mostly
			= IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
EXPORT_SYMBOL(_debug_pagealloc_enabled);
bool _debug_guardpage_enabled __read_mostly;

static int __init early_debug_pagealloc(char *buf)
{
	if (!buf)
		return -EINVAL;
	return kstrtobool(buf, &_debug_pagealloc_enabled);
}
early_param("debug_pagealloc", early_debug_pagealloc);

static bool need_debug_guardpage(void)
{
	/* If we don't use debug_pagealloc, we don't need guard page */
	if (!debug_pagealloc_enabled())
		return false;

	if (!debug_guardpage_minorder())
		return false;

	return true;
}

static void init_debug_guardpage(void)
{
	if (!debug_pagealloc_enabled())
		return;

	if (!debug_guardpage_minorder())
		return;

	_debug_guardpage_enabled = true;
}

struct page_ext_operations debug_guardpage_ops = {
	.need = need_debug_guardpage,
	.init = init_debug_guardpage,
};

static int __init debug_guardpage_minorder_setup(char *buf)
{
	unsigned long res;

	if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
		pr_err("Bad debug_guardpage_minorder value\n");
		return 0;
	}
	_debug_guardpage_minorder = res;
	pr_info("Setting debug_guardpage_minorder to %lu\n", res);
	return 0;
}
early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);

static inline bool set_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype)
{
	struct page_ext *page_ext;

	if (!debug_guardpage_enabled())
		return false;

	if (order >= debug_guardpage_minorder())
		return false;

	page_ext = lookup_page_ext(page);
	if (unlikely(!page_ext))
		return false;

	__set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);

	INIT_LIST_HEAD(&page->lru);
	set_page_private(page, order);
	/* Guard pages are not available for any usage */
	__mod_zone_freepage_state(zone, -(1 << order), migratetype);

	return true;
}

static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype)
{
	struct page_ext *page_ext;

	if (!debug_guardpage_enabled())
		return;

	page_ext = lookup_page_ext(page);
	if (unlikely(!page_ext))
		return;

	__clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);

	set_page_private(page, 0);
	if (!is_migrate_isolate(migratetype))
		__mod_zone_freepage_state(zone, (1 << order), migratetype);
}
#else
struct page_ext_operations debug_guardpage_ops;
static inline bool set_page_guard(struct zone *zone, struct page *page,
			unsigned int order, int migratetype) { return false; }
static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype) {}
#endif

static inline void set_page_order(struct page *page, unsigned int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}

/*
 * This function checks whether a page is free && is the buddy
 * of our page - we can coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set ->_mapcount
 * to PAGE_BUDDY_MAPCOUNT_VALUE.
 * Setting, clearing, and testing _mapcount against PAGE_BUDDY_MAPCOUNT_VALUE
 * is serialized by zone->lock.
 *
 * For recording a page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
							unsigned int order)
{
	if (!pfn_valid_within(page_to_pfn(buddy)))
		return 0;

	if (page_is_guard(buddy) && page_order(buddy) == order) {
		if (page_zone_id(page) != page_zone_id(buddy))
			return 0;

		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

		return 1;
	}

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		/*
		 * zone check is done late to avoid uselessly
		 * calculating zone/node ids for pages that could
		 * never merge.
		 */
		if (page_zone_id(page) != page_zone_id(buddy))
			return 0;

		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

		return 1;
	}
	return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain a direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of contiguous
 * free pages of length (1 << order) and marked with _mapcount
 * PAGE_BUDDY_MAPCOUNT_VALUE. A page's order is recorded in the
 * page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other. That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- nyc
 */

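/*
 * Illustrative example of the index arithmetic used below (assuming
 * __find_buddy_index() computes page_idx ^ (1 << order)): freeing the
 * order-1 block at page_idx 8 gives buddy_idx 8 ^ 2 = 10; if that buddy is
 * also free, the two merge into the order-2 block starting at
 * combined_idx = 10 & 8 = 8.
 */
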
static inline void __free_one_page(struct page *page,
		unsigned long pfn,
		struct zone *zone, unsigned int order,
		int migratetype)
{
	unsigned long page_idx;
	unsigned long combined_idx;
	unsigned long uninitialized_var(buddy_idx);
	struct page *buddy;
	unsigned int max_order;

	max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);

	VM_BUG_ON(!zone_is_initialized(zone));
	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);

	VM_BUG_ON(migratetype == -1);
	if (likely(!is_migrate_isolate(migratetype)))
		__mod_zone_freepage_state(zone, 1 << order, migratetype);

	page_idx = pfn & ((1 << MAX_ORDER) - 1);

	VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
	VM_BUG_ON_PAGE(bad_range(zone, page), page);

continue_merging:
	while (order < max_order - 1) {
		buddy_idx = __find_buddy_index(page_idx, order);
		buddy = page + (buddy_idx - page_idx);
		if (!page_is_buddy(page, buddy, order))
			goto done_merging;
		/*
		 * Our buddy is free or it is a CONFIG_DEBUG_PAGEALLOC guard
		 * page; merge with it and move up one order.
		 */
		if (page_is_guard(buddy)) {
			clear_page_guard(zone, buddy, order, migratetype);
		} else {
			list_del(&buddy->lru);
			zone->free_area[order].nr_free--;
			rmv_page_order(buddy);
		}
		combined_idx = buddy_idx & page_idx;
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;
		order++;
	}
	if (max_order < MAX_ORDER) {
		/* If we are here, it means order is >= pageblock_order.
		 * We want to prevent merging between free pages on an
		 * isolated pageblock and a normal pageblock. Without this,
		 * pageblock isolation could cause incorrect freepage or CMA
		 * accounting.
		 *
		 * We don't want to hit this code for the more frequent
		 * low-order merging.
		 */
		if (unlikely(has_isolate_pageblock(zone))) {
			int buddy_mt;

			buddy_idx = __find_buddy_index(page_idx, order);
			buddy = page + (buddy_idx - page_idx);
			buddy_mt = get_pageblock_migratetype(buddy);

			if (migratetype != buddy_mt
					&& (is_migrate_isolate(migratetype) ||
						is_migrate_isolate(buddy_mt)))
				goto done_merging;
		}
		max_order++;
		goto continue_merging;
	}

done_merging:
	set_page_order(page, order);

	/*
	 * If this is not the largest possible page, check if the buddy
	 * of the next-highest order is free. If it is, it's possible
	 * that pages are being freed that will coalesce soon. In case
	 * that is happening, add the free page to the tail of the list
	 * so it's less likely to be used soon and more likely to be merged
	 * as a higher order page.
	 */
	if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
		struct page *higher_page, *higher_buddy;
		combined_idx = buddy_idx & page_idx;
		higher_page = page + (combined_idx - page_idx);
		buddy_idx = __find_buddy_index(combined_idx, order + 1);
		higher_buddy = higher_page + (buddy_idx - combined_idx);
		if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
			list_add_tail(&page->lru,
				&zone->free_area[order].free_list[migratetype]);
			goto out;
		}
	}

	list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
out:
	zone->free_area[order].nr_free++;
}

/*
 * A bad page could be due to a number of fields. Instead of multiple branches,
 * try and check multiple fields with one check. The caller must do a detailed
 * check if necessary.
 */
static inline bool page_expected_state(struct page *page,
					unsigned long check_flags)
{
	if (unlikely(atomic_read(&page->_mapcount) != -1))
		return false;

	if (unlikely((unsigned long)page->mapping |
			page_ref_count(page) |
#ifdef CONFIG_MEMCG
			(unsigned long)page->mem_cgroup |
#endif
			(page->flags & check_flags)))
		return false;

	return true;
}

static void free_pages_check_bad(struct page *page)
{
	const char *bad_reason;
	unsigned long bad_flags;

	bad_reason = NULL;
	bad_flags = 0;

	if (unlikely(atomic_read(&page->_mapcount) != -1))
		bad_reason = "nonzero mapcount";
	if (unlikely(page->mapping != NULL))
		bad_reason = "non-NULL mapping";
	if (unlikely(page_ref_count(page) != 0))
		bad_reason = "nonzero _refcount";
	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
		bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
		bad_flags = PAGE_FLAGS_CHECK_AT_FREE;
	}
#ifdef CONFIG_MEMCG
	if (unlikely(page->mem_cgroup))
		bad_reason = "page still charged to cgroup";
#endif
	bad_page(page, bad_reason, bad_flags);
}

static inline int free_pages_check(struct page *page)
{
	if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
		return 0;

	/* Something has gone sideways, find it */
	free_pages_check_bad(page);
	return 1;
}

static int free_tail_pages_check(struct page *head_page, struct page *page)
{
	int ret = 1;

	/*
	 * We rely on page->lru.next never having bit 0 set, unless the page
	 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
	 */
	BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);

	if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
		ret = 0;
		goto out;
	}
	switch (page - head_page) {
	case 1:
		/* the first tail page: ->mapping is compound_mapcount() */
		if (unlikely(compound_mapcount(page))) {
			bad_page(page, "nonzero compound_mapcount", 0);
			goto out;
		}
		break;
	case 2:
		/*
		 * the second tail page: ->mapping is
		 * page_deferred_list().next -- ignore value.
		 */
		break;
	default:
		if (page->mapping != TAIL_MAPPING) {
			bad_page(page, "corrupted mapping in tail page", 0);
			goto out;
		}
		break;
	}
	if (unlikely(!PageTail(page))) {
		bad_page(page, "PageTail not set", 0);
		goto out;
	}
	if (unlikely(compound_head(page) != head_page)) {
		bad_page(page, "compound_head not consistent", 0);
		goto out;
	}
	ret = 0;
out:
	page->mapping = NULL;
	clear_compound_head(page);
	return ret;
}

static __always_inline bool free_pages_prepare(struct page *page,
					unsigned int order, bool check_free)
{
	int bad = 0;

	VM_BUG_ON_PAGE(PageTail(page), page);

	trace_mm_page_free(page, order);
	kmemcheck_free_shadow(page, order);

	/*
	 * Check tail pages before head page information is cleared to
	 * avoid checking PageCompound for order-0 pages.
	 */
	if (unlikely(order)) {
		bool compound = PageCompound(page);
		int i;

		VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);

		if (compound)
			ClearPageDoubleMap(page);
		for (i = 1; i < (1 << order); i++) {
			if (compound)
				bad += free_tail_pages_check(page, page + i);
			if (unlikely(free_pages_check(page + i))) {
				bad++;
				continue;
			}
			(page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
		}
	}
	if (PageMappingFlags(page))
		page->mapping = NULL;
	if (memcg_kmem_enabled() && PageKmemcg(page))
		memcg_kmem_uncharge(page, order);
	if (check_free)
		bad += free_pages_check(page);
	if (bad)
		return false;

	page_cpupid_reset_last(page);
	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	reset_page_owner(page, order);

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),
					   PAGE_SIZE << order);
		debug_check_no_obj_freed(page_address(page),
					   PAGE_SIZE << order);
	}
	arch_free_page(page, order);
	kernel_poison_pages(page, 1 << order, 0);
	kernel_map_pages(page, 1 << order, 0);
	kasan_free_pages(page, order);

	return true;
}

#ifdef CONFIG_DEBUG_VM
static inline bool free_pcp_prepare(struct page *page)
{
	return free_pages_prepare(page, 0, true);
}

static inline bool bulkfree_pcp_prepare(struct page *page)
{
	return false;
}
#else
static bool free_pcp_prepare(struct page *page)
{
	return free_pages_prepare(page, 0, false);
}

static bool bulkfree_pcp_prepare(struct page *page)
{
	return free_pages_check(page);
}
#endif /* CONFIG_DEBUG_VM */

/*
 * Frees a number of pages from the PCP lists
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
					struct per_cpu_pages *pcp)
{
	int migratetype = 0;
	int batch_free = 0;
	unsigned long nr_scanned;
	bool isolated_pageblocks;

	spin_lock(&zone->lock);
	isolated_pageblocks = has_isolate_pageblock(zone);
	nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
	if (nr_scanned)
		__mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);

	while (count) {
		struct page *page;
		struct list_head *list;

		/*
		 * Remove pages from lists in a round-robin fashion. A
		 * batch_free count is maintained that is incremented when an
		 * empty list is encountered. This is so more pages are freed
		 * off fuller lists instead of spinning excessively around empty
		 * lists
		 */
		do {
			batch_free++;
			if (++migratetype == MIGRATE_PCPTYPES)
				migratetype = 0;
			list = &pcp->lists[migratetype];
		} while (list_empty(list));

		/* This is the only non-empty list. Free them all. */
		if (batch_free == MIGRATE_PCPTYPES)
			batch_free = count;

		do {
			int mt;	/* migratetype of the to-be-freed page */

			page = list_last_entry(list, struct page, lru);
			/* must delete as __free_one_page list manipulates */
			list_del(&page->lru);

			mt = get_pcppage_migratetype(page);
			/* MIGRATE_ISOLATE page should not go to pcplists */
			VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
			/* Pageblock could have been isolated meanwhile */
			if (unlikely(isolated_pageblocks))
				mt = get_pageblock_migratetype(page);

			if (bulkfree_pcp_prepare(page))
				continue;

			__free_one_page(page, page_to_pfn(page), zone, 0, mt);
			trace_mm_page_pcpu_drain(page, 0, mt);
		} while (--count && --batch_free && !list_empty(list));
	}
	spin_unlock(&zone->lock);
}

static void free_one_page(struct zone *zone,
				struct page *page, unsigned long pfn,
				unsigned int order,
				int migratetype)
{
	unsigned long nr_scanned;
	spin_lock(&zone->lock);
	nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
	if (nr_scanned)
		__mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);

	if (unlikely(has_isolate_pageblock(zone) ||
		is_migrate_isolate(migratetype))) {
		migratetype = get_pfnblock_migratetype(page, pfn);
	}
	__free_one_page(page, pfn, zone, order, migratetype);
	spin_unlock(&zone->lock);
}

static void __meminit __init_single_page(struct page *page, unsigned long pfn,
				unsigned long zone, int nid)
{
	set_page_links(page, zone, nid, pfn);
	init_page_count(page);
	page_mapcount_reset(page);
	page_cpupid_reset_last(page);

	INIT_LIST_HEAD(&page->lru);
#ifdef WANT_PAGE_VIRTUAL
	/* The shift won't overflow because ZONE_NORMAL is below 4G. */
	if (!is_highmem_idx(zone))
		set_page_address(page, __va(pfn << PAGE_SHIFT));
#endif
}

static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
					int nid)
{
	return __init_single_page(pfn_to_page(pfn), pfn, zone, nid);
}

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static void init_reserved_page(unsigned long pfn)
{
	pg_data_t *pgdat;
	int nid, zid;

	if (!early_page_uninitialised(pfn))
		return;

	nid = early_pfn_to_nid(pfn);
	pgdat = NODE_DATA(nid);

	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		struct zone *zone = &pgdat->node_zones[zid];

		if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
			break;
	}
	__init_single_pfn(pfn, zid, nid);
}
#else
static inline void init_reserved_page(unsigned long pfn)
{
}
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/*
 * Initialised pages do not have PageReserved set. This function is
 * called for each range allocated by the bootmem allocator and
 * marks the pages PageReserved. The remaining valid pages are later
 * sent to the buddy page allocator.
 */
void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long end_pfn = PFN_UP(end);

	for (; start_pfn < end_pfn; start_pfn++) {
		if (pfn_valid(start_pfn)) {
			struct page *page = pfn_to_page(start_pfn);

			init_reserved_page(start_pfn);

			/* Avoid false-positive PageTail() */
			INIT_LIST_HEAD(&page->lru);

			SetPageReserved(page);
		}
	}
}
1247
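/*
 * Free a page of the given order straight to the buddy allocator,
 * bypassing the per-cpu lists.
 */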
KOSAKI Motohiroec95f532010-05-24 14:32:38 -07001248static void __free_pages_ok(struct page *page, unsigned int order)
1249{
1250 unsigned long flags;
Minchan Kim95e34412012-10-08 16:32:11 -07001251 int migratetype;
Mel Gormandc4b0ca2014-06-04 16:10:17 -07001252 unsigned long pfn = page_to_pfn(page);
KOSAKI Motohiroec95f532010-05-24 14:32:38 -07001253
Mel Gormane2769db2016-05-19 17:14:38 -07001254 if (!free_pages_prepare(page, order, true))
KOSAKI Motohiroec95f532010-05-24 14:32:38 -07001255 return;
1256
Mel Gormancfc47a22014-06-04 16:10:19 -07001257 migratetype = get_pfnblock_migratetype(page, pfn);
Nick Pigginc54ad302006-01-06 00:10:56 -08001258 local_irq_save(flags);
Christoph Lameterf8891e52006-06-30 01:55:45 -07001259 __count_vm_events(PGFREE, 1 << order);
Mel Gormandc4b0ca2014-06-04 16:10:17 -07001260 free_one_page(page_zone(page), page, pfn, order, migratetype);
Nick Pigginc54ad302006-01-06 00:10:56 -08001261 local_irq_restore(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001262}
1263
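/*
 * Release a block of pages to the buddy allocator during boot: clear
 * PageReserved, zero the reference counts and account the pages as managed.
 */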
Laura Abbott7d0717562013-06-28 12:52:17 -07001264static void __free_pages_boot_core(struct page *page, unsigned int order)
David Howellsa226f6c2006-01-06 00:11:08 -08001265{
Johannes Weinerc3993072012-01-10 15:08:10 -08001266 unsigned int nr_pages = 1 << order;
Yinghai Lue2d0bd22013-09-11 14:20:37 -07001267 struct page *p = page;
Johannes Weinerc3993072012-01-10 15:08:10 -08001268 unsigned int loop;
David Howellsa226f6c2006-01-06 00:11:08 -08001269
Yinghai Lue2d0bd22013-09-11 14:20:37 -07001270 prefetchw(p);
1271 for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
1272 prefetchw(p + 1);
Johannes Weinerc3993072012-01-10 15:08:10 -08001273 __ClearPageReserved(p);
1274 set_page_count(p, 0);
David Howellsa226f6c2006-01-06 00:11:08 -08001275 }
Yinghai Lue2d0bd22013-09-11 14:20:37 -07001276 __ClearPageReserved(p);
1277 set_page_count(p, 0);
Johannes Weinerc3993072012-01-10 15:08:10 -08001278
Yinghai Lue2d0bd22013-09-11 14:20:37 -07001279 page_zone(page)->managed_pages += nr_pages;
Johannes Weinerc3993072012-01-10 15:08:10 -08001280 set_page_refcounted(page);
1281 __free_pages(page, order);
David Howellsa226f6c2006-01-06 00:11:08 -08001282}
1283
Mel Gorman75a592a2015-06-30 14:56:59 -07001284#if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \
1285 defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
Mel Gorman7ace9912015-08-06 15:46:13 -07001286
Mel Gorman75a592a2015-06-30 14:56:59 -07001287static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
1288
1289int __meminit early_pfn_to_nid(unsigned long pfn)
1290{
Mel Gorman7ace9912015-08-06 15:46:13 -07001291 static DEFINE_SPINLOCK(early_pfn_lock);
Mel Gorman75a592a2015-06-30 14:56:59 -07001292 int nid;
1293
Mel Gorman7ace9912015-08-06 15:46:13 -07001294 spin_lock(&early_pfn_lock);
Mel Gorman75a592a2015-06-30 14:56:59 -07001295 nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
Mel Gorman7ace9912015-08-06 15:46:13 -07001296 if (nid < 0)
Mel Gormane4568d32016-07-14 12:07:20 -07001297 nid = first_online_node;
Mel Gorman7ace9912015-08-06 15:46:13 -07001298 spin_unlock(&early_pfn_lock);
1299
1300 return nid;
Mel Gorman75a592a2015-06-30 14:56:59 -07001301}
1302#endif
1303
1304#ifdef CONFIG_NODES_SPAN_OTHER_NODES
1305static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node,
1306 struct mminit_pfnnid_cache *state)
1307{
1308 int nid;
1309
1310 nid = __early_pfn_to_nid(pfn, state);
1311 if (nid >= 0 && nid != node)
1312 return false;
1313 return true;
1314}
1315
1316/* Only safe to use early in boot when initialisation is single-threaded */
1317static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
1318{
1319 return meminit_pfn_in_nid(pfn, node, &early_pfnnid_cache);
1320}
1321
1322#else
1323
1324static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
1325{
1326 return true;
1327}
1328static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node,
1329 struct mminit_pfnnid_cache *state)
1330{
1331 return true;
1332}
1333#endif
1334
1335
Laura Abbott7d0717562013-06-28 12:52:17 -07001336void __free_pages_bootmem(struct page *page, unsigned long pfn,
Mel Gorman3a80a7f2015-06-30 14:57:02 -07001337 unsigned int order)
1338{
1339 if (early_page_uninitialised(pfn))
1340 return;
Li Zhang949698a2016-05-19 17:11:37 -07001341 return __free_pages_boot_core(page, order);
Mel Gorman3a80a7f2015-06-30 14:57:02 -07001342}
1343
Joonsoo Kim7cf91a92016-03-15 14:57:51 -07001344/*
1345 * Check that the whole (or subset of) a pageblock given by the interval of
1346 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
1347 * with the migration of free compaction scanner. The scanners then need to
1348 * use only pfn_valid_within() check for arches that allow holes within
1349 * pageblocks.
1350 *
1351 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
1352 *
1353 * It's possible on some configurations to have a setup like node0 node1 node0
1354 * i.e. it's possible that all pages within a zone's range of pages do not
1355 * belong to a single zone. We assume that a border between node0 and node1
1356 * can occur within a single pageblock, but not a node0 node1 node0
1357 * interleaving within a single pageblock. It is therefore sufficient to check
1358 * the first and last page of a pageblock and avoid checking each individual
1359 * page in a pageblock.
1360 */
1361struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
1362 unsigned long end_pfn, struct zone *zone)
1363{
1364 struct page *start_page;
1365 struct page *end_page;
1366
1367 /* end_pfn is one past the range we are checking */
1368 end_pfn--;
1369
1370 if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
1371 return NULL;
1372
1373 start_page = pfn_to_page(start_pfn);
1374
1375 if (page_zone(start_page) != zone)
1376 return NULL;
1377
1378 end_page = pfn_to_page(end_pfn);
1379
1380 /* This gives a shorter code than deriving page_zone(end_page) */
1381 if (page_zone_id(start_page) != page_zone_id(end_page))
1382 return NULL;
1383
1384 return start_page;
1385}
1386
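/*
 * Walk the zone pageblock by pageblock and mark it contiguous only if every
 * pageblock passes __pageblock_pfn_to_page(), i.e. the zone has no holes.
 */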
1387void set_zone_contiguous(struct zone *zone)
1388{
1389 unsigned long block_start_pfn = zone->zone_start_pfn;
1390 unsigned long block_end_pfn;
1391
1392 block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages);
1393 for (; block_start_pfn < zone_end_pfn(zone);
1394 block_start_pfn = block_end_pfn,
1395 block_end_pfn += pageblock_nr_pages) {
1396
1397 block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));
1398
1399 if (!__pageblock_pfn_to_page(block_start_pfn,
1400 block_end_pfn, zone))
1401 return;
1402 }
1403
1404 /* We confirm that there is no hole */
1405 zone->contiguous = true;
1406}
1407
1408void clear_zone_contiguous(struct zone *zone)
1409{
1410 zone->contiguous = false;
1411}
1412
Mel Gorman7e18adb2015-06-30 14:57:05 -07001413#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
Mel Gorman0e1cc952015-06-30 14:57:27 -07001414static void __init deferred_free_range(struct page *page,
Mel Gormana4de83d2015-06-30 14:57:16 -07001415 unsigned long pfn, int nr_pages)
1416{
1417 int i;
1418
1419 if (!page)
1420 return;
1421
1422 /* Free a large naturally-aligned chunk if possible */
Xishi Qiue7801492016-10-07 16:58:09 -07001423 if (nr_pages == pageblock_nr_pages &&
1424 (pfn & (pageblock_nr_pages - 1)) == 0) {
Mel Gormanac5d2532015-06-30 14:57:20 -07001425 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
Xishi Qiue7801492016-10-07 16:58:09 -07001426 __free_pages_boot_core(page, pageblock_order);
Mel Gormana4de83d2015-06-30 14:57:16 -07001427 return;
1428 }
1429
Xishi Qiue7801492016-10-07 16:58:09 -07001430 for (i = 0; i < nr_pages; i++, page++, pfn++) {
1431 if ((pfn & (pageblock_nr_pages - 1)) == 0)
1432 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
Li Zhang949698a2016-05-19 17:11:37 -07001433 __free_pages_boot_core(page, 0);
Xishi Qiue7801492016-10-07 16:58:09 -07001434 }
Mel Gormana4de83d2015-06-30 14:57:16 -07001435}
1436
Nicolai Stanged3cd1312015-08-06 15:46:16 -07001437/* Completion tracking for deferred_init_memmap() threads */
1438static atomic_t pgdat_init_n_undone __initdata;
1439static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
1440
1441static inline void __init pgdat_init_report_one_done(void)
1442{
1443 if (atomic_dec_and_test(&pgdat_init_n_undone))
1444 complete(&pgdat_init_all_done_comp);
1445}
Mel Gorman0e1cc952015-06-30 14:57:27 -07001446
Mel Gorman7e18adb2015-06-30 14:57:05 -07001447/* Initialise remaining memory on a node */
Mel Gorman0e1cc952015-06-30 14:57:27 -07001448static int __init deferred_init_memmap(void *data)
Mel Gorman7e18adb2015-06-30 14:57:05 -07001449{
Mel Gorman0e1cc952015-06-30 14:57:27 -07001450 pg_data_t *pgdat = data;
1451 int nid = pgdat->node_id;
Mel Gorman7e18adb2015-06-30 14:57:05 -07001452 struct mminit_pfnnid_cache nid_init_state = { };
1453 unsigned long start = jiffies;
1454 unsigned long nr_pages = 0;
1455 unsigned long walk_start, walk_end;
1456 int i, zid;
1457 struct zone *zone;
Mel Gorman7e18adb2015-06-30 14:57:05 -07001458 unsigned long first_init_pfn = pgdat->first_deferred_pfn;
Mel Gorman0e1cc952015-06-30 14:57:27 -07001459 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
Mel Gorman7e18adb2015-06-30 14:57:05 -07001460
Mel Gorman0e1cc952015-06-30 14:57:27 -07001461 if (first_init_pfn == ULONG_MAX) {
Nicolai Stanged3cd1312015-08-06 15:46:16 -07001462 pgdat_init_report_one_done();
Mel Gorman0e1cc952015-06-30 14:57:27 -07001463 return 0;
1464 }
1465
1466 /* Bind memory initialisation thread to a local node if possible */
1467 if (!cpumask_empty(cpumask))
1468 set_cpus_allowed_ptr(current, cpumask);
Mel Gorman7e18adb2015-06-30 14:57:05 -07001469
1470 /* Sanity check boundaries */
1471 BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
1472 BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
1473 pgdat->first_deferred_pfn = ULONG_MAX;
1474
1475 /* Only the highest zone is deferred so find it */
1476 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1477 zone = pgdat->node_zones + zid;
1478 if (first_init_pfn < zone_end_pfn(zone))
1479 break;
1480 }
1481
1482 for_each_mem_pfn_range(i, nid, &walk_start, &walk_end, NULL) {
1483 unsigned long pfn, end_pfn;
Mel Gorman54608c32015-06-30 14:57:09 -07001484 struct page *page = NULL;
Mel Gormana4de83d2015-06-30 14:57:16 -07001485 struct page *free_base_page = NULL;
1486 unsigned long free_base_pfn = 0;
1487 int nr_to_free = 0;
Mel Gorman7e18adb2015-06-30 14:57:05 -07001488
1489 end_pfn = min(walk_end, zone_end_pfn(zone));
1490 pfn = first_init_pfn;
1491 if (pfn < walk_start)
1492 pfn = walk_start;
1493 if (pfn < zone->zone_start_pfn)
1494 pfn = zone->zone_start_pfn;
1495
1496 for (; pfn < end_pfn; pfn++) {
Mel Gorman54608c32015-06-30 14:57:09 -07001497 if (!pfn_valid_within(pfn))
Mel Gormana4de83d2015-06-30 14:57:16 -07001498 goto free_range;
Mel Gorman7e18adb2015-06-30 14:57:05 -07001499
Mel Gorman54608c32015-06-30 14:57:09 -07001500 /*
1501 * Ensure pfn_valid is checked every
Xishi Qiue7801492016-10-07 16:58:09 -07001502 * pageblock_nr_pages for memory holes
Mel Gorman54608c32015-06-30 14:57:09 -07001503 */
Xishi Qiue7801492016-10-07 16:58:09 -07001504 if ((pfn & (pageblock_nr_pages - 1)) == 0) {
Mel Gorman54608c32015-06-30 14:57:09 -07001505 if (!pfn_valid(pfn)) {
1506 page = NULL;
Mel Gormana4de83d2015-06-30 14:57:16 -07001507 goto free_range;
Mel Gorman54608c32015-06-30 14:57:09 -07001508 }
1509 }
1510
1511 if (!meminit_pfn_in_nid(pfn, nid, &nid_init_state)) {
1512 page = NULL;
Mel Gormana4de83d2015-06-30 14:57:16 -07001513 goto free_range;
Mel Gorman54608c32015-06-30 14:57:09 -07001514 }
1515
1516 /* Minimise pfn page lookups and scheduler checks */
Xishi Qiue7801492016-10-07 16:58:09 -07001517 if (page && (pfn & (pageblock_nr_pages - 1)) != 0) {
Mel Gorman54608c32015-06-30 14:57:09 -07001518 page++;
1519 } else {
Mel Gormana4de83d2015-06-30 14:57:16 -07001520 nr_pages += nr_to_free;
1521 deferred_free_range(free_base_page,
1522 free_base_pfn, nr_to_free);
1523 free_base_page = NULL;
1524 free_base_pfn = nr_to_free = 0;
1525
Mel Gorman54608c32015-06-30 14:57:09 -07001526 page = pfn_to_page(pfn);
1527 cond_resched();
1528 }
Mel Gorman7e18adb2015-06-30 14:57:05 -07001529
1530 if (page->flags) {
1531 VM_BUG_ON(page_zone(page) != zone);
Mel Gormana4de83d2015-06-30 14:57:16 -07001532 goto free_range;
Mel Gorman7e18adb2015-06-30 14:57:05 -07001533 }
1534
1535 __init_single_page(page, pfn, zid, nid);
Mel Gormana4de83d2015-06-30 14:57:16 -07001536 if (!free_base_page) {
1537 free_base_page = page;
1538 free_base_pfn = pfn;
1539 nr_to_free = 0;
1540 }
1541 nr_to_free++;
1542
1543 /* Where possible, batch up pages for a single free */
1544 continue;
1545free_range:
1546 /* Free the current block of pages to allocator */
1547 nr_pages += nr_to_free;
1548 deferred_free_range(free_base_page, free_base_pfn,
1549 nr_to_free);
1550 free_base_page = NULL;
1551 free_base_pfn = nr_to_free = 0;
Mel Gorman7e18adb2015-06-30 14:57:05 -07001552 }
Xishi Qiue7801492016-10-07 16:58:09 -07001553 /* Free the last block of pages to allocator */
1554 nr_pages += nr_to_free;
1555 deferred_free_range(free_base_page, free_base_pfn, nr_to_free);
Mel Gormana4de83d2015-06-30 14:57:16 -07001556
Mel Gorman7e18adb2015-06-30 14:57:05 -07001557 first_init_pfn = max(end_pfn, first_init_pfn);
1558 }
1559
1560 /* Sanity check that the next zone really is unpopulated */
1561 WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
1562
Mel Gorman0e1cc952015-06-30 14:57:27 -07001563 pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages,
Mel Gorman7e18adb2015-06-30 14:57:05 -07001564 jiffies_to_msecs(jiffies - start));
Nicolai Stanged3cd1312015-08-06 15:46:16 -07001565
1566 pgdat_init_report_one_done();
Mel Gorman0e1cc952015-06-30 14:57:27 -07001567 return 0;
1568}
Joonsoo Kim7cf91a92016-03-15 14:57:51 -07001569#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
Mel Gorman0e1cc952015-06-30 14:57:27 -07001570
1571void __init page_alloc_init_late(void)
1572{
Joonsoo Kim7cf91a92016-03-15 14:57:51 -07001573 struct zone *zone;
1574
1575#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
Mel Gorman0e1cc952015-06-30 14:57:27 -07001576 int nid;
1577
Nicolai Stanged3cd1312015-08-06 15:46:16 -07001578 /* There will be num_node_state(N_MEMORY) threads */
1579 atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
Mel Gorman0e1cc952015-06-30 14:57:27 -07001580 for_each_node_state(nid, N_MEMORY) {
Mel Gorman0e1cc952015-06-30 14:57:27 -07001581 kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
1582 }
1583
1584 /* Block until all are initialised */
Nicolai Stanged3cd1312015-08-06 15:46:16 -07001585 wait_for_completion(&pgdat_init_all_done_comp);
Mel Gorman4248b0d2015-08-06 15:46:20 -07001586
1587 /* Reinit limits that are based on free pages after the kernel is up */
1588 files_maxfiles_init();
Joonsoo Kim7cf91a92016-03-15 14:57:51 -07001589#endif
1590
1591 for_each_populated_zone(zone)
1592 set_zone_contiguous(zone);
Mel Gorman7e18adb2015-06-30 14:57:05 -07001593}
Mel Gorman7e18adb2015-06-30 14:57:05 -07001594
Michal Nazarewicz47118af2011-12-29 13:09:50 +01001595#ifdef CONFIG_CMA
Li Zhong9cf510a2013-08-23 13:52:52 +08001596/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
Michal Nazarewicz47118af2011-12-29 13:09:50 +01001597void __init init_cma_reserved_pageblock(struct page *page)
1598{
1599 unsigned i = pageblock_nr_pages;
1600 struct page *p = page;
1601
1602 do {
1603 __ClearPageReserved(p);
1604 set_page_count(p, 0);
1605 } while (++p, --i);
1606
Michal Nazarewicz47118af2011-12-29 13:09:50 +01001607 set_pageblock_migratetype(page, MIGRATE_CMA);
Michal Nazarewiczdc783272014-07-02 15:22:35 -07001608
1609 if (pageblock_order >= MAX_ORDER) {
1610 i = pageblock_nr_pages;
1611 p = page;
1612 do {
1613 set_page_refcounted(p);
1614 __free_pages(p, MAX_ORDER - 1);
1615 p += MAX_ORDER_NR_PAGES;
1616 } while (i -= MAX_ORDER_NR_PAGES);
1617 } else {
1618 set_page_refcounted(page);
1619 __free_pages(page, pageblock_order);
1620 }
1621
Jiang Liu3dcc0572013-07-03 15:03:21 -07001622 adjust_managed_page_count(page, pageblock_nr_pages);
Michal Nazarewicz47118af2011-12-29 13:09:50 +01001623}
1624#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001625
1626/*
1627 * The order of subdivision here is critical for the IO subsystem.
1628 * Please do not alter this order without good reasons and regression
1629 * testing. Specifically, as large blocks of memory are subdivided,
1630 * the order in which smaller blocks are delivered depends on the order
1631 * they're subdivided in this function. This is the primary factor
1632 * influencing the order in which pages are delivered to the IO
1633 * subsystem according to empirical testing, and this is also justified
1634 * by considering the behavior of a buddy system containing a single
1635 * large block of memory acted on by a series of small allocations.
1636 * This behavior is a critical factor in sglist merging's success.
1637 *
Nadia Yvette Chambers6d49e352012-12-06 10:39:54 +01001638 * -- nyc
Linus Torvalds1da177e2005-04-16 15:20:36 -07001639 */
Nick Piggin085cc7d2006-01-06 00:11:01 -08001640static inline void expand(struct zone *zone, struct page *page,
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001641 int low, int high, struct free_area *area,
1642 int migratetype)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001643{
1644 unsigned long size = 1 << high;
1645
1646 while (high > low) {
1647 area--;
1648 high--;
1649 size >>= 1;
Sasha Levin309381fea2014-01-23 15:52:54 -08001650 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
Stanislaw Gruszkac0a32fc52012-01-10 15:07:28 -08001651
Joonsoo Kimacbc15a2016-10-07 16:58:15 -07001652 /*
1653		 * Mark as guard pages (or page), which will allow them to
1654		 * merge back to the allocator when the buddy is freed.
1655		 * Corresponding page table entries will not be touched;
1656		 * the pages will stay not present in the virtual address space.
1657 */
1658 if (set_page_guard(zone, &page[size], high, migratetype))
Stanislaw Gruszkac0a32fc52012-01-10 15:07:28 -08001659 continue;
Joonsoo Kimacbc15a2016-10-07 16:58:15 -07001660
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001661 list_add(&page[size].lru, &area->free_list[migratetype]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001662 area->nr_free++;
1663 set_page_order(&page[size], high);
1664 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001665}
1666
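/* Report why a page about to be returned from the allocator is in a bad state. */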
Vlastimil Babka4e611802016-05-19 17:14:41 -07001667static void check_new_page_bad(struct page *page)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001668{
Vlastimil Babka4e611802016-05-19 17:14:41 -07001669 const char *bad_reason = NULL;
1670 unsigned long bad_flags = 0;
Dave Hansenf0b791a2014-01-23 15:52:49 -08001671
Kirill A. Shutemov53f92632016-01-15 16:53:42 -08001672 if (unlikely(atomic_read(&page->_mapcount) != -1))
Dave Hansenf0b791a2014-01-23 15:52:49 -08001673 bad_reason = "nonzero mapcount";
1674 if (unlikely(page->mapping != NULL))
1675 bad_reason = "non-NULL mapping";
Joonsoo Kimfe896d12016-03-17 14:19:26 -07001676 if (unlikely(page_ref_count(page) != 0))
Dave Hansenf0b791a2014-01-23 15:52:49 -08001677 bad_reason = "nonzero _count";
Naoya Horiguchif4c18e62015-08-06 15:47:08 -07001678 if (unlikely(page->flags & __PG_HWPOISON)) {
1679 bad_reason = "HWPoisoned (hardware-corrupted)";
1680 bad_flags = __PG_HWPOISON;
Naoya Horiguchie570f562016-05-20 16:58:50 -07001681 /* Don't complain about hwpoisoned pages */
1682 page_mapcount_reset(page); /* remove PageBuddy */
1683 return;
Naoya Horiguchif4c18e62015-08-06 15:47:08 -07001684 }
Dave Hansenf0b791a2014-01-23 15:52:49 -08001685 if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
1686 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
1687 bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
1688 }
Johannes Weiner9edad6e2014-12-10 15:44:58 -08001689#ifdef CONFIG_MEMCG
1690 if (unlikely(page->mem_cgroup))
1691 bad_reason = "page still charged to cgroup";
1692#endif
Vlastimil Babka4e611802016-05-19 17:14:41 -07001693 bad_page(page, bad_reason, bad_flags);
1694}
1695
1696/*
1697 * This page is about to be returned from the page allocator
1698 */
1699static inline int check_new_page(struct page *page)
1700{
1701 if (likely(page_expected_state(page,
1702 PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
1703 return 0;
1704
1705 check_new_page_bad(page);
1706 return 1;
Wu Fengguang2a7684a2009-09-16 11:50:12 +02001707}
1708
Vinayak Menon92821682017-03-31 11:13:06 +11001709static inline bool free_pages_prezeroed(void)
Laura Abbott1414c7f2016-03-15 14:56:30 -07001710{
1711 return IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
Vinayak Menon92821682017-03-31 11:13:06 +11001712 page_poisoning_enabled();
Laura Abbott1414c7f2016-03-15 14:56:30 -07001713}
1714
Mel Gorman479f8542016-05-19 17:14:35 -07001715#ifdef CONFIG_DEBUG_VM
1716static bool check_pcp_refill(struct page *page)
1717{
1718 return false;
1719}
1720
1721static bool check_new_pcp(struct page *page)
1722{
1723 return check_new_page(page);
1724}
1725#else
1726static bool check_pcp_refill(struct page *page)
1727{
1728 return check_new_page(page);
1729}
1730static bool check_new_pcp(struct page *page)
1731{
1732 return false;
1733}
1734#endif /* CONFIG_DEBUG_VM */
1735
1736static bool check_new_pages(struct page *page, unsigned int order)
1737{
1738 int i;
1739 for (i = 0; i < (1 << order); i++) {
1740 struct page *p = page + i;
1741
1742 if (unlikely(check_new_page(p)))
1743 return true;
1744 }
1745
1746 return false;
1747}
1748
Joonsoo Kim46f24fd2016-07-26 15:23:58 -07001749inline void post_alloc_hook(struct page *page, unsigned int order,
1750 gfp_t gfp_flags)
1751{
1752 set_page_private(page, 0);
1753 set_page_refcounted(page);
1754
1755 arch_alloc_page(page, order);
Se Wang (Patrick) Oh6c576992015-06-25 15:15:06 -07001756 kasan_alloc_pages(page, order);
Joonsoo Kim46f24fd2016-07-26 15:23:58 -07001757 kernel_map_pages(page, 1 << order, 1);
1758 kernel_poison_pages(page, 1 << order, 1);
Joonsoo Kim46f24fd2016-07-26 15:23:58 -07001759 set_page_owner(page, order, gfp_flags);
1760}
1761
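/*
 * Prepare a newly allocated page for the caller: run the allocation hooks,
 * zero it if requested (and not already zeroed by poisoning), build compound
 * pages and record whether the allocation dipped into reserves.
 */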
Mel Gorman479f8542016-05-19 17:14:35 -07001762static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
Mel Gormanc6038442016-05-19 17:13:38 -07001763 unsigned int alloc_flags)
Wu Fengguang2a7684a2009-09-16 11:50:12 +02001764{
1765 int i;
Hugh Dickins689bceb2005-11-21 21:32:20 -08001766
Joonsoo Kim46f24fd2016-07-26 15:23:58 -07001767 post_alloc_hook(page, order, gfp_flags);
Nick Piggin17cf4402006-03-22 00:08:41 -08001768
Vinayak Menon92821682017-03-31 11:13:06 +11001769 if (!free_pages_prezeroed() && (gfp_flags & __GFP_ZERO))
Anisse Astierf4d28972015-06-24 16:56:36 -07001770 for (i = 0; i < (1 << order); i++)
1771 clear_highpage(page + i);
Nick Piggin17cf4402006-03-22 00:08:41 -08001772
1773 if (order && (gfp_flags & __GFP_COMP))
1774 prep_compound_page(page, order);
1775
Vlastimil Babka75379192015-02-11 15:25:38 -08001776 /*
Michal Hocko2f064f32015-08-21 14:11:51 -07001777 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
Vlastimil Babka75379192015-02-11 15:25:38 -08001778 * allocate the page. The expectation is that the caller is taking
1779 * steps that will free more memory. The caller should avoid the page
1780 * being used for !PFMEMALLOC purposes.
1781 */
Michal Hocko2f064f32015-08-21 14:11:51 -07001782 if (alloc_flags & ALLOC_NO_WATERMARKS)
1783 set_page_pfmemalloc(page);
1784 else
1785 clear_page_pfmemalloc(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001786}
1787
Mel Gorman56fd56b2007-10-16 01:25:58 -07001788/*
1789 * Go through the free lists for the given migratetype and remove
1790 * the smallest available page from the freelists
1791 */
Mel Gorman728ec982009-06-16 15:32:04 -07001792static inline
1793struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
Mel Gorman56fd56b2007-10-16 01:25:58 -07001794 int migratetype)
1795{
1796 unsigned int current_order;
Pintu Kumarb8af2942013-09-11 14:20:34 -07001797 struct free_area *area;
Mel Gorman56fd56b2007-10-16 01:25:58 -07001798 struct page *page;
1799
1800 /* Find a page of the appropriate size in the preferred list */
1801 for (current_order = order; current_order < MAX_ORDER; ++current_order) {
1802 area = &(zone->free_area[current_order]);
Geliang Tanga16601c2016-01-14 15:20:30 -08001803 page = list_first_entry_or_null(&area->free_list[migratetype],
Mel Gorman56fd56b2007-10-16 01:25:58 -07001804 struct page, lru);
Geliang Tanga16601c2016-01-14 15:20:30 -08001805 if (!page)
1806 continue;
Mel Gorman56fd56b2007-10-16 01:25:58 -07001807 list_del(&page->lru);
1808 rmv_page_order(page);
1809 area->nr_free--;
Mel Gorman56fd56b2007-10-16 01:25:58 -07001810 expand(zone, page, order, current_order, area, migratetype);
Vlastimil Babkabb14c2c2015-09-08 15:01:25 -07001811 set_pcppage_migratetype(page, migratetype);
Mel Gorman56fd56b2007-10-16 01:25:58 -07001812 return page;
1813 }
1814
1815 return NULL;
1816}
1817
1818
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001819/*
1820 * This array describes the order lists are fallen back to when
1821 * the free lists for the desirable migrate type are depleted
1822 */
Michal Nazarewicz47118af2011-12-29 13:09:50 +01001823static int fallbacks[MIGRATE_TYPES][4] = {
Mel Gorman974a7862015-11-06 16:28:34 -08001824 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
1825 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
1826 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
Joonsoo Kimdc676472015-04-14 15:45:15 -07001827#ifdef CONFIG_CMA
Mel Gorman974a7862015-11-06 16:28:34 -08001828 [MIGRATE_CMA] = { MIGRATE_TYPES }, /* Never used */
Michal Nazarewicz47118af2011-12-29 13:09:50 +01001829#endif
Minchan Kim194159f2013-02-22 16:33:58 -08001830#ifdef CONFIG_MEMORY_ISOLATION
Mel Gorman974a7862015-11-06 16:28:34 -08001831 [MIGRATE_ISOLATE] = { MIGRATE_TYPES }, /* Never used */
Minchan Kim194159f2013-02-22 16:33:58 -08001832#endif
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001833};
1834
Liam Mark06a5f872013-03-27 12:34:51 -07001835int *get_migratetype_fallbacks(int mtype)
1836{
1837 return fallbacks[mtype];
1838}
1839
Joonsoo Kimdc676472015-04-14 15:45:15 -07001840#ifdef CONFIG_CMA
1841static struct page *__rmqueue_cma_fallback(struct zone *zone,
1842 unsigned int order)
1843{
1844 return __rmqueue_smallest(zone, order, MIGRATE_CMA);
1845}
1846#else
1847static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
1848 unsigned int order) { return NULL; }
1849#endif
1850
Mel Gormanc361be52007-10-16 01:25:51 -07001851/*
1852 * Move the free pages in a range to the free lists of the requested type.
Mel Gormand9c23402007-10-16 01:26:01 -07001853 * Note that start_page and end_page are not aligned on a pageblock
Mel Gormanc361be52007-10-16 01:25:51 -07001854 * boundary. If alignment is required, use move_freepages_block()
1855 */
Minchan Kim435b4052012-10-08 16:32:16 -07001856int move_freepages(struct zone *zone,
Adrian Bunkb69a7282008-07-23 21:28:12 -07001857 struct page *start_page, struct page *end_page,
1858 int migratetype)
Mel Gormanc361be52007-10-16 01:25:51 -07001859{
1860 struct page *page;
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08001861 unsigned int order;
Mel Gormand1003132007-10-16 01:26:00 -07001862 int pages_moved = 0;
Mel Gormanc361be52007-10-16 01:25:51 -07001863
1864#ifndef CONFIG_HOLES_IN_ZONE
1865 /*
1866 * page_zone is not safe to call in this context when
1867 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
1868 * anyway as we check zone boundaries in move_freepages_block().
1869 * Remove at a later date when no bug reports exist related to
Mel Gormanac0e5b72007-10-16 01:25:58 -07001870 * grouping pages by mobility
Mel Gormanc361be52007-10-16 01:25:51 -07001871 */
Mel Gorman97ee4ba2014-10-09 15:28:28 -07001872 VM_BUG_ON(page_zone(start_page) != page_zone(end_page));
Mel Gormanc361be52007-10-16 01:25:51 -07001873#endif
1874
1875 for (page = start_page; page <= end_page;) {
Adam Litke344c7902008-09-02 14:35:38 -07001876 /* Make sure we are not inadvertently changing nodes */
Sasha Levin309381fea2014-01-23 15:52:54 -08001877 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
Adam Litke344c7902008-09-02 14:35:38 -07001878
Mel Gormanc361be52007-10-16 01:25:51 -07001879 if (!pfn_valid_within(page_to_pfn(page))) {
1880 page++;
1881 continue;
1882 }
1883
1884 if (!PageBuddy(page)) {
1885 page++;
1886 continue;
1887 }
1888
1889 order = page_order(page);
Kirill A. Shutemov84be48d2011-03-22 16:33:41 -07001890 list_move(&page->lru,
1891 &zone->free_area[order].free_list[migratetype]);
Mel Gormanc361be52007-10-16 01:25:51 -07001892 page += 1 << order;
Mel Gormand1003132007-10-16 01:26:00 -07001893 pages_moved += 1 << order;
Mel Gormanc361be52007-10-16 01:25:51 -07001894 }
1895
Mel Gormand1003132007-10-16 01:26:00 -07001896 return pages_moved;
Mel Gormanc361be52007-10-16 01:25:51 -07001897}
1898
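/*
 * Move all free pages of a pageblock to the requested migratetype's free
 * list, clamping the range so we never cross the zone boundary.
 */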
Minchan Kimee6f5092012-07-31 16:43:50 -07001899int move_freepages_block(struct zone *zone, struct page *page,
Linus Torvalds68e3e922012-06-03 20:05:57 -07001900 int migratetype)
Mel Gormanc361be52007-10-16 01:25:51 -07001901{
1902 unsigned long start_pfn, end_pfn;
1903 struct page *start_page, *end_page;
1904
1905 start_pfn = page_to_pfn(page);
Mel Gormand9c23402007-10-16 01:26:01 -07001906 start_pfn = start_pfn & ~(pageblock_nr_pages-1);
Mel Gormanc361be52007-10-16 01:25:51 -07001907 start_page = pfn_to_page(start_pfn);
Mel Gormand9c23402007-10-16 01:26:01 -07001908 end_page = start_page + pageblock_nr_pages - 1;
1909 end_pfn = start_pfn + pageblock_nr_pages - 1;
Mel Gormanc361be52007-10-16 01:25:51 -07001910
1911 /* Do not cross zone boundaries */
Cody P Schafer108bcc92013-02-22 16:35:23 -08001912 if (!zone_spans_pfn(zone, start_pfn))
Mel Gormanc361be52007-10-16 01:25:51 -07001913 start_page = page;
Cody P Schafer108bcc92013-02-22 16:35:23 -08001914 if (!zone_spans_pfn(zone, end_pfn))
Mel Gormanc361be52007-10-16 01:25:51 -07001915 return 0;
1916
1917 return move_freepages(zone, start_page, end_page, migratetype);
1918}
1919
Mel Gorman2f66a682009-09-21 17:02:31 -07001920static void change_pageblock_range(struct page *pageblock_page,
1921 int start_order, int migratetype)
1922{
1923 int nr_pageblocks = 1 << (start_order - pageblock_order);
1924
1925 while (nr_pageblocks--) {
1926 set_pageblock_migratetype(pageblock_page, migratetype);
1927 pageblock_page += pageblock_nr_pages;
1928 }
1929}
1930
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07001931/*
Vlastimil Babka9c0415e2015-02-11 15:28:21 -08001932 * When we are falling back to another migratetype during allocation, try to
1933 * steal extra free pages from the same pageblocks to satisfy further
1934 * allocations, instead of polluting multiple pageblocks.
1935 *
1936 * If we are stealing a relatively large buddy page, it is likely there will
1937 * be more free pages in the pageblock, so try to steal them all. For
1938 * reclaimable and unmovable allocations, we steal regardless of page size,
1939 * as fragmentation caused by those allocations polluting movable pageblocks
1940 * is worse than movable allocations stealing from unmovable and reclaimable
1941 * pageblocks.
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07001942 */
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07001943static bool can_steal_fallback(unsigned int order, int start_mt)
1944{
1945 /*
1946	 * Keeping this order check is intentional, although the next check
1947	 * uses a more relaxed order test. The reason is that we can actually
1948	 * steal the whole pageblock if this condition is met, whereas the
1949	 * check below does not guarantee it; that check is only a heuristic
1950	 * and could be changed at any time.
1951 */
1952 if (order >= pageblock_order)
1953 return true;
1954
1955 if (order >= pageblock_order / 2 ||
1956 start_mt == MIGRATE_RECLAIMABLE ||
1957 start_mt == MIGRATE_UNMOVABLE ||
1958 page_group_by_mobility_disabled)
1959 return true;
1960
1961 return false;
1962}
1963
1964/*
1965 * This function implements actual steal behaviour. If order is large enough,
1966 * we can steal the whole pageblock. If not, we first move the free pages in
1967 * this pageblock and check whether at least half of the pages were moved.
1968 * If so, we can change the migratetype of the pageblock and permanently
1969 * use its pages as the requested migratetype in the future.
1970 */
1971static void steal_suitable_fallback(struct zone *zone, struct page *page,
1972 int start_type)
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07001973{
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08001974 unsigned int current_order = page_order(page);
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07001975 int pages;
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07001976
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07001977 /* Take ownership for orders >= pageblock_order */
1978 if (current_order >= pageblock_order) {
1979 change_pageblock_range(page, current_order, start_type);
Vlastimil Babka3a1086f2015-02-11 15:28:18 -08001980 return;
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07001981 }
1982
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07001983 pages = move_freepages_block(zone, page, start_type);
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07001984
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07001985 /* Claim the whole block if over half of it is free */
1986 if (pages >= (1 << (pageblock_order-1)) ||
1987 page_group_by_mobility_disabled)
1988 set_pageblock_migratetype(page, start_type);
1989}
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07001990
Joonsoo Kim2149cda2015-04-14 15:45:21 -07001991/*
1992 * Check whether there is a suitable fallback freepage with requested order.
1993 * If only_stealable is true, this function returns fallback_mt only if
1994 * we can steal other freepages all together. This would help to reduce
1995 * fragmentation due to mixed migratetype pages in one pageblock.
1996 */
1997int find_suitable_fallback(struct free_area *area, unsigned int order,
1998 int migratetype, bool only_stealable, bool *can_steal)
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07001999{
2000 int i;
2001 int fallback_mt;
2002
2003 if (area->nr_free == 0)
2004 return -1;
2005
2006 *can_steal = false;
2007 for (i = 0;; i++) {
2008 fallback_mt = fallbacks[migratetype][i];
Mel Gorman974a7862015-11-06 16:28:34 -08002009 if (fallback_mt == MIGRATE_TYPES)
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002010 break;
2011
2012 if (list_empty(&area->free_list[fallback_mt]))
2013 continue;
2014
2015 if (can_steal_fallback(order, migratetype))
2016 *can_steal = true;
2017
Joonsoo Kim2149cda2015-04-14 15:45:21 -07002018 if (!only_stealable)
2019 return fallback_mt;
2020
2021 if (*can_steal)
2022 return fallback_mt;
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07002023 }
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002024
2025 return -1;
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07002026}
2027
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002028/*
2029 * Reserve a pageblock for exclusive use of high-order atomic allocations if
2030 * there are no empty page blocks that contain a page with a suitable order
2031 */
2032static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
2033 unsigned int alloc_order)
2034{
2035 int mt;
2036 unsigned long max_managed, flags;
2037
2038 /*
2039 * Limit the number reserved to 1 pageblock or roughly 1% of a zone.
2040 * Check is race-prone but harmless.
2041 */
2042 max_managed = (zone->managed_pages / 100) + pageblock_nr_pages;
2043 if (zone->nr_reserved_highatomic >= max_managed)
2044 return;
2045
2046 spin_lock_irqsave(&zone->lock, flags);
2047
2048 /* Recheck the nr_reserved_highatomic limit under the lock */
2049 if (zone->nr_reserved_highatomic >= max_managed)
2050 goto out_unlock;
2051
2052 /* Yoink! */
2053 mt = get_pageblock_migratetype(page);
2054 if (mt != MIGRATE_HIGHATOMIC &&
2055 !is_migrate_isolate(mt) && !is_migrate_cma(mt)) {
2056 zone->nr_reserved_highatomic += pageblock_nr_pages;
2057 set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
2058 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC);
2059 }
2060
2061out_unlock:
2062 spin_unlock_irqrestore(&zone->lock, flags);
2063}
2064
2065/*
2066 * Used when an allocation is about to fail under memory pressure. This
2067 * potentially hurts the reliability of high-order allocations when under
2068 * intense memory pressure but failed atomic allocations should be easier
2069 * to recover from than an OOM.
Minchan Kim8ddf5f92016-12-12 16:42:14 -08002070 *
2071 * If @force is true, try to unreserve a pageblock even if that would
2072 * exhaust the highatomic reserve.
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002073 */
Minchan Kim8ddf5f92016-12-12 16:42:14 -08002074static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
2075 bool force)
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002076{
2077 struct zonelist *zonelist = ac->zonelist;
2078 unsigned long flags;
2079 struct zoneref *z;
2080 struct zone *zone;
2081 struct page *page;
2082 int order;
Minchan Kim34bd01b2016-12-12 16:42:11 -08002083 bool ret;
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002084
2085 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
2086 ac->nodemask) {
Minchan Kim8ddf5f92016-12-12 16:42:14 -08002087 /*
2088 * Preserve at least one pageblock unless memory pressure
2089 * is really high.
2090 */
2091 if (!force && zone->nr_reserved_highatomic <=
2092 pageblock_nr_pages)
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002093 continue;
2094
2095 spin_lock_irqsave(&zone->lock, flags);
2096 for (order = 0; order < MAX_ORDER; order++) {
2097 struct free_area *area = &(zone->free_area[order]);
2098
Geliang Tanga16601c2016-01-14 15:20:30 -08002099 page = list_first_entry_or_null(
2100 &area->free_list[MIGRATE_HIGHATOMIC],
2101 struct page, lru);
2102 if (!page)
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002103 continue;
2104
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002105 /*
Minchan Kim870f3452016-12-12 16:42:08 -08002106			 * In the page freeing path, the migratetype change is racy,
2107			 * so we can encounter several free pages in a pageblock
2108			 * in this loop although we changed the pageblock type
2109			 * from highatomic to ac->migratetype. So we should
2110			 * adjust the count only once.
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002111 */
Minchan Kim870f3452016-12-12 16:42:08 -08002112 if (get_pageblock_migratetype(page) ==
2113 MIGRATE_HIGHATOMIC) {
2114 /*
2115 * It should never happen but changes to
2116 * locking could inadvertently allow a per-cpu
2117 * drain to add pages to MIGRATE_HIGHATOMIC
2118 * while unreserving so be safe and watch for
2119 * underflows.
2120 */
2121 zone->nr_reserved_highatomic -= min(
2122 pageblock_nr_pages,
2123 zone->nr_reserved_highatomic);
2124 }
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002125
2126 /*
2127 * Convert to ac->migratetype and avoid the normal
2128 * pageblock stealing heuristics. Minimally, the caller
2129 * is doing the work and needs the pages. More
2130 * importantly, if the block was always converted to
2131 * MIGRATE_UNMOVABLE or another type then the number
2132 * of pageblocks that cannot be completely freed
2133 * may increase.
2134 */
2135 set_pageblock_migratetype(page, ac->migratetype);
Minchan Kim34bd01b2016-12-12 16:42:11 -08002136 ret = move_freepages_block(zone, page, ac->migratetype);
Minchan Kim8ddf5f92016-12-12 16:42:14 -08002137 if (ret) {
2138 spin_unlock_irqrestore(&zone->lock, flags);
2139 return ret;
2140 }
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002141 }
2142 spin_unlock_irqrestore(&zone->lock, flags);
2143 }
Minchan Kim34bd01b2016-12-12 16:42:11 -08002144
2145 return false;
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002146}
2147
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002148/* Remove an element from the buddy allocator from the fallback list */
Mel Gorman0ac3a402009-06-16 15:32:06 -07002149static inline struct page *
Mel Gorman7aeb09f2014-06-04 16:10:21 -07002150__rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002151{
Pintu Kumarb8af2942013-09-11 14:20:34 -07002152 struct free_area *area;
Mel Gorman7aeb09f2014-06-04 16:10:21 -07002153 unsigned int current_order;
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002154 struct page *page;
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002155 int fallback_mt;
2156 bool can_steal;
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002157
2158 /* Find the largest possible block of pages in the other list */
Mel Gorman7aeb09f2014-06-04 16:10:21 -07002159 for (current_order = MAX_ORDER-1;
2160 current_order >= order && current_order <= MAX_ORDER-1;
2161 --current_order) {
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002162 area = &(zone->free_area[current_order]);
2163 fallback_mt = find_suitable_fallback(area, current_order,
Joonsoo Kim2149cda2015-04-14 15:45:21 -07002164 start_migratetype, false, &can_steal);
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002165 if (fallback_mt == -1)
2166 continue;
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002167
Geliang Tanga16601c2016-01-14 15:20:30 -08002168 page = list_first_entry(&area->free_list[fallback_mt],
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002169 struct page, lru);
Minchan Kim38c28bf2016-12-12 16:42:05 -08002170 if (can_steal &&
2171 get_pageblock_migratetype(page) != MIGRATE_HIGHATOMIC)
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002172 steal_suitable_fallback(zone, page, start_migratetype);
Mel Gormane0104872007-10-16 01:25:53 -07002173
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002174 /* Remove the page from the freelists */
2175 area->nr_free--;
2176 list_del(&page->lru);
2177 rmv_page_order(page);
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002178
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002179 expand(zone, page, order, current_order, area,
2180 start_migratetype);
2181 /*
Vlastimil Babkabb14c2c2015-09-08 15:01:25 -07002182 * The pcppage_migratetype may differ from pageblock's
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002183 * migratetype depending on the decisions in
Vlastimil Babkabb14c2c2015-09-08 15:01:25 -07002184 * find_suitable_fallback(). This is OK as long as it does not
2185 * differ for MIGRATE_CMA pageblocks. Those can be used as
2186 * fallback only via special __rmqueue_cma_fallback() function
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002187 */
Vlastimil Babkabb14c2c2015-09-08 15:01:25 -07002188 set_pcppage_migratetype(page, start_migratetype);
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002189
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002190 trace_mm_page_alloc_extfrag(page, order, current_order,
2191 start_migratetype, fallback_mt);
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002192
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002193 return page;
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002194 }
2195
Mel Gorman728ec982009-06-16 15:32:04 -07002196 return NULL;
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002197}
2198
Mel Gorman56fd56b2007-10-16 01:25:58 -07002199/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002200 * Do the hard work of removing an element from the buddy allocator.
2201 * Call me with the zone->lock already held.
2202 */
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002203static struct page *__rmqueue(struct zone *zone, unsigned int order,
Mel Gorman6ac02062016-01-14 15:20:28 -08002204 int migratetype)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002205{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002206 struct page *page;
2207
Mel Gorman56fd56b2007-10-16 01:25:58 -07002208 page = __rmqueue_smallest(zone, order, migratetype);
Mel Gorman974a7862015-11-06 16:28:34 -08002209 if (unlikely(!page)) {
Heesub Shin483242b2013-01-07 11:10:13 +09002210 page = __rmqueue_fallback(zone, order, migratetype);
2211 }
Joonsoo Kimdc676472015-04-14 15:45:15 -07002212
Heesub Shin483242b2013-01-07 11:10:13 +09002213 trace_mm_page_alloc_zone_locked(page, order, migratetype);
2214 return page;
2215}
2216
Liam Mark2c0f71c2014-06-23 14:13:47 -07002217#ifdef CONFIG_CMA
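/*
 * Allocate from the MIGRATE_CMA free lists, unless a CMA allocation is in
 * progress on this zone (zone->cma_alloc).
 */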
2218static struct page *__rmqueue_cma(struct zone *zone, unsigned int order)
Heesub Shin483242b2013-01-07 11:10:13 +09002219{
2220	struct page *page = NULL;
Liam Mark2c0f71c2014-06-23 14:13:47 -07002221 if (IS_ENABLED(CONFIG_CMA))
2222 if (!zone->cma_alloc)
2223 page = __rmqueue_cma_fallback(zone, order);
2224 trace_mm_page_alloc_zone_locked(page, order, MIGRATE_CMA);
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002225 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002226}
Liam Mark2c0f71c2014-06-23 14:13:47 -07002227#else
2228static inline struct page *__rmqueue_cma(struct zone *zone, unsigned int order)
2229{
2230 return NULL;
2231}
2232#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002233
Michal Nazarewicz5f63b722012-01-11 15:16:11 +01002234/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002235 * Obtain a specified number of elements from the buddy allocator, all under
2236 * a single hold of the lock, for efficiency. Add them to the supplied list.
2237 * Returns the number of new pages which were placed at *list.
2238 */
Michal Nazarewicz5f63b722012-01-11 15:16:11 +01002239static int rmqueue_bulk(struct zone *zone, unsigned int order,
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002240 unsigned long count, struct list_head *list,
Liam Mark2c0f71c2014-06-23 14:13:47 -07002241 int migratetype, bool cold)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002242{
Mel Gorman44919a22016-12-12 16:44:41 -08002243 int i, alloced = 0;
Michal Nazarewicz5f63b722012-01-11 15:16:11 +01002244
Nick Pigginc54ad302006-01-06 00:10:56 -08002245 spin_lock(&zone->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002246 for (i = 0; i < count; ++i) {
Heesub Shin483242b2013-01-07 11:10:13 +09002247 struct page *page;
Liam Mark2c0f71c2014-06-23 14:13:47 -07002248
2249 /*
2250		 * If migratetype CMA is being requested, only try to
2251		 * satisfy the request with CMA pages, to try and increase
2252		 * CMA utilization.
2253 */
2254 if (is_migrate_cma(migratetype))
2255 page = __rmqueue_cma(zone, order);
Heesub Shin483242b2013-01-07 11:10:13 +09002256 else
2257 page = __rmqueue(zone, order, migratetype);
Nick Piggin085cc7d2006-01-06 00:11:01 -08002258 if (unlikely(page == NULL))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002259 break;
Mel Gorman81eabcb2007-12-17 16:20:05 -08002260
Mel Gorman479f8542016-05-19 17:14:35 -07002261 if (unlikely(check_pcp_refill(page)))
2262 continue;
2263
Mel Gorman81eabcb2007-12-17 16:20:05 -08002264 /*
2265 * Split buddy pages returned by expand() are received here
2266		 * in physical page order. The page is added to the caller's
2267		 * list and the list head then moves forward. From the caller's
2268		 * perspective, the linked list is ordered by page number under
2269		 * some conditions. This is useful for IO devices that can
2270 * merge IO requests if the physical pages are ordered
2271 * properly.
2272 */
Mel Gormanb745bc82014-06-04 16:10:22 -07002273 if (likely(!cold))
Mel Gormane084b2d2009-07-29 15:02:04 -07002274 list_add(&page->lru, list);
2275 else
2276 list_add_tail(&page->lru, list);
Mel Gorman81eabcb2007-12-17 16:20:05 -08002277 list = &page->lru;
Mel Gorman44919a22016-12-12 16:44:41 -08002278 alloced++;
Vlastimil Babkabb14c2c2015-09-08 15:01:25 -07002279 if (is_migrate_cma(get_pcppage_migratetype(page)))
Bartlomiej Zolnierkiewiczd1ce7492012-10-08 16:32:02 -07002280 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
2281 -(1 << order));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002282 }
Mel Gorman44919a22016-12-12 16:44:41 -08002283
2284 /*
2285	 * i pages were removed from the buddy list even if some leaked due
2286	 * to check_pcp_refill failing, so adjust NR_FREE_PAGES based
2287	 * on i. Do not confuse this with 'alloced', which is the number of
2288	 * pages added to the pcp list.
2289 */
Mel Gormanf2260e62009-06-16 15:32:13 -07002290 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
Nick Pigginc54ad302006-01-06 00:10:56 -08002291 spin_unlock(&zone->lock);
Mel Gorman44919a22016-12-12 16:44:41 -08002292 return alloced;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002293}
2294
Liam Mark2c0f71c2014-06-23 14:13:47 -07002295/*
2296 * Return the pcp list that corresponds to the migrate type if that list isn't
2297 * empty.
2298 * If the list is empty return NULL.
2299 */
2300static struct list_head *get_populated_pcp_list(struct zone *zone,
2301 unsigned int order, struct per_cpu_pages *pcp,
2302 int migratetype, int cold)
2303{
2304 struct list_head *list = &pcp->lists[migratetype];
2305
2306 if (list_empty(list)) {
2307 pcp->count += rmqueue_bulk(zone, order,
2308 pcp->batch, list,
2309 migratetype, cold);
2310
2311 if (list_empty(list))
2312 list = NULL;
2313 }
2314 return list;
2315}
2316
Christoph Lameter4ae7c032005-06-21 17:14:57 -07002317#ifdef CONFIG_NUMA
Christoph Lameter8fce4d82006-03-09 17:33:54 -08002318/*
Christoph Lameter4037d452007-05-09 02:35:14 -07002319 * Called from the vmstat counter updater to drain the pagesets that the
2320 * currently executing processor holds for zones on remote nodes, after
2321 * they have expired.
2322 *
Christoph Lameter879336c2006-03-22 00:09:08 -08002323 * Note that this function must be called with the thread pinned to
2324 * a single processor.
Christoph Lameter8fce4d82006-03-09 17:33:54 -08002325 */
Christoph Lameter4037d452007-05-09 02:35:14 -07002326void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
Christoph Lameter4ae7c032005-06-21 17:14:57 -07002327{
Christoph Lameter4ae7c032005-06-21 17:14:57 -07002328 unsigned long flags;
Michal Nazarewicz7be12fc2014-08-06 16:05:15 -07002329 int to_drain, batch;
Christoph Lameter4ae7c032005-06-21 17:14:57 -07002330
Christoph Lameter4037d452007-05-09 02:35:14 -07002331 local_irq_save(flags);
Jason Low4db0c3c2015-04-15 16:14:08 -07002332 batch = READ_ONCE(pcp->batch);
Michal Nazarewicz7be12fc2014-08-06 16:05:15 -07002333 to_drain = min(pcp->count, batch);
KOSAKI Motohiro2a135152012-07-31 16:42:53 -07002334 if (to_drain > 0) {
2335 free_pcppages_bulk(zone, to_drain, pcp);
2336 pcp->count -= to_drain;
2337 }
Christoph Lameter4037d452007-05-09 02:35:14 -07002338 local_irq_restore(flags);
Christoph Lameter4ae7c032005-06-21 17:14:57 -07002339}
2340#endif
2341
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002342/*
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002343 * Drain pcplists of the indicated processor and zone.
2344 *
2345 * The processor must either be the current processor and the
2346 * thread pinned to the current processor or a processor that
2347 * is not online.
2348 */
2349static void drain_pages_zone(unsigned int cpu, struct zone *zone)
2350{
2351 unsigned long flags;
2352 struct per_cpu_pageset *pset;
2353 struct per_cpu_pages *pcp;
2354
2355 local_irq_save(flags);
2356 pset = per_cpu_ptr(zone->pageset, cpu);
2357
2358 pcp = &pset->pcp;
2359 if (pcp->count) {
2360 free_pcppages_bulk(zone, pcp->count, pcp);
2361 pcp->count = 0;
2362 }
2363 local_irq_restore(flags);
2364}
2365
2366/*
2367 * Drain pcplists of all zones on the indicated processor.
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002368 *
2369 * The processor must either be the current processor and the
2370 * thread pinned to the current processor or a processor that
2371 * is not online.
2372 */
2373static void drain_pages(unsigned int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002374{
2375 struct zone *zone;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002376
KOSAKI Motohiroee99c712009-03-31 15:19:31 -07002377 for_each_populated_zone(zone) {
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002378 drain_pages_zone(cpu, zone);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002379 }
2380}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002381
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002382/*
2383 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002384 *
2385 * The CPU has to be pinned. When zone parameter is non-NULL, spill just
2386 * the single zone's pages.
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002387 */
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002388void drain_local_pages(struct zone *zone)
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002389{
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002390 int cpu = smp_processor_id();
2391
2392 if (zone)
2393 drain_pages_zone(cpu, zone);
2394 else
2395 drain_pages(cpu);
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002396}
2397
2398/*
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002399 * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
2400 *
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002401 * When zone parameter is non-NULL, spill just the single zone's pages.
2402 *
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002403 * Note that this code is protected against sending an IPI to an offline
2404 * CPU but does not guarantee sending an IPI to newly hotplugged CPUs:
2405 * on_each_cpu_mask() blocks hotplug and won't talk to offlined CPUs but
2406 * nothing keeps CPUs from showing up after we populated the cpumask and
2407 * before the call to on_each_cpu_mask().
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002408 */
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002409void drain_all_pages(struct zone *zone)
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002410{
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002411 int cpu;
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002412
2413 /*
2414 * Allocate in the BSS so we wont require allocation in
2415 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
2416 */
2417 static cpumask_t cpus_with_pcps;
2418
2419 /*
2420 * We don't care about racing with CPU hotplug event
2421 * as offline notification will cause the notified
2422 * cpu to drain that CPU pcps and on_each_cpu_mask
2423 * disables preemption as part of its processing
2424 */
2425 for_each_online_cpu(cpu) {
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002426 struct per_cpu_pageset *pcp;
2427 struct zone *z;
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002428 bool has_pcps = false;
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002429
2430 if (zone) {
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002431 pcp = per_cpu_ptr(zone->pageset, cpu);
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002432 if (pcp->pcp.count)
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002433 has_pcps = true;
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002434 } else {
2435 for_each_populated_zone(z) {
2436 pcp = per_cpu_ptr(z->pageset, cpu);
2437 if (pcp->pcp.count) {
2438 has_pcps = true;
2439 break;
2440 }
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002441 }
2442 }
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002443
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002444 if (has_pcps)
2445 cpumask_set_cpu(cpu, &cpus_with_pcps);
2446 else
2447 cpumask_clear_cpu(cpu, &cpus_with_pcps);
2448 }
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002449 on_each_cpu_mask(&cpus_with_pcps, (smp_call_func_t) drain_local_pages,
2450 zone, 1);
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002451}
2452
Rafael J. Wysocki296699d2007-07-29 23:27:18 +02002453#ifdef CONFIG_HIBERNATION
Linus Torvalds1da177e2005-04-16 15:20:36 -07002454
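/*
 * For hibernation: refresh the swsusp free-page bitmap for this zone so that
 * pages currently on the free lists are not saved in the suspend image.
 */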
2455void mark_free_pages(struct zone *zone)
2456{
Rafael J. Wysockif623f0d2006-09-25 23:32:49 -07002457 unsigned long pfn, max_zone_pfn;
2458 unsigned long flags;
Mel Gorman7aeb09f2014-06-04 16:10:21 -07002459 unsigned int order, t;
Geliang Tang86760a22016-01-14 15:20:33 -08002460 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002461
Xishi Qiu8080fc02013-09-11 14:21:45 -07002462 if (zone_is_empty(zone))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002463 return;
2464
2465 spin_lock_irqsave(&zone->lock, flags);
Rafael J. Wysockif623f0d2006-09-25 23:32:49 -07002466
Cody P Schafer108bcc92013-02-22 16:35:23 -08002467 max_zone_pfn = zone_end_pfn(zone);
Rafael J. Wysockif623f0d2006-09-25 23:32:49 -07002468 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
2469 if (pfn_valid(pfn)) {
Geliang Tang86760a22016-01-14 15:20:33 -08002470 page = pfn_to_page(pfn);
Joonsoo Kimba6b0972016-05-19 17:12:16 -07002471
2472 if (page_zone(page) != zone)
2473 continue;
2474
Rafael J. Wysocki7be98232007-05-06 14:50:42 -07002475 if (!swsusp_page_is_forbidden(page))
2476 swsusp_unset_page_free(page);
Rafael J. Wysockif623f0d2006-09-25 23:32:49 -07002477 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002478
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002479 for_each_migratetype_order(order, t) {
Geliang Tang86760a22016-01-14 15:20:33 -08002480 list_for_each_entry(page,
2481 &zone->free_area[order].free_list[t], lru) {
Rafael J. Wysockif623f0d2006-09-25 23:32:49 -07002482 unsigned long i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002483
Geliang Tang86760a22016-01-14 15:20:33 -08002484 pfn = page_to_pfn(page);
Rafael J. Wysockif623f0d2006-09-25 23:32:49 -07002485 for (i = 0; i < (1UL << order); i++)
Rafael J. Wysocki7be98232007-05-06 14:50:42 -07002486 swsusp_set_page_free(pfn_to_page(pfn + i));
Rafael J. Wysockif623f0d2006-09-25 23:32:49 -07002487 }
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002488 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002489 spin_unlock_irqrestore(&zone->lock, flags);
2490}
Mel Gormane2c55dc2007-10-16 01:25:50 -07002491#endif /* CONFIG_HIBERNATION */
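/*
 * Note (assumed caller, not shown in this file): mark_free_pages() is only
 * built for hibernation. It walks the zone's free lists under zone->lock and
 * flags every free page via swsusp_set_page_free(), so the snapshot code can
 * skip those pages when building the hibernation image. A minimal sketch:
 *
 *	struct zone *zone;
 *
 *	for_each_populated_zone(zone)
 *		mark_free_pages(zone);
 */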
Linus Torvalds1da177e2005-04-16 15:20:36 -07002492
2493/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002494 * Free a 0-order page
Mel Gormanb745bc82014-06-04 16:10:22 -07002495 * cold == true ? free a cold page : free a hot page
Linus Torvalds1da177e2005-04-16 15:20:36 -07002496 */
Mel Gormanb745bc82014-06-04 16:10:22 -07002497void free_hot_cold_page(struct page *page, bool cold)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002498{
2499 struct zone *zone = page_zone(page);
2500 struct per_cpu_pages *pcp;
2501 unsigned long flags;
Mel Gormandc4b0ca2014-06-04 16:10:17 -07002502 unsigned long pfn = page_to_pfn(page);
Mel Gorman5f8dcc22009-09-21 17:03:19 -07002503 int migratetype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002504
Mel Gorman4db75482016-05-19 17:14:32 -07002505 if (!free_pcp_prepare(page))
Hugh Dickins689bceb2005-11-21 21:32:20 -08002506 return;
2507
Mel Gormandc4b0ca2014-06-04 16:10:17 -07002508 migratetype = get_pfnblock_migratetype(page, pfn);
Vlastimil Babkabb14c2c2015-09-08 15:01:25 -07002509 set_pcppage_migratetype(page, migratetype);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002510 local_irq_save(flags);
Christoph Lameterf8891e52006-06-30 01:55:45 -07002511 __count_vm_event(PGFREE);
Mel Gormanda456f12009-06-16 15:32:08 -07002512
Mel Gorman5f8dcc22009-09-21 17:03:19 -07002513 /*
2514 * We only track unmovable, reclaimable and movable on pcp lists.
2515 * Free ISOLATE pages back to the allocator because they are being
2516 * offlined but treat RESERVE as movable pages so we can get those
2517 * areas back if necessary. Otherwise, we may have to free
2518	 * excessively into the page allocator.
2519 */
2520 if (migratetype >= MIGRATE_PCPTYPES) {
Minchan Kim194159f2013-02-22 16:33:58 -08002521 if (unlikely(is_migrate_isolate(migratetype))) {
Mel Gormandc4b0ca2014-06-04 16:10:17 -07002522 free_one_page(zone, page, pfn, 0, migratetype);
Mel Gorman5f8dcc22009-09-21 17:03:19 -07002523 goto out;
2524 }
2525 migratetype = MIGRATE_MOVABLE;
2526 }
2527
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09002528 pcp = &this_cpu_ptr(zone->pageset)->pcp;
Mel Gormanb745bc82014-06-04 16:10:22 -07002529 if (!cold)
Mel Gorman5f8dcc22009-09-21 17:03:19 -07002530 list_add(&page->lru, &pcp->lists[migratetype]);
Mel Gormanb745bc82014-06-04 16:10:22 -07002531 else
2532 list_add_tail(&page->lru, &pcp->lists[migratetype]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002533 pcp->count++;
Nick Piggin48db57f2006-01-08 01:00:42 -08002534 if (pcp->count >= pcp->high) {
Jason Low4db0c3c2015-04-15 16:14:08 -07002535 unsigned long batch = READ_ONCE(pcp->batch);
Cody P Schafer998d39c2013-07-03 15:01:32 -07002536 free_pcppages_bulk(zone, batch, pcp);
2537 pcp->count -= batch;
Nick Piggin48db57f2006-01-08 01:00:42 -08002538 }
Mel Gorman5f8dcc22009-09-21 17:03:19 -07002539
2540out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002541 local_irq_restore(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002542}
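/*
 * Usage note (illustrative sketch): this is the order-0 free fast path.
 * Pages go onto the current CPU's pcp list (head for "hot", tail for "cold")
 * and are only handed back to the buddy allocator in batches once pcp->count
 * exceeds pcp->high.
 *
 *	free_hot_cold_page(page, false);	page likely to be reused soon
 *	free_hot_cold_page(page, true);		cold page, e.g. from reclaim
 */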
2543
Nick Piggin8dfcc9b2006-03-22 00:08:05 -08002544/*
Konstantin Khlebnikovcc598502012-01-10 15:07:04 -08002545 * Free a list of 0-order pages
2546 */
Mel Gormanb745bc82014-06-04 16:10:22 -07002547void free_hot_cold_page_list(struct list_head *list, bool cold)
Konstantin Khlebnikovcc598502012-01-10 15:07:04 -08002548{
2549 struct page *page, *next;
2550
2551 list_for_each_entry_safe(page, next, list, lru) {
Konstantin Khlebnikovb413d482012-01-10 15:07:09 -08002552 trace_mm_page_free_batched(page, cold);
Konstantin Khlebnikovcc598502012-01-10 15:07:04 -08002553 free_hot_cold_page(page, cold);
2554 }
2555}
2556
2557/*
Nick Piggin8dfcc9b2006-03-22 00:08:05 -08002558 * split_page takes a non-compound higher-order page, and splits it into
2559 * n (1<<order) sub-pages: page[0..n-1]
2560 * Each sub-page must be freed individually.
2561 *
2562 * Note: this is probably too low level an operation for use in drivers.
2563 * Please consult with lkml before using this in your driver.
2564 */
2565void split_page(struct page *page, unsigned int order)
2566{
2567 int i;
2568
Sasha Levin309381fea2014-01-23 15:52:54 -08002569 VM_BUG_ON_PAGE(PageCompound(page), page);
2570 VM_BUG_ON_PAGE(!page_count(page), page);
Vegard Nossumb1eeab62008-11-25 16:55:53 +01002571
2572#ifdef CONFIG_KMEMCHECK
2573 /*
2574 * Split shadow pages too, because free(page[0]) would
2575 * otherwise free the whole shadow.
2576 */
2577 if (kmemcheck_page_is_tracked(page))
2578 split_page(virt_to_page(page[0].shadow), order);
2579#endif
2580
Joonsoo Kima9627bc2016-07-26 15:23:49 -07002581 for (i = 1; i < (1 << order); i++)
Nick Piggin7835e982006-03-22 00:08:40 -08002582 set_page_refcounted(page + i);
Joonsoo Kima9627bc2016-07-26 15:23:49 -07002583 split_page_owner(page, order);
Nick Piggin8dfcc9b2006-03-22 00:08:05 -08002584}
K. Y. Srinivasan5853ff22013-03-25 15:47:38 -07002585EXPORT_SYMBOL_GPL(split_page);
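/*
 * Illustrative example (assumed caller, not part of this file): a driver
 * that needs to release parts of a higher-order allocation individually
 * could do something like the following; as the comment above says, consult
 * lkml before relying on this.
 *
 *	struct page *p = alloc_pages(GFP_KERNEL, 2);	order-2: four pages
 *
 *	if (p) {
 *		split_page(p, 2);
 *		__free_page(p + 3);	sub-pages can now be freed one by one
 *	}
 */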
Nick Piggin8dfcc9b2006-03-22 00:08:05 -08002586
Joonsoo Kim3c605092014-11-13 15:19:21 -08002587int __isolate_free_page(struct page *page, unsigned int order)
Mel Gorman748446b2010-05-24 14:32:27 -07002588{
Mel Gorman748446b2010-05-24 14:32:27 -07002589 unsigned long watermark;
2590 struct zone *zone;
Bartlomiej Zolnierkiewicz2139cbe2012-10-08 16:32:00 -07002591 int mt;
Mel Gorman748446b2010-05-24 14:32:27 -07002592
2593 BUG_ON(!PageBuddy(page));
2594
2595 zone = page_zone(page);
Marek Szyprowski2e30abd2012-12-11 16:02:57 -08002596 mt = get_pageblock_migratetype(page);
Mel Gorman748446b2010-05-24 14:32:27 -07002597
Minchan Kim194159f2013-02-22 16:33:58 -08002598 if (!is_migrate_isolate(mt)) {
Vlastimil Babka8348faf2016-10-07 16:58:00 -07002599 /*
2600 * Obey watermarks as if the page was being allocated. We can
2601 * emulate a high-order watermark check with a raised order-0
2602 * watermark, because we already know our high-order page
2603 * exists.
2604 */
2605 watermark = min_wmark_pages(zone) + (1UL << order);
Vlastimil Babka984fdba2016-10-07 16:57:57 -07002606 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
Marek Szyprowski2e30abd2012-12-11 16:02:57 -08002607 return 0;
2608
Mel Gorman8fb74b92013-01-11 14:32:16 -08002609 __mod_zone_freepage_state(zone, -(1UL << order), mt);
Marek Szyprowski2e30abd2012-12-11 16:02:57 -08002610 }
Mel Gorman748446b2010-05-24 14:32:27 -07002611
2612 /* Remove page from free list */
2613 list_del(&page->lru);
2614 zone->free_area[order].nr_free--;
2615 rmv_page_order(page);
Bartlomiej Zolnierkiewicz2139cbe2012-10-08 16:32:00 -07002616
zhong jiang400bc7f2016-07-28 15:45:07 -07002617 /*
2618	 * Set the pageblock migratetype to MIGRATE_MOVABLE if the isolated
2619	 * page spans at least half of a pageblock.
2620 */
Mel Gorman748446b2010-05-24 14:32:27 -07002621 if (order >= pageblock_order - 1) {
2622 struct page *endpage = page + (1 << order) - 1;
Michal Nazarewicz47118af2011-12-29 13:09:50 +01002623 for (; page < endpage; page += pageblock_nr_pages) {
2624 int mt = get_pageblock_migratetype(page);
Minchan Kim38c28bf2016-12-12 16:42:05 -08002625 if (!is_migrate_isolate(mt) && !is_migrate_cma(mt)
2626 && mt != MIGRATE_HIGHATOMIC)
Michal Nazarewicz47118af2011-12-29 13:09:50 +01002627 set_pageblock_migratetype(page,
2628 MIGRATE_MOVABLE);
2629 }
Mel Gorman748446b2010-05-24 14:32:27 -07002630 }
2631
Joonsoo Kimf3a14ce2015-07-17 16:24:15 -07002632
Mel Gorman8fb74b92013-01-11 14:32:16 -08002633 return 1UL << order;
Mel Gorman1fb3f8c2012-10-08 16:29:12 -07002634}
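/*
 * Note (caller sketch only): the return value is the number of base pages
 * removed from the free lists (1UL << order), or 0 if the watermark check
 * refused the isolation. A compaction-style caller would roughly do:
 *
 *	int isolated = __isolate_free_page(page, order);
 *
 *	if (!isolated)
 *		break;		zone is too low on memory to take pages from
 */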
2635
2636/*
Mel Gorman060e7412016-05-19 17:13:27 -07002637 * Update NUMA hit/miss statistics
2638 *
2639 * Must be called with interrupts disabled.
2640 *
2641	 * When __GFP_OTHER_NODE is set, assume the node of the preferred
2642	 * zone is the local node. This is useful for daemons that allocate
2643	 * memory on behalf of other processes.
2644 */
2645static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
2646 gfp_t flags)
2647{
2648#ifdef CONFIG_NUMA
2649 int local_nid = numa_node_id();
2650 enum zone_stat_item local_stat = NUMA_LOCAL;
2651
2652 if (unlikely(flags & __GFP_OTHER_NODE)) {
2653 local_stat = NUMA_OTHER;
2654 local_nid = preferred_zone->node;
2655 }
2656
2657 if (z->node == local_nid) {
2658 __inc_zone_state(z, NUMA_HIT);
2659 __inc_zone_state(z, local_stat);
2660 } else {
2661 __inc_zone_state(z, NUMA_MISS);
2662 __inc_zone_state(preferred_zone, NUMA_FOREIGN);
2663 }
2664#endif
2665}
2666
2667/*
Vlastimil Babka75379192015-02-11 15:25:38 -08002668 * Allocate a page from the given zone. Use pcplists for order-0 allocations.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002669 */
Mel Gorman0a15c3e2009-06-16 15:32:05 -07002670static inline
2671struct page *buffered_rmqueue(struct zone *preferred_zone,
Mel Gorman7aeb09f2014-06-04 16:10:21 -07002672 struct zone *zone, unsigned int order,
Mel Gormanc6038442016-05-19 17:13:38 -07002673 gfp_t gfp_flags, unsigned int alloc_flags,
2674 int migratetype)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002675{
2676 unsigned long flags;
Liam Mark2c0f71c2014-06-23 14:13:47 -07002677 struct page *page = NULL;
Mel Gormanb745bc82014-06-04 16:10:22 -07002678 bool cold = ((gfp_flags & __GFP_COLD) != 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002679
Nick Piggin48db57f2006-01-08 01:00:42 -08002680 if (likely(order == 0)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002681 struct per_cpu_pages *pcp;
Liam Mark2c0f71c2014-06-23 14:13:47 -07002682 struct list_head *list = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002683
Linus Torvalds1da177e2005-04-16 15:20:36 -07002684 local_irq_save(flags);
Mel Gorman479f8542016-05-19 17:14:35 -07002685 do {
2686 pcp = &this_cpu_ptr(zone->pageset)->pcp;
Liam Mark2c0f71c2014-06-23 14:13:47 -07002687
2688 /* First try to get CMA pages */
2689 if (migratetype == MIGRATE_MOVABLE &&
2690 gfp_flags & __GFP_CMA) {
2691 list = get_populated_pcp_list(zone, 0, pcp,
2692 get_cma_migrate_type(), cold);
2693 }
2694
2695 if (list == NULL) {
2696 /*
2697 * Either CMA is not suitable or there are no
2698 * free CMA pages.
2699 */
2700 list = get_populated_pcp_list(zone, 0, pcp,
2701 migratetype, cold);
2702 if (unlikely(list == NULL) ||
2703 unlikely(list_empty(list)))
Mel Gorman479f8542016-05-19 17:14:35 -07002704 goto failed;
2705 }
Mel Gormanb92a6ed2007-10-16 01:25:50 -07002706
Mel Gorman479f8542016-05-19 17:14:35 -07002707 if (cold)
2708 page = list_last_entry(list, struct page, lru);
2709 else
2710 page = list_first_entry(list, struct page, lru);
Mel Gorman5f8dcc22009-09-21 17:03:19 -07002711
Vlastimil Babka83b93552016-06-03 14:55:52 -07002712 list_del(&page->lru);
2713 pcp->count--;
2714
2715 } while (check_new_pcp(page));
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08002716 } else {
Michal Hocko0f352e52016-03-17 14:19:32 -07002717 /*
2718 * We most definitely don't want callers attempting to
2719 * allocate greater than order-1 page units with __GFP_NOFAIL.
2720 */
2721 WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002722 spin_lock_irqsave(&zone->lock, flags);
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002723
Mel Gorman479f8542016-05-19 17:14:35 -07002724 do {
2725 page = NULL;
2726 if (alloc_flags & ALLOC_HARDER) {
2727 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
2728 if (page)
2729 trace_mm_page_alloc_zone_locked(page, order, migratetype);
2730 }
Liam Mark2c0f71c2014-06-23 14:13:47 -07002731 if (!page && migratetype == MIGRATE_MOVABLE &&
2732 gfp_flags & __GFP_CMA)
2733 page = __rmqueue_cma(zone, order);
2734
2735 if (!page)
2736 page = __rmqueue(zone, order, migratetype);
Mel Gorman479f8542016-05-19 17:14:35 -07002737 } while (page && check_new_pages(page, order));
Liam Mark2c0f71c2014-06-23 14:13:47 -07002738
Nick Piggina74609f2006-01-06 00:11:20 -08002739 spin_unlock(&zone->lock);
2740 if (!page)
2741 goto failed;
Bartlomiej Zolnierkiewiczd1ce7492012-10-08 16:32:02 -07002742 __mod_zone_freepage_state(zone, -(1 << order),
Vlastimil Babkabb14c2c2015-09-08 15:01:25 -07002743 get_pcppage_migratetype(page));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002744 }
2745
Mel Gorman16709d12016-07-28 15:46:56 -07002746 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
Andi Kleen78afd562011-03-22 16:33:12 -07002747 zone_statistics(preferred_zone, zone, gfp_flags);
Nick Piggina74609f2006-01-06 00:11:20 -08002748 local_irq_restore(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002749
Sasha Levin309381fea2014-01-23 15:52:54 -08002750 VM_BUG_ON_PAGE(bad_range(zone, page), page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002751 return page;
Nick Piggina74609f2006-01-06 00:11:20 -08002752
2753failed:
2754 local_irq_restore(flags);
Nick Piggina74609f2006-01-06 00:11:20 -08002755 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002756}
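/*
 * Note: order-0 requests above are served from the per-cpu pcp lists under
 * local_irq_save() only, optionally preferring CMA pageblocks for movable
 * __GFP_CMA allocations; order > 0 requests take zone->lock and go through
 * __rmqueue(). The caller in get_page_from_freelist() below looks like:
 *
 *	page = buffered_rmqueue(ac->preferred_zoneref->zone, zone, order,
 *				gfp_mask, alloc_flags, ac->migratetype);
 */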
2757
Akinobu Mita933e3122006-12-08 02:39:45 -08002758#ifdef CONFIG_FAIL_PAGE_ALLOC
2759
Akinobu Mitab2588c42011-07-26 16:09:03 -07002760static struct {
Akinobu Mita933e3122006-12-08 02:39:45 -08002761 struct fault_attr attr;
2762
Viresh Kumar621a5f72015-09-26 15:04:07 -07002763 bool ignore_gfp_highmem;
Mel Gorman71baba42015-11-06 16:28:28 -08002764 bool ignore_gfp_reclaim;
Akinobu Mita54114992007-07-15 23:40:23 -07002765 u32 min_order;
Akinobu Mita933e3122006-12-08 02:39:45 -08002766} fail_page_alloc = {
2767 .attr = FAULT_ATTR_INITIALIZER,
Mel Gorman71baba42015-11-06 16:28:28 -08002768 .ignore_gfp_reclaim = true,
Viresh Kumar621a5f72015-09-26 15:04:07 -07002769 .ignore_gfp_highmem = true,
Akinobu Mita54114992007-07-15 23:40:23 -07002770 .min_order = 1,
Akinobu Mita933e3122006-12-08 02:39:45 -08002771};
2772
2773static int __init setup_fail_page_alloc(char *str)
2774{
2775 return setup_fault_attr(&fail_page_alloc.attr, str);
2776}
2777__setup("fail_page_alloc=", setup_fail_page_alloc);
2778
Gavin Shandeaf3862012-07-31 16:41:51 -07002779static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
Akinobu Mita933e3122006-12-08 02:39:45 -08002780{
Akinobu Mita54114992007-07-15 23:40:23 -07002781 if (order < fail_page_alloc.min_order)
Gavin Shandeaf3862012-07-31 16:41:51 -07002782 return false;
Akinobu Mita933e3122006-12-08 02:39:45 -08002783 if (gfp_mask & __GFP_NOFAIL)
Gavin Shandeaf3862012-07-31 16:41:51 -07002784 return false;
Akinobu Mita933e3122006-12-08 02:39:45 -08002785 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
Gavin Shandeaf3862012-07-31 16:41:51 -07002786 return false;
Mel Gorman71baba42015-11-06 16:28:28 -08002787 if (fail_page_alloc.ignore_gfp_reclaim &&
2788 (gfp_mask & __GFP_DIRECT_RECLAIM))
Gavin Shandeaf3862012-07-31 16:41:51 -07002789 return false;
Akinobu Mita933e3122006-12-08 02:39:45 -08002790
2791 return should_fail(&fail_page_alloc.attr, 1 << order);
2792}
2793
2794#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
2795
2796static int __init fail_page_alloc_debugfs(void)
2797{
Al Virof4ae40a2011-07-24 04:33:43 -04002798 umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
Akinobu Mita933e3122006-12-08 02:39:45 -08002799 struct dentry *dir;
Akinobu Mita933e3122006-12-08 02:39:45 -08002800
Akinobu Mitadd48c082011-08-03 16:21:01 -07002801 dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
2802 &fail_page_alloc.attr);
2803 if (IS_ERR(dir))
2804 return PTR_ERR(dir);
Akinobu Mita933e3122006-12-08 02:39:45 -08002805
Akinobu Mitab2588c42011-07-26 16:09:03 -07002806 if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
Mel Gorman71baba42015-11-06 16:28:28 -08002807 &fail_page_alloc.ignore_gfp_reclaim))
Akinobu Mitab2588c42011-07-26 16:09:03 -07002808 goto fail;
2809 if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
2810 &fail_page_alloc.ignore_gfp_highmem))
2811 goto fail;
2812 if (!debugfs_create_u32("min-order", mode, dir,
2813 &fail_page_alloc.min_order))
2814 goto fail;
Akinobu Mita933e3122006-12-08 02:39:45 -08002815
Akinobu Mitab2588c42011-07-26 16:09:03 -07002816 return 0;
2817fail:
Akinobu Mitadd48c082011-08-03 16:21:01 -07002818 debugfs_remove_recursive(dir);
Akinobu Mita933e3122006-12-08 02:39:45 -08002819
Akinobu Mitab2588c42011-07-26 16:09:03 -07002820 return -ENOMEM;
Akinobu Mita933e3122006-12-08 02:39:45 -08002821}
2822
2823late_initcall(fail_page_alloc_debugfs);
2824
2825#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
2826
2827#else /* CONFIG_FAIL_PAGE_ALLOC */
2828
Gavin Shandeaf3862012-07-31 16:41:51 -07002829static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
Akinobu Mita933e3122006-12-08 02:39:45 -08002830{
Gavin Shandeaf3862012-07-31 16:41:51 -07002831 return false;
Akinobu Mita933e3122006-12-08 02:39:45 -08002832}
2833
2834#endif /* CONFIG_FAIL_PAGE_ALLOC */
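/*
 * Note (assumed usage, based on the generic fault_attr format rather than
 * this file): with CONFIG_FAIL_PAGE_ALLOC the allocator can be made to fail
 * on purpose for testing. The boot parameter is believed to take
 * "fail_page_alloc=<interval>,<probability>,<space>,<times>", and the debugfs
 * knobs created above (ignore-gfp-wait, ignore-gfp-highmem, min-order) tune
 * which allocations are eligible, e.g.:
 *
 *	fail_page_alloc=1,10,0,-1	fail roughly 10% of eligible requests
 */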
2835
Linus Torvalds1da177e2005-04-16 15:20:36 -07002836/*
Mel Gorman97a16fc2015-11-06 16:28:40 -08002837 * Return true if free base pages are above 'mark'. For high-order checks it
2838 * will return true if the order-0 watermark is reached and there is at least
2839 * one free page of a suitable size. Checking now avoids taking the zone lock
2840 * to check in the allocation paths if no pages are free.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002841 */
Michal Hocko86a294a2016-05-20 16:57:12 -07002842bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
2843 int classzone_idx, unsigned int alloc_flags,
2844 long free_pages)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002845{
Christoph Lameterd23ad422007-02-10 01:43:02 -08002846 long min = mark;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002847 int o;
Mel Gormanc6038442016-05-19 17:13:38 -07002848 const bool alloc_harder = (alloc_flags & ALLOC_HARDER);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002849
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002850 /* free_pages may go negative - that's OK */
Michal Hockodf0a6da2012-01-10 15:08:02 -08002851 free_pages -= (1 << order) - 1;
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002852
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08002853 if (alloc_flags & ALLOC_HIGH)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002854 min -= min / 2;
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002855
2856 /*
2857 * If the caller does not have rights to ALLOC_HARDER then subtract
2858 * the high-atomic reserves. This will over-estimate the size of the
2859 * atomic reserve but it avoids a search.
2860 */
Mel Gorman97a16fc2015-11-06 16:28:40 -08002861 if (likely(!alloc_harder))
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002862 free_pages -= z->nr_reserved_highatomic;
2863 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002864 min -= min / 4;
Mel Gormane2b19192015-11-06 16:28:09 -08002865
Bartlomiej Zolnierkiewiczd95ea5d2012-10-08 16:32:05 -07002866#ifdef CONFIG_CMA
2867 /* If allocation can't use CMA areas don't use free CMA pages */
2868 if (!(alloc_flags & ALLOC_CMA))
Mel Gorman97a16fc2015-11-06 16:28:40 -08002869 free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
Bartlomiej Zolnierkiewiczd95ea5d2012-10-08 16:32:05 -07002870#endif
Tomasz Stanislawski026b0812013-06-12 14:05:02 -07002871
Mel Gorman97a16fc2015-11-06 16:28:40 -08002872 /*
2873 * Check watermarks for an order-0 allocation request. If these
2874 * are not met, then a high-order request also cannot go ahead
2875 * even if a suitable page happened to be free.
2876 */
2877 if (free_pages <= min + z->lowmem_reserve[classzone_idx])
Mel Gorman88f5acf2011-01-13 15:45:41 -08002878 return false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002879
Mel Gorman97a16fc2015-11-06 16:28:40 -08002880 /* If this is an order-0 request then the watermark is fine */
2881 if (!order)
2882 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002883
Mel Gorman97a16fc2015-11-06 16:28:40 -08002884 /* For a high-order request, check at least one suitable page is free */
2885 for (o = order; o < MAX_ORDER; o++) {
2886 struct free_area *area = &z->free_area[o];
2887 int mt;
2888
2889 if (!area->nr_free)
2890 continue;
2891
2892 if (alloc_harder)
2893 return true;
2894
2895 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
Vinayak Menonca917712016-06-07 15:23:29 +05302896#ifdef CONFIG_CMA
2897 /*
2898 * Note that this check is needed only
2899 * when MIGRATE_CMA < MIGRATE_PCPTYPES.
2900 */
2901 if (mt == MIGRATE_CMA)
2902 continue;
2903#endif
Mel Gorman97a16fc2015-11-06 16:28:40 -08002904 if (!list_empty(&area->free_list[mt]))
2905 return true;
2906 }
2907
2908#ifdef CONFIG_CMA
2909 if ((alloc_flags & ALLOC_CMA) &&
2910 !list_empty(&area->free_list[MIGRATE_CMA])) {
2911 return true;
2912 }
2913#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002914 }
Mel Gorman97a16fc2015-11-06 16:28:40 -08002915 return false;
Mel Gorman88f5acf2011-01-13 15:45:41 -08002916}
2917
Mel Gorman7aeb09f2014-06-04 16:10:21 -07002918bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
Mel Gormanc6038442016-05-19 17:13:38 -07002919 int classzone_idx, unsigned int alloc_flags)
Mel Gorman88f5acf2011-01-13 15:45:41 -08002920{
2921 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
2922 zone_page_state(z, NR_FREE_PAGES));
2923}
2924
Mel Gorman48ee5f32016-05-19 17:14:07 -07002925static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
2926 unsigned long mark, int classzone_idx, unsigned int alloc_flags)
2927{
2928 long free_pages = zone_page_state(z, NR_FREE_PAGES);
2929 long cma_pages = 0;
2930
2931#ifdef CONFIG_CMA
2932 /* If allocation can't use CMA areas don't use free CMA pages */
2933 if (!(alloc_flags & ALLOC_CMA))
2934 cma_pages = zone_page_state(z, NR_FREE_CMA_PAGES);
2935#endif
2936
2937 /*
2938 * Fast check for order-0 only. If this fails then the reserves
2939 * need to be calculated. There is a corner case where the check
2940	 * passes but only the high-order atomic reserves are free. If
2941 * the caller is !atomic then it'll uselessly search the free
2942 * list. That corner case is then slower but it is harmless.
2943 */
2944 if (!order && (free_pages - cma_pages) > mark + z->lowmem_reserve[classzone_idx])
2945 return true;
2946
2947 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
2948 free_pages);
2949}
2950
Mel Gorman7aeb09f2014-06-04 16:10:21 -07002951bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
Mel Gormane2b19192015-11-06 16:28:09 -08002952 unsigned long mark, int classzone_idx)
Mel Gorman88f5acf2011-01-13 15:45:41 -08002953{
2954 long free_pages = zone_page_state(z, NR_FREE_PAGES);
2955
2956 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
2957 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
2958
Mel Gormane2b19192015-11-06 16:28:09 -08002959 return __zone_watermark_ok(z, order, mark, classzone_idx, 0,
Mel Gorman88f5acf2011-01-13 15:45:41 -08002960 free_pages);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002961}
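/*
 * Note on the three wrappers above (summary sketch): zone_watermark_ok()
 * uses the raw NR_FREE_PAGES counter, zone_watermark_fast() short-circuits
 * the common order-0 case, and zone_watermark_ok_safe() re-reads a
 * drift-safe snapshot when the cheap counter may be stale. A typical check
 * in a caller (illustrative only):
 *
 *	if (!zone_watermark_ok(zone, order, min_wmark_pages(zone),
 *			       classzone_idx, 0))
 *		continue;	zone too low, try the next one
 */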
2962
Paul Jackson9276b1bc2006-12-06 20:31:48 -08002963#ifdef CONFIG_NUMA
David Rientjes957f8222012-10-08 16:33:24 -07002964static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
2965{
Gavin Shand1e80422017-02-24 14:59:33 -08002966 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
Mel Gorman5f7a75a2014-06-04 16:07:15 -07002967 RECLAIM_DISTANCE;
David Rientjes957f8222012-10-08 16:33:24 -07002968}
Paul Jackson9276b1bc2006-12-06 20:31:48 -08002969#else /* CONFIG_NUMA */
David Rientjes957f8222012-10-08 16:33:24 -07002970static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
2971{
2972 return true;
2973}
Paul Jackson9276b1bc2006-12-06 20:31:48 -08002974#endif /* CONFIG_NUMA */
2975
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08002976/*
Paul Jackson0798e512006-12-06 20:31:38 -08002977 * get_page_from_freelist goes through the zonelist trying to allocate
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08002978 * a page.
2979 */
2980static struct page *
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002981get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
2982 const struct alloc_context *ac)
Martin Hicks753ee722005-06-21 17:14:41 -07002983{
Mel Gormanc33d6c02016-05-19 17:14:10 -07002984 struct zoneref *z = ac->preferred_zoneref;
Mel Gorman5117f452009-06-16 15:31:59 -07002985 struct zone *zone;
Mel Gorman3b8c0be2016-07-28 15:46:53 -07002986 struct pglist_data *last_pgdat_dirty_limit = NULL;
2987
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08002988 /*
Paul Jackson9276b1bc2006-12-06 20:31:48 -08002989	 * Scan the zonelist, looking for a zone with enough free pages.
Vladimir Davydov344736f2014-10-20 15:50:30 +04002990 * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08002991 */
Mel Gormanc33d6c02016-05-19 17:14:10 -07002992 for_next_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002993 ac->nodemask) {
Mel Gormanbe06af02016-05-19 17:13:47 -07002994 struct page *page;
Johannes Weinere085dbc2013-09-11 14:20:46 -07002995 unsigned long mark;
2996
Mel Gorman664eedd2014-06-04 16:10:08 -07002997 if (cpusets_enabled() &&
2998 (alloc_flags & ALLOC_CPUSET) &&
Vlastimil Babka002f2902016-05-19 17:14:30 -07002999 !__cpuset_zone_allowed(zone, gfp_mask))
Mel Gormancd38b112011-07-25 17:12:29 -07003000 continue;
Johannes Weinera756cf52012-01-10 15:07:49 -08003001 /*
3002 * When allocating a page cache page for writing, we
Mel Gorman281e3722016-07-28 15:46:11 -07003003 * want to get it from a node that is within its dirty
3004 * limit, such that no single node holds more than its
Johannes Weinera756cf52012-01-10 15:07:49 -08003005 * proportional share of globally allowed dirty pages.
Mel Gorman281e3722016-07-28 15:46:11 -07003006 * The dirty limits take into account the node's
Johannes Weinera756cf52012-01-10 15:07:49 -08003007 * lowmem reserves and high watermark so that kswapd
3008 * should be able to balance it without having to
3009 * write pages from its LRU list.
3010 *
Johannes Weinera756cf52012-01-10 15:07:49 -08003011 * XXX: For now, allow allocations to potentially
Mel Gorman281e3722016-07-28 15:46:11 -07003012 * exceed the per-node dirty limit in the slowpath
Mel Gormanc9ab0c42015-11-06 16:28:12 -08003013 * (spread_dirty_pages unset) before going into reclaim,
Johannes Weinera756cf52012-01-10 15:07:49 -08003014 * which is important when on a NUMA setup the allowed
Mel Gorman281e3722016-07-28 15:46:11 -07003015 * nodes are together not big enough to reach the
Johannes Weinera756cf52012-01-10 15:07:49 -08003016 * global limit. The proper fix for these situations
Mel Gorman281e3722016-07-28 15:46:11 -07003017 * will require awareness of nodes in the
Johannes Weinera756cf52012-01-10 15:07:49 -08003018 * dirty-throttling and the flusher threads.
3019 */
Mel Gorman3b8c0be2016-07-28 15:46:53 -07003020 if (ac->spread_dirty_pages) {
3021 if (last_pgdat_dirty_limit == zone->zone_pgdat)
3022 continue;
3023
3024 if (!node_dirty_ok(zone->zone_pgdat)) {
3025 last_pgdat_dirty_limit = zone->zone_pgdat;
3026 continue;
3027 }
3028 }
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08003029
Johannes Weinere085dbc2013-09-11 14:20:46 -07003030 mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
Mel Gorman48ee5f32016-05-19 17:14:07 -07003031 if (!zone_watermark_fast(zone, order, mark,
Mel Gorman93ea9962016-05-19 17:14:13 -07003032 ac_classzone_idx(ac), alloc_flags)) {
Mel Gormanfa5e0842009-06-16 15:33:22 -07003033 int ret;
3034
Mel Gorman5dab2912014-06-04 16:10:14 -07003035 /* Checked here to keep the fast path fast */
3036 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
3037 if (alloc_flags & ALLOC_NO_WATERMARKS)
3038 goto try_this_zone;
3039
Mel Gormana5f5f912016-07-28 15:46:32 -07003040 if (node_reclaim_mode == 0 ||
Mel Gormanc33d6c02016-05-19 17:14:10 -07003041 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
Mel Gormancd38b112011-07-25 17:12:29 -07003042 continue;
3043
Mel Gormana5f5f912016-07-28 15:46:32 -07003044 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
Mel Gormanfa5e0842009-06-16 15:33:22 -07003045 switch (ret) {
Mel Gormana5f5f912016-07-28 15:46:32 -07003046 case NODE_RECLAIM_NOSCAN:
Mel Gormanfa5e0842009-06-16 15:33:22 -07003047 /* did not scan */
Mel Gormancd38b112011-07-25 17:12:29 -07003048 continue;
Mel Gormana5f5f912016-07-28 15:46:32 -07003049 case NODE_RECLAIM_FULL:
Mel Gormanfa5e0842009-06-16 15:33:22 -07003050 /* scanned but unreclaimable */
Mel Gormancd38b112011-07-25 17:12:29 -07003051 continue;
Mel Gormanfa5e0842009-06-16 15:33:22 -07003052 default:
3053 /* did we reclaim enough */
Mel Gormanfed27192013-04-29 15:07:57 -07003054 if (zone_watermark_ok(zone, order, mark,
Mel Gorman93ea9962016-05-19 17:14:13 -07003055 ac_classzone_idx(ac), alloc_flags))
Mel Gormanfed27192013-04-29 15:07:57 -07003056 goto try_this_zone;
3057
Mel Gormanfed27192013-04-29 15:07:57 -07003058 continue;
Paul Jackson0798e512006-12-06 20:31:38 -08003059 }
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08003060 }
3061
Mel Gormanfa5e0842009-06-16 15:33:22 -07003062try_this_zone:
Mel Gormanc33d6c02016-05-19 17:14:10 -07003063 page = buffered_rmqueue(ac->preferred_zoneref->zone, zone, order,
Mel Gorman0aaa29a2015-11-06 16:28:37 -08003064 gfp_mask, alloc_flags, ac->migratetype);
Vlastimil Babka75379192015-02-11 15:25:38 -08003065 if (page) {
Mel Gorman479f8542016-05-19 17:14:35 -07003066 prep_new_page(page, order, gfp_mask, alloc_flags);
Mel Gorman0aaa29a2015-11-06 16:28:37 -08003067
3068 /*
3069 * If this is a high-order atomic allocation then check
3070 * if the pageblock should be reserved for the future
3071 */
3072 if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
3073 reserve_highatomic_pageblock(page, zone, order);
3074
Vlastimil Babka75379192015-02-11 15:25:38 -08003075 return page;
3076 }
Mel Gorman54a6eb52008-04-28 02:12:16 -07003077 }
Paul Jackson9276b1bc2006-12-06 20:31:48 -08003078
Mel Gorman4ffeaf32014-08-06 16:07:22 -07003079 return NULL;
Martin Hicks753ee722005-06-21 17:14:41 -07003080}
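/*
 * Note: this is the core of the fast path; it walks the zonelist, skips
 * zones that fail the cpuset, dirty-limit or watermark checks (optionally
 * trying node_reclaim() first), and hands the first suitable zone to
 * buffered_rmqueue(). __alloc_pages_may_oom() below calls it like this:
 *
 *	page = get_page_from_freelist(gfp_mask | __GFP_HARDWALL, order,
 *				      ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
 */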
3081
David Rientjes29423e772011-03-22 16:30:47 -07003082/*
3083 * Large machines with many possible nodes should not always dump per-node
3084 * meminfo in irq context.
3085 */
3086static inline bool should_suppress_show_mem(void)
3087{
3088 bool ret = false;
3089
3090#if NODES_SHIFT > 8
3091 ret = in_interrupt();
3092#endif
3093 return ret;
3094}
3095
Dave Hansena238ab52011-05-24 17:12:16 -07003096static DEFINE_RATELIMIT_STATE(nopage_rs,
3097 DEFAULT_RATELIMIT_INTERVAL,
3098 DEFAULT_RATELIMIT_BURST);
3099
Michal Hocko7877cdc2016-10-07 17:01:55 -07003100void warn_alloc(gfp_t gfp_mask, const char *fmt, ...)
Dave Hansena238ab52011-05-24 17:12:16 -07003101{
Dave Hansena238ab52011-05-24 17:12:16 -07003102 unsigned int filter = SHOW_MEM_FILTER_NODES;
Michal Hocko7877cdc2016-10-07 17:01:55 -07003103 struct va_format vaf;
3104 va_list args;
Dave Hansena238ab52011-05-24 17:12:16 -07003105
Stanislaw Gruszkac0a32fc52012-01-10 15:07:28 -08003106 if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) ||
3107 debug_guardpage_minorder() > 0)
Dave Hansena238ab52011-05-24 17:12:16 -07003108 return;
3109
3110 /*
3111 * This documents exceptions given to allocations in certain
3112 * contexts that are allowed to allocate outside current's set
3113 * of allowed nodes.
3114 */
3115 if (!(gfp_mask & __GFP_NOMEMALLOC))
3116 if (test_thread_flag(TIF_MEMDIE) ||
3117 (current->flags & (PF_MEMALLOC | PF_EXITING)))
3118 filter &= ~SHOW_MEM_FILTER_NODES;
Mel Gormand0164ad2015-11-06 16:28:21 -08003119 if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
Dave Hansena238ab52011-05-24 17:12:16 -07003120 filter &= ~SHOW_MEM_FILTER_NODES;
3121
Michal Hocko7877cdc2016-10-07 17:01:55 -07003122 pr_warn("%s: ", current->comm);
Joe Perches3ee9a4f2011-10-31 17:08:35 -07003123
Michal Hocko7877cdc2016-10-07 17:01:55 -07003124 va_start(args, fmt);
3125 vaf.fmt = fmt;
3126 vaf.va = &args;
3127 pr_cont("%pV", &vaf);
3128 va_end(args);
Joe Perches3ee9a4f2011-10-31 17:08:35 -07003129
Michal Hocko7877cdc2016-10-07 17:01:55 -07003130 pr_cont(", mode:%#x(%pGg)\n", gfp_mask, &gfp_mask);
Joe Perches3ee9a4f2011-10-31 17:08:35 -07003131
Dave Hansena238ab52011-05-24 17:12:16 -07003132 dump_stack();
3133 if (!should_suppress_show_mem())
3134 show_mem(filter);
3135}
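/*
 * Note (caller sketch, the exact message is not dictated by this file):
 * warn_alloc() is the rate-limited allocation-failure reporter. It prints
 * the caller's format string plus the gfp mask and dumps memory state
 * unless __GFP_NOWARN is set or the warning is rate limited. A typical
 * call might look like:
 *
 *	warn_alloc(gfp_mask, "page allocation failure: order:%u", order);
 */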
3136
Mel Gorman11e33f62009-06-16 15:31:57 -07003137static inline struct page *
3138__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003139 const struct alloc_context *ac, unsigned long *did_some_progress)
Mel Gorman11e33f62009-06-16 15:31:57 -07003140{
David Rientjes6e0fc462015-09-08 15:00:36 -07003141 struct oom_control oc = {
3142 .zonelist = ac->zonelist,
3143 .nodemask = ac->nodemask,
Vladimir Davydov2a966b72016-07-26 15:22:33 -07003144 .memcg = NULL,
David Rientjes6e0fc462015-09-08 15:00:36 -07003145 .gfp_mask = gfp_mask,
3146 .order = order,
David Rientjes6e0fc462015-09-08 15:00:36 -07003147 };
Linus Torvalds1da177e2005-04-16 15:20:36 -07003148 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003149
Johannes Weiner9879de72015-01-26 12:58:32 -08003150 *did_some_progress = 0;
3151
Johannes Weiner9879de72015-01-26 12:58:32 -08003152 /*
Johannes Weinerdc564012015-06-24 16:57:19 -07003153 * Acquire the oom lock. If that fails, somebody else is
3154 * making progress for us.
Johannes Weiner9879de72015-01-26 12:58:32 -08003155 */
Johannes Weinerdc564012015-06-24 16:57:19 -07003156 if (!mutex_trylock(&oom_lock)) {
Johannes Weiner9879de72015-01-26 12:58:32 -08003157 *did_some_progress = 1;
Mel Gorman11e33f62009-06-16 15:31:57 -07003158 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003159 return NULL;
3160 }
Jens Axboe6b1de912005-11-17 21:35:02 +01003161
Mel Gorman11e33f62009-06-16 15:31:57 -07003162 /*
3163	 * Go through the zonelist yet one more time, keeping a very high watermark
3164	 * here; this is only to catch a parallel oom killing, and we must fail if
3165	 * we're still under heavy pressure.
3166 */
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003167 page = get_page_from_freelist(gfp_mask | __GFP_HARDWALL, order,
3168 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08003169 if (page)
Mel Gorman11e33f62009-06-16 15:31:57 -07003170 goto out;
3171
KAMEZAWA Hiroyuki4365a562009-12-15 16:45:33 -08003172 if (!(gfp_mask & __GFP_NOFAIL)) {
Johannes Weiner9879de72015-01-26 12:58:32 -08003173 /* Coredumps can quickly deplete all memory reserves */
3174 if (current->flags & PF_DUMPCORE)
3175 goto out;
KAMEZAWA Hiroyuki4365a562009-12-15 16:45:33 -08003176 /* The OOM killer will not help higher order allocs */
3177 if (order > PAGE_ALLOC_COSTLY_ORDER)
3178 goto out;
David Rientjes03668b32010-08-09 17:18:54 -07003179 /* The OOM killer does not needlessly kill tasks for lowmem */
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003180 if (ac->high_zoneidx < ZONE_NORMAL)
David Rientjes03668b32010-08-09 17:18:54 -07003181 goto out;
Johannes Weiner90839052015-06-24 16:57:21 -07003182 if (pm_suspended_storage())
3183 goto out;
Michal Hocko3da88fb2016-05-19 17:13:09 -07003184 /*
3185 * XXX: GFP_NOFS allocations should rather fail than rely on
3186	 * other requests to make forward progress.
3187 * We are in an unfortunate situation where out_of_memory cannot
3188 * do much for this context but let's try it to at least get
3189 * access to memory reserved if the current task is killed (see
3190 * out_of_memory). Once filesystems are ready to handle allocation
3191 * failures more gracefully we should just bail out here.
3192 */
3193
David Rientjes4167e9b2015-04-14 15:46:55 -07003194 /* The OOM killer may not free memory on a specific node */
KAMEZAWA Hiroyuki4365a562009-12-15 16:45:33 -08003195 if (gfp_mask & __GFP_THISNODE)
3196 goto out;
3197 }
Mel Gorman11e33f62009-06-16 15:31:57 -07003198 /* Exhausted what can be done so it's blamo time */
Michal Hocko5020e282016-01-14 15:20:36 -08003199 if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
Michal Hockoc32b3cb2015-02-11 15:26:24 -08003200 *did_some_progress = 1;
Michal Hocko5020e282016-01-14 15:20:36 -08003201
3202 if (gfp_mask & __GFP_NOFAIL) {
3203 page = get_page_from_freelist(gfp_mask, order,
3204 ALLOC_NO_WATERMARKS|ALLOC_CPUSET, ac);
3205 /*
3206 * fallback to ignore cpuset restriction if our nodes
3207 * are depleted
3208 */
3209 if (!page)
3210 page = get_page_from_freelist(gfp_mask, order,
3211 ALLOC_NO_WATERMARKS, ac);
3212 }
3213 }
Mel Gorman11e33f62009-06-16 15:31:57 -07003214out:
Johannes Weinerdc564012015-06-24 16:57:19 -07003215 mutex_unlock(&oom_lock);
Mel Gorman11e33f62009-06-16 15:31:57 -07003216 return page;
3217}
3218
Michal Hocko33c2d212016-05-20 16:57:06 -07003219/*
3220 * Maximum number of compaction retries with progress before the OOM
3221 * killer is considered the only way to move forward.
3222 */
3223#define MAX_COMPACT_RETRIES 16
3224
Mel Gorman56de7262010-05-24 14:32:30 -07003225#ifdef CONFIG_COMPACTION
3226/* Try memory compaction for high-order allocations before reclaim */
3227static struct page *
3228__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
Mel Gormanc6038442016-05-19 17:13:38 -07003229 unsigned int alloc_flags, const struct alloc_context *ac,
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07003230 enum compact_priority prio, enum compact_result *compact_result)
Mel Gorman56de7262010-05-24 14:32:30 -07003231{
Vlastimil Babka98dd3b42014-10-09 15:27:04 -07003232 struct page *page;
Vlastimil Babka4e434d42017-05-08 15:59:46 -07003233 unsigned int noreclaim_flag = current->flags & PF_MEMALLOC;
Vlastimil Babka53853e22014-10-09 15:27:02 -07003234
Mel Gorman66199712012-01-12 17:19:41 -08003235 if (!order)
Mel Gorman56de7262010-05-24 14:32:30 -07003236 return NULL;
3237
Andrew Mortonc06b1fc2011-01-13 15:47:32 -08003238 current->flags |= PF_MEMALLOC;
Michal Hockoc5d01d02016-05-20 16:56:53 -07003239 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
Vlastimil Babkac3486f52016-07-28 15:49:30 -07003240 prio);
Vlastimil Babka4e434d42017-05-08 15:59:46 -07003241 current->flags = (current->flags & ~PF_MEMALLOC) | noreclaim_flag;
Mel Gorman56de7262010-05-24 14:32:30 -07003242
Michal Hockoc5d01d02016-05-20 16:56:53 -07003243 if (*compact_result <= COMPACT_INACTIVE)
Vlastimil Babka98dd3b42014-10-09 15:27:04 -07003244 return NULL;
Mel Gorman56de7262010-05-24 14:32:30 -07003245
Vlastimil Babka98dd3b42014-10-09 15:27:04 -07003246 /*
3247 * At least in one zone compaction wasn't deferred or skipped, so let's
3248 * count a compaction stall
3249 */
3250 count_vm_event(COMPACTSTALL);
3251
Vlastimil Babka31a6c192016-07-28 15:49:13 -07003252 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
Vlastimil Babka98dd3b42014-10-09 15:27:04 -07003253
3254 if (page) {
3255 struct zone *zone = page_zone(page);
3256
3257 zone->compact_blockskip_flush = false;
3258 compaction_defer_reset(zone, order, true);
3259 count_vm_event(COMPACTSUCCESS);
3260 return page;
3261 }
3262
3263 /*
Vlastimil Babka98dd3b42014-10-09 15:27:04 -07003264 * It's bad if compaction run occurs and fails. The most likely reason
3265 * is that pages exist, but not enough to satisfy watermarks.
3266 */
3267 count_vm_event(COMPACTFAIL);
3268
3269 cond_resched();
3270
Mel Gorman56de7262010-05-24 14:32:30 -07003271 return NULL;
3272}
Michal Hocko33c2d212016-05-20 16:57:06 -07003273
Vlastimil Babka32508452016-10-07 17:00:28 -07003274static inline bool
3275should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
3276 enum compact_result compact_result,
3277 enum compact_priority *compact_priority,
Vlastimil Babkad9436492016-10-07 17:00:31 -07003278 int *compaction_retries)
Vlastimil Babka32508452016-10-07 17:00:28 -07003279{
3280 int max_retries = MAX_COMPACT_RETRIES;
Vlastimil Babkac2033b02016-10-07 17:00:34 -07003281 int min_priority;
Vlastimil Babka32508452016-10-07 17:00:28 -07003282
3283 if (!order)
3284 return false;
3285
Vlastimil Babkad9436492016-10-07 17:00:31 -07003286 if (compaction_made_progress(compact_result))
3287 (*compaction_retries)++;
3288
Vlastimil Babka32508452016-10-07 17:00:28 -07003289 /*
3290	 * compaction considers all the zones as desperately out of memory
3291 * so it doesn't really make much sense to retry except when the
3292 * failure could be caused by insufficient priority
3293 */
Vlastimil Babkad9436492016-10-07 17:00:31 -07003294 if (compaction_failed(compact_result))
3295 goto check_priority;
Vlastimil Babka32508452016-10-07 17:00:28 -07003296
3297 /*
3298 * make sure the compaction wasn't deferred or didn't bail out early
3299	 * due to lock contention before we declare that we should give up.
3300 * But do not retry if the given zonelist is not suitable for
3301 * compaction.
3302 */
3303 if (compaction_withdrawn(compact_result))
3304 return compaction_zonelist_suitable(ac, order, alloc_flags);
3305
3306 /*
3307 * !costly requests are much more important than __GFP_REPEAT
3308 * costly ones because they are de facto nofail and invoke OOM
3309 * killer to move on while costly can fail and users are ready
3310 * to cope with that. 1/4 retries is rather arbitrary but we
3311 * would need much more detailed feedback from compaction to
3312 * make a better decision.
3313 */
3314 if (order > PAGE_ALLOC_COSTLY_ORDER)
3315 max_retries /= 4;
Vlastimil Babkad9436492016-10-07 17:00:31 -07003316 if (*compaction_retries <= max_retries)
Vlastimil Babka32508452016-10-07 17:00:28 -07003317 return true;
3318
Vlastimil Babkad9436492016-10-07 17:00:31 -07003319 /*
3320 * Make sure there are attempts at the highest priority if we exhausted
3321 * all retries or failed at the lower priorities.
3322 */
3323check_priority:
Vlastimil Babkac2033b02016-10-07 17:00:34 -07003324 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
3325 MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
3326 if (*compact_priority > min_priority) {
Vlastimil Babkad9436492016-10-07 17:00:31 -07003327 (*compact_priority)--;
3328 *compaction_retries = 0;
3329 return true;
3330 }
Vlastimil Babka32508452016-10-07 17:00:28 -07003331 return false;
3332}
Mel Gorman56de7262010-05-24 14:32:30 -07003333#else
3334static inline struct page *
3335__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
Mel Gormanc6038442016-05-19 17:13:38 -07003336 unsigned int alloc_flags, const struct alloc_context *ac,
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07003337 enum compact_priority prio, enum compact_result *compact_result)
Mel Gorman56de7262010-05-24 14:32:30 -07003338{
Michal Hocko33c2d212016-05-20 16:57:06 -07003339 *compact_result = COMPACT_SKIPPED;
Mel Gorman56de7262010-05-24 14:32:30 -07003340 return NULL;
3341}
Michal Hocko33c2d212016-05-20 16:57:06 -07003342
3343static inline bool
Michal Hocko86a294a2016-05-20 16:57:12 -07003344should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
3345 enum compact_result compact_result,
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07003346 enum compact_priority *compact_priority,
Vlastimil Babkad9436492016-10-07 17:00:31 -07003347 int *compaction_retries)
Michal Hocko33c2d212016-05-20 16:57:06 -07003348{
Michal Hocko31e49bf2016-05-20 16:57:15 -07003349 struct zone *zone;
3350 struct zoneref *z;
3351
3352 if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
3353 return false;
3354
3355 /*
3356 * There are setups with compaction disabled which would prefer to loop
3357 * inside the allocator rather than hit the oom killer prematurely.
3358 * Let's give them a good hope and keep retrying while the order-0
3359 * watermarks are OK.
3360 */
3361 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
3362 ac->nodemask) {
3363 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
3364 ac_classzone_idx(ac), alloc_flags))
3365 return true;
3366 }
Michal Hocko33c2d212016-05-20 16:57:06 -07003367 return false;
3368}
Vlastimil Babka32508452016-10-07 17:00:28 -07003369#endif /* CONFIG_COMPACTION */
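/*
 * Note: the slowpath further down pairs these two helpers into a
 * compact-then-maybe-retry loop, roughly like the following sketch
 * (simplified from the real control flow):
 *
 *	page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
 *					    compact_priority, &compact_result);
 *	if (!page && should_compact_retry(ac, order, alloc_flags,
 *					  compact_result, &compact_priority,
 *					  &compaction_retries))
 *		goto retry;
 */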
Mel Gorman56de7262010-05-24 14:32:30 -07003370
Marek Szyprowskibba90712012-01-25 12:09:52 +01003371/* Perform direct synchronous page reclaim */
3372static int
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003373__perform_reclaim(gfp_t gfp_mask, unsigned int order,
3374 const struct alloc_context *ac)
Mel Gorman11e33f62009-06-16 15:31:57 -07003375{
Mel Gorman11e33f62009-06-16 15:31:57 -07003376 struct reclaim_state reclaim_state;
Marek Szyprowskibba90712012-01-25 12:09:52 +01003377 int progress;
Mel Gorman11e33f62009-06-16 15:31:57 -07003378
3379 cond_resched();
3380
3381 /* We now go into synchronous reclaim */
3382 cpuset_memory_pressure_bump();
Andrew Mortonc06b1fc2011-01-13 15:47:32 -08003383 current->flags |= PF_MEMALLOC;
Mel Gorman11e33f62009-06-16 15:31:57 -07003384 lockdep_set_current_reclaim_state(gfp_mask);
3385 reclaim_state.reclaimed_slab = 0;
Andrew Mortonc06b1fc2011-01-13 15:47:32 -08003386 current->reclaim_state = &reclaim_state;
Mel Gorman11e33f62009-06-16 15:31:57 -07003387
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003388 progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
3389 ac->nodemask);
Mel Gorman11e33f62009-06-16 15:31:57 -07003390
Andrew Mortonc06b1fc2011-01-13 15:47:32 -08003391 current->reclaim_state = NULL;
Mel Gorman11e33f62009-06-16 15:31:57 -07003392 lockdep_clear_current_reclaim_state();
Andrew Mortonc06b1fc2011-01-13 15:47:32 -08003393 current->flags &= ~PF_MEMALLOC;
Mel Gorman11e33f62009-06-16 15:31:57 -07003394
3395 cond_resched();
3396
Marek Szyprowskibba90712012-01-25 12:09:52 +01003397 return progress;
3398}
3399
3400/* The really slow allocator path where we enter direct reclaim */
3401static inline struct page *
3402__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
Mel Gormanc6038442016-05-19 17:13:38 -07003403 unsigned int alloc_flags, const struct alloc_context *ac,
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003404 unsigned long *did_some_progress)
Marek Szyprowskibba90712012-01-25 12:09:52 +01003405{
3406 struct page *page = NULL;
3407 bool drained = false;
3408
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003409 *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
Mel Gorman9ee493c2010-09-09 16:38:18 -07003410 if (unlikely(!(*did_some_progress)))
3411 return NULL;
Mel Gorman11e33f62009-06-16 15:31:57 -07003412
Mel Gorman9ee493c2010-09-09 16:38:18 -07003413retry:
Vlastimil Babka31a6c192016-07-28 15:49:13 -07003414 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
Mel Gorman9ee493c2010-09-09 16:38:18 -07003415
3416 /*
3417 * If an allocation failed after direct reclaim, it could be because
Mel Gorman0aaa29a2015-11-06 16:28:37 -08003418 * pages are pinned on the per-cpu lists or in high alloc reserves.
3419	 * Shrink them and try again.
Mel Gorman9ee493c2010-09-09 16:38:18 -07003420 */
3421 if (!page && !drained) {
Minchan Kim8ddf5f92016-12-12 16:42:14 -08003422 unreserve_highatomic_pageblock(ac, false);
Vlastimil Babka93481ff2014-12-10 15:43:01 -08003423 drain_all_pages(NULL);
Mel Gorman9ee493c2010-09-09 16:38:18 -07003424 drained = true;
3425 goto retry;
3426 }
3427
Mel Gorman11e33f62009-06-16 15:31:57 -07003428 return page;
3429}
3430
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003431static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac)
Mel Gorman11e33f62009-06-16 15:31:57 -07003432{
3433 struct zoneref *z;
3434 struct zone *zone;
Mel Gormane1a55632016-07-28 15:46:26 -07003435 pg_data_t *last_pgdat = NULL;
Mel Gorman11e33f62009-06-16 15:31:57 -07003436
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003437 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
Mel Gormane1a55632016-07-28 15:46:26 -07003438 ac->high_zoneidx, ac->nodemask) {
3439 if (last_pgdat != zone->zone_pgdat)
Mel Gorman52e9f872016-07-28 15:46:29 -07003440 wakeup_kswapd(zone, order, ac->high_zoneidx);
Mel Gormane1a55632016-07-28 15:46:26 -07003441 last_pgdat = zone->zone_pgdat;
3442 }
Mel Gorman11e33f62009-06-16 15:31:57 -07003443}
3444
Mel Gormanc6038442016-05-19 17:13:38 -07003445static inline unsigned int
Peter Zijlstra341ce062009-06-16 15:32:02 -07003446gfp_to_alloc_flags(gfp_t gfp_mask)
3447{
Mel Gormanc6038442016-05-19 17:13:38 -07003448 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
Peter Zijlstra341ce062009-06-16 15:32:02 -07003449
Mel Gormana56f57f2009-06-16 15:32:02 -07003450 /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
Namhyung Kime6223a32010-10-26 14:21:59 -07003451 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
Mel Gormana56f57f2009-06-16 15:32:02 -07003452
Peter Zijlstra341ce062009-06-16 15:32:02 -07003453 /*
3454 * The caller may dip into page reserves a bit more if the caller
3455 * cannot run direct reclaim, or if the caller has realtime scheduling
3456 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
Mel Gormand0164ad2015-11-06 16:28:21 -08003457 * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH).
Peter Zijlstra341ce062009-06-16 15:32:02 -07003458 */
Namhyung Kime6223a32010-10-26 14:21:59 -07003459 alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
Peter Zijlstra341ce062009-06-16 15:32:02 -07003460
Mel Gormand0164ad2015-11-06 16:28:21 -08003461 if (gfp_mask & __GFP_ATOMIC) {
Andrea Arcangeli5c3240d2011-01-13 15:46:49 -08003462 /*
David Rientjesb104a352014-07-30 16:08:24 -07003463 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
3464 * if it can't schedule.
Andrea Arcangeli5c3240d2011-01-13 15:46:49 -08003465 */
David Rientjesb104a352014-07-30 16:08:24 -07003466 if (!(gfp_mask & __GFP_NOMEMALLOC))
Andrea Arcangeli5c3240d2011-01-13 15:46:49 -08003467 alloc_flags |= ALLOC_HARDER;
Peter Zijlstra341ce062009-06-16 15:32:02 -07003468 /*
David Rientjesb104a352014-07-30 16:08:24 -07003469 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
Vladimir Davydov344736f2014-10-20 15:50:30 +04003470 * comment for __cpuset_node_allowed().
Peter Zijlstra341ce062009-06-16 15:32:02 -07003471 */
3472 alloc_flags &= ~ALLOC_CPUSET;
Andrew Mortonc06b1fc2011-01-13 15:47:32 -08003473 } else if (unlikely(rt_task(current)) && !in_interrupt())
Peter Zijlstra341ce062009-06-16 15:32:02 -07003474 alloc_flags |= ALLOC_HARDER;
3475
Bartlomiej Zolnierkiewiczd95ea5d2012-10-08 16:32:05 -07003476#ifdef CONFIG_CMA
David Rientjes43e7a342014-10-09 15:27:25 -07003477 if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
Bartlomiej Zolnierkiewiczd95ea5d2012-10-08 16:32:05 -07003478 alloc_flags |= ALLOC_CMA;
3479#endif
Peter Zijlstra341ce062009-06-16 15:32:02 -07003480 return alloc_flags;
3481}
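/*
 * Note: gfp_to_alloc_flags() translates the caller's gfp mask into ALLOC_*
 * flags once per slowpath entry, e.g. __GFP_HIGH becomes ALLOC_HIGH,
 * __GFP_ATOMIC (without __GFP_NOMEMALLOC) adds ALLOC_HARDER, and movable
 * requests gain ALLOC_CMA. The slowpath below simply does:
 *
 *	alloc_flags = gfp_to_alloc_flags(gfp_mask);
 */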
3482
Mel Gorman072bb0a2012-07-31 16:43:58 -07003483bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
3484{
Vlastimil Babka31a6c192016-07-28 15:49:13 -07003485 if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
3486 return false;
3487
3488 if (gfp_mask & __GFP_MEMALLOC)
3489 return true;
3490 if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
3491 return true;
3492 if (!in_interrupt() &&
3493 ((current->flags & PF_MEMALLOC) ||
3494 unlikely(test_thread_flag(TIF_MEMDIE))))
3495 return true;
3496
3497 return false;
Mel Gorman072bb0a2012-07-31 16:43:58 -07003498}
3499
Michal Hocko0a0337e2016-05-20 16:57:00 -07003500/*
3501 * Maximum number of reclaim retries without any progress before OOM killer
3502 * is considered the only way to move forward.
3503 */
3504#define MAX_RECLAIM_RETRIES 16
3505
3506/*
3507 * Checks whether it makes sense to retry the reclaim to make forward progress
3508 * for the given allocation request.
3509 * The reclaim feedback represented by did_some_progress (any progress during
Michal Hocko7854ea62016-05-20 16:57:09 -07003510 * the last reclaim round) and no_progress_loops (number of reclaim rounds without
3511 * any progress in a row) is considered as well as the reclaimable pages on the
3512 * applicable zone list (with a backoff mechanism which is a function of
3513 * no_progress_loops).
Michal Hocko0a0337e2016-05-20 16:57:00 -07003514 *
3515 * Returns true if a retry is viable or false to enter the oom path.
3516 */
3517static inline bool
3518should_reclaim_retry(gfp_t gfp_mask, unsigned order,
3519 struct alloc_context *ac, int alloc_flags,
Vlastimil Babka423b4522016-10-07 17:00:40 -07003520 bool did_some_progress, int *no_progress_loops)
Michal Hocko0a0337e2016-05-20 16:57:00 -07003521{
3522 struct zone *zone;
3523 struct zoneref *z;
3524
3525 /*
Vlastimil Babka423b4522016-10-07 17:00:40 -07003526	 * Costly allocations might have made progress but this doesn't mean
3527 * their order will become available due to high fragmentation so
3528 * always increment the no progress counter for them
3529 */
3530 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
3531 *no_progress_loops = 0;
3532 else
3533 (*no_progress_loops)++;
3534
3535 /*
Michal Hocko0a0337e2016-05-20 16:57:00 -07003536 * Make sure we converge to OOM if we cannot make any progress
3537 * several times in the row.
3538 */
Minchan Kim34bd01b2016-12-12 16:42:11 -08003539 if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
3540 /* Before OOM, exhaust highatomic_reserve */
Minchan Kim8ddf5f92016-12-12 16:42:14 -08003541 return unreserve_highatomic_pageblock(ac, true);
Minchan Kim34bd01b2016-12-12 16:42:11 -08003542 }
Michal Hocko0a0337e2016-05-20 16:57:00 -07003543
Michal Hocko0a0337e2016-05-20 16:57:00 -07003544 /*
Mel Gormanbca67592016-07-28 15:47:05 -07003545 * Keep reclaiming pages while there is a chance this will lead
3546 * somewhere. If none of the target zones can satisfy our allocation
3547 * request even if all reclaimable pages are considered then we are
3548 * screwed and have to go OOM.
Michal Hocko0a0337e2016-05-20 16:57:00 -07003549 */
3550 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
3551 ac->nodemask) {
3552 unsigned long available;
Michal Hockoede37712016-05-20 16:57:03 -07003553 unsigned long reclaimable;
Michal Hocko0a0337e2016-05-20 16:57:00 -07003554
Mel Gorman5a1c84b2016-07-28 15:47:31 -07003555 available = reclaimable = zone_reclaimable_pages(zone);
Vlastimil Babka423b4522016-10-07 17:00:40 -07003556 available -= DIV_ROUND_UP((*no_progress_loops) * available,
Michal Hocko0a0337e2016-05-20 16:57:00 -07003557 MAX_RECLAIM_RETRIES);
Mel Gorman5a1c84b2016-07-28 15:47:31 -07003558 available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
Michal Hocko0a0337e2016-05-20 16:57:00 -07003559
3560 /*
3561 * Would the allocation succeed if we reclaimed the whole
Mel Gorman5a1c84b2016-07-28 15:47:31 -07003562 * available?
Michal Hocko0a0337e2016-05-20 16:57:00 -07003563 */
Mel Gorman5a1c84b2016-07-28 15:47:31 -07003564 if (__zone_watermark_ok(zone, order, min_wmark_pages(zone),
3565 ac_classzone_idx(ac), alloc_flags, available)) {
Michal Hockoede37712016-05-20 16:57:03 -07003566 /*
3567 * If we didn't make any progress and have a lot of
3568 * dirty + writeback pages then we should wait for
3569 * an IO to complete to slow down the reclaim and
3570	 * prevent a premature OOM
3571 */
3572 if (!did_some_progress) {
Mel Gorman11fb9982016-07-28 15:46:20 -07003573 unsigned long write_pending;
Michal Hockoede37712016-05-20 16:57:03 -07003574
Mel Gorman5a1c84b2016-07-28 15:47:31 -07003575 write_pending = zone_page_state_snapshot(zone,
3576 NR_ZONE_WRITE_PENDING);
Michal Hockoede37712016-05-20 16:57:03 -07003577
Mel Gorman11fb9982016-07-28 15:46:20 -07003578 if (2 * write_pending > reclaimable) {
Michal Hockoede37712016-05-20 16:57:03 -07003579 congestion_wait(BLK_RW_ASYNC, HZ/10);
3580 return true;
3581 }
3582 }
Mel Gorman5a1c84b2016-07-28 15:47:31 -07003583
Michal Hockoede37712016-05-20 16:57:03 -07003584 /*
3585 * Memory allocation/reclaim might be called from a WQ
3586 * context and the current implementation of the WQ
3587 * concurrency control doesn't recognize that
3588 * a particular WQ is congested if the worker thread is
3589 * looping without ever sleeping. Therefore we have to
3590 * do a short sleep here rather than calling
3591 * cond_resched().
3592 */
3593 if (current->flags & PF_WQ_WORKER)
3594 schedule_timeout_uninterruptible(1);
3595 else
3596 cond_resched();
3597
Michal Hocko0a0337e2016-05-20 16:57:00 -07003598 return true;
3599 }
3600 }
3601
3602 return false;
3603}
3604
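/*
 * Illustrative sketch (userspace C, not part of the kernel source): the
 * linear backoff in should_reclaim_retry() above discounts reclaimable
 * pages more aggressively as no_progress_loops grows. MAX_RECLAIM_RETRIES
 * is assumed to be 16, its usual value.
 */
#include <stdio.h>

#define MAX_RECLAIM_RETRIES 16
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long reclaimable = 1000;	/* pages the zone could reclaim */
	int loops;

	for (loops = 0; loops <= MAX_RECLAIM_RETRIES; loops++) {
		unsigned long available = reclaimable;

		/* the same discount as in should_reclaim_retry() */
		available -= DIV_ROUND_UP(loops * available,
					  MAX_RECLAIM_RETRIES);
		printf("loops=%2d -> counted available=%lu\n",
		       loops, available);
	}
	return 0;	/* at loops == MAX_RECLAIM_RETRIES, nothing counts */
}
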
Mel Gorman11e33f62009-06-16 15:31:57 -07003605static inline struct page *
3606__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003607 struct alloc_context *ac)
Mel Gorman11e33f62009-06-16 15:31:57 -07003608{
Mel Gormand0164ad2015-11-06 16:28:21 -08003609 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
Mel Gorman11e33f62009-06-16 15:31:57 -07003610 struct page *page = NULL;
Mel Gormanc6038442016-05-19 17:13:38 -07003611 unsigned int alloc_flags;
Mel Gorman11e33f62009-06-16 15:31:57 -07003612 unsigned long did_some_progress;
Vlastimil Babkab678e4f2017-01-24 15:18:38 -08003613 enum compact_priority compact_priority;
Michal Hockoc5d01d02016-05-20 16:56:53 -07003614 enum compact_result compact_result;
Vlastimil Babkab678e4f2017-01-24 15:18:38 -08003615 int compaction_retries;
3616 int no_progress_loops;
Michal Hocko63f53de2016-10-07 17:01:58 -07003617 unsigned long alloc_start = jiffies;
3618 unsigned int stall_timeout = 10 * HZ;
Vlastimil Babkab678e4f2017-01-24 15:18:38 -08003619 unsigned int cpuset_mems_cookie;
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08003620
Christoph Lameter952f3b52006-12-06 20:33:26 -08003621 /*
Mel Gorman72807a72009-06-16 15:32:18 -07003622 * In the slowpath, we sanity check order to avoid ever trying to
3623 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
3624 * be using allocators in order of preference for an area that is
3625 * too large.
3626 */
Mel Gorman1fc28b72009-07-29 15:04:08 -07003627 if (order >= MAX_ORDER) {
3628 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
Mel Gorman72807a72009-06-16 15:32:18 -07003629 return NULL;
Mel Gorman1fc28b72009-07-29 15:04:08 -07003630 }
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08003631
Christoph Lameter952f3b52006-12-06 20:33:26 -08003632 /*
Mel Gormand0164ad2015-11-06 16:28:21 -08003633 * We also sanity check to catch abuse of atomic reserves being used by
3634 * callers that are not in atomic context.
3635 */
3636 if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
3637 (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
3638 gfp_mask &= ~__GFP_ATOMIC;
3639
Vlastimil Babkab678e4f2017-01-24 15:18:38 -08003640retry_cpuset:
3641 compaction_retries = 0;
3642 no_progress_loops = 0;
3643 compact_priority = DEF_COMPACT_PRIORITY;
3644 cpuset_mems_cookie = read_mems_allowed_begin();
Vlastimil Babka96e5cec2017-01-24 15:18:41 -08003645 /*
3646 * We need to recalculate the starting point for the zonelist iterator
3647 * because we might have used different nodemask in the fast path, or
3648 * there was a cpuset modification and we are retrying - otherwise we
3649 * could end up iterating over non-eligible zones endlessly.
3650 */
3651 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
3652 ac->high_zoneidx, ac->nodemask);
3653 if (!ac->preferred_zoneref->zone)
3654 goto nopage;
3655
Vlastimil Babkab678e4f2017-01-24 15:18:38 -08003656
Vlastimil Babka31a6c192016-07-28 15:49:13 -07003657 /*
3658 * The fast path uses conservative alloc_flags to succeed only until
3659 * kswapd needs to be woken up, and to avoid the cost of setting up
3660 * alloc_flags precisely. So we do that now.
3661 */
3662 alloc_flags = gfp_to_alloc_flags(gfp_mask);
3663
Mel Gormand0164ad2015-11-06 16:28:21 -08003664 if (gfp_mask & __GFP_KSWAPD_RECLAIM)
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003665 wake_all_kswapds(order, ac);
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08003666
Paul Jackson9bf22292005-09-06 15:18:12 -07003667 /*
Vlastimil Babka23771232016-07-28 15:49:16 -07003668 * The adjusted alloc_flags might result in immediate success, so try
3669 * that first
3670 */
3671 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3672 if (page)
3673 goto got_pg;
3674
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07003675 /*
3676 * For costly allocations, try direct compaction first, as it's likely
3677 * that we have enough base pages and don't need to reclaim. Don't try
3678 * that for allocations that are allowed to ignore watermarks, as the
3679 * ALLOC_NO_WATERMARKS attempt didn't yet happen.
3680 */
3681 if (can_direct_reclaim && order > PAGE_ALLOC_COSTLY_ORDER &&
3682 !gfp_pfmemalloc_allowed(gfp_mask)) {
3683 page = __alloc_pages_direct_compact(gfp_mask, order,
3684 alloc_flags, ac,
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07003685 INIT_COMPACT_PRIORITY,
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07003686 &compact_result);
3687 if (page)
3688 goto got_pg;
3689
Vlastimil Babka3eb27712016-07-28 15:49:22 -07003690 /*
3691 * Checks for costly allocations with __GFP_NORETRY, which
3692 * includes THP page fault allocations
3693 */
3694 if (gfp_mask & __GFP_NORETRY) {
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07003695 /*
3696 * If compaction is deferred for high-order allocations,
3697 * it is because sync compaction recently failed. If
3698 * this is the case and the caller requested a THP
3699 * allocation, we do not want to heavily disrupt the
3700 * system, so we fail the allocation instead of entering
3701 * direct reclaim.
3702 */
3703 if (compact_result == COMPACT_DEFERRED)
3704 goto nopage;
3705
3706 /*
Vlastimil Babka3eb27712016-07-28 15:49:22 -07003707 * Looks like reclaim/compaction is worth trying, but
3708 * sync compaction could be very expensive, so keep
Vlastimil Babka25160352016-07-28 15:49:25 -07003709 * using async compaction.
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07003710 */
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07003711 compact_priority = INIT_COMPACT_PRIORITY;
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07003712 }
3713 }
Vlastimil Babka23771232016-07-28 15:49:16 -07003714
3715retry:
3716 /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
3717 if (gfp_mask & __GFP_KSWAPD_RECLAIM)
3718 wake_all_kswapds(order, ac);
3719
3720 if (gfp_pfmemalloc_allowed(gfp_mask))
3721 alloc_flags = ALLOC_NO_WATERMARKS;
3722
3723 /*
Mel Gormane46e7b72016-06-03 14:56:01 -07003724 * Reset the zonelist iterators if memory policies can be ignored.
3725 * These allocations are high priority and system rather than user
 3726	 * oriented.
3727 */
Vlastimil Babka23771232016-07-28 15:49:16 -07003728 if (!(alloc_flags & ALLOC_CPUSET) || (alloc_flags & ALLOC_NO_WATERMARKS)) {
Mel Gormane46e7b72016-06-03 14:56:01 -07003729 ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
3730 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
3731 ac->high_zoneidx, ac->nodemask);
3732 }
3733
Vlastimil Babka23771232016-07-28 15:49:16 -07003734 /* Attempt with potentially adjusted zonelist and alloc_flags */
Vlastimil Babka31a6c192016-07-28 15:49:13 -07003735 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08003736 if (page)
3737 goto got_pg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003738
Mel Gormand0164ad2015-11-06 16:28:21 -08003739	/* Caller is not willing to reclaim, so we can't balance anything */
3740 if (!can_direct_reclaim) {
David Rientjesaed0a0e2014-01-21 15:51:12 -08003741 /*
Michal Hocko33d53102016-01-14 15:19:05 -08003742	 * All existing users of __GFP_NOFAIL are blockable, so warn
3743 * of any new users that actually allow this type of allocation
3744 * to fail.
David Rientjesaed0a0e2014-01-21 15:51:12 -08003745 */
3746 WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003747 goto nopage;
David Rientjesaed0a0e2014-01-21 15:51:12 -08003748 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003749
Peter Zijlstra341ce062009-06-16 15:32:02 -07003750 /* Avoid recursion of direct reclaim */
Michal Hocko33d53102016-01-14 15:19:05 -08003751 if (current->flags & PF_MEMALLOC) {
3752 /*
3753 * __GFP_NOFAIL request from this context is rather bizarre
 3754	 * because we cannot reclaim anything and can only loop waiting
 3755	 * for somebody to do the work for us.
3756 */
3757 if (WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
3758 cond_resched();
3759 goto retry;
3760 }
Peter Zijlstra341ce062009-06-16 15:32:02 -07003761 goto nopage;
Michal Hocko33d53102016-01-14 15:19:05 -08003762 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003763
David Rientjes6583bb62009-07-29 15:02:06 -07003764 /* Avoid allocations with no watermarks from looping endlessly */
3765 if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
3766 goto nopage;
3767
David Rientjes8fe78042014-08-06 16:07:54 -07003768
Mel Gorman11e33f62009-06-16 15:31:57 -07003769 /* Try direct reclaim and then allocating */
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003770 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
3771 &did_some_progress);
Mel Gorman11e33f62009-06-16 15:31:57 -07003772 if (page)
3773 goto got_pg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003774
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07003775 /* Try direct compaction and then allocating */
3776 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07003777 compact_priority, &compact_result);
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07003778 if (page)
3779 goto got_pg;
3780
Johannes Weiner90839052015-06-24 16:57:21 -07003781 /* Do not loop if specifically requested */
3782 if (gfp_mask & __GFP_NORETRY)
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07003783 goto nopage;
Johannes Weiner90839052015-06-24 16:57:21 -07003784
Michal Hocko0a0337e2016-05-20 16:57:00 -07003785 /*
3786 * Do not retry costly high order allocations unless they are
3787 * __GFP_REPEAT
3788 */
3789 if (order > PAGE_ALLOC_COSTLY_ORDER && !(gfp_mask & __GFP_REPEAT))
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07003790 goto nopage;
Michal Hocko0a0337e2016-05-20 16:57:00 -07003791
Michal Hocko63f53de2016-10-07 17:01:58 -07003792 /* Make sure we know about allocations which stall for too long */
3793 if (time_after(jiffies, alloc_start + stall_timeout)) {
3794 warn_alloc(gfp_mask,
Tetsuo Handa9e80c712016-11-10 10:46:04 -08003795 "page allocation stalls for %ums, order:%u",
Michal Hocko63f53de2016-10-07 17:01:58 -07003796 jiffies_to_msecs(jiffies-alloc_start), order);
3797 stall_timeout += 10 * HZ;
3798 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003799
Michal Hocko0a0337e2016-05-20 16:57:00 -07003800 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
Vlastimil Babka423b4522016-10-07 17:00:40 -07003801 did_some_progress > 0, &no_progress_loops))
Michal Hocko0a0337e2016-05-20 16:57:00 -07003802 goto retry;
3803
Michal Hocko33c2d212016-05-20 16:57:06 -07003804 /*
 3805	 * It doesn't make any sense to retry compaction if the order-0
 3806	 * reclaim is not able to make any progress, because the current
 3807	 * implementation of compaction depends on a sufficient amount
 3808	 * of free memory (see __compaction_suitable).
3809 */
3810 if (did_some_progress > 0 &&
Michal Hocko86a294a2016-05-20 16:57:12 -07003811 should_compact_retry(ac, order, alloc_flags,
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07003812 compact_result, &compact_priority,
Vlastimil Babkad9436492016-10-07 17:00:31 -07003813 &compaction_retries))
Michal Hocko33c2d212016-05-20 16:57:06 -07003814 goto retry;
3815
Vlastimil Babka96e5cec2017-01-24 15:18:41 -08003816 /*
 3817	 * It's possible we raced with a cpuset update, so the OOM would be
 3818	 * premature (see the nopage: label below for the full explanation).
3819 */
3820 if (read_mems_allowed_retry(cpuset_mems_cookie))
3821 goto retry_cpuset;
3822
Johannes Weiner90839052015-06-24 16:57:21 -07003823 /* Reclaim has failed us, start killing things */
3824 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
3825 if (page)
3826 goto got_pg;
3827
3828 /* Retry as long as the OOM killer is making progress */
Michal Hocko0a0337e2016-05-20 16:57:00 -07003829 if (did_some_progress) {
3830 no_progress_loops = 0;
Johannes Weiner90839052015-06-24 16:57:21 -07003831 goto retry;
Michal Hocko0a0337e2016-05-20 16:57:00 -07003832 }
Johannes Weiner90839052015-06-24 16:57:21 -07003833
Linus Torvalds1da177e2005-04-16 15:20:36 -07003834nopage:
Vlastimil Babkab678e4f2017-01-24 15:18:38 -08003835 /*
Vlastimil Babka96e5cec2017-01-24 15:18:41 -08003836 * When updating a task's mems_allowed or mempolicy nodemask, it is
3837 * possible to race with parallel threads in such a way that our
3838 * allocation can fail while the mask is being updated. If we are about
3839 * to fail, check if the cpuset changed during allocation and if so,
3840 * retry.
Vlastimil Babkab678e4f2017-01-24 15:18:38 -08003841 */
3842 if (read_mems_allowed_retry(cpuset_mems_cookie))
3843 goto retry_cpuset;
3844
Michal Hocko7877cdc2016-10-07 17:01:55 -07003845 warn_alloc(gfp_mask,
3846 "page allocation failure: order:%u", order);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003847got_pg:
Mel Gorman072bb0a2012-07-31 16:43:58 -07003848 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003849}
Mel Gorman11e33f62009-06-16 15:31:57 -07003850
3851/*
3852 * This is the 'heart' of the zoned buddy allocator.
3853 */
3854struct page *
3855__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
3856 struct zonelist *zonelist, nodemask_t *nodemask)
3857{
Mel Gorman5bb1b162016-05-19 17:13:50 -07003858 struct page *page;
Mel Gormane6cbd7f2016-07-28 15:46:50 -07003859 unsigned int alloc_flags = ALLOC_WMARK_LOW;
Mel Gorman83d4ca82016-05-19 17:13:56 -07003860 gfp_t alloc_mask = gfp_mask; /* The gfp_t that was actually used for allocation */
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003861 struct alloc_context ac = {
3862 .high_zoneidx = gfp_zone(gfp_mask),
Mel Gorman682a3382016-05-19 17:13:30 -07003863 .zonelist = zonelist,
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003864 .nodemask = nodemask,
3865 .migratetype = gfpflags_to_migratetype(gfp_mask),
3866 };
Mel Gorman11e33f62009-06-16 15:31:57 -07003867
Mel Gorman682a3382016-05-19 17:13:30 -07003868 if (cpusets_enabled()) {
Mel Gorman83d4ca82016-05-19 17:13:56 -07003869 alloc_mask |= __GFP_HARDWALL;
Mel Gorman682a3382016-05-19 17:13:30 -07003870 alloc_flags |= ALLOC_CPUSET;
3871 if (!ac.nodemask)
3872 ac.nodemask = &cpuset_current_mems_allowed;
3873 }
3874
Benjamin Herrenschmidtdcce2842009-06-18 13:24:12 +10003875 gfp_mask &= gfp_allowed_mask;
3876
Mel Gorman11e33f62009-06-16 15:31:57 -07003877 lockdep_trace_alloc(gfp_mask);
3878
Mel Gormand0164ad2015-11-06 16:28:21 -08003879 might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
Mel Gorman11e33f62009-06-16 15:31:57 -07003880
3881 if (should_fail_alloc_page(gfp_mask, order))
3882 return NULL;
3883
3884 /*
 3885	 * Check that the zones suitable for the gfp_mask contain at least one
3886 * valid zone. It's possible to have an empty zonelist as a result
David Rientjes4167e9b2015-04-14 15:46:55 -07003887 * of __GFP_THISNODE and a memoryless node
Mel Gorman11e33f62009-06-16 15:31:57 -07003888 */
3889 if (unlikely(!zonelist->_zonerefs->zone))
3890 return NULL;
3891
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003892 if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE)
Vlastimil Babka21bb9bd2014-10-09 15:26:51 -07003893 alloc_flags |= ALLOC_CMA;
3894
Mel Gormanc9ab0c42015-11-06 16:28:12 -08003895 /* Dirty zone balancing only done in the fast path */
3896 ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE);
3897
Mel Gormane46e7b72016-06-03 14:56:01 -07003898 /*
3899 * The preferred zone is used for statistics but crucially it is
3900 * also used as the starting point for the zonelist iterator. It
3901 * may get reset for allocations that ignore memory policies.
3902 */
Mel Gormanc33d6c02016-05-19 17:14:10 -07003903 ac.preferred_zoneref = first_zones_zonelist(ac.zonelist,
3904 ac.high_zoneidx, ac.nodemask);
Vlastimil Babkaade7afe2017-01-24 15:18:32 -08003905 if (!ac.preferred_zoneref->zone) {
Mel Gorman5bb1b162016-05-19 17:13:50 -07003906 page = NULL;
Vlastimil Babkab678e4f2017-01-24 15:18:38 -08003907 /*
 3908	 * This might be due to a race with a cpuset_current_mems_allowed
 3909	 * update, so make sure we retry with the original nodemask in the
3910 * slow path.
3911 */
Mel Gorman4fcb0972016-05-19 17:14:01 -07003912 goto no_zone;
Mel Gorman5bb1b162016-05-19 17:13:50 -07003913 }
3914
Mel Gorman5117f452009-06-16 15:31:59 -07003915 /* First allocation attempt */
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003916 page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
Mel Gorman4fcb0972016-05-19 17:14:01 -07003917 if (likely(page))
3918 goto out;
Andrew Morton91fbdc02015-02-11 15:25:04 -08003919
Vlastimil Babkab678e4f2017-01-24 15:18:38 -08003920no_zone:
Mel Gorman4fcb0972016-05-19 17:14:01 -07003921 /*
3922 * Runtime PM, block IO and its error handling path can deadlock
3923 * because I/O on the device might not complete.
3924 */
3925 alloc_mask = memalloc_noio_flags(gfp_mask);
3926 ac.spread_dirty_pages = false;
Mel Gorman11e33f62009-06-16 15:31:57 -07003927
Mel Gorman47415262016-05-19 17:14:44 -07003928 /*
3929 * Restore the original nodemask if it was potentially replaced with
3930 * &cpuset_current_mems_allowed to optimize the fast-path attempt.
3931 */
Vlastimil Babka96e5cec2017-01-24 15:18:41 -08003932 if (unlikely(ac.nodemask != nodemask))
Mel Gorman47415262016-05-19 17:14:44 -07003933 ac.nodemask = nodemask;
Xishi Qiu23f086f2015-02-11 15:25:07 -08003934
Mel Gormancc9a6c82012-03-21 16:34:11 -07003935 page = __alloc_pages_slowpath(alloc_mask, order, &ac);
Mel Gormancc9a6c82012-03-21 16:34:11 -07003936
Mel Gorman4fcb0972016-05-19 17:14:01 -07003937out:
Vladimir Davydovc4159a72016-08-08 23:03:12 +03003938 if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
3939 unlikely(memcg_kmem_charge(page, gfp_mask, order) != 0)) {
3940 __free_pages(page, order);
3941 page = NULL;
Vladimir Davydov49491482016-07-26 15:24:24 -07003942 }
3943
Mel Gorman4fcb0972016-05-19 17:14:01 -07003944 if (kmemcheck_enabled && page)
3945 kmemcheck_pagealloc_alloc(page, order, gfp_mask);
3946
3947 trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
3948
Mel Gorman11e33f62009-06-16 15:31:57 -07003949 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003950}
Mel Gormand2391712009-06-16 15:31:52 -07003951EXPORT_SYMBOL(__alloc_pages_nodemask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003952
3953/*
3954 * Common helper functions.
3955 */
Harvey Harrison920c7a52008-02-04 22:29:26 -08003956unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003957{
Akinobu Mita945a1112009-09-21 17:01:47 -07003958 struct page *page;
3959
3960 /*
3961 * __get_free_pages() returns a 32-bit address, which cannot represent
3962 * a highmem page
3963 */
3964 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
3965
Linus Torvalds1da177e2005-04-16 15:20:36 -07003966 page = alloc_pages(gfp_mask, order);
3967 if (!page)
3968 return 0;
3969 return (unsigned long) page_address(page);
3970}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003971EXPORT_SYMBOL(__get_free_pages);
3972
Harvey Harrison920c7a52008-02-04 22:29:26 -08003973unsigned long get_zeroed_page(gfp_t gfp_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003974{
Akinobu Mita945a1112009-09-21 17:01:47 -07003975 return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003976}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003977EXPORT_SYMBOL(get_zeroed_page);
3978
Harvey Harrison920c7a52008-02-04 22:29:26 -08003979void __free_pages(struct page *page, unsigned int order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003980{
Nick Pigginb5810032005-10-29 18:16:12 -07003981 if (put_page_testzero(page)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003982 if (order == 0)
Mel Gormanb745bc82014-06-04 16:10:22 -07003983 free_hot_cold_page(page, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003984 else
3985 __free_pages_ok(page, order);
3986 }
3987}
3988
3989EXPORT_SYMBOL(__free_pages);
3990
Harvey Harrison920c7a52008-02-04 22:29:26 -08003991void free_pages(unsigned long addr, unsigned int order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003992{
3993 if (addr != 0) {
Nick Piggin725d7042006-09-25 23:30:55 -07003994 VM_BUG_ON(!virt_addr_valid((void *)addr));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003995 __free_pages(virt_to_page((void *)addr), order);
3996 }
3997}
3998
3999EXPORT_SYMBOL(free_pages);
4000
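/*
 * Usage sketch (assumed module context; illustrative, not part of the
 * kernel source): grab an order-1 block with __get_free_pages(), a
 * zeroed single page with get_zeroed_page(), and release both with
 * free_pages()/free_page().
 */
#include <linux/gfp.h>
#include <linux/errno.h>

static int page_helpers_demo(void)
{
	unsigned long buf, zpage;

	buf = __get_free_pages(GFP_KERNEL, 1);	/* 2 contiguous pages */
	if (!buf)
		return -ENOMEM;

	zpage = get_zeroed_page(GFP_KERNEL);	/* 1 page, pre-zeroed */
	if (!zpage) {
		free_pages(buf, 1);
		return -ENOMEM;
	}

	/* ... use the buffers ... */

	free_page(zpage);	/* shorthand for free_pages(zpage, 0) */
	free_pages(buf, 1);	/* the order must match the allocation */
	return 0;
}
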
Glauber Costa6a1a0d32012-12-18 14:22:00 -08004001/*
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07004002 * Page Fragment:
4003 * An arbitrary-length arbitrary-offset area of memory which resides
4004 * within a 0 or higher order page. Multiple fragments within that page
 4005	 * are individually refcounted via the page's reference counter.
4006 *
4007 * The page_frag functions below provide a simple allocation framework for
4008 * page fragments. This is used by the network stack and network device
4009 * drivers to provide a backing region of memory for use as either an
4010 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
4011 */
4012static struct page *__page_frag_refill(struct page_frag_cache *nc,
4013 gfp_t gfp_mask)
4014{
4015 struct page *page = NULL;
4016 gfp_t gfp = gfp_mask;
4017
4018#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4019 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
4020 __GFP_NOMEMALLOC;
4021 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
4022 PAGE_FRAG_CACHE_MAX_ORDER);
4023 nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
4024#endif
4025 if (unlikely(!page))
4026 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
4027
4028 nc->va = page ? page_address(page) : NULL;
4029
4030 return page;
4031}
4032
4033void *__alloc_page_frag(struct page_frag_cache *nc,
4034 unsigned int fragsz, gfp_t gfp_mask)
4035{
4036 unsigned int size = PAGE_SIZE;
4037 struct page *page;
4038 int offset;
4039
4040 if (unlikely(!nc->va)) {
4041refill:
4042 page = __page_frag_refill(nc, gfp_mask);
4043 if (!page)
4044 return NULL;
4045
4046#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
 4047		/* if size can vary, use it; else just use PAGE_SIZE */
4048 size = nc->size;
4049#endif
4050 /* Even if we own the page, we do not use atomic_set().
4051 * This would break get_page_unless_zero() users.
4052 */
Joonsoo Kimfe896d12016-03-17 14:19:26 -07004053 page_ref_add(page, size - 1);
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07004054
4055 /* reset page count bias and offset to start of new frag */
Michal Hocko2f064f32015-08-21 14:11:51 -07004056 nc->pfmemalloc = page_is_pfmemalloc(page);
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07004057 nc->pagecnt_bias = size;
4058 nc->offset = size;
4059 }
4060
4061 offset = nc->offset - fragsz;
4062 if (unlikely(offset < 0)) {
4063 page = virt_to_page(nc->va);
4064
Joonsoo Kimfe896d12016-03-17 14:19:26 -07004065 if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07004066 goto refill;
4067
4068#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
 4069		/* if size can vary, use it; else just use PAGE_SIZE */
4070 size = nc->size;
4071#endif
4072 /* OK, page count is 0, we can safely set it */
Joonsoo Kimfe896d12016-03-17 14:19:26 -07004073 set_page_count(page, size);
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07004074
4075 /* reset page count bias and offset to start of new frag */
4076 nc->pagecnt_bias = size;
4077 offset = size - fragsz;
4078 }
4079
4080 nc->pagecnt_bias--;
4081 nc->offset = offset;
4082
4083 return nc->va + offset;
4084}
4085EXPORT_SYMBOL(__alloc_page_frag);
4086
4087/*
4088 * Frees a page fragment allocated out of either a compound or order 0 page.
4089 */
4090void __free_page_frag(void *addr)
4091{
4092 struct page *page = virt_to_head_page(addr);
4093
4094 if (unlikely(put_page_testzero(page)))
4095 __free_pages_ok(page, compound_order(page));
4096}
4097EXPORT_SYMBOL(__free_page_frag);
4098
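/*
 * Usage sketch (assumed caller such as a network driver; illustrative,
 * not part of the kernel source): carve two small fragments out of one
 * page_frag_cache and release them individually. The cache must start
 * out zeroed.
 */
#include <linux/gfp.h>
#include <linux/errno.h>

static struct page_frag_cache demo_frag_cache;	/* zeroed in .bss */

static int page_frag_demo(void)
{
	void *a, *b;

	a = __alloc_page_frag(&demo_frag_cache, 256, GFP_ATOMIC);
	if (!a)
		return -ENOMEM;

	b = __alloc_page_frag(&demo_frag_cache, 512, GFP_ATOMIC);
	if (!b) {
		__free_page_frag(a);
		return -ENOMEM;
	}

	/* both fragments share one refcounted backing page */
	__free_page_frag(b);
	__free_page_frag(a);
	return 0;
}
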
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08004099static void *make_alloc_exact(unsigned long addr, unsigned int order,
4100 size_t size)
Andi Kleenee85c2e2011-05-11 15:13:34 -07004101{
4102 if (addr) {
4103 unsigned long alloc_end = addr + (PAGE_SIZE << order);
4104 unsigned long used = addr + PAGE_ALIGN(size);
4105
4106 split_page(virt_to_page((void *)addr), order);
4107 while (used < alloc_end) {
4108 free_page(used);
4109 used += PAGE_SIZE;
4110 }
4111 }
4112 return (void *)addr;
4113}
4114
Timur Tabi2be0ffe2008-07-23 21:28:11 -07004115/**
 4116	 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
4117 * @size: the number of bytes to allocate
4118 * @gfp_mask: GFP flags for the allocation
4119 *
4120 * This function is similar to alloc_pages(), except that it allocates the
4121 * minimum number of pages to satisfy the request. alloc_pages() can only
4122 * allocate memory in power-of-two pages.
4123 *
4124 * This function is also limited by MAX_ORDER.
4125 *
4126 * Memory allocated by this function must be released by free_pages_exact().
4127 */
4128void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
4129{
4130 unsigned int order = get_order(size);
4131 unsigned long addr;
4132
4133 addr = __get_free_pages(gfp_mask, order);
Andi Kleenee85c2e2011-05-11 15:13:34 -07004134 return make_alloc_exact(addr, order, size);
Timur Tabi2be0ffe2008-07-23 21:28:11 -07004135}
4136EXPORT_SYMBOL(alloc_pages_exact);
4137
4138/**
Andi Kleenee85c2e2011-05-11 15:13:34 -07004139 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
4140 * pages on a node.
Randy Dunlapb5e6ab52011-05-16 13:16:54 -07004141 * @nid: the preferred node ID where memory should be allocated
Andi Kleenee85c2e2011-05-11 15:13:34 -07004142 * @size: the number of bytes to allocate
4143 * @gfp_mask: GFP flags for the allocation
4144 *
4145 * Like alloc_pages_exact(), but try to allocate on node nid first before falling
4146 * back.
Andi Kleenee85c2e2011-05-11 15:13:34 -07004147 */
Fabian Fredericke1931812014-08-06 16:04:59 -07004148void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
Andi Kleenee85c2e2011-05-11 15:13:34 -07004149{
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08004150 unsigned int order = get_order(size);
Andi Kleenee85c2e2011-05-11 15:13:34 -07004151 struct page *p = alloc_pages_node(nid, gfp_mask, order);
4152 if (!p)
4153 return NULL;
4154 return make_alloc_exact((unsigned long)page_address(p), order, size);
4155}
Andi Kleenee85c2e2011-05-11 15:13:34 -07004156
4157/**
Timur Tabi2be0ffe2008-07-23 21:28:11 -07004158 * free_pages_exact - release memory allocated via alloc_pages_exact()
4159 * @virt: the value returned by alloc_pages_exact.
4160 * @size: size of allocation, same value as passed to alloc_pages_exact().
4161 *
4162 * Release the memory allocated by a previous call to alloc_pages_exact.
4163 */
4164void free_pages_exact(void *virt, size_t size)
4165{
4166 unsigned long addr = (unsigned long)virt;
4167 unsigned long end = addr + PAGE_ALIGN(size);
4168
4169 while (addr < end) {
4170 free_page(addr);
4171 addr += PAGE_SIZE;
4172 }
4173}
4174EXPORT_SYMBOL(free_pages_exact);
4175
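/*
 * Usage sketch (assumed module context; illustrative, not part of the
 * kernel source): alloc_pages_exact() rounds the request up to an
 * order-2 block here but hands the unused fourth page back, so only
 * three pages stay pinned.
 */
#include <linux/gfp.h>
#include <linux/mm.h>

static int pages_exact_demo(void)
{
	size_t sz = 3 * PAGE_SIZE;
	void *buf = alloc_pages_exact(sz, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/* ... use buf as an ordinary virtually-contiguous buffer ... */

	free_pages_exact(buf, sz);	/* size must match the allocation */
	return 0;
}
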
Zhang Yanfeie0fb5812013-02-22 16:35:54 -08004176/**
4177 * nr_free_zone_pages - count number of pages beyond high watermark
4178 * @offset: The zone index of the highest zone
4179 *
 4180	 * nr_free_zone_pages() counts the number of pages which are beyond the
4181 * high watermark within all zones at or below a given zone index. For each
4182 * zone, the number of pages is calculated as:
Jiang Liu834405c2013-07-03 15:03:04 -07004183 * managed_pages - high_pages
Zhang Yanfeie0fb5812013-02-22 16:35:54 -08004184 */
Zhang Yanfeiebec3862013-02-22 16:35:43 -08004185static unsigned long nr_free_zone_pages(int offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004186{
Mel Gormandd1a2392008-04-28 02:12:17 -07004187 struct zoneref *z;
Mel Gorman54a6eb52008-04-28 02:12:16 -07004188 struct zone *zone;
4189
Martin J. Blighe310fd42005-07-29 22:59:18 -07004190 /* Just pick one node, since fallback list is circular */
Zhang Yanfeiebec3862013-02-22 16:35:43 -08004191 unsigned long sum = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004192
Mel Gorman0e884602008-04-28 02:12:14 -07004193 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004194
Mel Gorman54a6eb52008-04-28 02:12:16 -07004195 for_each_zone_zonelist(zone, z, zonelist, offset) {
Jiang Liub40da042013-02-22 16:33:52 -08004196 unsigned long size = zone->managed_pages;
Mel Gorman41858962009-06-16 15:32:12 -07004197 unsigned long high = high_wmark_pages(zone);
Martin J. Blighe310fd42005-07-29 22:59:18 -07004198 if (size > high)
4199 sum += size - high;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004200 }
4201
4202 return sum;
4203}
4204
Zhang Yanfeie0fb5812013-02-22 16:35:54 -08004205/**
4206 * nr_free_buffer_pages - count number of pages beyond high watermark
4207 *
4208 * nr_free_buffer_pages() counts the number of pages which are beyond the high
4209 * watermark within ZONE_DMA and ZONE_NORMAL.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004210 */
Zhang Yanfeiebec3862013-02-22 16:35:43 -08004211unsigned long nr_free_buffer_pages(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004212{
Al Viroaf4ca452005-10-21 02:55:38 -04004213 return nr_free_zone_pages(gfp_zone(GFP_USER));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004214}
Meelap Shahc2f1a552007-07-17 04:04:39 -07004215EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004216
Zhang Yanfeie0fb5812013-02-22 16:35:54 -08004217/**
4218 * nr_free_pagecache_pages - count number of pages beyond high watermark
4219 *
4220 * nr_free_pagecache_pages() counts the number of pages which are beyond the
4221 * high watermark within all zones.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004222 */
Zhang Yanfeiebec3862013-02-22 16:35:43 -08004223unsigned long nr_free_pagecache_pages(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004224{
Mel Gorman2a1e2742007-07-17 04:03:12 -07004225 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004226}
Christoph Lameter08e0f6a2006-09-27 01:50:06 -07004227
4228static inline void show_node(struct zone *zone)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004229{
Kirill A. Shutemove5adfff2012-12-11 16:00:29 -08004230 if (IS_ENABLED(CONFIG_NUMA))
Andy Whitcroft25ba77c2006-12-06 20:33:03 -08004231 printk("Node %d ", zone_to_nid(zone));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004232}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004233
Igor Redkod02bd272016-03-17 14:19:05 -07004234long si_mem_available(void)
4235{
4236 long available;
4237 unsigned long pagecache;
4238 unsigned long wmark_low = 0;
4239 unsigned long pages[NR_LRU_LISTS];
4240 struct zone *zone;
4241 int lru;
4242
4243 for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
Mel Gorman2f95ff92016-08-11 15:32:57 -07004244 pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
Igor Redkod02bd272016-03-17 14:19:05 -07004245
4246 for_each_zone(zone)
4247 wmark_low += zone->watermark[WMARK_LOW];
4248
4249 /*
4250 * Estimate the amount of memory available for userspace allocations,
4251 * without causing swapping.
4252 */
4253 available = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
4254
4255 /*
4256 * Not all the page cache can be freed, otherwise the system will
4257 * start swapping. Assume at least half of the page cache, or the
4258 * low watermark worth of cache, needs to stay.
4259 */
4260 pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
4261 pagecache -= min(pagecache / 2, wmark_low);
4262 available += pagecache;
4263
4264 /*
4265 * Part of the reclaimable slab consists of items that are in use,
4266 * and cannot be freed. Cap this estimate at the low watermark.
4267 */
4268 available += global_page_state(NR_SLAB_RECLAIMABLE) -
4269 min(global_page_state(NR_SLAB_RECLAIMABLE) / 2, wmark_low);
4270
4271 if (available < 0)
4272 available = 0;
4273 return available;
4274}
4275EXPORT_SYMBOL_GPL(si_mem_available);
4276
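/*
 * Back-of-the-envelope sketch of the si_mem_available() estimate above
 * (illustrative userspace C; all page counts are invented):
 *
 *   available = free - totalreserve
 *             + pagecache        - min(pagecache / 2,        wmark_low)
 *             + slab_reclaimable - min(slab_reclaimable / 2, wmark_low)
 */
#include <stdio.h>

static long min_long(long a, long b)
{
	return a < b ? a : b;
}

int main(void)
{
	long free = 50000, totalreserve = 4000, wmark_low = 2000;
	long pagecache = 30000, slab_reclaimable = 8000;
	long available = free - totalreserve;

	available += pagecache - min_long(pagecache / 2, wmark_low);
	available += slab_reclaimable -
		     min_long(slab_reclaimable / 2, wmark_low);
	if (available < 0)
		available = 0;
	printf("estimated available: %ld pages\n", available);	/* 80000 */
	return 0;
}
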
Linus Torvalds1da177e2005-04-16 15:20:36 -07004277void si_meminfo(struct sysinfo *val)
4278{
4279 val->totalram = totalram_pages;
Mel Gorman11fb9982016-07-28 15:46:20 -07004280 val->sharedram = global_node_page_state(NR_SHMEM);
Christoph Lameterd23ad422007-02-10 01:43:02 -08004281 val->freeram = global_page_state(NR_FREE_PAGES);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004282 val->bufferram = nr_blockdev_pages();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004283 val->totalhigh = totalhigh_pages;
4284 val->freehigh = nr_free_highpages();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004285 val->mem_unit = PAGE_SIZE;
4286}
4287
4288EXPORT_SYMBOL(si_meminfo);
4289
4290#ifdef CONFIG_NUMA
4291void si_meminfo_node(struct sysinfo *val, int nid)
4292{
Jiang Liucdd91a72013-07-03 15:03:27 -07004293 int zone_type; /* needs to be signed */
4294 unsigned long managed_pages = 0;
Joonsoo Kimfc2bd792016-05-19 17:12:23 -07004295 unsigned long managed_highpages = 0;
4296 unsigned long free_highpages = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004297 pg_data_t *pgdat = NODE_DATA(nid);
4298
Jiang Liucdd91a72013-07-03 15:03:27 -07004299 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
4300 managed_pages += pgdat->node_zones[zone_type].managed_pages;
4301 val->totalram = managed_pages;
Mel Gorman11fb9982016-07-28 15:46:20 -07004302 val->sharedram = node_page_state(pgdat, NR_SHMEM);
Mel Gorman75ef7182016-07-28 15:45:24 -07004303 val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES);
Christoph Lameter98d2b0e2006-09-25 23:31:12 -07004304#ifdef CONFIG_HIGHMEM
Joonsoo Kimfc2bd792016-05-19 17:12:23 -07004305 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
4306 struct zone *zone = &pgdat->node_zones[zone_type];
4307
4308 if (is_highmem(zone)) {
4309 managed_highpages += zone->managed_pages;
4310 free_highpages += zone_page_state(zone, NR_FREE_PAGES);
4311 }
4312 }
4313 val->totalhigh = managed_highpages;
4314 val->freehigh = free_highpages;
Christoph Lameter98d2b0e2006-09-25 23:31:12 -07004315#else
Joonsoo Kimfc2bd792016-05-19 17:12:23 -07004316 val->totalhigh = managed_highpages;
4317 val->freehigh = free_highpages;
Christoph Lameter98d2b0e2006-09-25 23:31:12 -07004318#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07004319 val->mem_unit = PAGE_SIZE;
4320}
4321#endif
4322
David Rientjesddd588b2011-03-22 16:30:46 -07004323/*
David Rientjes7bf02ea2011-05-24 17:11:16 -07004324 * Determine whether the node should be displayed or not, depending on whether
4325 * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
David Rientjesddd588b2011-03-22 16:30:46 -07004326 */
David Rientjes7bf02ea2011-05-24 17:11:16 -07004327bool skip_free_areas_node(unsigned int flags, int nid)
David Rientjesddd588b2011-03-22 16:30:46 -07004328{
4329 bool ret = false;
Mel Gormancc9a6c82012-03-21 16:34:11 -07004330 unsigned int cpuset_mems_cookie;
David Rientjesddd588b2011-03-22 16:30:46 -07004331
4332 if (!(flags & SHOW_MEM_FILTER_NODES))
4333 goto out;
4334
Mel Gormancc9a6c82012-03-21 16:34:11 -07004335 do {
Mel Gormand26914d2014-04-03 14:47:24 -07004336 cpuset_mems_cookie = read_mems_allowed_begin();
Mel Gormancc9a6c82012-03-21 16:34:11 -07004337 ret = !node_isset(nid, cpuset_current_mems_allowed);
Mel Gormand26914d2014-04-03 14:47:24 -07004338 } while (read_mems_allowed_retry(cpuset_mems_cookie));
David Rientjesddd588b2011-03-22 16:30:46 -07004339out:
4340 return ret;
4341}
4342
Linus Torvalds1da177e2005-04-16 15:20:36 -07004343#define K(x) ((x) << (PAGE_SHIFT-10))
4344
Rabin Vincent377e4f12012-12-11 16:00:24 -08004345static void show_migration_types(unsigned char type)
4346{
4347 static const char types[MIGRATE_TYPES] = {
4348 [MIGRATE_UNMOVABLE] = 'U',
Rabin Vincent377e4f12012-12-11 16:00:24 -08004349 [MIGRATE_MOVABLE] = 'M',
Vlastimil Babka475a2f92015-12-11 13:40:29 -08004350 [MIGRATE_RECLAIMABLE] = 'E',
4351 [MIGRATE_HIGHATOMIC] = 'H',
Rabin Vincent377e4f12012-12-11 16:00:24 -08004352#ifdef CONFIG_CMA
4353 [MIGRATE_CMA] = 'C',
4354#endif
Minchan Kim194159f2013-02-22 16:33:58 -08004355#ifdef CONFIG_MEMORY_ISOLATION
Rabin Vincent377e4f12012-12-11 16:00:24 -08004356 [MIGRATE_ISOLATE] = 'I',
Minchan Kim194159f2013-02-22 16:33:58 -08004357#endif
Rabin Vincent377e4f12012-12-11 16:00:24 -08004358 };
4359 char tmp[MIGRATE_TYPES + 1];
4360 char *p = tmp;
4361 int i;
4362
4363 for (i = 0; i < MIGRATE_TYPES; i++) {
4364 if (type & (1 << i))
4365 *p++ = types[i];
4366 }
4367
4368 *p = '\0';
Joe Perches1f84a182016-10-27 17:46:29 -07004369 printk(KERN_CONT "(%s) ", tmp);
Rabin Vincent377e4f12012-12-11 16:00:24 -08004370}
4371
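/*
 * Decode sketch (illustrative userspace C, not part of the kernel
 * source): the bitmask handed to show_migration_types() carries one bit
 * per migratetype found on a free list. The loop mirrors the one above;
 * the index-to-letter mapping here is illustrative only.
 */
#include <stdio.h>

int main(void)
{
	static const char types[] = { 'U', 'M', 'E', 'H' };
	unsigned char mask = (1 << 0) | (1 << 2);	/* two types present */
	char tmp[sizeof(types) + 1];
	char *p = tmp;
	int i;

	for (i = 0; i < (int)sizeof(types); i++) {
		if (mask & (1 << i))
			*p++ = types[i];
	}
	*p = '\0';
	printf("(%s)\n", tmp);	/* prints "(UE)" */
	return 0;
}
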
Linus Torvalds1da177e2005-04-16 15:20:36 -07004372/*
4373 * Show free area list (used inside shift_scroll-lock stuff)
4374 * We also calculate the percentage fragmentation. We do this by counting the
4375 * memory on each free list with the exception of the first item on the list.
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07004376 *
4377 * Bits in @filter:
4378 * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
4379 * cpuset.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004380 */
David Rientjes7bf02ea2011-05-24 17:11:16 -07004381void show_free_areas(unsigned int filter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004382{
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07004383 unsigned long free_pcp = 0;
Jes Sorensenc7241912006-09-27 01:50:05 -07004384 int cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004385 struct zone *zone;
Mel Gorman599d0c92016-07-28 15:45:31 -07004386 pg_data_t *pgdat;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004387
KOSAKI Motohiroee99c712009-03-31 15:19:31 -07004388 for_each_populated_zone(zone) {
David Rientjes7bf02ea2011-05-24 17:11:16 -07004389 if (skip_free_areas_node(filter, zone_to_nid(zone)))
David Rientjesddd588b2011-03-22 16:30:46 -07004390 continue;
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07004391
Konstantin Khlebnikov761b0672015-04-14 15:45:32 -07004392 for_each_online_cpu(cpu)
4393 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004394 }
4395
KOSAKI Motohiroa7312862009-09-21 17:01:37 -07004396 printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
4397 " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07004398 " unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n"
4399 " slab_reclaimable:%lu slab_unreclaimable:%lu\n"
Bartlomiej Zolnierkiewiczd1ce7492012-10-08 16:32:02 -07004400 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07004401 " free:%lu free_pcp:%lu free_cma:%lu\n",
Mel Gorman599d0c92016-07-28 15:45:31 -07004402 global_node_page_state(NR_ACTIVE_ANON),
4403 global_node_page_state(NR_INACTIVE_ANON),
4404 global_node_page_state(NR_ISOLATED_ANON),
4405 global_node_page_state(NR_ACTIVE_FILE),
4406 global_node_page_state(NR_INACTIVE_FILE),
4407 global_node_page_state(NR_ISOLATED_FILE),
4408 global_node_page_state(NR_UNEVICTABLE),
Mel Gorman11fb9982016-07-28 15:46:20 -07004409 global_node_page_state(NR_FILE_DIRTY),
4410 global_node_page_state(NR_WRITEBACK),
4411 global_node_page_state(NR_UNSTABLE_NFS),
KOSAKI Motohiro3701b032009-09-21 17:01:29 -07004412 global_page_state(NR_SLAB_RECLAIMABLE),
4413 global_page_state(NR_SLAB_UNRECLAIMABLE),
Mel Gorman50658e22016-07-28 15:46:14 -07004414 global_node_page_state(NR_FILE_MAPPED),
Mel Gorman11fb9982016-07-28 15:46:20 -07004415 global_node_page_state(NR_SHMEM),
Andrew Mortona25700a2007-02-08 14:20:40 -08004416 global_page_state(NR_PAGETABLE),
Bartlomiej Zolnierkiewiczd1ce7492012-10-08 16:32:02 -07004417 global_page_state(NR_BOUNCE),
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07004418 global_page_state(NR_FREE_PAGES),
4419 free_pcp,
Bartlomiej Zolnierkiewiczd1ce7492012-10-08 16:32:02 -07004420 global_page_state(NR_FREE_CMA_PAGES));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004421
Mel Gorman599d0c92016-07-28 15:45:31 -07004422 for_each_online_pgdat(pgdat) {
4423 printk("Node %d"
4424 " active_anon:%lukB"
4425 " inactive_anon:%lukB"
4426 " active_file:%lukB"
4427 " inactive_file:%lukB"
4428 " unevictable:%lukB"
4429 " isolated(anon):%lukB"
4430 " isolated(file):%lukB"
Mel Gorman50658e22016-07-28 15:46:14 -07004431 " mapped:%lukB"
Mel Gorman11fb9982016-07-28 15:46:20 -07004432 " dirty:%lukB"
4433 " writeback:%lukB"
4434 " shmem:%lukB"
4435#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4436 " shmem_thp: %lukB"
4437 " shmem_pmdmapped: %lukB"
4438 " anon_thp: %lukB"
4439#endif
4440 " writeback_tmp:%lukB"
4441 " unstable:%lukB"
Minchan Kim33e077b2016-07-28 15:47:14 -07004442 " pages_scanned:%lu"
Mel Gorman599d0c92016-07-28 15:45:31 -07004443 " all_unreclaimable? %s"
4444 "\n",
4445 pgdat->node_id,
4446 K(node_page_state(pgdat, NR_ACTIVE_ANON)),
4447 K(node_page_state(pgdat, NR_INACTIVE_ANON)),
4448 K(node_page_state(pgdat, NR_ACTIVE_FILE)),
4449 K(node_page_state(pgdat, NR_INACTIVE_FILE)),
4450 K(node_page_state(pgdat, NR_UNEVICTABLE)),
4451 K(node_page_state(pgdat, NR_ISOLATED_ANON)),
4452 K(node_page_state(pgdat, NR_ISOLATED_FILE)),
Mel Gorman50658e22016-07-28 15:46:14 -07004453 K(node_page_state(pgdat, NR_FILE_MAPPED)),
Mel Gorman11fb9982016-07-28 15:46:20 -07004454 K(node_page_state(pgdat, NR_FILE_DIRTY)),
4455 K(node_page_state(pgdat, NR_WRITEBACK)),
Alexander Polakove3b08eb2017-04-07 16:04:45 -07004456 K(node_page_state(pgdat, NR_SHMEM)),
Mel Gorman11fb9982016-07-28 15:46:20 -07004457#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4458 K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR),
4459 K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)
4460 * HPAGE_PMD_NR),
4461 K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR),
4462#endif
Mel Gorman11fb9982016-07-28 15:46:20 -07004463 K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
4464 K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
Minchan Kim33e077b2016-07-28 15:47:14 -07004465 node_page_state(pgdat, NR_PAGES_SCANNED),
Mel Gorman599d0c92016-07-28 15:45:31 -07004466 !pgdat_reclaimable(pgdat) ? "yes" : "no");
4467 }
4468
KOSAKI Motohiroee99c712009-03-31 15:19:31 -07004469 for_each_populated_zone(zone) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004470 int i;
4471
David Rientjes7bf02ea2011-05-24 17:11:16 -07004472 if (skip_free_areas_node(filter, zone_to_nid(zone)))
David Rientjesddd588b2011-03-22 16:30:46 -07004473 continue;
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07004474
4475 free_pcp = 0;
4476 for_each_online_cpu(cpu)
4477 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
4478
Linus Torvalds1da177e2005-04-16 15:20:36 -07004479 show_node(zone);
Joe Perches1f84a182016-10-27 17:46:29 -07004480 printk(KERN_CONT
4481 "%s"
Linus Torvalds1da177e2005-04-16 15:20:36 -07004482 " free:%lukB"
4483 " min:%lukB"
4484 " low:%lukB"
4485 " high:%lukB"
Minchan Kim71c799f2016-07-28 15:47:26 -07004486 " active_anon:%lukB"
4487 " inactive_anon:%lukB"
4488 " active_file:%lukB"
4489 " inactive_file:%lukB"
4490 " unevictable:%lukB"
Mel Gorman5a1c84b2016-07-28 15:47:31 -07004491 " writepending:%lukB"
Linus Torvalds1da177e2005-04-16 15:20:36 -07004492 " present:%lukB"
Jiang Liu9feedc92012-12-12 13:52:12 -08004493 " managed:%lukB"
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07004494 " mlocked:%lukB"
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07004495 " slab_reclaimable:%lukB"
4496 " slab_unreclaimable:%lukB"
KOSAKI Motohiroc6a7f572009-09-21 17:01:32 -07004497 " kernel_stack:%lukB"
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07004498 " pagetables:%lukB"
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07004499 " bounce:%lukB"
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07004500 " free_pcp:%lukB"
4501 " local_pcp:%ukB"
Bartlomiej Zolnierkiewiczd1ce7492012-10-08 16:32:02 -07004502 " free_cma:%lukB"
Linus Torvalds1da177e2005-04-16 15:20:36 -07004503 "\n",
4504 zone->name,
Mel Gorman88f5acf2011-01-13 15:45:41 -08004505 K(zone_page_state(zone, NR_FREE_PAGES)),
Mel Gorman41858962009-06-16 15:32:12 -07004506 K(min_wmark_pages(zone)),
4507 K(low_wmark_pages(zone)),
4508 K(high_wmark_pages(zone)),
Minchan Kim71c799f2016-07-28 15:47:26 -07004509 K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
4510 K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
4511 K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
4512 K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
4513 K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
Mel Gorman5a1c84b2016-07-28 15:47:31 -07004514 K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
Linus Torvalds1da177e2005-04-16 15:20:36 -07004515 K(zone->present_pages),
Jiang Liu9feedc92012-12-12 13:52:12 -08004516 K(zone->managed_pages),
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07004517 K(zone_page_state(zone, NR_MLOCK)),
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07004518 K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
4519 K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
Andy Lutomirskid30dd8b2016-07-28 15:48:14 -07004520 zone_page_state(zone, NR_KERNEL_STACK_KB),
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07004521 K(zone_page_state(zone, NR_PAGETABLE)),
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07004522 K(zone_page_state(zone, NR_BOUNCE)),
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07004523 K(free_pcp),
4524 K(this_cpu_read(zone->pageset->pcp.count)),
Minchan Kim33e077b2016-07-28 15:47:14 -07004525 K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004526 printk("lowmem_reserve[]:");
4527 for (i = 0; i < MAX_NR_ZONES; i++)
Joe Perches1f84a182016-10-27 17:46:29 -07004528 printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
4529 printk(KERN_CONT "\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004530 }
4531
KOSAKI Motohiroee99c712009-03-31 15:19:31 -07004532 for_each_populated_zone(zone) {
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08004533 unsigned int order;
4534 unsigned long nr[MAX_ORDER], flags, total = 0;
Rabin Vincent377e4f12012-12-11 16:00:24 -08004535 unsigned char types[MAX_ORDER];
Linus Torvalds1da177e2005-04-16 15:20:36 -07004536
David Rientjes7bf02ea2011-05-24 17:11:16 -07004537 if (skip_free_areas_node(filter, zone_to_nid(zone)))
David Rientjesddd588b2011-03-22 16:30:46 -07004538 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004539 show_node(zone);
Joe Perches1f84a182016-10-27 17:46:29 -07004540 printk(KERN_CONT "%s: ", zone->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004541
4542 spin_lock_irqsave(&zone->lock, flags);
4543 for (order = 0; order < MAX_ORDER; order++) {
Rabin Vincent377e4f12012-12-11 16:00:24 -08004544 struct free_area *area = &zone->free_area[order];
4545 int type;
4546
4547 nr[order] = area->nr_free;
Kirill Korotaev8f9de512006-06-23 02:03:50 -07004548 total += nr[order] << order;
Rabin Vincent377e4f12012-12-11 16:00:24 -08004549
4550 types[order] = 0;
4551 for (type = 0; type < MIGRATE_TYPES; type++) {
4552 if (!list_empty(&area->free_list[type]))
4553 types[order] |= 1 << type;
4554 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004555 }
4556 spin_unlock_irqrestore(&zone->lock, flags);
Rabin Vincent377e4f12012-12-11 16:00:24 -08004557 for (order = 0; order < MAX_ORDER; order++) {
Joe Perches1f84a182016-10-27 17:46:29 -07004558 printk(KERN_CONT "%lu*%lukB ",
4559 nr[order], K(1UL) << order);
Rabin Vincent377e4f12012-12-11 16:00:24 -08004560 if (nr[order])
4561 show_migration_types(types[order]);
4562 }
Joe Perches1f84a182016-10-27 17:46:29 -07004563 printk(KERN_CONT "= %lukB\n", K(total));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004564 }
4565
David Rientjes949f7ec2013-04-29 15:07:48 -07004566 hugetlb_show_meminfo();
4567
Mel Gorman11fb9982016-07-28 15:46:20 -07004568 printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES));
Larry Woodmane6f36022008-02-04 22:29:30 -08004569
Linus Torvalds1da177e2005-04-16 15:20:36 -07004570 show_swap_cache_info();
4571}
4572
Mel Gorman19770b32008-04-28 02:12:18 -07004573static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
4574{
4575 zoneref->zone = zone;
4576 zoneref->zone_idx = zone_idx(zone);
4577}
4578
Linus Torvalds1da177e2005-04-16 15:20:36 -07004579/*
4580 * Builds allocation fallback zone lists.
Christoph Lameter1a932052006-01-06 00:11:16 -08004581 *
4582 * Add all populated zones of a node to the zonelist.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004583 */
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004584static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
Zhang Yanfeibc732f12013-07-08 16:00:06 -07004585 int nr_zones)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004586{
Christoph Lameter1a932052006-01-06 00:11:16 -08004587 struct zone *zone;
Zhang Yanfeibc732f12013-07-08 16:00:06 -07004588 enum zone_type zone_type = MAX_NR_ZONES;
Christoph Lameter02a68a52006-01-06 00:11:18 -08004589
4590 do {
Christoph Lameter2f6726e2006-09-25 23:31:18 -07004591 zone_type--;
Christoph Lameter070f8032006-01-06 00:11:19 -08004592 zone = pgdat->node_zones + zone_type;
Mel Gorman6aa303d2016-09-01 16:14:55 -07004593 if (managed_zone(zone)) {
Mel Gormandd1a2392008-04-28 02:12:17 -07004594 zoneref_set_zone(zone,
4595 &zonelist->_zonerefs[nr_zones++]);
Christoph Lameter070f8032006-01-06 00:11:19 -08004596 check_highest_zone(zone_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004597 }
Christoph Lameter2f6726e2006-09-25 23:31:18 -07004598 } while (zone_type);
Zhang Yanfeibc732f12013-07-08 16:00:06 -07004599
Christoph Lameter070f8032006-01-06 00:11:19 -08004600 return nr_zones;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004601}
4602
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004603
4604/*
4605 * zonelist_order:
4606 * 0 = automatic detection of better ordering.
4607 * 1 = order by ([node] distance, -zonetype)
4608 * 2 = order by (-zonetype, [node] distance)
4609 *
4610 * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
4611 * the same zonelist. So only NUMA can configure this param.
4612 */
4613#define ZONELIST_ORDER_DEFAULT 0
4614#define ZONELIST_ORDER_NODE 1
4615#define ZONELIST_ORDER_ZONE 2
4616
4617/* zonelist order in the kernel.
4618 * set_zonelist_order() will set this to NODE or ZONE.
4619 */
4620static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
4621static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
4622
4623
Linus Torvalds1da177e2005-04-16 15:20:36 -07004624#ifdef CONFIG_NUMA
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004625/* The value the user specified, set via command line or sysctl */
4626static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
4627/* string for sysctl */
4628#define NUMA_ZONELIST_ORDER_LEN 16
4629char numa_zonelist_order[16] = "default";
4630
4631/*
 4632	 * interface for configuring zonelist ordering.
4633 * command line option "numa_zonelist_order"
 4634	 * = "[dD]efault" - default, automatic configuration.
 4635	 * = "[nN]ode"    - order by node locality, then by zone within node
 4636	 * = "[zZ]one"    - order by zone, then by locality within zone
4637 */
4638
4639static int __parse_numa_zonelist_order(char *s)
4640{
4641 if (*s == 'd' || *s == 'D') {
4642 user_zonelist_order = ZONELIST_ORDER_DEFAULT;
4643 } else if (*s == 'n' || *s == 'N') {
4644 user_zonelist_order = ZONELIST_ORDER_NODE;
4645 } else if (*s == 'z' || *s == 'Z') {
4646 user_zonelist_order = ZONELIST_ORDER_ZONE;
4647 } else {
Joe Perches11705322016-03-17 14:19:50 -07004648 pr_warn("Ignoring invalid numa_zonelist_order value: %s\n", s);
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004649 return -EINVAL;
4650 }
4651 return 0;
4652}
4653
4654static __init int setup_numa_zonelist_order(char *s)
4655{
Volodymyr G. Lukiianykecb256f2011-01-13 15:46:26 -08004656 int ret;
4657
4658 if (!s)
4659 return 0;
4660
4661 ret = __parse_numa_zonelist_order(s);
4662 if (ret == 0)
4663 strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);
4664
4665 return ret;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004666}
4667early_param("numa_zonelist_order", setup_numa_zonelist_order);
4668
4669/*
4670 * sysctl handler for numa_zonelist_order
4671 */
Joe Perchescccad5b2014-06-06 14:38:09 -07004672int numa_zonelist_order_handler(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07004673 void __user *buffer, size_t *length,
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004674 loff_t *ppos)
4675{
4676 char saved_string[NUMA_ZONELIST_ORDER_LEN];
4677 int ret;
Andi Kleen443c6f12009-12-23 21:00:47 +01004678 static DEFINE_MUTEX(zl_order_mutex);
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004679
Andi Kleen443c6f12009-12-23 21:00:47 +01004680 mutex_lock(&zl_order_mutex);
Chen Gangdacbde02013-07-03 15:02:35 -07004681 if (write) {
4682 if (strlen((char *)table->data) >= NUMA_ZONELIST_ORDER_LEN) {
4683 ret = -EINVAL;
4684 goto out;
4685 }
4686 strcpy(saved_string, (char *)table->data);
4687 }
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07004688 ret = proc_dostring(table, write, buffer, length, ppos);
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004689 if (ret)
Andi Kleen443c6f12009-12-23 21:00:47 +01004690 goto out;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004691 if (write) {
4692 int oldval = user_zonelist_order;
Chen Gangdacbde02013-07-03 15:02:35 -07004693
4694 ret = __parse_numa_zonelist_order((char *)table->data);
4695 if (ret) {
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004696 /*
4697 * bogus value. restore saved string
4698 */
Chen Gangdacbde02013-07-03 15:02:35 -07004699 strncpy((char *)table->data, saved_string,
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004700 NUMA_ZONELIST_ORDER_LEN);
4701 user_zonelist_order = oldval;
Haicheng Li4eaf3f62010-05-24 14:32:52 -07004702 } else if (oldval != user_zonelist_order) {
4703 mutex_lock(&zonelists_mutex);
Jiang Liu9adb62a2012-07-31 16:43:28 -07004704 build_all_zonelists(NULL, NULL);
Haicheng Li4eaf3f62010-05-24 14:32:52 -07004705 mutex_unlock(&zonelists_mutex);
4706 }
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004707 }
Andi Kleen443c6f12009-12-23 21:00:47 +01004708out:
4709 mutex_unlock(&zl_order_mutex);
4710 return ret;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004711}
4712
4713
Christoph Lameter62bc62a2009-06-16 15:32:15 -07004714#define MAX_NODE_LOAD (nr_online_nodes)
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004715static int node_load[MAX_NUMNODES];
4716
Linus Torvalds1da177e2005-04-16 15:20:36 -07004717/**
Pavel Pisa4dc3b162005-05-01 08:59:25 -07004718 * find_next_best_node - find the next node that should appear in a given node's fallback list
Linus Torvalds1da177e2005-04-16 15:20:36 -07004719 * @node: node whose fallback list we're appending
4720 * @used_node_mask: nodemask_t of already used nodes
4721 *
4722 * We use a number of factors to determine which is the next node that should
4723 * appear on a given node's fallback list. The node should not have appeared
4724 * already in @node's fallback list, and it should be the next closest node
4725 * according to the distance array (which contains arbitrary distance values
 4726	 * from each node to each node in the system). We should also prefer nodes
 4727	 * with no CPUs, since presumably they'll have very little allocation pressure
 4728	 * on them otherwise.
4729 * It returns -1 if no node is found.
4730 */
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004731static int find_next_best_node(int node, nodemask_t *used_node_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004732{
Linus Torvalds4cf808e2006-02-17 20:38:21 +01004733 int n, val;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004734 int min_val = INT_MAX;
David Rientjes00ef2d22013-02-22 16:35:36 -08004735 int best_node = NUMA_NO_NODE;
Rusty Russella70f7302009-03-13 14:49:46 +10304736 const struct cpumask *tmp = cpumask_of_node(0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004737
Linus Torvalds4cf808e2006-02-17 20:38:21 +01004738 /* Use the local node if we haven't already */
4739 if (!node_isset(node, *used_node_mask)) {
4740 node_set(node, *used_node_mask);
4741 return node;
4742 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004743
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08004744 for_each_node_state(n, N_MEMORY) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004745
4746 /* Don't want a node to appear more than once */
4747 if (node_isset(n, *used_node_mask))
4748 continue;
4749
Linus Torvalds1da177e2005-04-16 15:20:36 -07004750 /* Use the distance array to find the distance */
4751 val = node_distance(node, n);
4752
Linus Torvalds4cf808e2006-02-17 20:38:21 +01004753 /* Penalize nodes under us ("prefer the next node") */
4754 val += (n < node);
4755
Linus Torvalds1da177e2005-04-16 15:20:36 -07004756 /* Give preference to headless and unused nodes */
Rusty Russella70f7302009-03-13 14:49:46 +10304757 tmp = cpumask_of_node(n);
4758 if (!cpumask_empty(tmp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004759 val += PENALTY_FOR_NODE_WITH_CPUS;
4760
4761 /* Slight preference for less loaded node */
4762 val *= (MAX_NODE_LOAD*MAX_NUMNODES);
4763 val += node_load[n];
4764
4765 if (val < min_val) {
4766 min_val = val;
4767 best_node = n;
4768 }
4769 }
4770
4771 if (best_node >= 0)
4772 node_set(best_node, *used_node_mask);
4773
4774 return best_node;
4775}
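/*
 * Worked example of the scoring above (hypothetical SLIT distances, not taken
 * from any real firmware table): suppose node 0 is local and nodes 1 and 2 are
 * candidates, with node_distance(0, 1) == 20 and node_distance(0, 2) == 10,
 * node 1 having CPUs and node 2 being headless. Node 1 scores
 * (20 + PENALTY_FOR_NODE_WITH_CPUS) scaled by MAX_NODE_LOAD * MAX_NUMNODES
 * plus node_load[1], while node 2 scores only 10 scaled the same way plus
 * node_load[2]. The closer, headless node 2 has the lower value and is
 * appended to node 0's fallback list first.
 */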
4776
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004777
4778/*
4779 * Build zonelists ordered by node and zones within node.
4780 * This results in maximum locality--normal zone overflows into local
4781 * DMA zone, if any--but risks exhausting DMA zone.
4782 */
4783static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004784{
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004785 int j;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004786 struct zonelist *zonelist;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004787
Aneesh Kumar K.Vc9634cf2016-10-07 16:59:12 -07004788 zonelist = &pgdat->node_zonelists[ZONELIST_FALLBACK];
Mel Gormandd1a2392008-04-28 02:12:17 -07004789 for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
Mel Gorman54a6eb52008-04-28 02:12:16 -07004790 ;
Zhang Yanfeibc732f12013-07-08 16:00:06 -07004791 j = build_zonelists_node(NODE_DATA(node), zonelist, j);
Mel Gormandd1a2392008-04-28 02:12:17 -07004792 zonelist->_zonerefs[j].zone = NULL;
4793 zonelist->_zonerefs[j].zone_idx = 0;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004794}
4795
4796/*
Christoph Lameter523b9452007-10-16 01:25:37 -07004797 * Build gfp_thisnode zonelists
4798 */
4799static void build_thisnode_zonelists(pg_data_t *pgdat)
4800{
Christoph Lameter523b9452007-10-16 01:25:37 -07004801 int j;
4802 struct zonelist *zonelist;
4803
Aneesh Kumar K.Vc9634cf2016-10-07 16:59:12 -07004804 zonelist = &pgdat->node_zonelists[ZONELIST_NOFALLBACK];
Zhang Yanfeibc732f12013-07-08 16:00:06 -07004805 j = build_zonelists_node(pgdat, zonelist, 0);
Mel Gormandd1a2392008-04-28 02:12:17 -07004806 zonelist->_zonerefs[j].zone = NULL;
4807 zonelist->_zonerefs[j].zone_idx = 0;
Christoph Lameter523b9452007-10-16 01:25:37 -07004808}
4809
4810/*
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004811 * Build zonelists ordered by zone and nodes within zones.
4812 * This results in conserving DMA zone[s] until all Normal memory is
4813 * exhausted, but results in overflowing to a remote node while memory
4814 * may still exist in local DMA zone.
4815 */
4816static int node_order[MAX_NUMNODES];
4817
4818static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
4819{
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004820 int pos, j, node;
4821 int zone_type; /* needs to be signed */
4822 struct zone *z;
4823 struct zonelist *zonelist;
4824
Aneesh Kumar K.Vc9634cf2016-10-07 16:59:12 -07004825 zonelist = &pgdat->node_zonelists[ZONELIST_FALLBACK];
Mel Gorman54a6eb52008-04-28 02:12:16 -07004826 pos = 0;
4827 for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
4828 for (j = 0; j < nr_nodes; j++) {
4829 node = node_order[j];
4830 z = &NODE_DATA(node)->node_zones[zone_type];
Mel Gorman6aa303d2016-09-01 16:14:55 -07004831 if (managed_zone(z)) {
Mel Gormandd1a2392008-04-28 02:12:17 -07004832 zoneref_set_zone(z,
4833 &zonelist->_zonerefs[pos++]);
Mel Gorman54a6eb52008-04-28 02:12:16 -07004834 check_highest_zone(zone_type);
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004835 }
4836 }
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004837 }
Mel Gormandd1a2392008-04-28 02:12:17 -07004838 zonelist->_zonerefs[pos].zone = NULL;
4839 zonelist->_zonerefs[pos].zone_idx = 0;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004840}
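/*
 * Illustrative comparison of the two orderings (a hypothetical two-node
 * machine where node 0 has DMA and Normal zones and node 1 has only Normal):
 * node order fills the fallback list as node0-Normal, node0-DMA, node1-Normal,
 * whereas zone order produces node0-Normal, node1-Normal, node0-DMA, keeping
 * the DMA zone as the very last resort at the cost of spilling to the remote
 * node earlier.
 */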
4841
Mel Gorman31939132014-10-09 15:28:30 -07004842#if defined(CONFIG_64BIT)
4843/*
4844 * Devices that require DMA32/DMA are relatively rare and do not justify a
4845 * penalty to every machine in case the specialised case applies. Default
4846 * to Node-ordering on 64-bit NUMA machines
4847 */
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004848static int default_zonelist_order(void)
4849{
Mel Gorman31939132014-10-09 15:28:30 -07004850 return ZONELIST_ORDER_NODE;
4851}
4852#else
4853/*
4854 * On 32-bit, the Normal zone needs to be preserved for allocations accessible
4855 * by the kernel. If processes running on node 0 deplete the low memory zone
4856 * then reclaim will occur more frequently, increasing stalls and potentially
4857 * making it easier to OOM if a large percentage of the zone is under writeback or
4858 * dirty. The problem is significantly worse if CONFIG_HIGHPTE is not set.
4859 * Hence, default to zone ordering on 32-bit.
4860 */
4861static int default_zonelist_order(void)
4862{
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004863 return ZONELIST_ORDER_ZONE;
4864}
Mel Gorman31939132014-10-09 15:28:30 -07004865#endif /* CONFIG_64BIT */
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004866
4867static void set_zonelist_order(void)
4868{
4869 if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
4870 current_zonelist_order = default_zonelist_order();
4871 else
4872 current_zonelist_order = user_zonelist_order;
4873}
4874
4875static void build_zonelists(pg_data_t *pgdat)
4876{
Yaowei Baic00eb152016-01-14 15:19:00 -08004877 int i, node, load;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004878 nodemask_t used_mask;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004879 int local_node, prev_node;
4880 struct zonelist *zonelist;
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08004881 unsigned int order = current_zonelist_order;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004882
4883 /* initialize zonelists */
Christoph Lameter523b9452007-10-16 01:25:37 -07004884 for (i = 0; i < MAX_ZONELISTS; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004885 zonelist = pgdat->node_zonelists + i;
Mel Gormandd1a2392008-04-28 02:12:17 -07004886 zonelist->_zonerefs[0].zone = NULL;
4887 zonelist->_zonerefs[0].zone_idx = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004888 }
4889
4890 /* NUMA-aware ordering of nodes */
4891 local_node = pgdat->node_id;
Christoph Lameter62bc62a2009-06-16 15:32:15 -07004892 load = nr_online_nodes;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004893 prev_node = local_node;
4894 nodes_clear(used_mask);
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004895
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004896 memset(node_order, 0, sizeof(node_order));
Yaowei Baic00eb152016-01-14 15:19:00 -08004897 i = 0;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004898
Linus Torvalds1da177e2005-04-16 15:20:36 -07004899 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
4900 /*
4901 * We don't want to pressure a particular node.
4902 * So adding penalty to the first node in same
4903 * distance group to make it round-robin.
4904 */
David Rientjes957f8222012-10-08 16:33:24 -07004905 if (node_distance(local_node, node) !=
4906 node_distance(local_node, prev_node))
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004907 node_load[node] = load;
4908
Linus Torvalds1da177e2005-04-16 15:20:36 -07004909 prev_node = node;
4910 load--;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004911 if (order == ZONELIST_ORDER_NODE)
4912 build_zonelists_in_node_order(pgdat, node);
4913 else
Yaowei Baic00eb152016-01-14 15:19:00 -08004914 node_order[i++] = node; /* remember order */
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004915 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004916
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004917 if (order == ZONELIST_ORDER_ZONE) {
4918 /* calculate node order -- i.e., DMA last! */
Yaowei Baic00eb152016-01-14 15:19:00 -08004919 build_zonelists_in_zone_order(pgdat, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004920 }
Christoph Lameter523b9452007-10-16 01:25:37 -07004921
4922 build_thisnode_zonelists(pgdat);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004923}
4924
Lee Schermerhorn7aac7892010-05-26 14:45:00 -07004925#ifdef CONFIG_HAVE_MEMORYLESS_NODES
4926/*
4927 * Return node id of node used for "local" allocations.
4928 * I.e., first node id of first zone in arg node's generic zonelist.
4929 * Used for initializing percpu 'numa_mem', which is used primarily
4930 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
4931 */
4932int local_memory_node(int node)
4933{
Mel Gormanc33d6c02016-05-19 17:14:10 -07004934 struct zoneref *z;
Lee Schermerhorn7aac7892010-05-26 14:45:00 -07004935
Mel Gormanc33d6c02016-05-19 17:14:10 -07004936 z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
Lee Schermerhorn7aac7892010-05-26 14:45:00 -07004937 gfp_zone(GFP_KERNEL),
Mel Gormanc33d6c02016-05-19 17:14:10 -07004938 NULL);
4939 return z->zone->node;
Lee Schermerhorn7aac7892010-05-26 14:45:00 -07004940}
4941#endif
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004942
Joonsoo Kim6423aa82016-08-10 16:27:49 -07004943static void setup_min_unmapped_ratio(void);
4944static void setup_min_slab_ratio(void);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004945#else /* CONFIG_NUMA */
4946
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004947static void set_zonelist_order(void)
4948{
4949 current_zonelist_order = ZONELIST_ORDER_ZONE;
4950}
4951
4952static void build_zonelists(pg_data_t *pgdat)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004953{
Christoph Lameter19655d32006-09-25 23:31:19 -07004954 int node, local_node;
Mel Gorman54a6eb52008-04-28 02:12:16 -07004955 enum zone_type j;
4956 struct zonelist *zonelist;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004957
4958 local_node = pgdat->node_id;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004959
Aneesh Kumar K.Vc9634cf2016-10-07 16:59:12 -07004960 zonelist = &pgdat->node_zonelists[ZONELIST_FALLBACK];
Zhang Yanfeibc732f12013-07-08 16:00:06 -07004961 j = build_zonelists_node(pgdat, zonelist, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004962
Mel Gorman54a6eb52008-04-28 02:12:16 -07004963 /*
4964 * Now we build the zonelist so that it contains the zones
4965 * of all the other nodes.
4966 * We don't want to pressure a particular node, so when
4967 * building the zones for node N, we make sure that the
4968 * zones coming right after the local ones are those from
4969 * node N+1 (modulo N)
4970 */
4971 for (node = local_node + 1; node < MAX_NUMNODES; node++) {
4972 if (!node_online(node))
4973 continue;
Zhang Yanfeibc732f12013-07-08 16:00:06 -07004974 j = build_zonelists_node(NODE_DATA(node), zonelist, j);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004975 }
Mel Gorman54a6eb52008-04-28 02:12:16 -07004976 for (node = 0; node < local_node; node++) {
4977 if (!node_online(node))
4978 continue;
Zhang Yanfeibc732f12013-07-08 16:00:06 -07004979 j = build_zonelists_node(NODE_DATA(node), zonelist, j);
Mel Gorman54a6eb52008-04-28 02:12:16 -07004980 }
4981
Mel Gormandd1a2392008-04-28 02:12:17 -07004982 zonelist->_zonerefs[j].zone = NULL;
4983 zonelist->_zonerefs[j].zone_idx = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004984}
4985
4986#endif /* CONFIG_NUMA */
4987
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09004988/*
4989 * Boot pageset table. One per cpu which is going to be used for all
4990 * zones and all nodes. The parameters will be set in such a way
4991 * that an item put on a list will immediately be handed over to
4992 * the buddy list. This is safe since pageset manipulation is done
4993 * with interrupts disabled.
4994 *
4995 * The boot_pagesets must be kept even after bootup is complete for
4996 * unused processors and/or zones. They do play a role for bootstrapping
4997 * hotplugged processors.
4998 *
4999 * zoneinfo_show() and maybe other functions do
5000 * not check if the processor is online before following the pageset pointer.
5001 * Other parts of the kernel may not check if the zone is available.
5002 */
5003static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
5004static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
Haicheng Li1f522502010-05-24 14:32:51 -07005005static void setup_zone_pageset(struct zone *zone);
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09005006
Haicheng Li4eaf3f62010-05-24 14:32:52 -07005007/*
5008 * Global mutex to protect against size modification of zonelists
5009 * as well as to serialize pageset setup for the new populated zone.
5010 */
5011DEFINE_MUTEX(zonelists_mutex);
5012
Rusty Russell9b1a4d32008-07-28 12:16:30 -05005013/* Return type is int only because that is what stop_machine() expects */
Jiang Liu4ed7e022012-07-31 16:43:35 -07005014static int __build_all_zonelists(void *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005015{
Yasunori Goto68113782006-06-23 02:03:11 -07005016 int nid;
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09005017 int cpu;
Jiang Liu9adb62a2012-07-31 16:43:28 -07005018 pg_data_t *self = data;
Paul Jackson9276b1bc2006-12-06 20:31:48 -08005019
Bo Liu7f9cfb32009-08-18 14:11:19 -07005020#ifdef CONFIG_NUMA
5021 memset(node_load, 0, sizeof(node_load));
5022#endif
Jiang Liu9adb62a2012-07-31 16:43:28 -07005023
5024 if (self && !node_online(self->node_id)) {
5025 build_zonelists(self);
Jiang Liu9adb62a2012-07-31 16:43:28 -07005026 }
5027
Paul Jackson9276b1bc2006-12-06 20:31:48 -08005028 for_each_online_node(nid) {
Christoph Lameter7ea15302007-10-16 01:25:29 -07005029 pg_data_t *pgdat = NODE_DATA(nid);
5030
5031 build_zonelists(pgdat);
Paul Jackson9276b1bc2006-12-06 20:31:48 -08005032 }
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09005033
5034 /*
5035 * Initialize the boot_pagesets that are going to be used
5036 * for bootstrapping processors. The real pagesets for
5037 * each zone will be allocated later when the per cpu
5038 * allocator is available.
5039 *
5040 * boot_pagesets are used also for bootstrapping offline
5041 * cpus if the system is already booted because the pagesets
5042 * are needed to initialize allocators on a specific cpu too.
5043 * F.e. the percpu allocator needs the page allocator which
5044 * needs the percpu allocator in order to allocate its pagesets
5045 * (a chicken-egg dilemma).
5046 */
Lee Schermerhorn7aac7892010-05-26 14:45:00 -07005047 for_each_possible_cpu(cpu) {
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09005048 setup_pageset(&per_cpu(boot_pageset, cpu), 0);
5049
Lee Schermerhorn7aac7892010-05-26 14:45:00 -07005050#ifdef CONFIG_HAVE_MEMORYLESS_NODES
5051 /*
5052 * We now know the "local memory node" for each node--
5053 * i.e., the node of the first zone in the generic zonelist.
5054 * Set up numa_mem percpu variable for on-line cpus. During
5055 * boot, only the boot cpu should be on-line; we'll init the
5056 * secondary cpus' numa_mem as they come on-line. During
5057 * node/memory hotplug, we'll fixup all on-line cpus.
5058 */
5059 if (cpu_online(cpu))
5060 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
5061#endif
5062 }
5063
Yasunori Goto68113782006-06-23 02:03:11 -07005064 return 0;
5065}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005066
Rasmus Villemoes061f67b2015-02-12 15:00:06 -08005067static noinline void __init
5068build_all_zonelists_init(void)
5069{
5070 __build_all_zonelists(NULL);
5071 mminit_verify_zonelist();
5072 cpuset_init_current_mems_allowed();
5073}
5074
Haicheng Li4eaf3f62010-05-24 14:32:52 -07005075/*
5076 * Called with zonelists_mutex held always
5077 * unless system_state == SYSTEM_BOOTING.
Rasmus Villemoes061f67b2015-02-12 15:00:06 -08005078 *
5079 * __ref due to (1) call of __meminit annotated setup_zone_pageset
5080 * [we're only called with non-NULL zone through __meminit paths] and
5081 * (2) call of __init annotated helper build_all_zonelists_init
5082 * [protected by SYSTEM_BOOTING].
Haicheng Li4eaf3f62010-05-24 14:32:52 -07005083 */
Jiang Liu9adb62a2012-07-31 16:43:28 -07005084void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone)
Yasunori Goto68113782006-06-23 02:03:11 -07005085{
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005086 set_zonelist_order();
5087
Yasunori Goto68113782006-06-23 02:03:11 -07005088 if (system_state == SYSTEM_BOOTING) {
Rasmus Villemoes061f67b2015-02-12 15:00:06 -08005089 build_all_zonelists_init();
Yasunori Goto68113782006-06-23 02:03:11 -07005090 } else {
KAMEZAWA Hiroyukie9959f02010-11-24 12:57:09 -08005091#ifdef CONFIG_MEMORY_HOTPLUG
Jiang Liu9adb62a2012-07-31 16:43:28 -07005092 if (zone)
5093 setup_zone_pageset(zone);
KAMEZAWA Hiroyukie9959f02010-11-24 12:57:09 -08005094#endif
Cody P Schaferdd1895e2013-07-03 15:01:36 -07005095 /* We have to stop all cpus to guarantee there is no user
 5096 * of the zonelist */
Jiang Liu9adb62a2012-07-31 16:43:28 -07005097 stop_machine(__build_all_zonelists, pgdat, NULL);
Yasunori Goto68113782006-06-23 02:03:11 -07005098 /* cpuset refresh routine should be here */
5099 }
Andrew Mortonbd1e22b2006-06-23 02:03:47 -07005100 vm_total_pages = nr_free_pagecache_pages();
Mel Gorman9ef9acb2007-10-16 01:25:54 -07005101 /*
5102 * Disable grouping by mobility if the number of pages in the
5103 * system is too low to allow the mechanism to work. It would be
5104 * more accurate, but expensive to check per-zone. This check is
5105 * made on memory-hotadd so a system can start with mobility
5106 * disabled and enable it later
5107 */
Mel Gormand9c23402007-10-16 01:26:01 -07005108 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
Mel Gorman9ef9acb2007-10-16 01:25:54 -07005109 page_group_by_mobility_disabled = 1;
5110 else
5111 page_group_by_mobility_disabled = 0;
5112
Joe Perches756a025f02016-03-17 14:19:47 -07005113 pr_info("Built %i zonelists in %s order, mobility grouping %s. Total pages: %ld\n",
5114 nr_online_nodes,
5115 zonelist_order_name[current_zonelist_order],
5116 page_group_by_mobility_disabled ? "off" : "on",
5117 vm_total_pages);
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005118#ifdef CONFIG_NUMA
Anton Blanchardf88dfff2014-12-10 15:42:53 -08005119 pr_info("Policy zone: %s\n", zone_names[policy_zone]);
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07005120#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07005121}
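/*
 * Rough numbers for the mobility-grouping cutoff above (illustrative,
 * assuming 4KB pages, a 2MB pageblock so pageblock_nr_pages == 512, and
 * around half a dozen migrate types): the threshold works out to a few
 * thousand pages, i.e. roughly 10-15MB of RAM. Systems smaller than that
 * boot with page_group_by_mobility_disabled set.
 */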
5122
5123/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07005124 * Initially all pages are reserved - free ones are freed
5125 * up by free_all_bootmem() once the early boot process is
5126 * done. Non-atomic initialization, single-pass.
5127 */
Matt Tolentinoc09b4242006-01-17 07:03:44 +01005128void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
Dave Hansena2f3aa022007-01-10 23:15:30 -08005129 unsigned long start_pfn, enum memmap_context context)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005130{
Dan Williams4b94ffd2016-01-15 16:56:22 -08005131 struct vmem_altmap *altmap = to_vmem_altmap(__pfn_to_phys(start_pfn));
Andy Whitcroft29751f62005-06-23 00:08:00 -07005132 unsigned long end_pfn = start_pfn + size;
Dan Williams4b94ffd2016-01-15 16:56:22 -08005133 pg_data_t *pgdat = NODE_DATA(nid);
Andy Whitcroft29751f62005-06-23 00:08:00 -07005134 unsigned long pfn;
Mel Gorman3a80a7f2015-06-30 14:57:02 -07005135 unsigned long nr_initialised = 0;
Taku Izumi342332e2016-03-15 14:55:22 -07005136#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5137 struct memblock_region *r = NULL, *tmp;
5138#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07005139
Hugh Dickins22b31ee2009-01-06 14:40:09 -08005140 if (highest_memmap_pfn < end_pfn - 1)
5141 highest_memmap_pfn = end_pfn - 1;
5142
Dan Williams4b94ffd2016-01-15 16:56:22 -08005143 /*
5144 * Honor reservation requested by the driver for this ZONE_DEVICE
5145 * memory
5146 */
5147 if (altmap && start_pfn == altmap->base_pfn)
5148 start_pfn += altmap->reserve;
5149
Greg Ungerercbe8dd42006-01-12 01:05:24 -08005150 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
Dave Hansena2f3aa022007-01-10 23:15:30 -08005151 /*
Andrew Mortonb72d0ff2016-03-15 14:55:25 -07005152 * There can be holes in boot-time mem_map[]s handed to this
5153 * function. They do not exist on hotplugged memory.
Dave Hansena2f3aa022007-01-10 23:15:30 -08005154 */
Andrew Mortonb72d0ff2016-03-15 14:55:25 -07005155 if (context != MEMMAP_EARLY)
5156 goto not_early;
5157
5158 if (!early_pfn_valid(pfn))
5159 continue;
5160 if (!early_pfn_in_nid(pfn, nid))
5161 continue;
5162 if (!update_defer_init(pgdat, pfn, end_pfn, &nr_initialised))
5163 break;
Taku Izumi342332e2016-03-15 14:55:22 -07005164
5165#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
Andrew Mortonb72d0ff2016-03-15 14:55:25 -07005166 /*
Andrew Mortonb72d0ff2016-03-15 14:55:25 -07005167 * Check the memblock attributes given by firmware, which can affect
 5168 * the kernel memory layout. If zone==ZONE_MOVABLE but the memory is
 5169 * mirrored, this is an overlapping memmap init, so skip it.
5170 */
5171 if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
5172 if (!r || pfn >= memblock_region_memory_end_pfn(r)) {
5173 for_each_memblock(memory, tmp)
5174 if (pfn < memblock_region_memory_end_pfn(tmp))
5175 break;
5176 r = tmp;
Taku Izumi342332e2016-03-15 14:55:22 -07005177 }
Andrew Mortonb72d0ff2016-03-15 14:55:25 -07005178 if (pfn >= memblock_region_memory_base_pfn(r) &&
5179 memblock_is_mirror(r)) {
5180 /* already initialized as NORMAL */
5181 pfn = memblock_region_memory_end_pfn(r);
5182 continue;
5183 }
Dave Hansena2f3aa022007-01-10 23:15:30 -08005184 }
Andrew Mortonb72d0ff2016-03-15 14:55:25 -07005185#endif
Mel Gormanac5d2532015-06-30 14:57:20 -07005186
Andrew Mortonb72d0ff2016-03-15 14:55:25 -07005187not_early:
Mel Gormanac5d2532015-06-30 14:57:20 -07005188 /*
5189 * Mark the block movable so that blocks are reserved for
5190 * movable at startup. This will force kernel allocations
5191 * to reserve their blocks rather than leaking throughout
5192 * the address space during boot when many long-lived
Mel Gorman974a7862015-11-06 16:28:34 -08005193 * kernel allocations are made.
Mel Gormanac5d2532015-06-30 14:57:20 -07005194 *
 5195 * The pageblock bitmap is created for the zone's valid pfn range, but
 5196 * the memmap can be created for invalid pages (for alignment).
 5197 * Check here so that set_pageblock_migratetype() is not called for a
 5198 * pfn outside the zone.
5199 */
5200 if (!(pfn & (pageblock_nr_pages - 1))) {
5201 struct page *page = pfn_to_page(pfn);
5202
5203 __init_single_page(page, pfn, zone, nid);
5204 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
5205 } else {
5206 __init_single_pfn(pfn, zone, nid);
5207 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005208 }
5209}
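/*
 * Note on the loop above: set_pageblock_migratetype() is only invoked on
 * pageblock-aligned pfns, so with a 2MB pageblock (pageblock_nr_pages == 512
 * under 4KB pages, used here purely as an illustration) the whole block is
 * marked MIGRATE_MOVABLE once, while every pfn in it still has its struct
 * page initialised individually.
 */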
5210
Andi Kleen1e548de2008-02-04 22:29:26 -08005211static void __meminit zone_init_free_lists(struct zone *zone)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005212{
Mel Gorman7aeb09f2014-06-04 16:10:21 -07005213 unsigned int order, t;
Mel Gormanb2a0ac82007-10-16 01:25:48 -07005214 for_each_migratetype_order(order, t) {
5215 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005216 zone->free_area[order].nr_free = 0;
5217 }
5218}
5219
5220#ifndef __HAVE_ARCH_MEMMAP_INIT
5221#define memmap_init(size, nid, zone, start_pfn) \
Dave Hansena2f3aa022007-01-10 23:15:30 -08005222 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005223#endif
5224
David Rientjes7cd2b0a2014-06-23 13:22:04 -07005225static int zone_batchsize(struct zone *zone)
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07005226{
David Howells3a6be872009-05-06 16:03:03 -07005227#ifdef CONFIG_MMU
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07005228 int batch;
5229
5230 /*
5231 * The per-cpu-pages pools are set to around 1000th of the
Seth, Rohitba56e912005-10-29 18:15:47 -07005232 * size of the zone. But no more than 1/2 of a meg.
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07005233 *
5234 * OK, so we don't know how big the cache is. So guess.
5235 */
Jiang Liub40da042013-02-22 16:33:52 -08005236 batch = zone->managed_pages / 1024;
Seth, Rohitba56e912005-10-29 18:15:47 -07005237 if (batch * PAGE_SIZE > 512 * 1024)
5238 batch = (512 * 1024) / PAGE_SIZE;
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07005239 batch /= 4; /* We effectively *= 4 below */
5240 if (batch < 1)
5241 batch = 1;
5242
5243 /*
Nick Piggin0ceaacc2005-12-04 13:55:25 +11005244 * Clamp the batch to a 2^n - 1 value. Having a power
5245 * of 2 value was found to be more likely to have
5246 * suboptimal cache aliasing properties in some cases.
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07005247 *
Nick Piggin0ceaacc2005-12-04 13:55:25 +11005248 * For example if 2 tasks are alternately allocating
5249 * batches of pages, one task can end up with a lot
5250 * of pages of one half of the possible page colors
5251 * and the other with pages of the other colors.
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07005252 */
David Howells91552032009-05-06 16:03:02 -07005253 batch = rounddown_pow_of_two(batch + batch/2) - 1;
Seth, Rohitba56e912005-10-29 18:15:47 -07005254
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07005255 return batch;
David Howells3a6be872009-05-06 16:03:03 -07005256
5257#else
5258 /* The deferral and batching of frees should be suppressed under NOMMU
5259 * conditions.
5260 *
5261 * The problem is that NOMMU needs to be able to allocate large chunks
5262 * of contiguous memory as there's no hardware page translation to
5263 * assemble apparent contiguous memory from discontiguous pages.
5264 *
5265 * Queueing large contiguous runs of pages for batching, however,
5266 * causes the pages to actually be freed in smaller chunks. As there
5267 * can be a significant delay between the individual batches being
5268 * recycled, this leads to the once large chunks of space being
5269 * fragmented and becoming unavailable for high-order allocations.
5270 */
5271 return 0;
5272#endif
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07005273}
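/*
 * Worked example for the batch sizing above (illustrative, assuming 4KB pages
 * and a zone with managed_pages == 262144, i.e. about 1GB): 262144 / 1024 =
 * 256 pages, which exceeds the 512KB cap, so the batch is clamped to 128,
 * quartered to 32, and then rounddown_pow_of_two(32 + 16) - 1 yields a final
 * value of 31.
 */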
5274
Cody P Schafer8d7a8fa2013-07-03 15:01:31 -07005275/*
5276 * pcp->high and pcp->batch values are related and dependent on one another:
 5277 * ->batch must never be higher than ->high.
5278 * The following function updates them in a safe manner without read side
5279 * locking.
5280 *
5281 * Any new users of pcp->batch and pcp->high should ensure they can cope with
 5282 * those fields changing asynchronously (according to the above rule).
5283 *
5284 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
5285 * outside of boot time (or some other assurance that no concurrent updaters
5286 * exist).
5287 */
5288static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
5289 unsigned long batch)
5290{
5291 /* start with a fail safe value for batch */
5292 pcp->batch = 1;
5293 smp_wmb();
5294
5295 /* Update high, then batch, in order */
5296 pcp->high = high;
5297 smp_wmb();
5298
5299 pcp->batch = batch;
5300}
5301
Cody P Schafer36640332013-07-03 15:01:40 -07005302/* a companion to pageset_set_high() */
Cody P Schafer4008bab2013-07-03 15:01:28 -07005303static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch)
5304{
Cody P Schafer8d7a8fa2013-07-03 15:01:31 -07005305 pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch));
Cody P Schafer4008bab2013-07-03 15:01:28 -07005306}
5307
Cody P Schafer88c90db2013-07-03 15:01:35 -07005308static void pageset_init(struct per_cpu_pageset *p)
Christoph Lameter2caaad42005-06-21 17:15:00 -07005309{
5310 struct per_cpu_pages *pcp;
Mel Gorman5f8dcc22009-09-21 17:03:19 -07005311 int migratetype;
Christoph Lameter2caaad42005-06-21 17:15:00 -07005312
Magnus Damm1c6fe942005-10-26 01:58:59 -07005313 memset(p, 0, sizeof(*p));
5314
Christoph Lameter3dfa5722008-02-04 22:29:19 -08005315 pcp = &p->pcp;
Christoph Lameter2caaad42005-06-21 17:15:00 -07005316 pcp->count = 0;
Mel Gorman5f8dcc22009-09-21 17:03:19 -07005317 for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
5318 INIT_LIST_HEAD(&pcp->lists[migratetype]);
Christoph Lameter2caaad42005-06-21 17:15:00 -07005319}
5320
Cody P Schafer88c90db2013-07-03 15:01:35 -07005321static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
5322{
5323 pageset_init(p);
5324 pageset_set_batch(p, batch);
5325}
5326
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08005327/*
Cody P Schafer36640332013-07-03 15:01:40 -07005328 * pageset_set_high() sets the high water mark for hot per_cpu_pagelist
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08005329 * to the value high for the pageset p.
5330 */
Cody P Schafer36640332013-07-03 15:01:40 -07005331static void pageset_set_high(struct per_cpu_pageset *p,
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08005332 unsigned long high)
5333{
Cody P Schafer8d7a8fa2013-07-03 15:01:31 -07005334 unsigned long batch = max(1UL, high / 4);
5335 if ((high / 4) > (PAGE_SHIFT * 8))
5336 batch = PAGE_SHIFT * 8;
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08005337
Cody P Schafer8d7a8fa2013-07-03 15:01:31 -07005338 pageset_update(&p->pcp, high, batch);
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08005339}
5340
David Rientjes7cd2b0a2014-06-23 13:22:04 -07005341static void pageset_set_high_and_batch(struct zone *zone,
5342 struct per_cpu_pageset *pcp)
Cody P Schafer56cef2b2013-07-03 15:01:38 -07005343{
Cody P Schafer56cef2b2013-07-03 15:01:38 -07005344 if (percpu_pagelist_fraction)
Cody P Schafer36640332013-07-03 15:01:40 -07005345 pageset_set_high(pcp,
Cody P Schafer56cef2b2013-07-03 15:01:38 -07005346 (zone->managed_pages /
5347 percpu_pagelist_fraction));
5348 else
5349 pageset_set_batch(pcp, zone_batchsize(zone));
5350}
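/*
 * Worked example for the fraction path above (illustrative, assuming 4KB
 * pages, a zone with managed_pages == 262144 and
 * percpu_pagelist_fraction == 8): high becomes 262144 / 8 = 32768 pages per
 * CPU, and since high / 4 is far above the PAGE_SHIFT * 8 == 96 cap, batch is
 * limited to 96. With the fraction unset, the defaults from zone_batchsize()
 * apply instead, giving high == 6 * batch.
 */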
5351
Cody P Schafer169f6c12013-07-03 15:01:41 -07005352static void __meminit zone_pageset_init(struct zone *zone, int cpu)
5353{
5354 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
5355
5356 pageset_init(pcp);
5357 pageset_set_high_and_batch(zone, pcp);
5358}
5359
Jiang Liu4ed7e022012-07-31 16:43:35 -07005360static void __meminit setup_zone_pageset(struct zone *zone)
Wu Fengguang319774e2010-05-24 14:32:49 -07005361{
5362 int cpu;
Wu Fengguang319774e2010-05-24 14:32:49 -07005363 zone->pageset = alloc_percpu(struct per_cpu_pageset);
Cody P Schafer56cef2b2013-07-03 15:01:38 -07005364 for_each_possible_cpu(cpu)
5365 zone_pageset_init(zone, cpu);
Wu Fengguang319774e2010-05-24 14:32:49 -07005366}
5367
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07005368/*
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09005369 * Allocate per cpu pagesets and initialize them.
5370 * Before this call only boot pagesets were available.
Christoph Lameter2caaad42005-06-21 17:15:00 -07005371 */
Al Viro78d99552005-12-15 09:18:25 +00005372void __init setup_per_cpu_pageset(void)
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07005373{
Mel Gormanb4911ea2016-08-04 15:31:49 -07005374 struct pglist_data *pgdat;
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09005375 struct zone *zone;
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07005376
Wu Fengguang319774e2010-05-24 14:32:49 -07005377 for_each_populated_zone(zone)
5378 setup_zone_pageset(zone);
Mel Gormanb4911ea2016-08-04 15:31:49 -07005379
5380 for_each_online_pgdat(pgdat)
5381 pgdat->per_cpu_nodestats =
5382 alloc_percpu(struct per_cpu_nodestat);
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07005383}
5384
Matt Tolentinoc09b4242006-01-17 07:03:44 +01005385static __meminit void zone_pcp_init(struct zone *zone)
Dave Hansened8ece22005-10-29 18:16:50 -07005386{
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09005387 /*
5388 * per cpu subsystem is not up at this point. The following code
5389 * relies on the ability of the linker to provide the
5390 * offset of a (static) per cpu variable into the per cpu area.
5391 */
5392 zone->pageset = &boot_pageset;
Dave Hansened8ece22005-10-29 18:16:50 -07005393
Xishi Qiub38a8722013-11-12 15:07:20 -08005394 if (populated_zone(zone))
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09005395 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n",
5396 zone->name, zone->present_pages,
5397 zone_batchsize(zone));
Dave Hansened8ece22005-10-29 18:16:50 -07005398}
5399
Jiang Liu4ed7e022012-07-31 16:43:35 -07005400int __meminit init_currently_empty_zone(struct zone *zone,
Yasunori Goto718127c2006-06-23 02:03:10 -07005401 unsigned long zone_start_pfn,
Yaowei Baib171e402015-11-05 18:47:06 -08005402 unsigned long size)
Dave Hansened8ece22005-10-29 18:16:50 -07005403{
5404 struct pglist_data *pgdat = zone->zone_pgdat;
Linus Torvalds9dcb8b62016-10-26 10:15:30 -07005405
Dave Hansened8ece22005-10-29 18:16:50 -07005406 pgdat->nr_zones = zone_idx(zone) + 1;
5407
Dave Hansened8ece22005-10-29 18:16:50 -07005408 zone->zone_start_pfn = zone_start_pfn;
5409
Mel Gorman708614e2008-07-23 21:26:51 -07005410 mminit_dprintk(MMINIT_TRACE, "memmap_init",
5411 "Initialising map node %d zone %lu pfns %lu -> %lu\n",
5412 pgdat->node_id,
5413 (unsigned long)zone_idx(zone),
5414 zone_start_pfn, (zone_start_pfn + size));
5415
Andi Kleen1e548de2008-02-04 22:29:26 -08005416 zone_init_free_lists(zone);
Linus Torvalds9dcb8b62016-10-26 10:15:30 -07005417 zone->initialized = 1;
Yasunori Goto718127c2006-06-23 02:03:10 -07005418
5419 return 0;
Dave Hansened8ece22005-10-29 18:16:50 -07005420}
5421
Tejun Heo0ee332c2011-12-08 10:22:09 -08005422#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
Mel Gormanc7132162006-09-27 01:49:43 -07005423#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
Mel Gorman8a942fd2015-06-30 14:56:55 -07005424
Mel Gormanc7132162006-09-27 01:49:43 -07005425/*
5426 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
Mel Gormanc7132162006-09-27 01:49:43 -07005427 */
Mel Gorman8a942fd2015-06-30 14:56:55 -07005428int __meminit __early_pfn_to_nid(unsigned long pfn,
5429 struct mminit_pfnnid_cache *state)
Mel Gormanc7132162006-09-27 01:49:43 -07005430{
Tejun Heoc13291a2011-07-12 10:46:30 +02005431 unsigned long start_pfn, end_pfn;
Yinghai Lue76b63f2013-09-11 14:22:17 -07005432 int nid;
Russ Anderson7c243c72013-04-29 15:07:59 -07005433
Mel Gorman8a942fd2015-06-30 14:56:55 -07005434 if (state->last_start <= pfn && pfn < state->last_end)
5435 return state->last_nid;
Mel Gormanc7132162006-09-27 01:49:43 -07005436
Yinghai Lue76b63f2013-09-11 14:22:17 -07005437 nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
5438 if (nid != -1) {
Mel Gorman8a942fd2015-06-30 14:56:55 -07005439 state->last_start = start_pfn;
5440 state->last_end = end_pfn;
5441 state->last_nid = nid;
Yinghai Lue76b63f2013-09-11 14:22:17 -07005442 }
5443
5444 return nid;
Mel Gormanc7132162006-09-27 01:49:43 -07005445}
5446#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
5447
Mel Gormanc7132162006-09-27 01:49:43 -07005448/**
Santosh Shilimkar67828322014-01-21 15:50:25 -08005449 * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range
Randy Dunlap88ca3b92006-10-04 02:15:25 -07005450 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
Santosh Shilimkar67828322014-01-21 15:50:25 -08005451 * @max_low_pfn: The highest PFN that will be passed to memblock_free_early_nid
Mel Gormanc7132162006-09-27 01:49:43 -07005452 *
Zhang Zhen7d018172014-06-04 16:10:53 -07005453 * If an architecture guarantees that all ranges registered contain no holes
 5454 * and may be freed, this function may be used instead of calling
5455 * memblock_free_early_nid() manually.
Mel Gormanc7132162006-09-27 01:49:43 -07005456 */
Tejun Heoc13291a2011-07-12 10:46:30 +02005457void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
Mel Gormanc7132162006-09-27 01:49:43 -07005458{
Tejun Heoc13291a2011-07-12 10:46:30 +02005459 unsigned long start_pfn, end_pfn;
5460 int i, this_nid;
Mel Gormanc7132162006-09-27 01:49:43 -07005461
Tejun Heoc13291a2011-07-12 10:46:30 +02005462 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
5463 start_pfn = min(start_pfn, max_low_pfn);
5464 end_pfn = min(end_pfn, max_low_pfn);
Mel Gormanc7132162006-09-27 01:49:43 -07005465
Tejun Heoc13291a2011-07-12 10:46:30 +02005466 if (start_pfn < end_pfn)
Santosh Shilimkar67828322014-01-21 15:50:25 -08005467 memblock_free_early_nid(PFN_PHYS(start_pfn),
5468 (end_pfn - start_pfn) << PAGE_SHIFT,
5469 this_nid);
Mel Gormanc7132162006-09-27 01:49:43 -07005470 }
5471}
5472
5473/**
5474 * sparse_memory_present_with_active_regions - Call memory_present for each active range
Randy Dunlap88ca3b92006-10-04 02:15:25 -07005475 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
Mel Gormanc7132162006-09-27 01:49:43 -07005476 *
Zhang Zhen7d018172014-06-04 16:10:53 -07005477 * If an architecture guarantees that all ranges registered contain no holes and may
5478 * be freed, this function may be used instead of calling memory_present() manually.
Mel Gormanc7132162006-09-27 01:49:43 -07005479 */
5480void __init sparse_memory_present_with_active_regions(int nid)
5481{
Tejun Heoc13291a2011-07-12 10:46:30 +02005482 unsigned long start_pfn, end_pfn;
5483 int i, this_nid;
Mel Gormanc7132162006-09-27 01:49:43 -07005484
Tejun Heoc13291a2011-07-12 10:46:30 +02005485 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
5486 memory_present(this_nid, start_pfn, end_pfn);
Mel Gormanc7132162006-09-27 01:49:43 -07005487}
5488
5489/**
5490 * get_pfn_range_for_nid - Return the start and end page frames for a node
Randy Dunlap88ca3b92006-10-04 02:15:25 -07005491 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
5492 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
5493 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
Mel Gormanc7132162006-09-27 01:49:43 -07005494 *
5495 * It returns the start and end page frame of a node based on information
Zhang Zhen7d018172014-06-04 16:10:53 -07005496 * provided by memblock_set_node(). If called for a node
Mel Gormanc7132162006-09-27 01:49:43 -07005497 * with no available memory, a warning is printed and the start and end
Randy Dunlap88ca3b92006-10-04 02:15:25 -07005498 * PFNs will be 0.
Mel Gormanc7132162006-09-27 01:49:43 -07005499 */
Yasunori Gotoa3142c82007-05-08 00:23:07 -07005500void __meminit get_pfn_range_for_nid(unsigned int nid,
Mel Gormanc7132162006-09-27 01:49:43 -07005501 unsigned long *start_pfn, unsigned long *end_pfn)
5502{
Tejun Heoc13291a2011-07-12 10:46:30 +02005503 unsigned long this_start_pfn, this_end_pfn;
Mel Gormanc7132162006-09-27 01:49:43 -07005504 int i;
Tejun Heoc13291a2011-07-12 10:46:30 +02005505
Mel Gormanc7132162006-09-27 01:49:43 -07005506 *start_pfn = -1UL;
5507 *end_pfn = 0;
5508
Tejun Heoc13291a2011-07-12 10:46:30 +02005509 for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
5510 *start_pfn = min(*start_pfn, this_start_pfn);
5511 *end_pfn = max(*end_pfn, this_end_pfn);
Mel Gormanc7132162006-09-27 01:49:43 -07005512 }
5513
Christoph Lameter633c0662007-10-16 01:25:37 -07005514 if (*start_pfn == -1UL)
Mel Gormanc7132162006-09-27 01:49:43 -07005515 *start_pfn = 0;
Mel Gormanc7132162006-09-27 01:49:43 -07005516}
5517
5518/*
Mel Gorman2a1e2742007-07-17 04:03:12 -07005519 * This finds a zone that can be used for ZONE_MOVABLE pages. The
 5520 * assumption is made that zones within a node are ordered by monotonically
 5521 * increasing memory addresses, so that the "highest" populated zone is used.
5522 */
Adrian Bunkb69a7282008-07-23 21:28:12 -07005523static void __init find_usable_zone_for_movable(void)
Mel Gorman2a1e2742007-07-17 04:03:12 -07005524{
5525 int zone_index;
5526 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
5527 if (zone_index == ZONE_MOVABLE)
5528 continue;
5529
5530 if (arch_zone_highest_possible_pfn[zone_index] >
5531 arch_zone_lowest_possible_pfn[zone_index])
5532 break;
5533 }
5534
5535 VM_BUG_ON(zone_index == -1);
5536 movable_zone = zone_index;
5537}
5538
5539/*
5540 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005541 * because it is sized independent of architecture. Unlike the other zones,
Mel Gorman2a1e2742007-07-17 04:03:12 -07005542 * the starting point for ZONE_MOVABLE is not fixed. It may be different
5543 * in each node depending on the size of each node and how evenly kernelcore
5544 * is distributed. This helper function adjusts the zone ranges
5545 * provided by the architecture for a given node by using the end of the
5546 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
 5547 * zones within a node are ordered by monotonically increasing memory addresses.
5548 */
Adrian Bunkb69a7282008-07-23 21:28:12 -07005549static void __meminit adjust_zone_range_for_zone_movable(int nid,
Mel Gorman2a1e2742007-07-17 04:03:12 -07005550 unsigned long zone_type,
5551 unsigned long node_start_pfn,
5552 unsigned long node_end_pfn,
5553 unsigned long *zone_start_pfn,
5554 unsigned long *zone_end_pfn)
5555{
5556 /* Only adjust if ZONE_MOVABLE is on this node */
5557 if (zone_movable_pfn[nid]) {
5558 /* Size ZONE_MOVABLE */
5559 if (zone_type == ZONE_MOVABLE) {
5560 *zone_start_pfn = zone_movable_pfn[nid];
5561 *zone_end_pfn = min(node_end_pfn,
5562 arch_zone_highest_possible_pfn[movable_zone]);
5563
Xishi Qiue506b992016-10-07 16:58:06 -07005564 /* Adjust for ZONE_MOVABLE starting within this range */
5565 } else if (!mirrored_kernelcore &&
5566 *zone_start_pfn < zone_movable_pfn[nid] &&
5567 *zone_end_pfn > zone_movable_pfn[nid]) {
5568 *zone_end_pfn = zone_movable_pfn[nid];
5569
Mel Gorman2a1e2742007-07-17 04:03:12 -07005570 /* Check if this whole range is within ZONE_MOVABLE */
5571 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
5572 *zone_start_pfn = *zone_end_pfn;
5573 }
5574}
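/*
 * Example of the adjustment above (made-up pfns for illustration, ignoring
 * the mirrored-kernelcore special case): on a node spanning pfns
 * [0, 1048576) where kernelcore placed zone_movable_pfn[nid] at 786432,
 * ZONE_NORMAL has its *zone_end_pfn pulled back to 786432, ZONE_MOVABLE is
 * given [786432, 1048576), and any zone starting entirely above 786432
 * collapses to an empty range.
 */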
5575
5576/*
Mel Gormanc7132162006-09-27 01:49:43 -07005577 * Return the number of pages a zone spans in a node, including holes
5578 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
5579 */
Paul Mundt6ea6e682007-07-15 23:38:20 -07005580static unsigned long __meminit zone_spanned_pages_in_node(int nid,
Mel Gormanc7132162006-09-27 01:49:43 -07005581 unsigned long zone_type,
Zhang Yanfei7960aed2013-07-08 15:59:52 -07005582 unsigned long node_start_pfn,
5583 unsigned long node_end_pfn,
Taku Izumid91749c2016-03-15 14:55:18 -07005584 unsigned long *zone_start_pfn,
5585 unsigned long *zone_end_pfn,
Mel Gormanc7132162006-09-27 01:49:43 -07005586 unsigned long *ignored)
5587{
Xishi Qiub5685e92015-09-08 15:04:16 -07005588 /* When hot-adding a new node via cpu_up(), the node should be empty */
Xishi Qiuf9126ab2015-08-14 15:35:16 -07005589 if (!node_start_pfn && !node_end_pfn)
5590 return 0;
5591
Zhang Yanfei7960aed2013-07-08 15:59:52 -07005592 /* Get the start and end of the zone */
Taku Izumid91749c2016-03-15 14:55:18 -07005593 *zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
5594 *zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
Mel Gorman2a1e2742007-07-17 04:03:12 -07005595 adjust_zone_range_for_zone_movable(nid, zone_type,
5596 node_start_pfn, node_end_pfn,
Taku Izumid91749c2016-03-15 14:55:18 -07005597 zone_start_pfn, zone_end_pfn);
Mel Gormanc7132162006-09-27 01:49:43 -07005598
5599 /* Check that this node has pages within the zone's required range */
Taku Izumid91749c2016-03-15 14:55:18 -07005600 if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
Mel Gormanc7132162006-09-27 01:49:43 -07005601 return 0;
5602
5603 /* Move the zone boundaries inside the node if necessary */
Taku Izumid91749c2016-03-15 14:55:18 -07005604 *zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
5605 *zone_start_pfn = max(*zone_start_pfn, node_start_pfn);
Mel Gormanc7132162006-09-27 01:49:43 -07005606
5607 /* Return the spanned pages */
Taku Izumid91749c2016-03-15 14:55:18 -07005608 return *zone_end_pfn - *zone_start_pfn;
Mel Gormanc7132162006-09-27 01:49:43 -07005609}
5610
5611/*
5612 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
Randy Dunlap88ca3b92006-10-04 02:15:25 -07005613 * then all holes in the requested range will be accounted for.
Mel Gormanc7132162006-09-27 01:49:43 -07005614 */
Yinghai Lu32996252009-12-15 17:59:02 -08005615unsigned long __meminit __absent_pages_in_range(int nid,
Mel Gormanc7132162006-09-27 01:49:43 -07005616 unsigned long range_start_pfn,
5617 unsigned long range_end_pfn)
5618{
Tejun Heo96e907d2011-07-12 10:46:29 +02005619 unsigned long nr_absent = range_end_pfn - range_start_pfn;
5620 unsigned long start_pfn, end_pfn;
5621 int i;
Mel Gormanc7132162006-09-27 01:49:43 -07005622
Tejun Heo96e907d2011-07-12 10:46:29 +02005623 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
5624 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
5625 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
5626 nr_absent -= end_pfn - start_pfn;
Mel Gormanc7132162006-09-27 01:49:43 -07005627 }
Tejun Heo96e907d2011-07-12 10:46:29 +02005628 return nr_absent;
Mel Gormanc7132162006-09-27 01:49:43 -07005629}
5630
5631/**
5632 * absent_pages_in_range - Return number of page frames in holes within a range
5633 * @start_pfn: The start PFN to start searching for holes
5634 * @end_pfn: The end PFN to stop searching for holes
5635 *
Randy Dunlap88ca3b92006-10-04 02:15:25 -07005636 * It returns the number of page frames in memory holes within a range.
Mel Gormanc7132162006-09-27 01:49:43 -07005637 */
5638unsigned long __init absent_pages_in_range(unsigned long start_pfn,
5639 unsigned long end_pfn)
5640{
5641 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
5642}
5643
5644/* Return the number of page frames in holes in a zone on a node */
Paul Mundt6ea6e682007-07-15 23:38:20 -07005645static unsigned long __meminit zone_absent_pages_in_node(int nid,
Mel Gormanc7132162006-09-27 01:49:43 -07005646 unsigned long zone_type,
Zhang Yanfei7960aed2013-07-08 15:59:52 -07005647 unsigned long node_start_pfn,
5648 unsigned long node_end_pfn,
Mel Gormanc7132162006-09-27 01:49:43 -07005649 unsigned long *ignored)
5650{
Tejun Heo96e907d2011-07-12 10:46:29 +02005651 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
5652 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
Mel Gorman9c7cd682006-09-27 01:49:58 -07005653 unsigned long zone_start_pfn, zone_end_pfn;
Taku Izumi342332e2016-03-15 14:55:22 -07005654 unsigned long nr_absent;
Mel Gorman9c7cd682006-09-27 01:49:58 -07005655
Xishi Qiub5685e92015-09-08 15:04:16 -07005656 /* When hot-adding a new node via cpu_up(), the node should be empty */
Xishi Qiuf9126ab2015-08-14 15:35:16 -07005657 if (!node_start_pfn && !node_end_pfn)
5658 return 0;
5659
Tejun Heo96e907d2011-07-12 10:46:29 +02005660 zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
5661 zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
Mel Gorman9c7cd682006-09-27 01:49:58 -07005662
Mel Gorman2a1e2742007-07-17 04:03:12 -07005663 adjust_zone_range_for_zone_movable(nid, zone_type,
5664 node_start_pfn, node_end_pfn,
5665 &zone_start_pfn, &zone_end_pfn);
Taku Izumi342332e2016-03-15 14:55:22 -07005666 nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
5667
5668 /*
5669 * ZONE_MOVABLE handling.
5670 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
5671 * and vice versa.
5672 */
Xishi Qiue506b992016-10-07 16:58:06 -07005673 if (mirrored_kernelcore && zone_movable_pfn[nid]) {
5674 unsigned long start_pfn, end_pfn;
5675 struct memblock_region *r;
Taku Izumi342332e2016-03-15 14:55:22 -07005676
Xishi Qiue506b992016-10-07 16:58:06 -07005677 for_each_memblock(memory, r) {
5678 start_pfn = clamp(memblock_region_memory_base_pfn(r),
5679 zone_start_pfn, zone_end_pfn);
5680 end_pfn = clamp(memblock_region_memory_end_pfn(r),
5681 zone_start_pfn, zone_end_pfn);
Taku Izumi342332e2016-03-15 14:55:22 -07005682
Xishi Qiue506b992016-10-07 16:58:06 -07005683 if (zone_type == ZONE_MOVABLE &&
5684 memblock_is_mirror(r))
5685 nr_absent += end_pfn - start_pfn;
Taku Izumi342332e2016-03-15 14:55:22 -07005686
Xishi Qiue506b992016-10-07 16:58:06 -07005687 if (zone_type == ZONE_NORMAL &&
5688 !memblock_is_mirror(r))
5689 nr_absent += end_pfn - start_pfn;
Taku Izumi342332e2016-03-15 14:55:22 -07005690 }
5691 }
5692
5693 return nr_absent;
Mel Gormanc7132162006-09-27 01:49:43 -07005694}
Mel Gorman0e0b8642006-09-27 01:49:56 -07005695
Tejun Heo0ee332c2011-12-08 10:22:09 -08005696#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
Paul Mundt6ea6e682007-07-15 23:38:20 -07005697static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
Mel Gormanc7132162006-09-27 01:49:43 -07005698 unsigned long zone_type,
Zhang Yanfei7960aed2013-07-08 15:59:52 -07005699 unsigned long node_start_pfn,
5700 unsigned long node_end_pfn,
Taku Izumid91749c2016-03-15 14:55:18 -07005701 unsigned long *zone_start_pfn,
5702 unsigned long *zone_end_pfn,
Mel Gormanc7132162006-09-27 01:49:43 -07005703 unsigned long *zones_size)
5704{
Taku Izumid91749c2016-03-15 14:55:18 -07005705 unsigned int zone;
5706
5707 *zone_start_pfn = node_start_pfn;
5708 for (zone = 0; zone < zone_type; zone++)
5709 *zone_start_pfn += zones_size[zone];
5710
5711 *zone_end_pfn = *zone_start_pfn + zones_size[zone_type];
5712
Mel Gormanc7132162006-09-27 01:49:43 -07005713 return zones_size[zone_type];
5714}
5715
Paul Mundt6ea6e682007-07-15 23:38:20 -07005716static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
Mel Gormanc7132162006-09-27 01:49:43 -07005717 unsigned long zone_type,
Zhang Yanfei7960aed2013-07-08 15:59:52 -07005718 unsigned long node_start_pfn,
5719 unsigned long node_end_pfn,
Mel Gormanc7132162006-09-27 01:49:43 -07005720 unsigned long *zholes_size)
5721{
5722 if (!zholes_size)
5723 return 0;
5724
5725 return zholes_size[zone_type];
5726}
Yinghai Lu20e69262013-03-01 14:51:27 -08005727
Tejun Heo0ee332c2011-12-08 10:22:09 -08005728#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
Mel Gormanc7132162006-09-27 01:49:43 -07005729
Yasunori Gotoa3142c82007-05-08 00:23:07 -07005730static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
Zhang Yanfei7960aed2013-07-08 15:59:52 -07005731 unsigned long node_start_pfn,
5732 unsigned long node_end_pfn,
5733 unsigned long *zones_size,
5734 unsigned long *zholes_size)
Mel Gormanc7132162006-09-27 01:49:43 -07005735{
Gu Zhengfebd5942015-06-24 16:57:02 -07005736 unsigned long realtotalpages = 0, totalpages = 0;
Mel Gormanc7132162006-09-27 01:49:43 -07005737 enum zone_type i;
5738
Gu Zhengfebd5942015-06-24 16:57:02 -07005739 for (i = 0; i < MAX_NR_ZONES; i++) {
5740 struct zone *zone = pgdat->node_zones + i;
Taku Izumid91749c2016-03-15 14:55:18 -07005741 unsigned long zone_start_pfn, zone_end_pfn;
Gu Zhengfebd5942015-06-24 16:57:02 -07005742 unsigned long size, real_size;
Mel Gormanc7132162006-09-27 01:49:43 -07005743
Gu Zhengfebd5942015-06-24 16:57:02 -07005744 size = zone_spanned_pages_in_node(pgdat->node_id, i,
5745 node_start_pfn,
5746 node_end_pfn,
Taku Izumid91749c2016-03-15 14:55:18 -07005747 &zone_start_pfn,
5748 &zone_end_pfn,
Gu Zhengfebd5942015-06-24 16:57:02 -07005749 zones_size);
5750 real_size = size - zone_absent_pages_in_node(pgdat->node_id, i,
Zhang Yanfei7960aed2013-07-08 15:59:52 -07005751 node_start_pfn, node_end_pfn,
5752 zholes_size);
Taku Izumid91749c2016-03-15 14:55:18 -07005753 if (size)
5754 zone->zone_start_pfn = zone_start_pfn;
5755 else
5756 zone->zone_start_pfn = 0;
Gu Zhengfebd5942015-06-24 16:57:02 -07005757 zone->spanned_pages = size;
5758 zone->present_pages = real_size;
5759
5760 totalpages += size;
5761 realtotalpages += real_size;
5762 }
5763
5764 pgdat->node_spanned_pages = totalpages;
Mel Gormanc7132162006-09-27 01:49:43 -07005765 pgdat->node_present_pages = realtotalpages;
5766 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
5767 realtotalpages);
5768}
5769
Mel Gorman835c1342007-10-16 01:25:47 -07005770#ifndef CONFIG_SPARSEMEM
5771/*
5772 * Calculate the size of the zone->blockflags rounded to an unsigned long
Mel Gormand9c23402007-10-16 01:26:01 -07005773 * Start by making sure zonesize is a multiple of pageblock_order by rounding
5774 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
Mel Gorman835c1342007-10-16 01:25:47 -07005775 * round what is now in bits to the nearest long in bits, then return it in
5776 * bytes.
5777 */
Linus Torvalds7c455122013-02-18 09:58:02 -08005778static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
Mel Gorman835c1342007-10-16 01:25:47 -07005779{
5780 unsigned long usemapsize;
5781
Linus Torvalds7c455122013-02-18 09:58:02 -08005782 zonesize += zone_start_pfn & (pageblock_nr_pages-1);
Mel Gormand9c23402007-10-16 01:26:01 -07005783 usemapsize = roundup(zonesize, pageblock_nr_pages);
5784 usemapsize = usemapsize >> pageblock_order;
Mel Gorman835c1342007-10-16 01:25:47 -07005785 usemapsize *= NR_PAGEBLOCK_BITS;
5786 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
5787
5788 return usemapsize / 8;
5789}
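/*
 * Worked example for usemap_size() (illustrative, assuming 64-bit longs, a
 * 2MB pageblock so pageblock_nr_pages == 512, NR_PAGEBLOCK_BITS == 4, and an
 * aligned zone of 1,000,000 pages): rounding up gives 1,000,448 pages or
 * 1954 pageblocks, 1954 * 4 = 7816 bits, rounded up to 7872 bits to fill
 * whole unsigned longs, for a bitmap of 984 bytes.
 */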
5790
5791static void __init setup_usemap(struct pglist_data *pgdat,
Linus Torvalds7c455122013-02-18 09:58:02 -08005792 struct zone *zone,
5793 unsigned long zone_start_pfn,
5794 unsigned long zonesize)
Mel Gorman835c1342007-10-16 01:25:47 -07005795{
Linus Torvalds7c455122013-02-18 09:58:02 -08005796 unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
Mel Gorman835c1342007-10-16 01:25:47 -07005797 zone->pageblock_flags = NULL;
Julia Lawall58a01a42009-01-06 14:39:28 -08005798 if (usemapsize)
Santosh Shilimkar67828322014-01-21 15:50:25 -08005799 zone->pageblock_flags =
5800 memblock_virt_alloc_node_nopanic(usemapsize,
5801 pgdat->node_id);
Mel Gorman835c1342007-10-16 01:25:47 -07005802}
5803#else
Linus Torvalds7c455122013-02-18 09:58:02 -08005804static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
5805 unsigned long zone_start_pfn, unsigned long zonesize) {}
Mel Gorman835c1342007-10-16 01:25:47 -07005806#endif /* CONFIG_SPARSEMEM */
5807
Mel Gormand9c23402007-10-16 01:26:01 -07005808#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
Mel Gormanba72cb82007-11-28 16:21:13 -08005809
Mel Gormand9c23402007-10-16 01:26:01 -07005810/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
Chen Gang15ca2202013-09-11 14:20:27 -07005811void __paginginit set_pageblock_order(void)
Mel Gormand9c23402007-10-16 01:26:01 -07005812{
Andrew Morton955c1cd2012-05-29 15:06:31 -07005813 unsigned int order;
5814
Mel Gormand9c23402007-10-16 01:26:01 -07005815 /* Check that pageblock_nr_pages has not already been setup */
5816 if (pageblock_order)
5817 return;
5818
Andrew Morton955c1cd2012-05-29 15:06:31 -07005819 if (HPAGE_SHIFT > PAGE_SHIFT)
5820 order = HUGETLB_PAGE_ORDER;
5821 else
5822 order = MAX_ORDER - 1;
5823
Mel Gormand9c23402007-10-16 01:26:01 -07005824 /*
5825 * Assume the largest contiguous order of interest is a huge page.
Andrew Morton955c1cd2012-05-29 15:06:31 -07005826 * This value may be variable depending on boot parameters on IA64 and
5827 * powerpc.
Mel Gormand9c23402007-10-16 01:26:01 -07005828 */
5829 pageblock_order = order;
5830}
5831#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
5832
Mel Gormanba72cb82007-11-28 16:21:13 -08005833/*
5834 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
Andrew Morton955c1cd2012-05-29 15:06:31 -07005835 * is unused as pageblock_order is set at compile-time. See
5836 * include/linux/pageblock-flags.h for the values of pageblock_order based on
5837 * the kernel config
Mel Gormanba72cb82007-11-28 16:21:13 -08005838 */
Chen Gang15ca2202013-09-11 14:20:27 -07005839void __paginginit set_pageblock_order(void)
Mel Gormanba72cb82007-11-28 16:21:13 -08005840{
Mel Gormanba72cb82007-11-28 16:21:13 -08005841}
Mel Gormand9c23402007-10-16 01:26:01 -07005842
5843#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
5844
Jiang Liu01cefae2012-12-12 13:52:19 -08005845static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages,
5846 unsigned long present_pages)
5847{
5848 unsigned long pages = spanned_pages;
5849
5850 /*
5851 * Provide a more accurate estimation if there are holes within
5852 * the zone and SPARSEMEM is in use. If there are holes within the
5853 * zone, each populated memory region may cost us one or two extra
5854 * memmap pages due to alignment because memmap pages for each
 5855 * populated region may not be naturally aligned on a page boundary.
5856 * So the (present_pages >> 4) heuristic is a tradeoff for that.
5857 */
5858 if (spanned_pages > present_pages + (present_pages >> 4) &&
5859 IS_ENABLED(CONFIG_SPARSEMEM))
5860 pages = present_pages;
5861
5862 return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
5863}
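/*
 * Rough cost estimate for calc_memmap_size() (illustrative, assuming a
 * 64-byte struct page and 4KB pages): a zone of 262144 spanned pages with no
 * large holes needs 262144 * 64 bytes of memmap, i.e. 16MB or 4096 pages,
 * which is the value charged against freesize in free_area_init_core() below.
 */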
5864
Linus Torvalds1da177e2005-04-16 15:20:36 -07005865/*
5866 * Set up the zone data structures:
5867 * - mark all pages reserved
5868 * - mark all memory queues empty
5869 * - clear the memory bitmaps
Minchan Kim6527af52012-07-31 16:46:16 -07005870 *
5871 * NOTE: pgdat should get zeroed by caller.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005872 */
Wei Yang7f3eb552015-09-08 14:59:50 -07005873static void __paginginit free_area_init_core(struct pglist_data *pgdat)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005874{
Christoph Lameter2f1b6242006-09-25 23:31:13 -07005875 enum zone_type j;
Dave Hansened8ece22005-10-29 18:16:50 -07005876 int nid = pgdat->node_id;
Yasunori Goto718127c2006-06-23 02:03:10 -07005877 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005878
Dave Hansen208d54e2005-10-29 18:16:52 -07005879 pgdat_resize_init(pgdat);
Andrea Arcangeli8177a422012-03-23 20:56:34 +01005880#ifdef CONFIG_NUMA_BALANCING
5881 spin_lock_init(&pgdat->numabalancing_migrate_lock);
5882 pgdat->numabalancing_migrate_nr_pages = 0;
5883 pgdat->numabalancing_migrate_next_window = jiffies;
5884#endif
Kirill A. Shutemova3d0a9182016-02-02 16:57:08 -08005885#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5886 spin_lock_init(&pgdat->split_queue_lock);
5887 INIT_LIST_HEAD(&pgdat->split_queue);
5888 pgdat->split_queue_len = 0;
5889#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07005890 init_waitqueue_head(&pgdat->kswapd_wait);
Mel Gorman55150612012-07-31 16:44:35 -07005891 init_waitqueue_head(&pgdat->pfmemalloc_wait);
Vlastimil Babka698b1b32016-03-17 14:18:08 -07005892#ifdef CONFIG_COMPACTION
5893 init_waitqueue_head(&pgdat->kcompactd_wait);
5894#endif
Joonsoo Kimeefa8642014-12-12 16:55:46 -08005895 pgdat_page_ext_init(pgdat);
Mel Gormana52633d2016-07-28 15:45:28 -07005896 spin_lock_init(&pgdat->lru_lock);
Mel Gormana9dd0a82016-07-28 15:46:02 -07005897 lruvec_init(node_lruvec(pgdat));
Michal Nazarewicz5f63b722012-01-11 15:16:11 +01005898
Linus Torvalds1da177e2005-04-16 15:20:36 -07005899 for (j = 0; j < MAX_NR_ZONES; j++) {
5900 struct zone *zone = pgdat->node_zones + j;
Jiang Liu9feedc92012-12-12 13:52:12 -08005901 unsigned long size, realsize, freesize, memmap_pages;
Taku Izumid91749c2016-03-15 14:55:18 -07005902 unsigned long zone_start_pfn = zone->zone_start_pfn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005903
Gu Zhengfebd5942015-06-24 16:57:02 -07005904 size = zone->spanned_pages;
5905 realsize = freesize = zone->present_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005906
Mel Gorman0e0b8642006-09-27 01:49:56 -07005907 /*
Jiang Liu9feedc92012-12-12 13:52:12 -08005908 * Adjust freesize so that it accounts for how much memory
Mel Gorman0e0b8642006-09-27 01:49:56 -07005909 * is used by this zone for memmap. This affects the watermark
5910 * and per-cpu initialisations
5911 */
Jiang Liu01cefae2012-12-12 13:52:19 -08005912 memmap_pages = calc_memmap_size(size, realsize);
Zhong Hongboba914f42014-12-12 16:56:21 -08005913 if (!is_highmem_idx(j)) {
5914 if (freesize >= memmap_pages) {
5915 freesize -= memmap_pages;
5916 if (memmap_pages)
5917 printk(KERN_DEBUG
5918 " %s zone: %lu pages used for memmap\n",
5919 zone_names[j], memmap_pages);
5920 } else
Joe Perches11705322016-03-17 14:19:50 -07005921 pr_warn(" %s zone: %lu pages exceeds freesize %lu\n",
Zhong Hongboba914f42014-12-12 16:56:21 -08005922 zone_names[j], memmap_pages, freesize);
5923 }
Mel Gorman0e0b8642006-09-27 01:49:56 -07005924
Christoph Lameter62672762007-02-10 01:43:07 -08005925 /* Account for reserved pages */
Jiang Liu9feedc92012-12-12 13:52:12 -08005926 if (j == 0 && freesize > dma_reserve) {
5927 freesize -= dma_reserve;
Yinghai Lud903ef92008-10-18 20:27:06 -07005928 printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
Christoph Lameter62672762007-02-10 01:43:07 -08005929 zone_names[0], dma_reserve);
Mel Gorman0e0b8642006-09-27 01:49:56 -07005930 }
5931
Christoph Lameter98d2b0e2006-09-25 23:31:12 -07005932 if (!is_highmem_idx(j))
Jiang Liu9feedc92012-12-12 13:52:12 -08005933 nr_kernel_pages += freesize;
Jiang Liu01cefae2012-12-12 13:52:19 -08005934 /* Charge for highmem memmap if there are enough kernel pages */
5935 else if (nr_kernel_pages > memmap_pages * 2)
5936 nr_kernel_pages -= memmap_pages;
Jiang Liu9feedc92012-12-12 13:52:12 -08005937 nr_all_pages += freesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005938
Jiang Liu9feedc92012-12-12 13:52:12 -08005939 /*
5940 * Set an approximate value for lowmem here, it will be adjusted
5941 * when the bootmem allocator frees pages into the buddy system.
5942 * And all highmem pages will be managed by the buddy system.
5943 */
5944 zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
Christoph Lameter96146342006-07-03 00:24:13 -07005945#ifdef CONFIG_NUMA
Christoph Lameterd5f541e2006-09-27 01:50:08 -07005946 zone->node = nid;
Christoph Lameter96146342006-07-03 00:24:13 -07005947#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07005948 zone->name = zone_names[j];
Linus Torvalds1da177e2005-04-16 15:20:36 -07005949 zone->zone_pgdat = pgdat;
Mel Gormana52633d2016-07-28 15:45:28 -07005950 spin_lock_init(&zone->lock);
5951 zone_seqlock_init(zone);
Dave Hansened8ece22005-10-29 18:16:50 -07005952 zone_pcp_init(zone);
Johannes Weiner81c0a2b2013-09-11 14:20:47 -07005953
Linus Torvalds1da177e2005-04-16 15:20:36 -07005954 if (!size)
5955 continue;
5956
Andrew Morton955c1cd2012-05-29 15:06:31 -07005957 set_pageblock_order();
Linus Torvalds7c455122013-02-18 09:58:02 -08005958 setup_usemap(pgdat, zone, zone_start_pfn, size);
Yaowei Baib171e402015-11-05 18:47:06 -08005959 ret = init_currently_empty_zone(zone, zone_start_pfn, size);
Yasunori Goto718127c2006-06-23 02:03:10 -07005960 BUG_ON(ret);
Heiko Carstens76cdd582008-05-14 16:05:52 -07005961 memmap_init(size, nid, j, zone_start_pfn);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005962 }
5963}
5964
Fabian Frederickbd721ea2016-08-02 14:03:33 -07005965static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005966{
Tony Luckb0aeba72015-11-10 10:09:47 -08005967 unsigned long __maybe_unused start = 0;
Laura Abbotta1c34a32015-11-05 18:48:46 -08005968 unsigned long __maybe_unused offset = 0;
5969
Linus Torvalds1da177e2005-04-16 15:20:36 -07005970 /* Skip empty nodes */
5971 if (!pgdat->node_spanned_pages)
5972 return;
5973
Andy Whitcroftd41dee32005-06-23 00:07:54 -07005974#ifdef CONFIG_FLAT_NODE_MEM_MAP
Tony Luckb0aeba72015-11-10 10:09:47 -08005975 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
5976 offset = pgdat->node_start_pfn - start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005977 /* ia64 gets its own node_mem_map, before this, without bootmem */
5978 if (!pgdat->node_mem_map) {
Tony Luckb0aeba72015-11-10 10:09:47 -08005979 unsigned long size, end;
Andy Whitcroftd41dee32005-06-23 00:07:54 -07005980 struct page *map;
5981
Bob Piccoe984bb42006-05-20 15:00:31 -07005982 /*
5983 * The zone's endpoints aren't required to be MAX_ORDER
5984 * aligned but the node_mem_map endpoints must be in order
5985 * for the buddy allocator to function correctly.
5986 */
Cody P Schafer108bcc92013-02-22 16:35:23 -08005987 end = pgdat_end_pfn(pgdat);
Bob Piccoe984bb42006-05-20 15:00:31 -07005988 end = ALIGN(end, MAX_ORDER_NR_PAGES);
5989 size = (end - start) * sizeof(struct page);
Dave Hansen6f167ec2005-06-23 00:07:39 -07005990 map = alloc_remap(pgdat->node_id, size);
5991 if (!map)
Santosh Shilimkar67828322014-01-21 15:50:25 -08005992 map = memblock_virt_alloc_node_nopanic(size,
5993 pgdat->node_id);
Laura Abbotta1c34a32015-11-05 18:48:46 -08005994 pgdat->node_mem_map = map + offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005995 }
Roman Zippel12d810c2007-05-31 00:40:54 -07005996#ifndef CONFIG_NEED_MULTIPLE_NODES
Linus Torvalds1da177e2005-04-16 15:20:36 -07005997 /*
5998 * With no DISCONTIG, the global mem_map is just set as node 0's
5999 */
Mel Gormanc7132162006-09-27 01:49:43 -07006000 if (pgdat == NODE_DATA(0)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006001 mem_map = NODE_DATA(0)->node_mem_map;
Laura Abbotta1c34a32015-11-05 18:48:46 -08006002#if defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) || defined(CONFIG_FLATMEM)
Mel Gormanc7132162006-09-27 01:49:43 -07006003 if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
Laura Abbotta1c34a32015-11-05 18:48:46 -08006004 mem_map -= offset;
Tejun Heo0ee332c2011-12-08 10:22:09 -08006005#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
Mel Gormanc7132162006-09-27 01:49:43 -07006006 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006007#endif
Andy Whitcroftd41dee32005-06-23 00:07:54 -07006008#endif /* CONFIG_FLAT_NODE_MEM_MAP */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006009}
6010
Johannes Weiner9109fb72008-07-23 21:27:20 -07006011void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
6012 unsigned long node_start_pfn, unsigned long *zholes_size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006013{
Johannes Weiner9109fb72008-07-23 21:27:20 -07006014 pg_data_t *pgdat = NODE_DATA(nid);
Zhang Yanfei7960aed2013-07-08 15:59:52 -07006015 unsigned long start_pfn = 0;
6016 unsigned long end_pfn = 0;
Johannes Weiner9109fb72008-07-23 21:27:20 -07006017
Minchan Kim88fdf752012-07-31 16:46:14 -07006018 /* pg_data_t should be reset to zero when it's allocated */
Mel Gorman38087d92016-07-28 15:45:49 -07006019 WARN_ON(pgdat->nr_zones || pgdat->kswapd_classzone_idx);
Minchan Kim88fdf752012-07-31 16:46:14 -07006020
Linus Torvalds1da177e2005-04-16 15:20:36 -07006021 pgdat->node_id = nid;
6022 pgdat->node_start_pfn = node_start_pfn;
Mel Gorman75ef7182016-07-28 15:45:24 -07006023 pgdat->per_cpu_nodestats = NULL;
Zhang Yanfei7960aed2013-07-08 15:59:52 -07006024#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
6025 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
Juergen Gross8d29e182015-02-11 15:26:01 -08006026 pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
Zhen Lei4ada0c52015-09-08 15:04:19 -07006027 (u64)start_pfn << PAGE_SHIFT,
6028 end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
Taku Izumid91749c2016-03-15 14:55:18 -07006029#else
6030 start_pfn = node_start_pfn;
Zhang Yanfei7960aed2013-07-08 15:59:52 -07006031#endif
6032 calculate_node_totalpages(pgdat, start_pfn, end_pfn,
6033 zones_size, zholes_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006034
6035 alloc_node_mem_map(pgdat);
Yinghai Lue8c27ac2008-06-01 13:15:22 -07006036#ifdef CONFIG_FLAT_NODE_MEM_MAP
6037 printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
6038 nid, (unsigned long)pgdat,
6039 (unsigned long)pgdat->node_mem_map);
6040#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07006041
Michal Hocko292f70c2017-06-02 14:46:49 -07006042 reset_deferred_meminit(pgdat);
Wei Yang7f3eb552015-09-08 14:59:50 -07006043 free_area_init_core(pgdat);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006044}
6045
Tejun Heo0ee332c2011-12-08 10:22:09 -08006046#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
Miklos Szeredi418508c2007-05-23 13:57:55 -07006047
6048#if MAX_NUMNODES > 1
6049/*
6050 * Figure out the number of possible node ids.
6051 */
Cody P Schaferf9872ca2013-04-29 15:08:01 -07006052void __init setup_nr_node_ids(void)
Miklos Szeredi418508c2007-05-23 13:57:55 -07006053{
Wei Yang904a9552015-09-08 14:59:48 -07006054 unsigned int highest;
Miklos Szeredi418508c2007-05-23 13:57:55 -07006055
Wei Yang904a9552015-09-08 14:59:48 -07006056 highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
Miklos Szeredi418508c2007-05-23 13:57:55 -07006057 nr_node_ids = highest + 1;
6058}
Miklos Szeredi418508c2007-05-23 13:57:55 -07006059#endif
6060
Mel Gormanc7132162006-09-27 01:49:43 -07006061/**
Tejun Heo1e019792011-07-12 09:45:34 +02006062 * node_map_pfn_alignment - determine the maximum internode alignment
6063 *
6064 * This function should be called after node map is populated and sorted.
6065 * It calculates the maximum power of two alignment which can distinguish
6066 * all the nodes.
6067 *
6068 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
6069 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the
 6070 * nodes are shifted by 256MiB, the result is 256MiB. Note that if only the last node is
6071 * shifted, 1GiB is enough and this function will indicate so.
6072 *
6073 * This is used to test whether pfn -> nid mapping of the chosen memory
6074 * model has fine enough granularity to avoid incorrect mapping for the
6075 * populated node map.
6076 *
6077 * Returns the determined alignment in pfn's. 0 if there is no alignment
6078 * requirement (single node).
6079 */
6080unsigned long __init node_map_pfn_alignment(void)
6081{
6082 unsigned long accl_mask = 0, last_end = 0;
Tejun Heoc13291a2011-07-12 10:46:30 +02006083 unsigned long start, end, mask;
Tejun Heo1e019792011-07-12 09:45:34 +02006084 int last_nid = -1;
Tejun Heoc13291a2011-07-12 10:46:30 +02006085 int i, nid;
Tejun Heo1e019792011-07-12 09:45:34 +02006086
Tejun Heoc13291a2011-07-12 10:46:30 +02006087 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
Tejun Heo1e019792011-07-12 09:45:34 +02006088 if (!start || last_nid < 0 || last_nid == nid) {
6089 last_nid = nid;
6090 last_end = end;
6091 continue;
6092 }
6093
6094 /*
6095 * Start with a mask granular enough to pin-point to the
6096 * start pfn and tick off bits one-by-one until it becomes
6097 * too coarse to separate the current node from the last.
6098 */
6099 mask = ~((1 << __ffs(start)) - 1);
6100 while (mask && last_end <= (start & (mask << 1)))
6101 mask <<= 1;
6102
6103 /* accumulate all internode masks */
6104 accl_mask |= mask;
6105 }
6106
6107 /* convert mask to number of pages */
6108 return ~accl_mask + 1;
6109}
6110
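/*
 * Worked example (illustrative, not part of the original source): two nodes
 * with 4KiB pages, node 0 covering [0, 256MiB) and node 1 starting at
 * 256MiB.  For node 1, start == 0x10000 pfns, so the initial mask is
 * ~0xffff; last_end (0x10000) is not <= (start & (mask << 1)) == 0, so the
 * mask is not widened.  accl_mask becomes ~0xffff and the function returns
 * ~accl_mask + 1 == 0x10000 pfns, i.e. 256MiB alignment, matching the
 * example in the comment above.
 */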
Mel Gormana6af2bc2007-02-10 01:42:57 -08006111/* Find the lowest pfn for a node */
Adrian Bunkb69a7282008-07-23 21:28:12 -07006112static unsigned long __init find_min_pfn_for_node(int nid)
Mel Gormanc7132162006-09-27 01:49:43 -07006113{
Mel Gormana6af2bc2007-02-10 01:42:57 -08006114 unsigned long min_pfn = ULONG_MAX;
Tejun Heoc13291a2011-07-12 10:46:30 +02006115 unsigned long start_pfn;
6116 int i;
Mel Gorman1abbfb42006-11-23 12:01:41 +00006117
Tejun Heoc13291a2011-07-12 10:46:30 +02006118 for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
6119 min_pfn = min(min_pfn, start_pfn);
Mel Gormanc7132162006-09-27 01:49:43 -07006120
Mel Gormana6af2bc2007-02-10 01:42:57 -08006121 if (min_pfn == ULONG_MAX) {
Joe Perches11705322016-03-17 14:19:50 -07006122 pr_warn("Could not find start_pfn for node %d\n", nid);
Mel Gormana6af2bc2007-02-10 01:42:57 -08006123 return 0;
6124 }
6125
6126 return min_pfn;
Mel Gormanc7132162006-09-27 01:49:43 -07006127}
6128
6129/**
6130 * find_min_pfn_with_active_regions - Find the minimum PFN registered
6131 *
6132 * It returns the minimum PFN based on information provided via
Zhang Zhen7d018172014-06-04 16:10:53 -07006133 * memblock_set_node().
Mel Gormanc7132162006-09-27 01:49:43 -07006134 */
6135unsigned long __init find_min_pfn_with_active_regions(void)
6136{
6137 return find_min_pfn_for_node(MAX_NUMNODES);
6138}
6139
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07006140/*
6141 * early_calculate_totalpages()
6142 * Sum pages in active regions for movable zone.
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08006143 * Populate N_MEMORY for calculating usable_nodes.
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07006144 */
Adrian Bunk484f51f2007-10-16 01:26:03 -07006145static unsigned long __init early_calculate_totalpages(void)
Mel Gorman7e63efe2007-07-17 04:03:15 -07006146{
Mel Gorman7e63efe2007-07-17 04:03:15 -07006147 unsigned long totalpages = 0;
Tejun Heoc13291a2011-07-12 10:46:30 +02006148 unsigned long start_pfn, end_pfn;
6149 int i, nid;
Mel Gorman7e63efe2007-07-17 04:03:15 -07006150
Tejun Heoc13291a2011-07-12 10:46:30 +02006151 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
6152 unsigned long pages = end_pfn - start_pfn;
6153
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07006154 totalpages += pages;
6155 if (pages)
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08006156 node_set_state(nid, N_MEMORY);
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07006157 }
Pintu Kumarb8af2942013-09-11 14:20:34 -07006158 return totalpages;
Mel Gorman7e63efe2007-07-17 04:03:15 -07006159}
6160
Mel Gorman2a1e2742007-07-17 04:03:12 -07006161/*
6162 * Find the PFN the Movable zone begins in each node. Kernel memory
6163 * is spread evenly between nodes as long as the nodes have enough
6164 * memory. When they don't, some nodes will have more kernelcore than
6165 * others
6166 */
Kautuk Consulb224ef82012-03-21 16:34:15 -07006167static void __init find_zone_movable_pfns_for_nodes(void)
Mel Gorman2a1e2742007-07-17 04:03:12 -07006168{
6169 int i, nid;
6170 unsigned long usable_startpfn;
6171 unsigned long kernelcore_node, kernelcore_remaining;
Yinghai Lu66918dc2009-06-30 11:41:37 -07006172 /* save the state before borrow the nodemask */
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08006173 nodemask_t saved_node_state = node_states[N_MEMORY];
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07006174 unsigned long totalpages = early_calculate_totalpages();
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08006175 int usable_nodes = nodes_weight(node_states[N_MEMORY]);
Emil Medve136199f2014-04-07 15:37:52 -07006176 struct memblock_region *r;
Tang Chenb2f3eeb2014-01-21 15:49:38 -08006177
6178 /* Need to find movable_zone earlier when movable_node is specified. */
6179 find_usable_zone_for_movable();
Mel Gorman2a1e2742007-07-17 04:03:12 -07006180
Mel Gorman7e63efe2007-07-17 04:03:15 -07006181 /*
Tang Chenb2f3eeb2014-01-21 15:49:38 -08006182 * If movable_node is specified, ignore kernelcore and movablecore
6183 * options.
6184 */
6185 if (movable_node_is_enabled()) {
Emil Medve136199f2014-04-07 15:37:52 -07006186 for_each_memblock(memory, r) {
6187 if (!memblock_is_hotpluggable(r))
Tang Chenb2f3eeb2014-01-21 15:49:38 -08006188 continue;
6189
Emil Medve136199f2014-04-07 15:37:52 -07006190 nid = r->nid;
Tang Chenb2f3eeb2014-01-21 15:49:38 -08006191
Emil Medve136199f2014-04-07 15:37:52 -07006192 usable_startpfn = PFN_DOWN(r->base);
Tang Chenb2f3eeb2014-01-21 15:49:38 -08006193 zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
6194 min(usable_startpfn, zone_movable_pfn[nid]) :
6195 usable_startpfn;
6196 }
6197
6198 goto out2;
6199 }
6200
6201 /*
Taku Izumi342332e2016-03-15 14:55:22 -07006202 * If kernelcore=mirror is specified, ignore movablecore option
6203 */
6204 if (mirrored_kernelcore) {
6205 bool mem_below_4gb_not_mirrored = false;
6206
6207 for_each_memblock(memory, r) {
6208 if (memblock_is_mirror(r))
6209 continue;
6210
6211 nid = r->nid;
6212
6213 usable_startpfn = memblock_region_memory_base_pfn(r);
6214
6215 if (usable_startpfn < 0x100000) {
6216 mem_below_4gb_not_mirrored = true;
6217 continue;
6218 }
6219
6220 zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
6221 min(usable_startpfn, zone_movable_pfn[nid]) :
6222 usable_startpfn;
6223 }
6224
6225 if (mem_below_4gb_not_mirrored)
 6226			pr_warn("This configuration results in unmirrored kernel memory.\n");
6227
6228 goto out2;
6229 }
6230
6231 /*
Tang Chenb2f3eeb2014-01-21 15:49:38 -08006232 * If movablecore=nn[KMG] was specified, calculate what size of
Mel Gorman7e63efe2007-07-17 04:03:15 -07006233 * kernelcore that corresponds so that memory usable for
6234 * any allocation type is evenly spread. If both kernelcore
6235 * and movablecore are specified, then the value of kernelcore
6236 * will be used for required_kernelcore if it's greater than
6237 * what movablecore would have allowed.
6238 */
6239 if (required_movablecore) {
Mel Gorman7e63efe2007-07-17 04:03:15 -07006240 unsigned long corepages;
6241
6242 /*
6243 * Round-up so that ZONE_MOVABLE is at least as large as what
6244 * was requested by the user
6245 */
6246 required_movablecore =
6247 roundup(required_movablecore, MAX_ORDER_NR_PAGES);
Xishi Qiu9fd745d2015-11-05 18:48:11 -08006248 required_movablecore = min(totalpages, required_movablecore);
Mel Gorman7e63efe2007-07-17 04:03:15 -07006249 corepages = totalpages - required_movablecore;
6250
6251 required_kernelcore = max(required_kernelcore, corepages);
6252 }
6253
Xishi Qiubde304b2015-11-05 18:48:56 -08006254 /*
6255 * If kernelcore was not specified or kernelcore size is larger
6256 * than totalpages, there is no ZONE_MOVABLE.
6257 */
6258 if (!required_kernelcore || required_kernelcore >= totalpages)
Yinghai Lu66918dc2009-06-30 11:41:37 -07006259 goto out;
Mel Gorman2a1e2742007-07-17 04:03:12 -07006260
6261 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
Mel Gorman2a1e2742007-07-17 04:03:12 -07006262 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
6263
6264restart:
6265 /* Spread kernelcore memory as evenly as possible throughout nodes */
6266 kernelcore_node = required_kernelcore / usable_nodes;
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08006267 for_each_node_state(nid, N_MEMORY) {
Tejun Heoc13291a2011-07-12 10:46:30 +02006268 unsigned long start_pfn, end_pfn;
6269
Mel Gorman2a1e2742007-07-17 04:03:12 -07006270 /*
6271 * Recalculate kernelcore_node if the division per node
6272 * now exceeds what is necessary to satisfy the requested
6273 * amount of memory for the kernel
6274 */
6275 if (required_kernelcore < kernelcore_node)
6276 kernelcore_node = required_kernelcore / usable_nodes;
6277
6278 /*
6279 * As the map is walked, we track how much memory is usable
6280 * by the kernel using kernelcore_remaining. When it is
6281 * 0, the rest of the node is usable by ZONE_MOVABLE
6282 */
6283 kernelcore_remaining = kernelcore_node;
6284
6285 /* Go through each range of PFNs within this node */
Tejun Heoc13291a2011-07-12 10:46:30 +02006286 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
Mel Gorman2a1e2742007-07-17 04:03:12 -07006287 unsigned long size_pages;
6288
Tejun Heoc13291a2011-07-12 10:46:30 +02006289 start_pfn = max(start_pfn, zone_movable_pfn[nid]);
Mel Gorman2a1e2742007-07-17 04:03:12 -07006290 if (start_pfn >= end_pfn)
6291 continue;
6292
6293 /* Account for what is only usable for kernelcore */
6294 if (start_pfn < usable_startpfn) {
6295 unsigned long kernel_pages;
6296 kernel_pages = min(end_pfn, usable_startpfn)
6297 - start_pfn;
6298
6299 kernelcore_remaining -= min(kernel_pages,
6300 kernelcore_remaining);
6301 required_kernelcore -= min(kernel_pages,
6302 required_kernelcore);
6303
6304 /* Continue if range is now fully accounted */
6305 if (end_pfn <= usable_startpfn) {
6306
6307 /*
6308 * Push zone_movable_pfn to the end so
6309 * that if we have to rebalance
6310 * kernelcore across nodes, we will
6311 * not double account here
6312 */
6313 zone_movable_pfn[nid] = end_pfn;
6314 continue;
6315 }
6316 start_pfn = usable_startpfn;
6317 }
6318
6319 /*
6320 * The usable PFN range for ZONE_MOVABLE is from
6321 * start_pfn->end_pfn. Calculate size_pages as the
6322 * number of pages used as kernelcore
6323 */
6324 size_pages = end_pfn - start_pfn;
6325 if (size_pages > kernelcore_remaining)
6326 size_pages = kernelcore_remaining;
6327 zone_movable_pfn[nid] = start_pfn + size_pages;
6328
6329 /*
6330 * Some kernelcore has been met, update counts and
6331 * break if the kernelcore for this node has been
Pintu Kumarb8af2942013-09-11 14:20:34 -07006332 * satisfied
Mel Gorman2a1e2742007-07-17 04:03:12 -07006333 */
6334 required_kernelcore -= min(required_kernelcore,
6335 size_pages);
6336 kernelcore_remaining -= size_pages;
6337 if (!kernelcore_remaining)
6338 break;
6339 }
6340 }
6341
6342 /*
6343 * If there is still required_kernelcore, we do another pass with one
6344 * less node in the count. This will push zone_movable_pfn[nid] further
6345 * along on the nodes that still have memory until kernelcore is
Pintu Kumarb8af2942013-09-11 14:20:34 -07006346 * satisfied
Mel Gorman2a1e2742007-07-17 04:03:12 -07006347 */
6348 usable_nodes--;
6349 if (usable_nodes && required_kernelcore > usable_nodes)
6350 goto restart;
6351
Tang Chenb2f3eeb2014-01-21 15:49:38 -08006352out2:
Mel Gorman2a1e2742007-07-17 04:03:12 -07006353 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
6354 for (nid = 0; nid < MAX_NUMNODES; nid++)
6355 zone_movable_pfn[nid] =
6356 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
Yinghai Lu66918dc2009-06-30 11:41:37 -07006357
Yinghai Lu20e69262013-03-01 14:51:27 -08006358out:
Yinghai Lu66918dc2009-06-30 11:41:37 -07006359 /* restore the node_state */
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08006360 node_states[N_MEMORY] = saved_node_state;
Mel Gorman2a1e2742007-07-17 04:03:12 -07006361}
6362
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08006363/* Any regular or high memory on that node ? */
6364static void check_for_memory(pg_data_t *pgdat, int nid)
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07006365{
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07006366 enum zone_type zone_type;
6367
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08006368 if (N_MEMORY == N_NORMAL_MEMORY)
6369 return;
6370
6371 for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07006372 struct zone *zone = &pgdat->node_zones[zone_type];
Xishi Qiub38a8722013-11-12 15:07:20 -08006373 if (populated_zone(zone)) {
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08006374 node_set_state(nid, N_HIGH_MEMORY);
6375 if (N_NORMAL_MEMORY != N_HIGH_MEMORY &&
6376 zone_type <= ZONE_NORMAL)
6377 node_set_state(nid, N_NORMAL_MEMORY);
Bob Liud0048b02012-01-12 17:19:07 -08006378 break;
6379 }
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07006380 }
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07006381}
6382
Mel Gormanc7132162006-09-27 01:49:43 -07006383/**
6384 * free_area_init_nodes - Initialise all pg_data_t and zone data
Randy Dunlap88ca3b92006-10-04 02:15:25 -07006385 * @max_zone_pfn: an array of max PFNs for each zone
Mel Gormanc7132162006-09-27 01:49:43 -07006386 *
6387 * This will call free_area_init_node() for each active node in the system.
Zhang Zhen7d018172014-06-04 16:10:53 -07006388 * Using the page ranges provided by memblock_set_node(), the size of each
Mel Gormanc7132162006-09-27 01:49:43 -07006389 * zone in each node and their holes is calculated. If the maximum PFN
6390 * between two adjacent zones match, it is assumed that the zone is empty.
6391 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
6392 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
6393 * starts where the previous one ended. For example, ZONE_DMA32 starts
6394 * at arch_max_dma_pfn.
6395 */
6396void __init free_area_init_nodes(unsigned long *max_zone_pfn)
6397{
Tejun Heoc13291a2011-07-12 10:46:30 +02006398 unsigned long start_pfn, end_pfn;
6399 int i, nid;
Mel Gormana6af2bc2007-02-10 01:42:57 -08006400
Mel Gormanc7132162006-09-27 01:49:43 -07006401 /* Record where the zone boundaries are */
6402 memset(arch_zone_lowest_possible_pfn, 0,
6403 sizeof(arch_zone_lowest_possible_pfn));
6404 memset(arch_zone_highest_possible_pfn, 0,
6405 sizeof(arch_zone_highest_possible_pfn));
Oliver O'Halloran90cae1f2016-07-26 15:22:17 -07006406
6407 start_pfn = find_min_pfn_with_active_regions();
6408
6409 for (i = 0; i < MAX_NR_ZONES; i++) {
Mel Gorman2a1e2742007-07-17 04:03:12 -07006410 if (i == ZONE_MOVABLE)
6411 continue;
Oliver O'Halloran90cae1f2016-07-26 15:22:17 -07006412
6413 end_pfn = max(max_zone_pfn[i], start_pfn);
6414 arch_zone_lowest_possible_pfn[i] = start_pfn;
6415 arch_zone_highest_possible_pfn[i] = end_pfn;
6416
6417 start_pfn = end_pfn;
Mel Gormanc7132162006-09-27 01:49:43 -07006418 }
Mel Gorman2a1e2742007-07-17 04:03:12 -07006419 arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
6420 arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
6421
6422 /* Find the PFNs that ZONE_MOVABLE begins at in each node */
6423 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
Kautuk Consulb224ef82012-03-21 16:34:15 -07006424 find_zone_movable_pfns_for_nodes();
Mel Gormanc7132162006-09-27 01:49:43 -07006425
Mel Gormanc7132162006-09-27 01:49:43 -07006426 /* Print out the zone ranges */
Anton Blanchardf88dfff2014-12-10 15:42:53 -08006427 pr_info("Zone ranges:\n");
Mel Gorman2a1e2742007-07-17 04:03:12 -07006428 for (i = 0; i < MAX_NR_ZONES; i++) {
6429 if (i == ZONE_MOVABLE)
6430 continue;
Anton Blanchardf88dfff2014-12-10 15:42:53 -08006431 pr_info(" %-8s ", zone_names[i]);
David Rientjes72f0ba02010-03-05 13:42:14 -08006432 if (arch_zone_lowest_possible_pfn[i] ==
6433 arch_zone_highest_possible_pfn[i])
Anton Blanchardf88dfff2014-12-10 15:42:53 -08006434 pr_cont("empty\n");
David Rientjes72f0ba02010-03-05 13:42:14 -08006435 else
Juergen Gross8d29e182015-02-11 15:26:01 -08006436 pr_cont("[mem %#018Lx-%#018Lx]\n",
6437 (u64)arch_zone_lowest_possible_pfn[i]
6438 << PAGE_SHIFT,
6439 ((u64)arch_zone_highest_possible_pfn[i]
Bjorn Helgaasa62e2f42012-05-29 15:06:30 -07006440 << PAGE_SHIFT) - 1);
Mel Gorman2a1e2742007-07-17 04:03:12 -07006441 }
6442
6443 /* Print out the PFNs ZONE_MOVABLE begins at in each node */
Anton Blanchardf88dfff2014-12-10 15:42:53 -08006444 pr_info("Movable zone start for each node\n");
Mel Gorman2a1e2742007-07-17 04:03:12 -07006445 for (i = 0; i < MAX_NUMNODES; i++) {
6446 if (zone_movable_pfn[i])
Juergen Gross8d29e182015-02-11 15:26:01 -08006447 pr_info(" Node %d: %#018Lx\n", i,
6448 (u64)zone_movable_pfn[i] << PAGE_SHIFT);
Mel Gorman2a1e2742007-07-17 04:03:12 -07006449 }
Mel Gormanc7132162006-09-27 01:49:43 -07006450
Wanpeng Lif2d52fe2012-10-08 16:32:24 -07006451 /* Print out the early node map */
Anton Blanchardf88dfff2014-12-10 15:42:53 -08006452 pr_info("Early memory node ranges\n");
Tejun Heoc13291a2011-07-12 10:46:30 +02006453 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
Juergen Gross8d29e182015-02-11 15:26:01 -08006454 pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid,
6455 (u64)start_pfn << PAGE_SHIFT,
6456 ((u64)end_pfn << PAGE_SHIFT) - 1);
Mel Gormanc7132162006-09-27 01:49:43 -07006457
6458 /* Initialise every node */
Mel Gorman708614e2008-07-23 21:26:51 -07006459 mminit_verify_pageflags_layout();
Christoph Lameter8ef82862007-02-20 13:57:52 -08006460 setup_nr_node_ids();
Mel Gormanc7132162006-09-27 01:49:43 -07006461 for_each_online_node(nid) {
6462 pg_data_t *pgdat = NODE_DATA(nid);
Johannes Weiner9109fb72008-07-23 21:27:20 -07006463 free_area_init_node(nid, NULL,
Mel Gormanc7132162006-09-27 01:49:43 -07006464 find_min_pfn_for_node(nid), NULL);
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07006465
6466 /* Any memory on that node */
6467 if (pgdat->node_present_pages)
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08006468 node_set_state(nid, N_MEMORY);
6469 check_for_memory(pgdat, nid);
Mel Gormanc7132162006-09-27 01:49:43 -07006470 }
6471}
Mel Gorman2a1e2742007-07-17 04:03:12 -07006472
Mel Gorman7e63efe2007-07-17 04:03:15 -07006473static int __init cmdline_parse_core(char *p, unsigned long *core)
Mel Gorman2a1e2742007-07-17 04:03:12 -07006474{
6475 unsigned long long coremem;
6476 if (!p)
6477 return -EINVAL;
6478
6479 coremem = memparse(p, &p);
Mel Gorman7e63efe2007-07-17 04:03:15 -07006480 *core = coremem >> PAGE_SHIFT;
Mel Gorman2a1e2742007-07-17 04:03:12 -07006481
Mel Gorman7e63efe2007-07-17 04:03:15 -07006482 /* Paranoid check that UL is enough for the coremem value */
Mel Gorman2a1e2742007-07-17 04:03:12 -07006483 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
6484
6485 return 0;
6486}
Mel Gormaned7ed362007-07-17 04:03:14 -07006487
Mel Gorman7e63efe2007-07-17 04:03:15 -07006488/*
6489 * kernelcore=size sets the amount of memory for use for allocations that
6490 * cannot be reclaimed or migrated.
6491 */
6492static int __init cmdline_parse_kernelcore(char *p)
6493{
Taku Izumi342332e2016-03-15 14:55:22 -07006494 /* parse kernelcore=mirror */
6495 if (parse_option_str(p, "mirror")) {
6496 mirrored_kernelcore = true;
6497 return 0;
6498 }
6499
Mel Gorman7e63efe2007-07-17 04:03:15 -07006500 return cmdline_parse_core(p, &required_kernelcore);
6501}
6502
6503/*
6504 * movablecore=size sets the amount of memory for use for allocations that
6505 * can be reclaimed or migrated.
6506 */
6507static int __init cmdline_parse_movablecore(char *p)
6508{
6509 return cmdline_parse_core(p, &required_movablecore);
6510}
6511
Mel Gormaned7ed362007-07-17 04:03:14 -07006512early_param("kernelcore", cmdline_parse_kernelcore);
Mel Gorman7e63efe2007-07-17 04:03:15 -07006513early_param("movablecore", cmdline_parse_movablecore);
Mel Gormaned7ed362007-07-17 04:03:14 -07006514
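/*
 * Example command-line usage (illustrative values, not from the original
 * source).  memparse() accepts the usual K/M/G suffixes:
 *
 *   kernelcore=512M    keep ~512MiB usable for unmovable allocations,
 *                      spread across nodes; the remainder of each node
 *                      becomes ZONE_MOVABLE
 *   movablecore=2G     size ZONE_MOVABLE to ~2GiB and derive kernelcore
 *                      from the total memory
 *   kernelcore=mirror  place kernel (unmovable) memory only on mirrored
 *                      memory ranges, as handled above
 */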
Tejun Heo0ee332c2011-12-08 10:22:09 -08006515#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
Mel Gormanc7132162006-09-27 01:49:43 -07006516
Jiang Liuc3d5f5f2013-07-03 15:03:14 -07006517void adjust_managed_page_count(struct page *page, long count)
6518{
6519 spin_lock(&managed_page_count_lock);
6520 page_zone(page)->managed_pages += count;
6521 totalram_pages += count;
Jiang Liu3dcc0572013-07-03 15:03:21 -07006522#ifdef CONFIG_HIGHMEM
6523 if (PageHighMem(page))
6524 totalhigh_pages += count;
6525#endif
Jiang Liuc3d5f5f2013-07-03 15:03:14 -07006526 spin_unlock(&managed_page_count_lock);
6527}
Jiang Liu3dcc0572013-07-03 15:03:21 -07006528EXPORT_SYMBOL(adjust_managed_page_count);
Jiang Liuc3d5f5f2013-07-03 15:03:14 -07006529
Jiang Liu11199692013-07-03 15:02:48 -07006530unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
Jiang Liu69afade2013-04-29 15:06:21 -07006531{
Jiang Liu11199692013-07-03 15:02:48 -07006532 void *pos;
6533 unsigned long pages = 0;
Jiang Liu69afade2013-04-29 15:06:21 -07006534
Jiang Liu11199692013-07-03 15:02:48 -07006535 start = (void *)PAGE_ALIGN((unsigned long)start);
6536 end = (void *)((unsigned long)end & PAGE_MASK);
6537 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
Jiang Liudbe67df2013-07-03 15:02:51 -07006538 if ((unsigned int)poison <= 0xFF)
Jiang Liu11199692013-07-03 15:02:48 -07006539 memset(pos, poison, PAGE_SIZE);
6540 free_reserved_page(virt_to_page(pos));
Jiang Liu69afade2013-04-29 15:06:21 -07006541 }
6542
6543 if (pages && s)
Jiang Liu11199692013-07-03 15:02:48 -07006544 pr_info("Freeing %s memory: %ldK (%p - %p)\n",
Jiang Liu69afade2013-04-29 15:06:21 -07006545 s, pages << (PAGE_SHIFT - 10), start, end);
6546
6547 return pages;
6548}
Jiang Liu11199692013-07-03 15:02:48 -07006549EXPORT_SYMBOL(free_reserved_area);
Jiang Liu69afade2013-04-29 15:06:21 -07006550
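/*
 * Sketch of a typical caller (illustrative only, not part of this file):
 * architectures commonly release their init sections through this helper,
 * e.g. via free_initmem_default() in include/linux/mm.h:
 *
 *	extern char __init_begin[], __init_end[];
 *
 *	void free_initmem(void)
 *	{
 *		free_reserved_area(&__init_begin, &__init_end,
 *				   POISON_FREE_INITMEM, "unused kernel");
 *	}
 *
 * Each page in the range is optionally poisoned, then handed back to the
 * buddy allocator and counted into totalram_pages via free_reserved_page().
 */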
Jiang Liucfa11e02013-04-29 15:07:00 -07006551#ifdef CONFIG_HIGHMEM
6552void free_highmem_page(struct page *page)
6553{
6554 __free_reserved_page(page);
6555 totalram_pages++;
Jiang Liu7b4b2a02013-07-03 15:03:11 -07006556 page_zone(page)->managed_pages++;
Jiang Liucfa11e02013-04-29 15:07:00 -07006557 totalhigh_pages++;
6558}
6559#endif
6560
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07006561
6562void __init mem_init_print_info(const char *str)
6563{
6564 unsigned long physpages, codesize, datasize, rosize, bss_size;
6565 unsigned long init_code_size, init_data_size;
6566
6567 physpages = get_num_physpages();
6568 codesize = _etext - _stext;
6569 datasize = _edata - _sdata;
6570 rosize = __end_rodata - __start_rodata;
6571 bss_size = __bss_stop - __bss_start;
6572 init_data_size = __init_end - __init_begin;
6573 init_code_size = _einittext - _sinittext;
6574
6575 /*
6576 * Detect special cases and adjust section sizes accordingly:
6577 * 1) .init.* may be embedded into .data sections
6578 * 2) .init.text.* may be out of [__init_begin, __init_end],
6579 * please refer to arch/tile/kernel/vmlinux.lds.S.
6580 * 3) .rodata.* may be embedded into .text or .data sections.
6581 */
6582#define adj_init_size(start, end, size, pos, adj) \
Pintu Kumarb8af2942013-09-11 14:20:34 -07006583 do { \
6584 if (start <= pos && pos < end && size > adj) \
6585 size -= adj; \
6586 } while (0)
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07006587
6588 adj_init_size(__init_begin, __init_end, init_data_size,
6589 _sinittext, init_code_size);
6590 adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
6591 adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
6592 adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
6593 adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
6594
6595#undef adj_init_size
6596
Joe Perches756a025f02016-03-17 14:19:47 -07006597 pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07006598#ifdef CONFIG_HIGHMEM
Joe Perches756a025f02016-03-17 14:19:47 -07006599 ", %luK highmem"
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07006600#endif
Joe Perches756a025f02016-03-17 14:19:47 -07006601 "%s%s)\n",
6602 nr_free_pages() << (PAGE_SHIFT - 10),
6603 physpages << (PAGE_SHIFT - 10),
6604 codesize >> 10, datasize >> 10, rosize >> 10,
6605 (init_data_size + init_code_size) >> 10, bss_size >> 10,
6606 (physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT - 10),
6607 totalcma_pages << (PAGE_SHIFT - 10),
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07006608#ifdef CONFIG_HIGHMEM
Joe Perches756a025f02016-03-17 14:19:47 -07006609 totalhigh_pages << (PAGE_SHIFT - 10),
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07006610#endif
Joe Perches756a025f02016-03-17 14:19:47 -07006611 str ? ", " : "", str ? str : "");
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07006612}
6613
Mel Gorman0e0b8642006-09-27 01:49:56 -07006614/**
Randy Dunlap88ca3b92006-10-04 02:15:25 -07006615 * set_dma_reserve - set the specified number of pages reserved in the first zone
6616 * @new_dma_reserve: The number of pages to mark reserved
Mel Gorman0e0b8642006-09-27 01:49:56 -07006617 *
Yaowei Bai013110a2015-09-08 15:04:10 -07006618 * The per-cpu batchsize and zone watermarks are determined by managed_pages.
Mel Gorman0e0b8642006-09-27 01:49:56 -07006619 * In the DMA zone, a significant percentage may be consumed by kernel image
6620 * and other unfreeable allocations which can skew the watermarks badly. This
Randy Dunlap88ca3b92006-10-04 02:15:25 -07006621 * function may optionally be used to account for unfreeable pages in the
6622 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
6623 * smaller per-cpu batchsize.
Mel Gorman0e0b8642006-09-27 01:49:56 -07006624 */
6625void __init set_dma_reserve(unsigned long new_dma_reserve)
6626{
6627 dma_reserve = new_dma_reserve;
6628}
6629
Linus Torvalds1da177e2005-04-16 15:20:36 -07006630void __init free_area_init(unsigned long *zones_size)
6631{
Johannes Weiner9109fb72008-07-23 21:27:20 -07006632 free_area_init_node(0, zones_size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006633 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
6634}
Linus Torvalds1da177e2005-04-16 15:20:36 -07006635
Linus Torvalds1da177e2005-04-16 15:20:36 -07006636static int page_alloc_cpu_notify(struct notifier_block *self,
6637 unsigned long action, void *hcpu)
6638{
6639 int cpu = (unsigned long)hcpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006640
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07006641 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
Konstantin Khlebnikovf0cb3c72012-03-21 16:34:06 -07006642 lru_add_drain_cpu(cpu);
Christoph Lameter9f8f2172008-02-04 22:29:11 -08006643 drain_pages(cpu);
6644
6645 /*
6646 * Spill the event counters of the dead processor
6647 * into the current processors event counters.
6648 * This artificially elevates the count of the current
6649 * processor.
6650 */
Christoph Lameterf8891e52006-06-30 01:55:45 -07006651 vm_events_fold_cpu(cpu);
Christoph Lameter9f8f2172008-02-04 22:29:11 -08006652
6653 /*
6654 * Zero the differential counters of the dead processor
6655 * so that the vm statistics are consistent.
6656 *
6657 * This is only okay since the processor is dead and cannot
6658 * race with what we are doing.
6659 */
Christoph Lameter2bb921e2013-09-11 14:21:30 -07006660 cpu_vm_stats_fold(cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006661 }
6662 return NOTIFY_OK;
6663}
Linus Torvalds1da177e2005-04-16 15:20:36 -07006664
6665void __init page_alloc_init(void)
6666{
6667 hotcpu_notifier(page_alloc_cpu_notify, 0);
6668}
6669
6670/*
Yaowei Bai34b10062015-09-08 15:04:13 -07006671 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07006672 * or min_free_kbytes changes.
6673 */
6674static void calculate_totalreserve_pages(void)
6675{
6676 struct pglist_data *pgdat;
6677 unsigned long reserve_pages = 0;
Christoph Lameter2f6726e2006-09-25 23:31:18 -07006678 enum zone_type i, j;
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07006679
6680 for_each_online_pgdat(pgdat) {
Mel Gorman281e3722016-07-28 15:46:11 -07006681
6682 pgdat->totalreserve_pages = 0;
6683
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07006684 for (i = 0; i < MAX_NR_ZONES; i++) {
6685 struct zone *zone = pgdat->node_zones + i;
Mel Gorman3484b2d2014-08-06 16:07:14 -07006686 long max = 0;
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07006687
6688 /* Find valid and maximum lowmem_reserve in the zone */
6689 for (j = i; j < MAX_NR_ZONES; j++) {
6690 if (zone->lowmem_reserve[j] > max)
6691 max = zone->lowmem_reserve[j];
6692 }
6693
Mel Gorman41858962009-06-16 15:32:12 -07006694 /* we treat the high watermark as reserved pages. */
6695 max += high_wmark_pages(zone);
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07006696
Jiang Liub40da042013-02-22 16:33:52 -08006697 if (max > zone->managed_pages)
6698 max = zone->managed_pages;
Johannes Weinera8d01432016-01-14 15:20:15 -08006699
Mel Gorman281e3722016-07-28 15:46:11 -07006700 pgdat->totalreserve_pages += max;
Johannes Weinera8d01432016-01-14 15:20:15 -08006701
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07006702 reserve_pages += max;
6703 }
6704 }
6705 totalreserve_pages = reserve_pages;
6706}
6707
6708/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07006709 * setup_per_zone_lowmem_reserve - called whenever
Yaowei Bai34b10062015-09-08 15:04:13 -07006710 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
Linus Torvalds1da177e2005-04-16 15:20:36 -07006711 * has a correct pages reserved value, so an adequate number of
6712 * pages are left in the zone after a successful __alloc_pages().
6713 */
6714static void setup_per_zone_lowmem_reserve(void)
6715{
6716 struct pglist_data *pgdat;
Christoph Lameter2f6726e2006-09-25 23:31:18 -07006717 enum zone_type j, idx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006718
KAMEZAWA Hiroyukiec936fc2006-03-27 01:15:59 -08006719 for_each_online_pgdat(pgdat) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006720 for (j = 0; j < MAX_NR_ZONES; j++) {
6721 struct zone *zone = pgdat->node_zones + j;
Jiang Liub40da042013-02-22 16:33:52 -08006722 unsigned long managed_pages = zone->managed_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006723
6724 zone->lowmem_reserve[j] = 0;
6725
Christoph Lameter2f6726e2006-09-25 23:31:18 -07006726 idx = j;
6727 while (idx) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006728 struct zone *lower_zone;
6729
Christoph Lameter2f6726e2006-09-25 23:31:18 -07006730 idx--;
6731
Linus Torvalds1da177e2005-04-16 15:20:36 -07006732 if (sysctl_lowmem_reserve_ratio[idx] < 1)
6733 sysctl_lowmem_reserve_ratio[idx] = 1;
6734
6735 lower_zone = pgdat->node_zones + idx;
Jiang Liub40da042013-02-22 16:33:52 -08006736 lower_zone->lowmem_reserve[j] = managed_pages /
Linus Torvalds1da177e2005-04-16 15:20:36 -07006737 sysctl_lowmem_reserve_ratio[idx];
Jiang Liub40da042013-02-22 16:33:52 -08006738 managed_pages += lower_zone->managed_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006739 }
6740 }
6741 }
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07006742
6743 /* update totalreserve_pages */
6744 calculate_totalreserve_pages();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006745}
6746
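/*
 * Worked example (illustrative numbers, not from the original source), with
 * the default lowmem_reserve_ratio of 256 for DMA and DMA32: if ZONE_NORMAL
 * manages 1,000,000 pages and ZONE_DMA32 manages 200,000, the loop above
 * sets
 *
 *	DMA32->lowmem_reserve[ZONE_NORMAL] = 1,000,000 / 256 ~= 3,906 pages
 *	DMA->lowmem_reserve[ZONE_NORMAL]   = 1,200,000 / 256 ~= 4,687 pages
 *
 * so an allocation that could have been satisfied from ZONE_NORMAL must
 * leave roughly that many extra free pages before it is allowed to dip
 * into the lower zones.
 */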
Mel Gormancfd3da12011-04-25 21:36:42 +00006747static void __setup_per_zone_wmarks(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006748{
6749 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
6750 unsigned long lowmem_pages = 0;
6751 struct zone *zone;
6752 unsigned long flags;
6753
6754 /* Calculate total number of !ZONE_HIGHMEM pages */
6755 for_each_zone(zone) {
6756 if (!is_highmem(zone))
Jiang Liub40da042013-02-22 16:33:52 -08006757 lowmem_pages += zone->managed_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006758 }
6759
6760 for_each_zone(zone) {
Andrew Mortonac924c62006-05-15 09:43:59 -07006761 u64 tmp;
6762
Gerald Schaefer1125b4e2008-10-18 20:27:11 -07006763 spin_lock_irqsave(&zone->lock, flags);
Jiang Liub40da042013-02-22 16:33:52 -08006764 tmp = (u64)pages_min * zone->managed_pages;
Andrew Mortonac924c62006-05-15 09:43:59 -07006765 do_div(tmp, lowmem_pages);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006766 if (is_highmem(zone)) {
6767 /*
Nick Piggin669ed172005-11-13 16:06:45 -08006768 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
6769 * need highmem pages, so cap pages_min to a small
6770 * value here.
6771 *
Mel Gorman41858962009-06-16 15:32:12 -07006772 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
Yaowei Bai42ff2702015-04-14 15:47:14 -07006773			 * deltas control async page reclaim, and so should
Nick Piggin669ed172005-11-13 16:06:45 -08006774 * not be capped for highmem.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006775 */
Andrew Morton90ae8d62013-02-22 16:32:22 -08006776 unsigned long min_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006777
Jiang Liub40da042013-02-22 16:33:52 -08006778 min_pages = zone->managed_pages / 1024;
Andrew Morton90ae8d62013-02-22 16:32:22 -08006779 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
Mel Gorman41858962009-06-16 15:32:12 -07006780 zone->watermark[WMARK_MIN] = min_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006781 } else {
Nick Piggin669ed172005-11-13 16:06:45 -08006782 /*
6783 * If it's a lowmem zone, reserve a number of pages
Linus Torvalds1da177e2005-04-16 15:20:36 -07006784 * proportionate to the zone's size.
6785 */
Mel Gorman41858962009-06-16 15:32:12 -07006786 zone->watermark[WMARK_MIN] = tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006787 }
6788
Johannes Weiner795ae7a2016-03-17 14:19:14 -07006789 /*
6790 * Set the kswapd watermarks distance according to the
6791 * scale factor in proportion to available memory, but
6792 * ensure a minimum size on small systems.
6793 */
6794 tmp = max_t(u64, tmp >> 2,
6795 mult_frac(zone->managed_pages,
6796 watermark_scale_factor, 10000));
6797
6798 zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp;
6799 zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2;
Marek Szyprowski49f223a2012-01-25 12:49:24 +01006800
Gerald Schaefer1125b4e2008-10-18 20:27:11 -07006801 spin_unlock_irqrestore(&zone->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006802 }
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07006803
6804 /* update totalreserve_pages */
6805 calculate_totalreserve_pages();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006806}
6807
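/*
 * Worked example (illustrative, not from the original source): with 4KiB
 * pages, min_free_kbytes = 65536 gives pages_min = 16384.  For a single
 * lowmem zone managing 4,000,000 pages, WMARK_MIN = 16384; the kswapd
 * distance is max(16384 >> 2, 4,000,000 * 10 / 10000) = 4096 with the
 * default watermark_scale_factor of 10, so WMARK_LOW = 20480 and
 * WMARK_HIGH = 24576 pages.
 */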
Mel Gormancfd3da12011-04-25 21:36:42 +00006808/**
6809 * setup_per_zone_wmarks - called when min_free_kbytes changes
6810 * or when memory is hot-{added|removed}
6811 *
6812 * Ensures that the watermark[min,low,high] values for each zone are set
6813 * correctly with respect to min_free_kbytes.
6814 */
6815void setup_per_zone_wmarks(void)
6816{
6817 mutex_lock(&zonelists_mutex);
6818 __setup_per_zone_wmarks();
6819 mutex_unlock(&zonelists_mutex);
6820}
6821
Randy Dunlap55a44622009-09-21 17:01:20 -07006822/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07006823 * Initialise min_free_kbytes.
6824 *
6825 * For small machines we want it small (128k min). For large machines
6826 * we want it large (64MB max). But it is not linear, because network
6827 * bandwidth does not increase linearly with machine size. We use
6828 *
Pintu Kumarb8af2942013-09-11 14:20:34 -07006829 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
Linus Torvalds1da177e2005-04-16 15:20:36 -07006830 * min_free_kbytes = sqrt(lowmem_kbytes * 16)
6831 *
6832 * which yields
6833 *
6834 * 16MB: 512k
6835 * 32MB: 724k
6836 * 64MB: 1024k
6837 * 128MB: 1448k
6838 * 256MB: 2048k
6839 * 512MB: 2896k
6840 * 1024MB: 4096k
6841 * 2048MB: 5792k
6842 * 4096MB: 8192k
6843 * 8192MB: 11584k
6844 * 16384MB: 16384k
6845 */
KOSAKI Motohiro1b79acc2011-05-24 17:11:32 -07006846int __meminit init_per_zone_wmark_min(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006847{
6848 unsigned long lowmem_kbytes;
Michal Hocko5f127332013-07-08 16:00:40 -07006849 int new_min_free_kbytes;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006850
6851 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
Michal Hocko5f127332013-07-08 16:00:40 -07006852 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006853
Michal Hocko5f127332013-07-08 16:00:40 -07006854 if (new_min_free_kbytes > user_min_free_kbytes) {
6855 min_free_kbytes = new_min_free_kbytes;
6856 if (min_free_kbytes < 128)
6857 min_free_kbytes = 128;
6858 if (min_free_kbytes > 65536)
6859 min_free_kbytes = 65536;
6860 } else {
6861 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
6862 new_min_free_kbytes, user_min_free_kbytes);
6863 }
Minchan Kimbc75d332009-06-16 15:32:48 -07006864 setup_per_zone_wmarks();
KOSAKI Motohiroa6cccdc2011-05-24 17:11:33 -07006865 refresh_zone_stat_thresholds();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006866 setup_per_zone_lowmem_reserve();
Joonsoo Kim6423aa82016-08-10 16:27:49 -07006867
6868#ifdef CONFIG_NUMA
6869 setup_min_unmapped_ratio();
6870 setup_min_slab_ratio();
6871#endif
6872
Linus Torvalds1da177e2005-04-16 15:20:36 -07006873 return 0;
6874}
Jason Baronbc22af72016-05-05 16:22:12 -07006875core_initcall(init_per_zone_wmark_min)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006876
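/*
 * Illustrative check of the table in the comment above (not part of the
 * original source): with 4GiB of lowmem, lowmem_kbytes = 4,194,304 and
 * int_sqrt(4,194,304 * 16) = int_sqrt(67,108,864) = 8192, matching the
 * "4096MB: 8192k" row.  The result is then clamped to [128, 65536] and
 * only applied if it exceeds a user-supplied min_free_kbytes.
 */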
6877/*
Pintu Kumarb8af2942013-09-11 14:20:34 -07006878 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
Linus Torvalds1da177e2005-04-16 15:20:36 -07006879 * that we can call two helper functions whenever min_free_kbytes
6880 * changes.
6881 */
Joe Perchescccad5b2014-06-06 14:38:09 -07006882int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07006883 void __user *buffer, size_t *length, loff_t *ppos)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006884{
Han Pingtianda8c7572014-01-23 15:53:17 -08006885 int rc;
6886
6887 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6888 if (rc)
6889 return rc;
6890
Michal Hocko5f127332013-07-08 16:00:40 -07006891 if (write) {
6892 user_min_free_kbytes = min_free_kbytes;
Minchan Kimbc75d332009-06-16 15:32:48 -07006893 setup_per_zone_wmarks();
Michal Hocko5f127332013-07-08 16:00:40 -07006894 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006895 return 0;
6896}
6897
Johannes Weiner795ae7a2016-03-17 14:19:14 -07006898int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
6899 void __user *buffer, size_t *length, loff_t *ppos)
6900{
6901 int rc;
6902
6903 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6904 if (rc)
6905 return rc;
6906
6907 if (write)
6908 setup_per_zone_wmarks();
6909
6910 return 0;
6911}
6912
Christoph Lameter96146342006-07-03 00:24:13 -07006913#ifdef CONFIG_NUMA
Joonsoo Kim6423aa82016-08-10 16:27:49 -07006914static void setup_min_unmapped_ratio(void)
Christoph Lameter96146342006-07-03 00:24:13 -07006915{
Joonsoo Kim6423aa82016-08-10 16:27:49 -07006916 pg_data_t *pgdat;
Christoph Lameter96146342006-07-03 00:24:13 -07006917 struct zone *zone;
Christoph Lameter96146342006-07-03 00:24:13 -07006918
Mel Gormana5f5f912016-07-28 15:46:32 -07006919 for_each_online_pgdat(pgdat)
Joonsoo Kim81cbcbc2016-08-10 16:27:46 -07006920 pgdat->min_unmapped_pages = 0;
Mel Gormana5f5f912016-07-28 15:46:32 -07006921
Christoph Lameter96146342006-07-03 00:24:13 -07006922 for_each_zone(zone)
Mel Gormana5f5f912016-07-28 15:46:32 -07006923 zone->zone_pgdat->min_unmapped_pages += (zone->managed_pages *
Christoph Lameter96146342006-07-03 00:24:13 -07006924 sysctl_min_unmapped_ratio) / 100;
Christoph Lameter96146342006-07-03 00:24:13 -07006925}
Christoph Lameter0ff38492006-09-25 23:31:52 -07006926
Joonsoo Kim6423aa82016-08-10 16:27:49 -07006927
6928int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07006929 void __user *buffer, size_t *length, loff_t *ppos)
Christoph Lameter0ff38492006-09-25 23:31:52 -07006930{
Christoph Lameter0ff38492006-09-25 23:31:52 -07006931 int rc;
6932
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07006933 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
Christoph Lameter0ff38492006-09-25 23:31:52 -07006934 if (rc)
6935 return rc;
6936
Joonsoo Kim6423aa82016-08-10 16:27:49 -07006937 setup_min_unmapped_ratio();
6938
6939 return 0;
6940}
6941
6942static void setup_min_slab_ratio(void)
6943{
6944 pg_data_t *pgdat;
6945 struct zone *zone;
6946
Mel Gormana5f5f912016-07-28 15:46:32 -07006947 for_each_online_pgdat(pgdat)
6948 pgdat->min_slab_pages = 0;
6949
Christoph Lameter0ff38492006-09-25 23:31:52 -07006950 for_each_zone(zone)
Mel Gormana5f5f912016-07-28 15:46:32 -07006951 zone->zone_pgdat->min_slab_pages += (zone->managed_pages *
Christoph Lameter0ff38492006-09-25 23:31:52 -07006952 sysctl_min_slab_ratio) / 100;
Joonsoo Kim6423aa82016-08-10 16:27:49 -07006953}
6954
6955int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
6956 void __user *buffer, size_t *length, loff_t *ppos)
6957{
6958 int rc;
6959
6960 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6961 if (rc)
6962 return rc;
6963
6964 setup_min_slab_ratio();
6965
Christoph Lameter0ff38492006-09-25 23:31:52 -07006966 return 0;
6967}
Christoph Lameter96146342006-07-03 00:24:13 -07006968#endif
6969
Linus Torvalds1da177e2005-04-16 15:20:36 -07006970/*
6971 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
6972 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
6973 * whenever sysctl_lowmem_reserve_ratio changes.
6974 *
6975 * The reserve ratio obviously has absolutely no relation with the
Mel Gorman41858962009-06-16 15:32:12 -07006976 * minimum watermarks. The lowmem reserve ratio is only meaningful as
Linus Torvalds1da177e2005-04-16 15:20:36 -07006977 * a function of the boot-time zone sizes.
6978 */
Joe Perchescccad5b2014-06-06 14:38:09 -07006979int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07006980 void __user *buffer, size_t *length, loff_t *ppos)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006981{
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07006982 proc_dointvec_minmax(table, write, buffer, length, ppos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006983 setup_per_zone_lowmem_reserve();
6984 return 0;
6985}
6986
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08006987/*
6988 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
Pintu Kumarb8af2942013-09-11 14:20:34 -07006989 * cpu. It is the fraction of total pages in each zone that a hot per cpu
6990 * pagelist can have before it gets flushed back to buddy allocator.
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08006991 */
Joe Perchescccad5b2014-06-06 14:38:09 -07006992int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07006993 void __user *buffer, size_t *length, loff_t *ppos)
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08006994{
6995 struct zone *zone;
David Rientjes7cd2b0a2014-06-23 13:22:04 -07006996 int old_percpu_pagelist_fraction;
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08006997 int ret;
6998
Cody P Schaferc8e251f2013-07-03 15:01:29 -07006999 mutex_lock(&pcp_batch_high_lock);
David Rientjes7cd2b0a2014-06-23 13:22:04 -07007000 old_percpu_pagelist_fraction = percpu_pagelist_fraction;
7001
7002 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
7003 if (!write || ret < 0)
7004 goto out;
7005
7006 /* Sanity checking to avoid pcp imbalance */
7007 if (percpu_pagelist_fraction &&
7008 percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
7009 percpu_pagelist_fraction = old_percpu_pagelist_fraction;
7010 ret = -EINVAL;
7011 goto out;
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08007012 }
David Rientjes7cd2b0a2014-06-23 13:22:04 -07007013
7014 /* No change? */
7015 if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
7016 goto out;
7017
7018 for_each_populated_zone(zone) {
7019 unsigned int cpu;
7020
7021 for_each_possible_cpu(cpu)
7022 pageset_set_high_and_batch(zone,
7023 per_cpu_ptr(zone->pageset, cpu));
7024 }
7025out:
Cody P Schaferc8e251f2013-07-03 15:01:29 -07007026 mutex_unlock(&pcp_batch_high_lock);
David Rientjes7cd2b0a2014-06-23 13:22:04 -07007027 return ret;
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08007028}
7029
Rasmus Villemoesa9919c72015-06-24 16:56:28 -07007030#ifdef CONFIG_NUMA
David S. Millerf034b5d2006-08-24 03:08:07 -07007031int hashdist = HASHDIST_DEFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007032
Linus Torvalds1da177e2005-04-16 15:20:36 -07007033static int __init set_hashdist(char *str)
7034{
7035 if (!str)
7036 return 0;
7037 hashdist = simple_strtoul(str, &str, 0);
7038 return 1;
7039}
7040__setup("hashdist=", set_hashdist);
7041#endif
7042
Srikar Dronamrajuf6f34b42016-10-07 16:59:15 -07007043#ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
7044/*
7045 * Returns the number of pages that arch has reserved but
7046 * is not known to alloc_large_system_hash().
7047 */
7048static unsigned long __init arch_reserved_kernel_pages(void)
7049{
7050 return 0;
7051}
7052#endif

/*
 * allocate a large system hash table from bootmem
 * - it is assumed that the hash table must contain an exact power-of-2
 *   quantity of entries
 * - limit is the number of hash buckets, not the total allocation size
 */
void *__init alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long low_limit,
				     unsigned long high_limit)
{
	unsigned long long max = high_limit;
	unsigned long log2qty, size;
	void *table = NULL;

	/* allow the kernel cmdline to have a say */
	if (!numentries) {
		/* round applicable memory size up to nearest megabyte */
		numentries = nr_kernel_pages;
		numentries -= arch_reserved_kernel_pages();

		/* It isn't necessary when PAGE_SIZE >= 1MB */
		if (PAGE_SHIFT < 20)
			numentries = round_up(numentries, (1<<20)/PAGE_SIZE);

		/* limit to 1 bucket per 2^scale bytes of low memory */
		if (scale > PAGE_SHIFT)
			numentries >>= (scale - PAGE_SHIFT);
		else
			numentries <<= (PAGE_SHIFT - scale);

		/* Make sure we've got at least a 0-order allocation.. */
		if (unlikely(flags & HASH_SMALL)) {
			/* Makes no sense without HASH_EARLY */
			WARN_ON(!(flags & HASH_EARLY));
			if (!(numentries >> *_hash_shift)) {
				numentries = 1UL << *_hash_shift;
				BUG_ON(!numentries);
			}
		} else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
			numentries = PAGE_SIZE / bucketsize;
	}
	numentries = roundup_pow_of_two(numentries);

	/* limit allocation size to 1/16 total memory by default */
	if (max == 0) {
		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
		do_div(max, bucketsize);
	}
	max = min(max, 0x80000000ULL);

	if (numentries < low_limit)
		numentries = low_limit;
	if (numentries > max)
		numentries = max;

	log2qty = ilog2(numentries);

	do {
		size = bucketsize << log2qty;
		if (flags & HASH_EARLY)
			table = memblock_virt_alloc_nopanic(size, 0);
		else if (hashdist)
			table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
		else {
			/*
			 * If bucketsize is not a power-of-two, we may free
			 * some pages at the end of the hash table, which
			 * alloc_pages_exact() does automatically.
			 */
			if (get_order(size) < MAX_ORDER) {
				table = alloc_pages_exact(size, GFP_ATOMIC);
				kmemleak_alloc(table, size, 1, GFP_ATOMIC);
			}
		}
	} while (!table && size > PAGE_SIZE && --log2qty);

	if (!table)
		panic("Failed to allocate %s hash table\n", tablename);

	pr_info("%s hash table entries: %ld (order: %d, %lu bytes)\n",
		tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size);

	if (_hash_shift)
		*_hash_shift = log2qty;
	if (_hash_mask)
		*_hash_mask = (1 << log2qty) - 1;

	return table;
}
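/*
 * Illustrative sketch (not part of this file): a typical boot-time caller
 * sizes its hash table the way the inode/dentry caches do.  The identifiers
 * example_hashtable, example_hash_shift and example_hash_mask are made up
 * for the example; only alloc_large_system_hash() itself is real.
 */
#if 0	/* example only, never compiled */
static struct hlist_head *example_hashtable;
static unsigned int example_hash_shift;
static unsigned int example_hash_mask;

static void __init example_hash_init(void)
{
	example_hashtable = alloc_large_system_hash("Example-cache",
					sizeof(struct hlist_head),
					0,		/* size from memory */
					14,		/* 1 bucket per 16 KiB */
					HASH_EARLY,	/* allocate from memblock */
					&example_hash_shift,
					&example_hash_mask,
					0, 0);		/* no explicit limits */
}
#endif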

/*
 * This function checks whether the pageblock includes unmovable pages or not.
 * If @count is not zero, it is okay to include fewer than @count unmovable
 * pages.
 *
 * The PageLRU check without isolation or lru_lock can race, so a
 * MIGRATE_MOVABLE block might include unmovable pages.  This means the
 * function cannot be expected to be exact.
 */
bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
			 bool skip_hwpoisoned_pages)
{
	unsigned long pfn, iter, found;
	int mt;

	/*
	 * To avoid noisy data, lru_add_drain_all() should be called first.
	 * A ZONE_MOVABLE zone never contains unmovable pages.
	 */
	if (zone_idx(zone) == ZONE_MOVABLE)
		return false;
	mt = get_pageblock_migratetype(page);
	if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
		return false;

	pfn = page_to_pfn(page);
	for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
		unsigned long check = pfn + iter;

		if (!pfn_valid_within(check))
			continue;

		page = pfn_to_page(check);

		/*
		 * Hugepages are not in LRU lists, but they're movable.
		 * We need not scan over tail pages because we don't
		 * handle each tail page individually in migration.
		 */
		if (PageHuge(page)) {
			iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
			continue;
		}

		/*
		 * We can't use page_count() without pinning the page
		 * because another CPU can free the compound page.
		 * This check already skips compound tails of THP
		 * because their page->_refcount is zero at all times.
		 */
		if (!page_ref_count(page)) {
			if (PageBuddy(page))
				iter += (1 << page_order(page)) - 1;
			continue;
		}

		/*
		 * The HWPoisoned page may not be in the buddy system, and
		 * page_count() is not 0.
		 */
		if (skip_hwpoisoned_pages && PageHWPoison(page))
			continue;

		if (!PageLRU(page))
			found++;
		/*
		 * If there are RECLAIMABLE pages, we need to check them
		 * too.  But for now, memory offline itself doesn't call
		 * shrink_node_slabs(); this still needs to be fixed.
		 */
		/*
		 * If the page is not RAM, page_count() should be 0, so no
		 * further check is needed.  This is a _used_, not-movable page.
		 *
		 * The problematic thing here is PG_reserved pages.  PG_reserved
		 * is set on both memory hole pages and _used_ kernel pages
		 * at boot.
		 */
		if (found > count)
			return true;
	}
	return false;
}

bool is_pageblock_removable_nolock(struct page *page)
{
	struct zone *zone;
	unsigned long pfn;

	/*
	 * We have to be careful here because we are iterating over memory
	 * sections which are not zone aware, so we might end up outside of
	 * the zone but still within the section.
	 * We also have to take care of the node: if the node is offline,
	 * its NODE_DATA will be NULL - see page_zone.
	 */
	if (!node_online(page_to_nid(page)))
		return false;

	zone = page_zone(page);
	pfn = page_to_pfn(page);
	if (!zone_spans_pfn(zone, pfn))
		return false;

	return !has_unmovable_pages(zone, page, 0, true);
}

#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)

static unsigned long pfn_max_align_down(unsigned long pfn)
{
	return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
			     pageblock_nr_pages) - 1);
}

static unsigned long pfn_max_align_up(unsigned long pfn)
{
	return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
				pageblock_nr_pages));
}
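/*
 * Worked example (illustrative, assuming typical x86-64 values: 4 KiB pages,
 * MAX_ORDER = 11 so MAX_ORDER_NR_PAGES = 1024, and pageblock_order = 9 so
 * pageblock_nr_pages = 512).  The range is aligned to the larger of the two
 * granularities, 1024 pages:
 *
 *	pfn_max_align_down(5000) = 5000 & ~1023 = 4096
 *	pfn_max_align_up(5000)   = ALIGN(5000, 1024) = 5120
 *
 * so a request for PFNs [5000, 5050) ends up isolating pageblocks covering
 * [4096, 5120).
 */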

/* [start, end) must belong to a single zone. */
static int __alloc_contig_migrate_range(struct compact_control *cc,
					unsigned long start, unsigned long end)
{
	/* This function is based on compact_zone() from compaction.c. */
	unsigned long nr_reclaimed;
	unsigned long pfn = start;
	unsigned int tries = 0;
	int ret = 0;

	migrate_prep();

	while (pfn < end || !list_empty(&cc->migratepages)) {
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (list_empty(&cc->migratepages)) {
			cc->nr_migratepages = 0;
			pfn = isolate_migratepages_range(cc, pfn, end);
			if (!pfn) {
				ret = -EINTR;
				break;
			}
			tries = 0;
		} else if (++tries == 5) {
			ret = ret < 0 ? ret : -EBUSY;
			break;
		}

		nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
							&cc->migratepages);
		cc->nr_migratepages -= nr_reclaimed;

		ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
				    NULL, 0, cc->mode, MR_CMA);
	}
	if (ret < 0) {
		putback_movable_pages(&cc->migratepages);
		return ret;
	}
	return 0;
}

/**
 * alloc_contig_range() -- tries to allocate given range of pages
 * @start:	start PFN to allocate
 * @end:	one-past-the-last PFN to allocate
 * @migratetype:	migratetype of the underlying pageblocks (either
 *			#MIGRATE_MOVABLE or #MIGRATE_CMA).  All pageblocks
 *			in range must have the same migratetype and it must
 *			be either of the two.
 *
 * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
 * aligned, however it's the caller's responsibility to guarantee that
 * we are the only thread that changes the migrate type of pageblocks the
 * pages fall in.
 *
 * The PFN range must belong to a single zone.
 *
 * Returns zero on success or a negative error code.  On success all
 * pages whose PFN is in [start, end) are allocated for the caller and
 * need to be freed with free_contig_range().
 */
int alloc_contig_range(unsigned long start, unsigned long end,
		       unsigned migratetype)
{
	unsigned long outer_start, outer_end;
	unsigned int order;
	int ret = 0;

	struct compact_control cc = {
		.nr_migratepages = 0,
		.order = -1,
		.zone = page_zone(pfn_to_page(start)),
		.mode = MIGRATE_SYNC,
		.ignore_skip_hint = true,
	};
	INIT_LIST_HEAD(&cc.migratepages);

	/*
	 * What we do here is we mark all pageblocks in range as
	 * MIGRATE_ISOLATE.  Because pageblock and max order pages may
	 * have different sizes, and due to the way the page allocator
	 * works, we align the range to the bigger of the two so that
	 * the page allocator won't try to merge buddies from
	 * different pageblocks and change MIGRATE_ISOLATE to some
	 * other migration type.
	 *
	 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
	 * migrate the pages from an unaligned range (ie. pages that
	 * we are interested in).  This will put all the pages in
	 * range back to page allocator as MIGRATE_ISOLATE.
	 *
	 * When this is done, we take the pages in range from page
	 * allocator removing them from the buddy system.  This way
	 * page allocator will never consider using them.
	 *
	 * This lets us mark the pageblocks back as
	 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
	 * aligned range but not in the unaligned, original range are
	 * put back to page allocator so that buddy can use them.
	 */

	ret = start_isolate_page_range(pfn_max_align_down(start),
				       pfn_max_align_up(end), migratetype,
				       false);
	if (ret)
		return ret;

	cc.zone->cma_alloc = 1;
	/*
	 * In case of -EBUSY, we'd like to know which page causes the problem.
	 * So, just fall through.  We will check it in test_pages_isolated().
	 */
	ret = __alloc_contig_migrate_range(&cc, start, end);
	if (ret && ret != -EBUSY)
		goto done;

	/*
	 * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
	 * aligned blocks that are marked as MIGRATE_ISOLATE.  What's
	 * more, all pages in [start, end) are free in page allocator.
	 * What we are going to do is to allocate all pages from
	 * [start, end) (that is remove them from page allocator).
	 *
	 * The only problem is that pages at the beginning and at the
	 * end of interesting range may not be aligned with pages that
	 * page allocator holds, ie. they can be part of higher order
	 * pages.  Because of this, we reserve the bigger range and
	 * once this is done free the pages we are not interested in.
	 *
	 * We don't have to hold zone->lock here because the pages are
	 * isolated thus they won't get removed from buddy.
	 */

	lru_add_drain_all();
	drain_all_pages(cc.zone);

	order = 0;
	outer_start = start;
	while (!PageBuddy(pfn_to_page(outer_start))) {
		if (++order >= MAX_ORDER) {
			outer_start = start;
			break;
		}
		outer_start &= ~0UL << order;
	}

	if (outer_start != start) {
		order = page_order(pfn_to_page(outer_start));

		/*
		 * outer_start page could be a small order buddy page that
		 * doesn't include the start page.  Adjust outer_start in
		 * this case so the failed page is reported properly by the
		 * tracepoint in test_pages_isolated().
		 */
		if (outer_start + (1UL << order) <= start)
			outer_start = start;
	}

	/* Make sure the range is really isolated. */
	if (test_pages_isolated(outer_start, end, false)) {
		pr_info("%s: [%lx, %lx) PFNs busy\n",
			__func__, outer_start, end);
		ret = -EBUSY;
		goto done;
	}

	/* Grab isolated pages from freelists. */
	outer_end = isolate_freepages_range(&cc, outer_start, end);
	if (!outer_end) {
		ret = -EBUSY;
		goto done;
	}

	/* Free head and tail (if any) */
	if (start != outer_start)
		free_contig_range(outer_start, start - outer_start);
	if (end != outer_end)
		free_contig_range(end, outer_end - end);

done:
	undo_isolate_page_range(pfn_max_align_down(start),
				pfn_max_align_up(end), migratetype);
	cc.zone->cma_alloc = 0;
	return ret;
}
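/*
 * Illustrative sketch (not part of this file): a CMA-style caller grabbing
 * 4 MiB of physically contiguous memory.  example_grab_4mb(), its companion
 * example_release_4mb() and base_pfn are made up for the example; the caller
 * is assumed to have already set the range up as MIGRATE_CMA pageblocks,
 * which is what cma_alloc() does before calling alloc_contig_range().
 */
#if 0	/* example only, never compiled */
static struct page *example_grab_4mb(unsigned long base_pfn)
{
	const unsigned long nr_pages = SZ_4M >> PAGE_SHIFT;

	if (alloc_contig_range(base_pfn, base_pfn + nr_pages, MIGRATE_CMA))
		return NULL;

	/* On success every PFN in [base_pfn, base_pfn + nr_pages) is ours. */
	return pfn_to_page(base_pfn);
}

static void example_release_4mb(unsigned long base_pfn)
{
	free_contig_range(base_pfn, SZ_4M >> PAGE_SHIFT);
}
#endif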

void free_contig_range(unsigned long pfn, unsigned nr_pages)
{
	unsigned int count = 0;

	for (; nr_pages--; pfn++) {
		struct page *page = pfn_to_page(pfn);

		count += page_count(page) != 1;
		__free_page(page);
	}
	WARN(count != 0, "%d pages are still in use!\n", count);
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * The zone indicated has a new number of managed_pages; batch sizes and percpu
 * page high values need to be recalculated.
 */
void __meminit zone_pcp_update(struct zone *zone)
{
	unsigned cpu;
	mutex_lock(&pcp_batch_high_lock);
	for_each_possible_cpu(cpu)
		pageset_set_high_and_batch(zone,
				per_cpu_ptr(zone->pageset, cpu));
	mutex_unlock(&pcp_batch_high_lock);
}
#endif

void zone_pcp_reset(struct zone *zone)
{
	unsigned long flags;
	int cpu;
	struct per_cpu_pageset *pset;

	/* avoid races with drain_pages() */
	local_irq_save(flags);
	if (zone->pageset != &boot_pageset) {
		for_each_online_cpu(cpu) {
			pset = per_cpu_ptr(zone->pageset, cpu);
			drain_zonestat(zone, pset);
		}
		free_percpu(zone->pageset);
		zone->pageset = &boot_pageset;
	}
	local_irq_restore(flags);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * All pages in the range must be in a single zone and isolated
 * before calling this.
 */
void
__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *page;
	struct zone *zone;
	unsigned int order, i;
	unsigned long pfn;
	unsigned long flags;
	/* find the first valid pfn */
	for (pfn = start_pfn; pfn < end_pfn; pfn++)
		if (pfn_valid(pfn))
			break;
	if (pfn == end_pfn)
		return;
	zone = page_zone(pfn_to_page(pfn));
	spin_lock_irqsave(&zone->lock, flags);
	pfn = start_pfn;
	while (pfn < end_pfn) {
		if (!pfn_valid(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		/*
		 * The HWPoisoned page may not be in the buddy system, and
		 * page_count() is not 0.
		 */
		if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
			pfn++;
			SetPageReserved(page);
			continue;
		}

		BUG_ON(page_count(page));
		BUG_ON(!PageBuddy(page));
		order = page_order(page);
#ifdef CONFIG_DEBUG_VM
		pr_info("remove from free list %lx %d %lx\n",
			pfn, 1 << order, end_pfn);
#endif
		list_del(&page->lru);
		rmv_page_order(page);
		zone->free_area[order].nr_free--;
		for (i = 0; i < (1 << order); i++)
			SetPageReserved((page+i));
		pfn += (1 << order);
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}
#endif

bool is_free_buddy_page(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long pfn = page_to_pfn(page);
	unsigned long flags;
	unsigned int order;

	spin_lock_irqsave(&zone->lock, flags);
	for (order = 0; order < MAX_ORDER; order++) {
		struct page *page_head = page - (pfn & ((1 << order) - 1));

		if (PageBuddy(page_head) && page_order(page_head) >= order)
			break;
	}
	spin_unlock_irqrestore(&zone->lock, flags);

	return order < MAX_ORDER;
}
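/*
 * Worked example (illustrative): for pfn 0x12345 and order 3, the candidate
 * buddy head is page - (0x12345 & 0x7), i.e. the page at pfn 0x12340.  If
 * that head is PageBuddy with page_order() >= 3, the loop stops and the page
 * is reported as part of a free buddy block; if no order up to MAX_ORDER - 1
 * matches, the function returns false.
 */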