/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/kasan.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>
#include <linux/mempolicy.h>
#include <linux/memremap.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_ext.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <linux/prefetch.h>
#include <linux/mm_inline.h>
#include <linux/migrate.h>
#include <linux/page_ext.h>
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
#include <linux/page_owner.h>
#include <linux/kthread.h>
#include <linux/memcontrol.h>

#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
static DEFINE_MUTEX(pcp_batch_high_lock);
#define MIN_PERCPU_PAGELIST_FRACTION	(8)

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);		/* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
int _node_numa_mem_[MAX_NUMNODES];
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
#ifdef CONFIG_MOVABLE_NODE
	[N_MEMORY] = { { [0] = 1UL } },
#endif
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

/* Protect totalram_pages and zone->managed_pages */
static DEFINE_SPINLOCK(managed_page_count_lock);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
unsigned long totalcma_pages __read_mostly;

int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

/*
 * A cached value of the page's pageblock's migratetype, used when the page is
 * put on a pcplist. Used to avoid the pageblock migratetype lookup when
 * freeing from pcplists in most cases, at the cost of possibly becoming stale.
 * Also the migratetype set in the page does not necessarily match the pcplist
 * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
 * other index - this ensures that it will be put on the correct CMA freelist.
 */
static inline int get_pcppage_migratetype(struct page *page)
{
	return page->index;
}

static inline void set_pcppage_migratetype(struct page *page, int migratetype)
{
	page->index = migratetype;
}
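
/*
 * Illustrative usage sketch (assuming the usual caller pattern): the per-cpu
 * free path is expected to cache the pageblock's migratetype before a page
 * goes onto a pcplist, roughly
 *
 *	set_pcppage_migratetype(page, get_pfnblock_migratetype(page, pfn));
 *
 * so that free_pcppages_bulk() below can read it back cheaply with
 * get_pcppage_migratetype() instead of re-deriving it from the pageblock
 * bitmap for every page it drains.
 */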

#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to temporarily
 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 * while devices are suspended.  To avoid races with the suspend/hibernate code,
 * they should always be called with pm_mutex held (gfp_allowed_mask also should
 * only be modified with pm_mutex held, unless the suspend/hibernate code is
 * guaranteed not to run in parallel with that modification).
 */

static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	if (saved_gfp_mask) {
		gfp_allowed_mask = saved_gfp_mask;
		saved_gfp_mask = 0;
	}
}

void pm_restrict_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	WARN_ON(saved_gfp_mask);
	saved_gfp_mask = gfp_allowed_mask;
	gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
}

bool pm_suspended_storage(void)
{
	if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
		return false;
	return true;
}
#endif /* CONFIG_PM_SLEEP */
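
/*
 * Usage sketch (illustrative; assumes the usual suspend sequence): the
 * suspend/hibernate core brackets the window in which devices are suspended
 * roughly as
 *
 *	pm_restrict_gfp_mask();		(drops __GFP_IO | __GFP_FS)
 *	... devices suspended, hibernation image written or read ...
 *	pm_restore_gfp_mask();		(restores the saved mask)
 *
 * so that allocations made in between cannot block on I/O to a device that
 * is already suspended; pm_suspended_storage() lets other code ask whether
 * that restricted window is currently in effect.
 */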

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
unsigned int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
	 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	 256,
#endif
#ifdef CONFIG_HIGHMEM
	 32,
#endif
	 32,
};

EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	 "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem",
#endif
	 "Movable",
#ifdef CONFIG_ZONE_DEVICE
	 "Device",
#endif
};

char * const migratetype_names[MIGRATE_TYPES] = {
	"Unmovable",
	"Movable",
	"Reclaimable",
	"HighAtomic",
#ifdef CONFIG_CMA
	"CMA",
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	"Isolate",
#endif
};

compound_page_dtor * const compound_page_dtors[] = {
	NULL,
	free_compound_page,
#ifdef CONFIG_HUGETLB_PAGE
	free_huge_page,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	free_transhuge_page,
#endif
};

int min_free_kbytes = 1024;
int user_min_free_kbytes = -1;
int watermark_scale_factor = 10;

static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
static unsigned long __initdata required_kernelcore;
static unsigned long __initdata required_movablecore;
static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
static bool mirrored_kernelcore;

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

int page_group_by_mobility_disabled __read_mostly;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static inline void reset_deferred_meminit(pg_data_t *pgdat)
{
	pgdat->first_deferred_pfn = ULONG_MAX;
}

/* Returns true if the struct page for the pfn is uninitialised */
static inline bool __meminit early_page_uninitialised(unsigned long pfn)
{
	int nid = early_pfn_to_nid(pfn);

	if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
		return true;

	return false;
}

/*
 * Returns false when the remaining initialisation should be deferred until
 * later in the boot cycle when it can be parallelised.
 */
static inline bool update_defer_init(pg_data_t *pgdat,
				unsigned long pfn, unsigned long zone_end,
				unsigned long *nr_initialised)
{
	unsigned long max_initialise;

	/* Always populate low zones for address-constrained allocations */
	if (zone_end < pgdat_end_pfn(pgdat))
		return true;
	/*
	 * Initialise at least 2G of a node, but also take into account
	 * two large system hashes that can take up 1GB for 0.25TB/node.
	 */
	max_initialise = max(2UL << (30 - PAGE_SHIFT),
		(pgdat->node_spanned_pages >> 8));

	(*nr_initialised)++;
	if ((*nr_initialised > max_initialise) &&
	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
		pgdat->first_deferred_pfn = pfn;
		return false;
	}

	return true;
}
#else
static inline void reset_deferred_meminit(pg_data_t *pgdat)
{
}

static inline bool early_page_uninitialised(unsigned long pfn)
{
	return false;
}

static inline bool update_defer_init(pg_data_t *pgdat,
				unsigned long pfn, unsigned long zone_end,
				unsigned long *nr_initialised)
{
	return true;
}
#endif

/* Return a pointer to the bitmap storing bits affecting a block of pages */
static inline unsigned long *get_pageblock_bitmap(struct page *page,
							unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	return __pfn_to_section(pfn)->pageblock_flags;
#else
	return page_zone(page)->pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
}

static inline int pfn_to_bitidx(struct page *page, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	pfn &= (PAGES_PER_SECTION-1);
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#else
	pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#endif /* CONFIG_SPARSEMEM */
}
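
/*
 * Worked example (illustrative; assumes pageblock_order == 10): each
 * pageblock owns NR_PAGEBLOCK_BITS == 4 consecutive bits in the bitmap, so
 * a pfn at offset 0x2400 within its SPARSEMEM section falls in pageblock
 * 0x2400 >> 10 == 9 and its flags start at bit index 9 * 4 == 36.
 */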

/**
 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @pfn: The target page frame number
 * @end_bitidx: The last bit of interest to retrieve
 * @mask: mask of bits that the caller is interested in
 *
 * Return: pageblock_bits flags
 */
static __always_inline unsigned long __get_pfnblock_flags_mask(struct page *page,
					unsigned long pfn,
					unsigned long end_bitidx,
					unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long word;

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);

	word = bitmap[word_bitidx];
	bitidx += end_bitidx;
	return (word >> (BITS_PER_LONG - bitidx - 1)) & mask;
}

unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
					unsigned long end_bitidx,
					unsigned long mask)
{
	return __get_pfnblock_flags_mask(page, pfn, end_bitidx, mask);
}

static __always_inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn)
{
	return __get_pfnblock_flags_mask(page, pfn, PB_migrate_end, MIGRATETYPE_MASK);
}

/**
 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @flags: The flags to set
 * @pfn: The target page frame number
 * @end_bitidx: The last bit of interest
 * @mask: mask of bits that the caller is interested in
 */
void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
					unsigned long pfn,
					unsigned long end_bitidx,
					unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long old_word, word;

	BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);

	VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);

	bitidx += end_bitidx;
	mask <<= (BITS_PER_LONG - bitidx - 1);
	flags <<= (BITS_PER_LONG - bitidx - 1);

	word = READ_ONCE(bitmap[word_bitidx]);
	for (;;) {
		old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
		if (word == old_word)
			break;
		word = old_word;
	}
}
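
/*
 * Worked example of the bit arithmetic above (assumes BITS_PER_LONG == 64):
 * with bitidx == 8, end_bitidx == 2 and mask == 0x7, the shift is
 * 64 - (8 + 2) - 1 == 53, so the three-bit field occupies bits 53..55 of
 * the word.  The getter extracts it with (word >> 53) & 0x7; the setter
 * rewrites just those bits via the cmpxchg() loop, so concurrent updates to
 * other pageblocks sharing the same word are not lost.
 */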

void set_pageblock_migratetype(struct page *page, int migratetype)
{
	if (unlikely(page_group_by_mobility_disabled &&
		     migratetype < MIGRATE_PCPTYPES))
		migratetype = MIGRATE_UNMOVABLE;

	set_pageblock_flags_group(page, (unsigned long)migratetype,
					PB_migrate, PB_migrate_end);
}

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);
	unsigned long sp, start_pfn;

	do {
		seq = zone_span_seqbegin(zone);
		start_pfn = zone->zone_start_pfn;
		sp = zone->spanned_pages;
		if (!zone_spans_pfn(zone, pfn))
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	if (ret)
		pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
			pfn, zone_to_nid(zone), zone->name,
			start_pfn, start_pfn + sp);

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
	if (!pfn_valid_within(page_to_pfn(page)))
		return 0;
	if (zone != page_zone(page))
		return 0;

	return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page, const char *reason,
		unsigned long bad_flags)
{
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			pr_alert(
			      "BUG: Bad page state: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	pr_alert("BUG: Bad page state in process %s  pfn:%05lx\n",
		current->comm, page_to_pfn(page));
	__dump_page(page, reason);
	bad_flags &= page->flags;
	if (bad_flags)
		pr_alert("bad because of flags: %#lx(%pGp)\n",
						bad_flags, &bad_flags);
	dump_page_owner(page);

	print_modules();
	dump_stack();
out:
	/* Leave bad fields for debug, except PageBuddy could make trouble */
	page_mapcount_reset(page); /* remove PageBuddy */
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page" and has PG_head set.
 *
 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
 * in bit 0 of page->compound_head; the rest of the bits are a pointer to the
 * head page.
 *
 * The first tail page's ->compound_dtor holds the offset in array of compound
 * page destructors. See compound_page_dtors.
 *
 * The first tail page's ->compound_order holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

void free_compound_page(struct page *page)
{
	__free_pages_ok(page, compound_order(page));
}

void prep_compound_page(struct page *page, unsigned int order)
{
	int i;
	int nr_pages = 1 << order;

	set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;
		set_page_count(p, 0);
		p->mapping = TAIL_MAPPING;
		set_compound_head(p, page);
	}
	atomic_set(compound_mapcount_ptr(page), -1);
}
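
/*
 * Layout sketch (illustrative): after prep_compound_page(page, 2) the four
 * struct pages look roughly like this:
 *
 *	page[0]:	head page, PG_head set
 *	page[1]:	first tail; holds ->compound_dtor, ->compound_order
 *			and the compound mapcount, ->mapping == TAIL_MAPPING
 *	page[2..3]:	further tails, ->mapping == TAIL_MAPPING
 *
 * Each tail page's ->compound_head points back at page[0] with bit 0 set,
 * which is what PageTail() and compound_head() decode.
 */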

#ifdef CONFIG_DEBUG_PAGEALLOC
unsigned int _debug_guardpage_minorder;
bool _debug_pagealloc_enabled __read_mostly
			= IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
EXPORT_SYMBOL(_debug_pagealloc_enabled);
bool _debug_guardpage_enabled __read_mostly;

static int __init early_debug_pagealloc(char *buf)
{
	if (!buf)
		return -EINVAL;
	return kstrtobool(buf, &_debug_pagealloc_enabled);
}
early_param("debug_pagealloc", early_debug_pagealloc);

static bool need_debug_guardpage(void)
{
	/* If we don't use debug_pagealloc, we don't need guard page */
	if (!debug_pagealloc_enabled())
		return false;

	if (!debug_guardpage_minorder())
		return false;

	return true;
}

static void init_debug_guardpage(void)
{
	if (!debug_pagealloc_enabled())
		return;

	if (!debug_guardpage_minorder())
		return;

	_debug_guardpage_enabled = true;
}

struct page_ext_operations debug_guardpage_ops = {
	.need = need_debug_guardpage,
	.init = init_debug_guardpage,
};

static int __init debug_guardpage_minorder_setup(char *buf)
{
	unsigned long res;

	if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
		pr_err("Bad debug_guardpage_minorder value\n");
		return 0;
	}
	_debug_guardpage_minorder = res;
	pr_info("Setting debug_guardpage_minorder to %lu\n", res);
	return 0;
}
early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);

static inline bool set_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype)
{
	struct page_ext *page_ext;

	if (!debug_guardpage_enabled())
		return false;

	if (order >= debug_guardpage_minorder())
		return false;

	page_ext = lookup_page_ext(page);
	if (unlikely(!page_ext))
		return false;

	__set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);

	INIT_LIST_HEAD(&page->lru);
	set_page_private(page, order);
	/* Guard pages are not available for any usage */
	__mod_zone_freepage_state(zone, -(1 << order), migratetype);

	return true;
}

static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype)
{
	struct page_ext *page_ext;

	if (!debug_guardpage_enabled())
		return;

	page_ext = lookup_page_ext(page);
	if (unlikely(!page_ext))
		return;

	__clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);

	set_page_private(page, 0);
	if (!is_migrate_isolate(migratetype))
		__mod_zone_freepage_state(zone, (1 << order), migratetype);
}
#else
struct page_ext_operations debug_guardpage_ops;
static inline bool set_page_guard(struct zone *zone, struct page *page,
			unsigned int order, int migratetype) { return false; }
static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype) {}
#endif

static inline void set_page_order(struct page *page, unsigned int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}

/*
 * This function checks whether a page is free && is the buddy.
 * We can coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set ->_mapcount
 * to PAGE_BUDDY_MAPCOUNT_VALUE.
 * Setting, clearing, and testing _mapcount PAGE_BUDDY_MAPCOUNT_VALUE is
 * serialized by zone->lock.
 *
 * For recording a page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
							unsigned int order)
{
	if (!pfn_valid_within(page_to_pfn(buddy)))
		return 0;

	if (page_is_guard(buddy) && page_order(buddy) == order) {
		if (page_zone_id(page) != page_zone_id(buddy))
			return 0;

		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

		return 1;
	}

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		/*
		 * zone check is done late to avoid uselessly
		 * calculating zone/node ids for pages that could
		 * never merge.
		 */
		if (page_zone_id(page) != page_zone_id(buddy))
			return 0;

		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

		return 1;
	}
	return 0;
}
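
/*
 * Buddy arithmetic in a nutshell (illustrative): the buddy of an order-n
 * block is found by flipping bit n of its index, i.e.
 * __find_buddy_index(page_idx, order) == page_idx ^ (1 << order).  For
 * example, the order-1 block at index 4 has its buddy at 4 ^ 2 == 6, and
 * when the two merge, the combined order-2 block starts at 4 & 6 == 4.
 */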

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain a direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length (1 << order) and marked with _mapcount
 * PAGE_BUDDY_MAPCOUNT_VALUE. A page's order is recorded in the
 * page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- nyc
 */

static inline void __free_one_page(struct page *page,
		unsigned long pfn,
		struct zone *zone, unsigned int order,
		int migratetype)
{
	unsigned long page_idx;
	unsigned long combined_idx;
	unsigned long uninitialized_var(buddy_idx);
	struct page *buddy;
	unsigned int max_order;

	max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);

	VM_BUG_ON(!zone_is_initialized(zone));
	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);

	VM_BUG_ON(migratetype == -1);
	if (likely(!is_migrate_isolate(migratetype)))
		__mod_zone_freepage_state(zone, 1 << order, migratetype);

	page_idx = pfn & ((1 << MAX_ORDER) - 1);

	VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
	VM_BUG_ON_PAGE(bad_range(zone, page), page);

continue_merging:
	while (order < max_order - 1) {
		buddy_idx = __find_buddy_index(page_idx, order);
		buddy = page + (buddy_idx - page_idx);
		if (!page_is_buddy(page, buddy, order))
			goto done_merging;
		/*
		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
		 * merge with it and move up one order.
		 */
		if (page_is_guard(buddy)) {
			clear_page_guard(zone, buddy, order, migratetype);
		} else {
			list_del(&buddy->lru);
			zone->free_area[order].nr_free--;
			rmv_page_order(buddy);
		}
		combined_idx = buddy_idx & page_idx;
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;
		order++;
	}
	if (max_order < MAX_ORDER) {
		/* If we are here, it means order is >= pageblock_order.
		 * We want to prevent merge between freepages on isolate
		 * pageblock and normal pageblock. Without this, pageblock
		 * isolation could cause incorrect freepage or CMA accounting.
		 *
		 * We don't want to hit this code for the more frequent
		 * low-order merging.
		 */
		if (unlikely(has_isolate_pageblock(zone))) {
			int buddy_mt;

			buddy_idx = __find_buddy_index(page_idx, order);
			buddy = page + (buddy_idx - page_idx);
			buddy_mt = get_pageblock_migratetype(buddy);

			if (migratetype != buddy_mt
					&& (is_migrate_isolate(migratetype) ||
						is_migrate_isolate(buddy_mt)))
				goto done_merging;
		}
		max_order++;
		goto continue_merging;
	}

done_merging:
	set_page_order(page, order);

	/*
	 * If this is not the largest possible page, check if the buddy
	 * of the next-highest order is free. If it is, it's possible
	 * that pages are being freed that will coalesce soon. In case
	 * that is happening, add the free page to the tail of the list
	 * so it's less likely to be used soon and more likely to be merged
	 * as a higher order page.
	 */
	if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
		struct page *higher_page, *higher_buddy;
		combined_idx = buddy_idx & page_idx;
		higher_page = page + (combined_idx - page_idx);
		buddy_idx = __find_buddy_index(combined_idx, order + 1);
		higher_buddy = higher_page + (buddy_idx - combined_idx);
		if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
			list_add_tail(&page->lru,
				&zone->free_area[order].free_list[migratetype]);
			goto out;
		}
	}

	list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
out:
	zone->free_area[order].nr_free++;
}
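
/*
 * Merge walk example (illustrative): freeing page_idx 5 at order 0 first
 * checks buddy 5 ^ 1 == 4; if it is free, the pair becomes the order-1
 * block at 5 & 4 == 4, whose buddy is 4 ^ 2 == 6, and so on up the orders
 * until a buddy is busy or max_order is hit, at which point the merged
 * block is placed on the matching free_area free list.
 */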

/*
 * A bad page could be due to a number of fields. Instead of multiple branches,
 * try and check multiple fields with one check. The caller must do a detailed
 * check if necessary.
 */
static inline bool page_expected_state(struct page *page,
					unsigned long check_flags)
{
	if (unlikely(atomic_read(&page->_mapcount) != -1))
		return false;

	if (unlikely((unsigned long)page->mapping |
			page_ref_count(page) |
#ifdef CONFIG_MEMCG
			(unsigned long)page->mem_cgroup |
#endif
			(page->flags & check_flags)))
		return false;

	return true;
}

static void free_pages_check_bad(struct page *page)
{
	const char *bad_reason;
	unsigned long bad_flags;

	bad_reason = NULL;
	bad_flags = 0;

	if (unlikely(atomic_read(&page->_mapcount) != -1))
		bad_reason = "nonzero mapcount";
	if (unlikely(page->mapping != NULL))
		bad_reason = "non-NULL mapping";
	if (unlikely(page_ref_count(page) != 0))
		bad_reason = "nonzero _refcount";
	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
		bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
		bad_flags = PAGE_FLAGS_CHECK_AT_FREE;
	}
#ifdef CONFIG_MEMCG
	if (unlikely(page->mem_cgroup))
		bad_reason = "page still charged to cgroup";
#endif
	bad_page(page, bad_reason, bad_flags);
}

static inline int free_pages_check(struct page *page)
{
	if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
		return 0;

	/* Something has gone sideways, find it */
	free_pages_check_bad(page);
	return 1;
}

static int free_tail_pages_check(struct page *head_page, struct page *page)
{
	int ret = 1;

	/*
	 * We rely on page->lru.next never having bit 0 set, unless the page
	 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
	 */
	BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);

	if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
		ret = 0;
		goto out;
	}
	switch (page - head_page) {
	case 1:
		/* the first tail page: ->mapping is compound_mapcount() */
		if (unlikely(compound_mapcount(page))) {
			bad_page(page, "nonzero compound_mapcount", 0);
			goto out;
		}
		break;
	case 2:
		/*
		 * the second tail page: ->mapping is
		 * page_deferred_list().next -- ignore value.
		 */
		break;
	default:
		if (page->mapping != TAIL_MAPPING) {
			bad_page(page, "corrupted mapping in tail page", 0);
			goto out;
		}
		break;
	}
	if (unlikely(!PageTail(page))) {
		bad_page(page, "PageTail not set", 0);
		goto out;
	}
	if (unlikely(compound_head(page) != head_page)) {
		bad_page(page, "compound_head not consistent", 0);
		goto out;
	}
	ret = 0;
out:
	page->mapping = NULL;
	clear_compound_head(page);
	return ret;
}

static __always_inline bool free_pages_prepare(struct page *page,
					unsigned int order, bool check_free)
{
	int bad = 0;

	VM_BUG_ON_PAGE(PageTail(page), page);

	trace_mm_page_free(page, order);
	kmemcheck_free_shadow(page, order);

	/*
	 * Check tail pages before head page information is cleared to
	 * avoid checking PageCompound for order-0 pages.
	 */
	if (unlikely(order)) {
		bool compound = PageCompound(page);
		int i;

		VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);

		if (compound)
			ClearPageDoubleMap(page);
		for (i = 1; i < (1 << order); i++) {
			if (compound)
				bad += free_tail_pages_check(page, page + i);
			if (unlikely(free_pages_check(page + i))) {
				bad++;
				continue;
			}
			(page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
		}
	}
	if (PageMappingFlags(page))
		page->mapping = NULL;
	if (memcg_kmem_enabled() && PageKmemcg(page))
		memcg_kmem_uncharge(page, order);
	if (check_free)
		bad += free_pages_check(page);
	if (bad)
		return false;

	page_cpupid_reset_last(page);
	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	reset_page_owner(page, order);

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),
					   PAGE_SIZE << order);
		debug_check_no_obj_freed(page_address(page),
					   PAGE_SIZE << order);
	}
	arch_free_page(page, order);
	kernel_poison_pages(page, 1 << order, 0);
	kernel_map_pages(page, 1 << order, 0);
	kasan_free_pages(page, order);

	return true;
}

#ifdef CONFIG_DEBUG_VM
static inline bool free_pcp_prepare(struct page *page)
{
	return free_pages_prepare(page, 0, true);
}

static inline bool bulkfree_pcp_prepare(struct page *page)
{
	return false;
}
#else
static bool free_pcp_prepare(struct page *page)
{
	return free_pages_prepare(page, 0, false);
}

static bool bulkfree_pcp_prepare(struct page *page)
{
	return free_pages_check(page);
}
#endif /* CONFIG_DEBUG_VM */

/*
 * Frees a number of pages from the PCP lists
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
					struct per_cpu_pages *pcp)
{
	int migratetype = 0;
	int batch_free = 0;
	unsigned long nr_scanned;
	bool isolated_pageblocks;

	spin_lock(&zone->lock);
	isolated_pageblocks = has_isolate_pageblock(zone);
	nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
	if (nr_scanned)
		__mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);

	while (count) {
		struct page *page;
		struct list_head *list;

		/*
		 * Remove pages from lists in a round-robin fashion. A
		 * batch_free count is maintained that is incremented when an
		 * empty list is encountered.  This is so more pages are freed
		 * off fuller lists instead of spinning excessively around empty
		 * lists
		 */
		do {
			batch_free++;
			if (++migratetype == MIGRATE_PCPTYPES)
				migratetype = 0;
			list = &pcp->lists[migratetype];
		} while (list_empty(list));

		/* This is the only non-empty list. Free them all. */
		if (batch_free == MIGRATE_PCPTYPES)
			batch_free = count;

		do {
			int mt;	/* migratetype of the to-be-freed page */

			page = list_last_entry(list, struct page, lru);
			/* must delete as __free_one_page list manipulates */
			list_del(&page->lru);

			mt = get_pcppage_migratetype(page);
			/* MIGRATE_ISOLATE page should not go to pcplists */
			VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
			/* Pageblock could have been isolated meanwhile */
			if (unlikely(isolated_pageblocks))
				mt = get_pageblock_migratetype(page);

			if (bulkfree_pcp_prepare(page))
				continue;

			__free_one_page(page, page_to_pfn(page), zone, 0, mt);
			trace_mm_page_pcpu_drain(page, 0, mt);
		} while (--count && --batch_free && !list_empty(list));
	}
	spin_unlock(&zone->lock);
}
1139
Mel Gormandc4b0ca2014-06-04 16:10:17 -07001140static void free_one_page(struct zone *zone,
1141 struct page *page, unsigned long pfn,
Mel Gorman7aeb09f2014-06-04 16:10:21 -07001142 unsigned int order,
Mel Gormaned0ae212009-06-16 15:32:07 -07001143 int migratetype)
Nick Piggin48db57f2006-01-08 01:00:42 -08001144{
Mel Gorman0d5d8232014-08-06 16:07:16 -07001145 unsigned long nr_scanned;
Christoph Lameter006d22d2006-09-25 23:31:48 -07001146 spin_lock(&zone->lock);
Mel Gorman599d0c92016-07-28 15:45:31 -07001147 nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
Mel Gorman0d5d8232014-08-06 16:07:16 -07001148 if (nr_scanned)
Mel Gorman599d0c92016-07-28 15:45:31 -07001149 __mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);
Mel Gormanf2260e62009-06-16 15:32:13 -07001150
Joonsoo Kimad53f922014-11-13 15:19:11 -08001151 if (unlikely(has_isolate_pageblock(zone) ||
1152 is_migrate_isolate(migratetype))) {
1153 migratetype = get_pfnblock_migratetype(page, pfn);
Joonsoo Kimad53f922014-11-13 15:19:11 -08001154 }
Mel Gormandc4b0ca2014-06-04 16:10:17 -07001155 __free_one_page(page, pfn, zone, order, migratetype);
Christoph Lameter006d22d2006-09-25 23:31:48 -07001156 spin_unlock(&zone->lock);
Nick Piggin48db57f2006-01-08 01:00:42 -08001157}
1158
Robin Holt1e8ce832015-06-30 14:56:45 -07001159static void __meminit __init_single_page(struct page *page, unsigned long pfn,
1160 unsigned long zone, int nid)
1161{
Robin Holt1e8ce832015-06-30 14:56:45 -07001162 set_page_links(page, zone, nid, pfn);
Robin Holt1e8ce832015-06-30 14:56:45 -07001163 init_page_count(page);
1164 page_mapcount_reset(page);
1165 page_cpupid_reset_last(page);
Robin Holt1e8ce832015-06-30 14:56:45 -07001166
Robin Holt1e8ce832015-06-30 14:56:45 -07001167 INIT_LIST_HEAD(&page->lru);
1168#ifdef WANT_PAGE_VIRTUAL
1169 /* The shift won't overflow because ZONE_NORMAL is below 4G. */
1170 if (!is_highmem_idx(zone))
1171 set_page_address(page, __va(pfn << PAGE_SHIFT));
1172#endif
1173}
1174
1175static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
1176 int nid)
1177{
1178 return __init_single_page(pfn_to_page(pfn), pfn, zone, nid);
1179}
1180
Mel Gorman7e18adb2015-06-30 14:57:05 -07001181#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1182static void init_reserved_page(unsigned long pfn)
1183{
1184 pg_data_t *pgdat;
1185 int nid, zid;
1186
1187 if (!early_page_uninitialised(pfn))
1188 return;
1189
1190 nid = early_pfn_to_nid(pfn);
1191 pgdat = NODE_DATA(nid);
1192
1193 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1194 struct zone *zone = &pgdat->node_zones[zid];
1195
1196 if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
1197 break;
1198 }
1199 __init_single_pfn(pfn, zid, nid);
1200}
1201#else
1202static inline void init_reserved_page(unsigned long pfn)
1203{
1204}
1205#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1206
Nathan Zimmer92923ca2015-06-30 14:56:48 -07001207/*
1208 * Initialised pages do not have PageReserved set. This function is
1209 * called for each range allocated by the bootmem allocator and
1210 * marks the pages PageReserved. The remaining valid pages are later
1211 * sent to the buddy page allocator.
1212 */
Stefan Bader4b50bcc2016-05-20 16:58:38 -07001213void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
Nathan Zimmer92923ca2015-06-30 14:56:48 -07001214{
1215 unsigned long start_pfn = PFN_DOWN(start);
1216 unsigned long end_pfn = PFN_UP(end);
1217
Mel Gorman7e18adb2015-06-30 14:57:05 -07001218 for (; start_pfn < end_pfn; start_pfn++) {
1219 if (pfn_valid(start_pfn)) {
1220 struct page *page = pfn_to_page(start_pfn);
1221
1222 init_reserved_page(start_pfn);
Kirill A. Shutemov1d798ca2015-11-06 16:29:54 -08001223
1224 /* Avoid false-positive PageTail() */
1225 INIT_LIST_HEAD(&page->lru);
1226
Mel Gorman7e18adb2015-06-30 14:57:05 -07001227 SetPageReserved(page);
1228 }
1229 }
Nathan Zimmer92923ca2015-06-30 14:56:48 -07001230}
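/*
 * Worked example (illustrative, assuming 4K pages): a memblock
 * reservation of [0x1000200, 0x1003400) gives PFN_DOWN(start) = 0x1000
 * and PFN_UP(end) = 0x1004, so pfns 0x1000-0x1003 are marked
 * PageReserved; pages only partially covered by the reservation are
 * included rather than skipped.
 */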
1231
KOSAKI Motohiroec95f532010-05-24 14:32:38 -07001232static void __free_pages_ok(struct page *page, unsigned int order)
1233{
1234 unsigned long flags;
Minchan Kim95e34412012-10-08 16:32:11 -07001235 int migratetype;
Mel Gormandc4b0ca2014-06-04 16:10:17 -07001236 unsigned long pfn = page_to_pfn(page);
KOSAKI Motohiroec95f532010-05-24 14:32:38 -07001237
Mel Gormane2769db2016-05-19 17:14:38 -07001238 if (!free_pages_prepare(page, order, true))
KOSAKI Motohiroec95f532010-05-24 14:32:38 -07001239 return;
1240
Mel Gormancfc47a22014-06-04 16:10:19 -07001241 migratetype = get_pfnblock_migratetype(page, pfn);
Nick Pigginc54ad302006-01-06 00:10:56 -08001242 local_irq_save(flags);
Christoph Lameterf8891e52006-06-30 01:55:45 -07001243 __count_vm_events(PGFREE, 1 << order);
Mel Gormandc4b0ca2014-06-04 16:10:17 -07001244 free_one_page(page_zone(page), page, pfn, order, migratetype);
Nick Pigginc54ad302006-01-06 00:10:56 -08001245 local_irq_restore(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001246}
1247
Li Zhang949698a2016-05-19 17:11:37 -07001248static void __init __free_pages_boot_core(struct page *page, unsigned int order)
David Howellsa226f6c2006-01-06 00:11:08 -08001249{
Johannes Weinerc3993072012-01-10 15:08:10 -08001250 unsigned int nr_pages = 1 << order;
Yinghai Lue2d0bd22013-09-11 14:20:37 -07001251 struct page *p = page;
Johannes Weinerc3993072012-01-10 15:08:10 -08001252 unsigned int loop;
David Howellsa226f6c2006-01-06 00:11:08 -08001253
Yinghai Lue2d0bd22013-09-11 14:20:37 -07001254 prefetchw(p);
1255 for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
1256 prefetchw(p + 1);
Johannes Weinerc3993072012-01-10 15:08:10 -08001257 __ClearPageReserved(p);
1258 set_page_count(p, 0);
David Howellsa226f6c2006-01-06 00:11:08 -08001259 }
Yinghai Lue2d0bd22013-09-11 14:20:37 -07001260 __ClearPageReserved(p);
1261 set_page_count(p, 0);
Johannes Weinerc3993072012-01-10 15:08:10 -08001262
Yinghai Lue2d0bd22013-09-11 14:20:37 -07001263 page_zone(page)->managed_pages += nr_pages;
Johannes Weinerc3993072012-01-10 15:08:10 -08001264 set_page_refcounted(page);
1265 __free_pages(page, order);
David Howellsa226f6c2006-01-06 00:11:08 -08001266}
1267
Mel Gorman75a592a2015-06-30 14:56:59 -07001268#if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \
1269 defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
Mel Gorman7ace9912015-08-06 15:46:13 -07001270
Mel Gorman75a592a2015-06-30 14:56:59 -07001271static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
1272
1273int __meminit early_pfn_to_nid(unsigned long pfn)
1274{
Mel Gorman7ace9912015-08-06 15:46:13 -07001275 static DEFINE_SPINLOCK(early_pfn_lock);
Mel Gorman75a592a2015-06-30 14:56:59 -07001276 int nid;
1277
Mel Gorman7ace9912015-08-06 15:46:13 -07001278 spin_lock(&early_pfn_lock);
Mel Gorman75a592a2015-06-30 14:56:59 -07001279 nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
Mel Gorman7ace9912015-08-06 15:46:13 -07001280 if (nid < 0)
Mel Gormane4568d32016-07-14 12:07:20 -07001281 nid = first_online_node;
Mel Gorman7ace9912015-08-06 15:46:13 -07001282 spin_unlock(&early_pfn_lock);
1283
1284 return nid;
Mel Gorman75a592a2015-06-30 14:56:59 -07001285}
1286#endif
1287
1288#ifdef CONFIG_NODES_SPAN_OTHER_NODES
1289static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node,
1290 struct mminit_pfnnid_cache *state)
1291{
1292 int nid;
1293
1294 nid = __early_pfn_to_nid(pfn, state);
1295 if (nid >= 0 && nid != node)
1296 return false;
1297 return true;
1298}
1299
1300/* Only safe to use early in boot when initialisation is single-threaded */
1301static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
1302{
1303 return meminit_pfn_in_nid(pfn, node, &early_pfnnid_cache);
1304}
1305
1306#else
1307
1308static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
1309{
1310 return true;
1311}
1312static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node,
1313 struct mminit_pfnnid_cache *state)
1314{
1315 return true;
1316}
1317#endif
1318
1319
Mel Gorman0e1cc952015-06-30 14:57:27 -07001320void __init __free_pages_bootmem(struct page *page, unsigned long pfn,
Mel Gorman3a80a7f2015-06-30 14:57:02 -07001321 unsigned int order)
1322{
1323 if (early_page_uninitialised(pfn))
1324 return;
Li Zhang949698a2016-05-19 17:11:37 -07001325 return __free_pages_boot_core(page, order);
Mel Gorman3a80a7f2015-06-30 14:57:02 -07001326}
1327
Joonsoo Kim7cf91a92016-03-15 14:57:51 -07001328/*
1329 * Check that the whole (or subset of) a pageblock given by the interval of
1330 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
 1331 * with the migration or free compaction scanner. The scanners then need
 1332 * only the pfn_valid_within() check for arches that allow holes within
1333 * pageblocks.
1334 *
1335 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
1336 *
1337 * It's possible on some configurations to have a setup like node0 node1 node0
 1338 * i.e. it's possible that all pages within a zone's range of pages do not
1339 * belong to a single zone. We assume that a border between node0 and node1
1340 * can occur within a single pageblock, but not a node0 node1 node0
1341 * interleaving within a single pageblock. It is therefore sufficient to check
1342 * the first and last page of a pageblock and avoid checking each individual
1343 * page in a pageblock.
1344 */
1345struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
1346 unsigned long end_pfn, struct zone *zone)
1347{
1348 struct page *start_page;
1349 struct page *end_page;
1350
1351 /* end_pfn is one past the range we are checking */
1352 end_pfn--;
1353
1354 if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
1355 return NULL;
1356
1357 start_page = pfn_to_page(start_pfn);
1358
1359 if (page_zone(start_page) != zone)
1360 return NULL;
1361
1362 end_page = pfn_to_page(end_pfn);
1363
1364 /* This gives a shorter code than deriving page_zone(end_page) */
1365 if (page_zone_id(start_page) != page_zone_id(end_page))
1366 return NULL;
1367
1368 return start_page;
1369}
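/*
 * Worked example (illustrative): with a node0-node1-node0 memory layout,
 * a pageblock whose first page sits in a node0 zone and whose last page
 * sits in node1 fails the page_zone()/page_zone_id() comparison above
 * and NULL is returned, so compaction skips it; a pageblock entirely
 * within one zone passes both checks and its first struct page is
 * returned.
 */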
1370
1371void set_zone_contiguous(struct zone *zone)
1372{
1373 unsigned long block_start_pfn = zone->zone_start_pfn;
1374 unsigned long block_end_pfn;
1375
1376 block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages);
1377 for (; block_start_pfn < zone_end_pfn(zone);
1378 block_start_pfn = block_end_pfn,
1379 block_end_pfn += pageblock_nr_pages) {
1380
1381 block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));
1382
1383 if (!__pageblock_pfn_to_page(block_start_pfn,
1384 block_end_pfn, zone))
1385 return;
1386 }
1387
 1388	/* No holes were found, so the zone is fully contiguous */
1389 zone->contiguous = true;
1390}
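/*
 * Worked example (assuming pageblock_nr_pages == 512): for a zone
 * starting at pfn 0x100200, the first iteration checks the partial
 * block [0x100200, 0x100400) and subsequent iterations whole 512-page
 * blocks; the first block that fails __pageblock_pfn_to_page() leaves
 * zone->contiguous false, otherwise it is set once the loop reaches
 * zone_end_pfn().
 */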
1391
1392void clear_zone_contiguous(struct zone *zone)
1393{
1394 zone->contiguous = false;
1395}
1396
Mel Gorman7e18adb2015-06-30 14:57:05 -07001397#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
Mel Gorman0e1cc952015-06-30 14:57:27 -07001398static void __init deferred_free_range(struct page *page,
Mel Gormana4de83d2015-06-30 14:57:16 -07001399 unsigned long pfn, int nr_pages)
1400{
1401 int i;
1402
1403 if (!page)
1404 return;
1405
1406 /* Free a large naturally-aligned chunk if possible */
Xishi Qiue7801492016-10-07 16:58:09 -07001407 if (nr_pages == pageblock_nr_pages &&
1408 (pfn & (pageblock_nr_pages - 1)) == 0) {
Mel Gormanac5d2532015-06-30 14:57:20 -07001409 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
Xishi Qiue7801492016-10-07 16:58:09 -07001410 __free_pages_boot_core(page, pageblock_order);
Mel Gormana4de83d2015-06-30 14:57:16 -07001411 return;
1412 }
1413
Xishi Qiue7801492016-10-07 16:58:09 -07001414 for (i = 0; i < nr_pages; i++, page++, pfn++) {
1415 if ((pfn & (pageblock_nr_pages - 1)) == 0)
1416 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
Li Zhang949698a2016-05-19 17:11:37 -07001417 __free_pages_boot_core(page, 0);
Xishi Qiue7801492016-10-07 16:58:09 -07001418 }
Mel Gormana4de83d2015-06-30 14:57:16 -07001419}
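/*
 * Worked example (assuming pageblock_nr_pages == 512): the fast path
 * above is taken only when pfn is a multiple of 512 and exactly 512
 * pages were batched, e.g. pfn 0x40000 with nr_pages == 512 frees one
 * naturally aligned pageblock with a single call at pageblock_order;
 * any other combination falls back to freeing order-0 pages one at a
 * time.
 */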
1420
Nicolai Stanged3cd1312015-08-06 15:46:16 -07001421/* Completion tracking for deferred_init_memmap() threads */
1422static atomic_t pgdat_init_n_undone __initdata;
1423static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
1424
1425static inline void __init pgdat_init_report_one_done(void)
1426{
1427 if (atomic_dec_and_test(&pgdat_init_n_undone))
1428 complete(&pgdat_init_all_done_comp);
1429}
Mel Gorman0e1cc952015-06-30 14:57:27 -07001430
Mel Gorman7e18adb2015-06-30 14:57:05 -07001431/* Initialise remaining memory on a node */
Mel Gorman0e1cc952015-06-30 14:57:27 -07001432static int __init deferred_init_memmap(void *data)
Mel Gorman7e18adb2015-06-30 14:57:05 -07001433{
Mel Gorman0e1cc952015-06-30 14:57:27 -07001434 pg_data_t *pgdat = data;
1435 int nid = pgdat->node_id;
Mel Gorman7e18adb2015-06-30 14:57:05 -07001436 struct mminit_pfnnid_cache nid_init_state = { };
1437 unsigned long start = jiffies;
1438 unsigned long nr_pages = 0;
1439 unsigned long walk_start, walk_end;
1440 int i, zid;
1441 struct zone *zone;
Mel Gorman7e18adb2015-06-30 14:57:05 -07001442 unsigned long first_init_pfn = pgdat->first_deferred_pfn;
Mel Gorman0e1cc952015-06-30 14:57:27 -07001443 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
Mel Gorman7e18adb2015-06-30 14:57:05 -07001444
Mel Gorman0e1cc952015-06-30 14:57:27 -07001445 if (first_init_pfn == ULONG_MAX) {
Nicolai Stanged3cd1312015-08-06 15:46:16 -07001446 pgdat_init_report_one_done();
Mel Gorman0e1cc952015-06-30 14:57:27 -07001447 return 0;
1448 }
1449
1450 /* Bind memory initialisation thread to a local node if possible */
1451 if (!cpumask_empty(cpumask))
1452 set_cpus_allowed_ptr(current, cpumask);
Mel Gorman7e18adb2015-06-30 14:57:05 -07001453
1454 /* Sanity check boundaries */
1455 BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
1456 BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
1457 pgdat->first_deferred_pfn = ULONG_MAX;
1458
1459 /* Only the highest zone is deferred so find it */
1460 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1461 zone = pgdat->node_zones + zid;
1462 if (first_init_pfn < zone_end_pfn(zone))
1463 break;
1464 }
1465
1466 for_each_mem_pfn_range(i, nid, &walk_start, &walk_end, NULL) {
1467 unsigned long pfn, end_pfn;
Mel Gorman54608c32015-06-30 14:57:09 -07001468 struct page *page = NULL;
Mel Gormana4de83d2015-06-30 14:57:16 -07001469 struct page *free_base_page = NULL;
1470 unsigned long free_base_pfn = 0;
1471 int nr_to_free = 0;
Mel Gorman7e18adb2015-06-30 14:57:05 -07001472
1473 end_pfn = min(walk_end, zone_end_pfn(zone));
1474 pfn = first_init_pfn;
1475 if (pfn < walk_start)
1476 pfn = walk_start;
1477 if (pfn < zone->zone_start_pfn)
1478 pfn = zone->zone_start_pfn;
1479
1480 for (; pfn < end_pfn; pfn++) {
Mel Gorman54608c32015-06-30 14:57:09 -07001481 if (!pfn_valid_within(pfn))
Mel Gormana4de83d2015-06-30 14:57:16 -07001482 goto free_range;
Mel Gorman7e18adb2015-06-30 14:57:05 -07001483
Mel Gorman54608c32015-06-30 14:57:09 -07001484 /*
1485 * Ensure pfn_valid is checked every
Xishi Qiue7801492016-10-07 16:58:09 -07001486 * pageblock_nr_pages for memory holes
Mel Gorman54608c32015-06-30 14:57:09 -07001487 */
Xishi Qiue7801492016-10-07 16:58:09 -07001488 if ((pfn & (pageblock_nr_pages - 1)) == 0) {
Mel Gorman54608c32015-06-30 14:57:09 -07001489 if (!pfn_valid(pfn)) {
1490 page = NULL;
Mel Gormana4de83d2015-06-30 14:57:16 -07001491 goto free_range;
Mel Gorman54608c32015-06-30 14:57:09 -07001492 }
1493 }
1494
1495 if (!meminit_pfn_in_nid(pfn, nid, &nid_init_state)) {
1496 page = NULL;
Mel Gormana4de83d2015-06-30 14:57:16 -07001497 goto free_range;
Mel Gorman54608c32015-06-30 14:57:09 -07001498 }
1499
1500 /* Minimise pfn page lookups and scheduler checks */
Xishi Qiue7801492016-10-07 16:58:09 -07001501 if (page && (pfn & (pageblock_nr_pages - 1)) != 0) {
Mel Gorman54608c32015-06-30 14:57:09 -07001502 page++;
1503 } else {
Mel Gormana4de83d2015-06-30 14:57:16 -07001504 nr_pages += nr_to_free;
1505 deferred_free_range(free_base_page,
1506 free_base_pfn, nr_to_free);
1507 free_base_page = NULL;
1508 free_base_pfn = nr_to_free = 0;
1509
Mel Gorman54608c32015-06-30 14:57:09 -07001510 page = pfn_to_page(pfn);
1511 cond_resched();
1512 }
Mel Gorman7e18adb2015-06-30 14:57:05 -07001513
1514 if (page->flags) {
1515 VM_BUG_ON(page_zone(page) != zone);
Mel Gormana4de83d2015-06-30 14:57:16 -07001516 goto free_range;
Mel Gorman7e18adb2015-06-30 14:57:05 -07001517 }
1518
1519 __init_single_page(page, pfn, zid, nid);
Mel Gormana4de83d2015-06-30 14:57:16 -07001520 if (!free_base_page) {
1521 free_base_page = page;
1522 free_base_pfn = pfn;
1523 nr_to_free = 0;
1524 }
1525 nr_to_free++;
1526
1527 /* Where possible, batch up pages for a single free */
1528 continue;
1529free_range:
1530 /* Free the current block of pages to allocator */
1531 nr_pages += nr_to_free;
1532 deferred_free_range(free_base_page, free_base_pfn,
1533 nr_to_free);
1534 free_base_page = NULL;
1535 free_base_pfn = nr_to_free = 0;
Mel Gorman7e18adb2015-06-30 14:57:05 -07001536 }
Xishi Qiue7801492016-10-07 16:58:09 -07001537 /* Free the last block of pages to allocator */
1538 nr_pages += nr_to_free;
1539 deferred_free_range(free_base_page, free_base_pfn, nr_to_free);
Mel Gormana4de83d2015-06-30 14:57:16 -07001540
Mel Gorman7e18adb2015-06-30 14:57:05 -07001541 first_init_pfn = max(end_pfn, first_init_pfn);
1542 }
1543
1544 /* Sanity check that the next zone really is unpopulated */
1545 WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
1546
Mel Gorman0e1cc952015-06-30 14:57:27 -07001547 pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages,
Mel Gorman7e18adb2015-06-30 14:57:05 -07001548 jiffies_to_msecs(jiffies - start));
Nicolai Stanged3cd1312015-08-06 15:46:16 -07001549
1550 pgdat_init_report_one_done();
Mel Gorman0e1cc952015-06-30 14:57:27 -07001551 return 0;
1552}
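/*
 * Note on the loop above: contiguous initialised pages are batched in
 * free_base_page/nr_to_free and handed to deferred_free_range() either
 * when the run is broken (a hole, a pfn on another node, or a page with
 * flags already set, via the free_range label) or when a new pageblock
 * starts, so most memory is freed a pageblock at a time rather than
 * page by page.
 */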
Joonsoo Kim7cf91a92016-03-15 14:57:51 -07001553#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
Mel Gorman0e1cc952015-06-30 14:57:27 -07001554
1555void __init page_alloc_init_late(void)
1556{
Joonsoo Kim7cf91a92016-03-15 14:57:51 -07001557 struct zone *zone;
1558
1559#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
Mel Gorman0e1cc952015-06-30 14:57:27 -07001560 int nid;
1561
Nicolai Stanged3cd1312015-08-06 15:46:16 -07001562 /* There will be num_node_state(N_MEMORY) threads */
1563 atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
Mel Gorman0e1cc952015-06-30 14:57:27 -07001564 for_each_node_state(nid, N_MEMORY) {
Mel Gorman0e1cc952015-06-30 14:57:27 -07001565 kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
1566 }
1567
1568 /* Block until all are initialised */
Nicolai Stanged3cd1312015-08-06 15:46:16 -07001569 wait_for_completion(&pgdat_init_all_done_comp);
Mel Gorman4248b0d2015-08-06 15:46:20 -07001570
1571 /* Reinit limits that are based on free pages after the kernel is up */
1572 files_maxfiles_init();
Joonsoo Kim7cf91a92016-03-15 14:57:51 -07001573#endif
1574
1575 for_each_populated_zone(zone)
1576 set_zone_contiguous(zone);
Mel Gorman7e18adb2015-06-30 14:57:05 -07001577}
Mel Gorman7e18adb2015-06-30 14:57:05 -07001578
Michal Nazarewicz47118af2011-12-29 13:09:50 +01001579#ifdef CONFIG_CMA
Li Zhong9cf510a2013-08-23 13:52:52 +08001580/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
Michal Nazarewicz47118af2011-12-29 13:09:50 +01001581void __init init_cma_reserved_pageblock(struct page *page)
1582{
1583 unsigned i = pageblock_nr_pages;
1584 struct page *p = page;
1585
1586 do {
1587 __ClearPageReserved(p);
1588 set_page_count(p, 0);
1589 } while (++p, --i);
1590
Michal Nazarewicz47118af2011-12-29 13:09:50 +01001591 set_pageblock_migratetype(page, MIGRATE_CMA);
Michal Nazarewiczdc783272014-07-02 15:22:35 -07001592
1593 if (pageblock_order >= MAX_ORDER) {
1594 i = pageblock_nr_pages;
1595 p = page;
1596 do {
1597 set_page_refcounted(p);
1598 __free_pages(p, MAX_ORDER - 1);
1599 p += MAX_ORDER_NR_PAGES;
1600 } while (i -= MAX_ORDER_NR_PAGES);
1601 } else {
1602 set_page_refcounted(page);
1603 __free_pages(page, pageblock_order);
1604 }
1605
Jiang Liu3dcc0572013-07-03 15:03:21 -07001606 adjust_managed_page_count(page, pageblock_nr_pages);
Michal Nazarewicz47118af2011-12-29 13:09:50 +01001607}
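/*
 * Note on the branch above: on configurations where pageblock_order is
 * at least MAX_ORDER (e.g. some arm64 64K-page setups), a single CMA
 * pageblock cannot be freed with one call, so it is handed to the buddy
 * allocator as a series of MAX_ORDER - 1 sized chunks; otherwise the
 * whole pageblock is freed with a single __free_pages(page,
 * pageblock_order) call.
 */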
1608#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001609
1610/*
1611 * The order of subdivision here is critical for the IO subsystem.
1612 * Please do not alter this order without good reasons and regression
1613 * testing. Specifically, as large blocks of memory are subdivided,
1614 * the order in which smaller blocks are delivered depends on the order
1615 * they're subdivided in this function. This is the primary factor
1616 * influencing the order in which pages are delivered to the IO
1617 * subsystem according to empirical testing, and this is also justified
1618 * by considering the behavior of a buddy system containing a single
1619 * large block of memory acted on by a series of small allocations.
1620 * This behavior is a critical factor in sglist merging's success.
1621 *
Nadia Yvette Chambers6d49e352012-12-06 10:39:54 +01001622 * -- nyc
Linus Torvalds1da177e2005-04-16 15:20:36 -07001623 */
Nick Piggin085cc7d2006-01-06 00:11:01 -08001624static inline void expand(struct zone *zone, struct page *page,
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001625 int low, int high, struct free_area *area,
1626 int migratetype)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001627{
1628 unsigned long size = 1 << high;
1629
1630 while (high > low) {
1631 area--;
1632 high--;
1633 size >>= 1;
Sasha Levin309381fea2014-01-23 15:52:54 -08001634 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
Stanislaw Gruszkac0a32fc2012-01-10 15:07:28 -08001635
Joonsoo Kimacbc15a2016-10-07 16:58:15 -07001636 /*
 1637		 * Mark as guard pages (or a single page) so that they can be
 1638		 * merged back into the allocator when the buddy is freed.
 1639		 * The corresponding page table entries are not touched; the
 1640		 * pages stay not-present in the virtual address space.
1641 */
1642 if (set_page_guard(zone, &page[size], high, migratetype))
Stanislaw Gruszkac0a32fc2012-01-10 15:07:28 -08001643 continue;
Joonsoo Kimacbc15a2016-10-07 16:58:15 -07001644
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001645 list_add(&page[size].lru, &area->free_list[migratetype]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001646 area->nr_free++;
1647 set_page_order(&page[size], high);
1648 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001649}
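/*
 * Worked example (illustrative): satisfying an order-0 request from an
 * order-3 block calls expand() with low = 0, high = 3 and size = 8. The
 * loop splits off the upper halves in decreasing order: pages 4-7 go to
 * the order-2 free list, pages 2-3 to order-1 and page 1 to order-0
 * (unless a debug guard page swallows one of them), leaving page 0 for
 * the caller.
 */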
1650
Vlastimil Babka4e611802016-05-19 17:14:41 -07001651static void check_new_page_bad(struct page *page)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001652{
Vlastimil Babka4e611802016-05-19 17:14:41 -07001653 const char *bad_reason = NULL;
1654 unsigned long bad_flags = 0;
Dave Hansenf0b791a2014-01-23 15:52:49 -08001655
Kirill A. Shutemov53f92632016-01-15 16:53:42 -08001656 if (unlikely(atomic_read(&page->_mapcount) != -1))
Dave Hansenf0b791a2014-01-23 15:52:49 -08001657 bad_reason = "nonzero mapcount";
1658 if (unlikely(page->mapping != NULL))
1659 bad_reason = "non-NULL mapping";
Joonsoo Kimfe896d12016-03-17 14:19:26 -07001660 if (unlikely(page_ref_count(page) != 0))
Dave Hansenf0b791a2014-01-23 15:52:49 -08001661 bad_reason = "nonzero _count";
Naoya Horiguchif4c18e62015-08-06 15:47:08 -07001662 if (unlikely(page->flags & __PG_HWPOISON)) {
1663 bad_reason = "HWPoisoned (hardware-corrupted)";
1664 bad_flags = __PG_HWPOISON;
Naoya Horiguchie570f562016-05-20 16:58:50 -07001665 /* Don't complain about hwpoisoned pages */
1666 page_mapcount_reset(page); /* remove PageBuddy */
1667 return;
Naoya Horiguchif4c18e62015-08-06 15:47:08 -07001668 }
Dave Hansenf0b791a2014-01-23 15:52:49 -08001669 if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
1670 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
1671 bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
1672 }
Johannes Weiner9edad6e2014-12-10 15:44:58 -08001673#ifdef CONFIG_MEMCG
1674 if (unlikely(page->mem_cgroup))
1675 bad_reason = "page still charged to cgroup";
1676#endif
Vlastimil Babka4e611802016-05-19 17:14:41 -07001677 bad_page(page, bad_reason, bad_flags);
1678}
1679
1680/*
1681 * This page is about to be returned from the page allocator
1682 */
1683static inline int check_new_page(struct page *page)
1684{
1685 if (likely(page_expected_state(page,
1686 PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
1687 return 0;
1688
1689 check_new_page_bad(page);
1690 return 1;
Wu Fengguang2a7684a2009-09-16 11:50:12 +02001691}
1692
Laura Abbott1414c7f2016-03-15 14:56:30 -07001693static inline bool free_pages_prezeroed(bool poisoned)
1694{
1695 return IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
1696 page_poisoning_enabled() && poisoned;
1697}
1698
Mel Gorman479f8542016-05-19 17:14:35 -07001699#ifdef CONFIG_DEBUG_VM
1700static bool check_pcp_refill(struct page *page)
1701{
1702 return false;
1703}
1704
1705static bool check_new_pcp(struct page *page)
1706{
1707 return check_new_page(page);
1708}
1709#else
1710static bool check_pcp_refill(struct page *page)
1711{
1712 return check_new_page(page);
1713}
1714static bool check_new_pcp(struct page *page)
1715{
1716 return false;
1717}
1718#endif /* CONFIG_DEBUG_VM */
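/*
 * Note on the #ifdef above: with CONFIG_DEBUG_VM every order-0 page is
 * checked as it is allocated from a pcp list and refills from the buddy
 * lists are trusted; without it the check is done once when pages are
 * moved onto the pcp lists, so pages recycling through the pcp lists do
 * not pay the checking cost on every allocation.
 */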
1719
1720static bool check_new_pages(struct page *page, unsigned int order)
1721{
1722 int i;
1723 for (i = 0; i < (1 << order); i++) {
1724 struct page *p = page + i;
1725
1726 if (unlikely(check_new_page(p)))
1727 return true;
1728 }
1729
1730 return false;
1731}
1732
Joonsoo Kim46f24fd2016-07-26 15:23:58 -07001733inline void post_alloc_hook(struct page *page, unsigned int order,
1734 gfp_t gfp_flags)
1735{
1736 set_page_private(page, 0);
1737 set_page_refcounted(page);
1738
1739 arch_alloc_page(page, order);
1740 kernel_map_pages(page, 1 << order, 1);
1741 kernel_poison_pages(page, 1 << order, 1);
1742 kasan_alloc_pages(page, order);
1743 set_page_owner(page, order, gfp_flags);
1744}
1745
Mel Gorman479f8542016-05-19 17:14:35 -07001746static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
Mel Gormanc6038442016-05-19 17:13:38 -07001747 unsigned int alloc_flags)
Wu Fengguang2a7684a2009-09-16 11:50:12 +02001748{
1749 int i;
Laura Abbott1414c7f2016-03-15 14:56:30 -07001750 bool poisoned = true;
Wu Fengguang2a7684a2009-09-16 11:50:12 +02001751
1752 for (i = 0; i < (1 << order); i++) {
1753 struct page *p = page + i;
Laura Abbott1414c7f2016-03-15 14:56:30 -07001754 if (poisoned)
1755 poisoned &= page_is_poisoned(p);
Wu Fengguang2a7684a2009-09-16 11:50:12 +02001756 }
Hugh Dickins689bceb2005-11-21 21:32:20 -08001757
Joonsoo Kim46f24fd2016-07-26 15:23:58 -07001758 post_alloc_hook(page, order, gfp_flags);
Nick Piggin17cf4402006-03-22 00:08:41 -08001759
Laura Abbott1414c7f2016-03-15 14:56:30 -07001760 if (!free_pages_prezeroed(poisoned) && (gfp_flags & __GFP_ZERO))
Anisse Astierf4d28972015-06-24 16:56:36 -07001761 for (i = 0; i < (1 << order); i++)
1762 clear_highpage(page + i);
Nick Piggin17cf4402006-03-22 00:08:41 -08001763
1764 if (order && (gfp_flags & __GFP_COMP))
1765 prep_compound_page(page, order);
1766
Vlastimil Babka75379192015-02-11 15:25:38 -08001767 /*
Michal Hocko2f064f32015-08-21 14:11:51 -07001768 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
Vlastimil Babka75379192015-02-11 15:25:38 -08001769 * allocate the page. The expectation is that the caller is taking
1770 * steps that will free more memory. The caller should avoid the page
1771 * being used for !PFMEMALLOC purposes.
1772 */
Michal Hocko2f064f32015-08-21 14:11:51 -07001773 if (alloc_flags & ALLOC_NO_WATERMARKS)
1774 set_page_pfmemalloc(page);
1775 else
1776 clear_page_pfmemalloc(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001777}
1778
Mel Gorman56fd56b2007-10-16 01:25:58 -07001779/*
1780 * Go through the free lists for the given migratetype and remove
1781 * the smallest available page from the freelists
1782 */
Mel Gorman728ec982009-06-16 15:32:04 -07001783static inline
1784struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
Mel Gorman56fd56b2007-10-16 01:25:58 -07001785 int migratetype)
1786{
1787 unsigned int current_order;
Pintu Kumarb8af2942013-09-11 14:20:34 -07001788 struct free_area *area;
Mel Gorman56fd56b2007-10-16 01:25:58 -07001789 struct page *page;
1790
1791 /* Find a page of the appropriate size in the preferred list */
1792 for (current_order = order; current_order < MAX_ORDER; ++current_order) {
1793 area = &(zone->free_area[current_order]);
Geliang Tanga16601c2016-01-14 15:20:30 -08001794 page = list_first_entry_or_null(&area->free_list[migratetype],
Mel Gorman56fd56b2007-10-16 01:25:58 -07001795 struct page, lru);
Geliang Tanga16601c2016-01-14 15:20:30 -08001796 if (!page)
1797 continue;
Mel Gorman56fd56b2007-10-16 01:25:58 -07001798 list_del(&page->lru);
1799 rmv_page_order(page);
1800 area->nr_free--;
Mel Gorman56fd56b2007-10-16 01:25:58 -07001801 expand(zone, page, order, current_order, area, migratetype);
Vlastimil Babkabb14c2c2015-09-08 15:01:25 -07001802 set_pcppage_migratetype(page, migratetype);
Mel Gorman56fd56b2007-10-16 01:25:58 -07001803 return page;
1804 }
1805
1806 return NULL;
1807}
1808
1809
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001810/*
1811 * This array describes the order lists are fallen back to when
1812 * the free lists for the desirable migrate type are depleted
1813 */
Michal Nazarewicz47118af2011-12-29 13:09:50 +01001814static int fallbacks[MIGRATE_TYPES][4] = {
Mel Gorman974a7862015-11-06 16:28:34 -08001815 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
1816 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
1817 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
Joonsoo Kimdc676472015-04-14 15:45:15 -07001818#ifdef CONFIG_CMA
Mel Gorman974a7862015-11-06 16:28:34 -08001819 [MIGRATE_CMA] = { MIGRATE_TYPES }, /* Never used */
Michal Nazarewicz47118af2011-12-29 13:09:50 +01001820#endif
Minchan Kim194159f2013-02-22 16:33:58 -08001821#ifdef CONFIG_MEMORY_ISOLATION
Mel Gorman974a7862015-11-06 16:28:34 -08001822 [MIGRATE_ISOLATE] = { MIGRATE_TYPES }, /* Never used */
Minchan Kim194159f2013-02-22 16:33:58 -08001823#endif
Mel Gormanb2a0ac82007-10-16 01:25:48 -07001824};
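/*
 * Reading the table above (illustrative): a MIGRATE_UNMOVABLE request
 * whose own free lists are empty tries MIGRATE_RECLAIMABLE first, then
 * MIGRATE_MOVABLE; the trailing MIGRATE_TYPES entry is a sentinel that
 * terminates the walk in find_suitable_fallback(). MIGRATE_CMA and
 * MIGRATE_ISOLATE are never used as fallback sources, so their rows
 * contain only the sentinel.
 */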
1825
Joonsoo Kimdc676472015-04-14 15:45:15 -07001826#ifdef CONFIG_CMA
1827static struct page *__rmqueue_cma_fallback(struct zone *zone,
1828 unsigned int order)
1829{
1830 return __rmqueue_smallest(zone, order, MIGRATE_CMA);
1831}
1832#else
1833static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
1834 unsigned int order) { return NULL; }
1835#endif
1836
Mel Gormanc361be52007-10-16 01:25:51 -07001837/*
1838 * Move the free pages in a range to the free lists of the requested type.
Mel Gormand9c23402007-10-16 01:26:01 -07001839 * Note that start_page and end_page are not aligned on a pageblock
Mel Gormanc361be52007-10-16 01:25:51 -07001840 * boundary. If alignment is required, use move_freepages_block()
1841 */
Minchan Kim435b4052012-10-08 16:32:16 -07001842int move_freepages(struct zone *zone,
Adrian Bunkb69a7282008-07-23 21:28:12 -07001843 struct page *start_page, struct page *end_page,
1844 int migratetype)
Mel Gormanc361be52007-10-16 01:25:51 -07001845{
1846 struct page *page;
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08001847 unsigned int order;
Mel Gormand1003132007-10-16 01:26:00 -07001848 int pages_moved = 0;
Mel Gormanc361be52007-10-16 01:25:51 -07001849
1850#ifndef CONFIG_HOLES_IN_ZONE
1851 /*
1852 * page_zone is not safe to call in this context when
1853 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
1854 * anyway as we check zone boundaries in move_freepages_block().
1855 * Remove at a later date when no bug reports exist related to
Mel Gormanac0e5b72007-10-16 01:25:58 -07001856 * grouping pages by mobility
Mel Gormanc361be52007-10-16 01:25:51 -07001857 */
Mel Gorman97ee4ba2014-10-09 15:28:28 -07001858 VM_BUG_ON(page_zone(start_page) != page_zone(end_page));
Mel Gormanc361be52007-10-16 01:25:51 -07001859#endif
1860
1861 for (page = start_page; page <= end_page;) {
Adam Litke344c7902008-09-02 14:35:38 -07001862 /* Make sure we are not inadvertently changing nodes */
Sasha Levin309381fea2014-01-23 15:52:54 -08001863 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
Adam Litke344c7902008-09-02 14:35:38 -07001864
Mel Gormanc361be52007-10-16 01:25:51 -07001865 if (!pfn_valid_within(page_to_pfn(page))) {
1866 page++;
1867 continue;
1868 }
1869
1870 if (!PageBuddy(page)) {
1871 page++;
1872 continue;
1873 }
1874
1875 order = page_order(page);
Kirill A. Shutemov84be48d2011-03-22 16:33:41 -07001876 list_move(&page->lru,
1877 &zone->free_area[order].free_list[migratetype]);
Mel Gormanc361be52007-10-16 01:25:51 -07001878 page += 1 << order;
Mel Gormand1003132007-10-16 01:26:00 -07001879 pages_moved += 1 << order;
Mel Gormanc361be52007-10-16 01:25:51 -07001880 }
1881
Mel Gormand1003132007-10-16 01:26:00 -07001882 return pages_moved;
Mel Gormanc361be52007-10-16 01:25:51 -07001883}
1884
Minchan Kimee6f5092012-07-31 16:43:50 -07001885int move_freepages_block(struct zone *zone, struct page *page,
Linus Torvalds68e3e922012-06-03 20:05:57 -07001886 int migratetype)
Mel Gormanc361be52007-10-16 01:25:51 -07001887{
1888 unsigned long start_pfn, end_pfn;
1889 struct page *start_page, *end_page;
1890
1891 start_pfn = page_to_pfn(page);
Mel Gormand9c23402007-10-16 01:26:01 -07001892 start_pfn = start_pfn & ~(pageblock_nr_pages-1);
Mel Gormanc361be52007-10-16 01:25:51 -07001893 start_page = pfn_to_page(start_pfn);
Mel Gormand9c23402007-10-16 01:26:01 -07001894 end_page = start_page + pageblock_nr_pages - 1;
1895 end_pfn = start_pfn + pageblock_nr_pages - 1;
Mel Gormanc361be52007-10-16 01:25:51 -07001896
1897 /* Do not cross zone boundaries */
Cody P Schafer108bcc92013-02-22 16:35:23 -08001898 if (!zone_spans_pfn(zone, start_pfn))
Mel Gormanc361be52007-10-16 01:25:51 -07001899 start_page = page;
Cody P Schafer108bcc92013-02-22 16:35:23 -08001900 if (!zone_spans_pfn(zone, end_pfn))
Mel Gormanc361be52007-10-16 01:25:51 -07001901 return 0;
1902
1903 return move_freepages(zone, start_page, end_page, migratetype);
1904}
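/*
 * Worked example (assuming pageblock_nr_pages == 512): for a page at
 * pfn 0x12345, start_pfn becomes 0x12345 & ~0x1ff = 0x12200 and end_pfn
 * 0x123ff, so the whole naturally aligned pageblock containing the page
 * is moved (and nothing is moved if the block runs past the end of the
 * zone).
 */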
1905
Mel Gorman2f66a682009-09-21 17:02:31 -07001906static void change_pageblock_range(struct page *pageblock_page,
1907 int start_order, int migratetype)
1908{
1909 int nr_pageblocks = 1 << (start_order - pageblock_order);
1910
1911 while (nr_pageblocks--) {
1912 set_pageblock_migratetype(pageblock_page, migratetype);
1913 pageblock_page += pageblock_nr_pages;
1914 }
1915}
1916
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07001917/*
Vlastimil Babka9c0415e2015-02-11 15:28:21 -08001918 * When we are falling back to another migratetype during allocation, try to
1919 * steal extra free pages from the same pageblocks to satisfy further
1920 * allocations, instead of polluting multiple pageblocks.
1921 *
1922 * If we are stealing a relatively large buddy page, it is likely there will
1923 * be more free pages in the pageblock, so try to steal them all. For
1924 * reclaimable and unmovable allocations, we steal regardless of page size,
1925 * as fragmentation caused by those allocations polluting movable pageblocks
1926 * is worse than movable allocations stealing from unmovable and reclaimable
1927 * pageblocks.
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07001928 */
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07001929static bool can_steal_fallback(unsigned int order, int start_mt)
1930{
1931 /*
 1932	 * This order check is intentional even though the check below is
 1933	 * more relaxed. The reason is that if this condition is met we can
 1934	 * steal the whole pageblock, whereas the check below is only a
 1935	 * heuristic that does not guarantee it and may be changed at any
 1936	 * time.
1937 */
1938 if (order >= pageblock_order)
1939 return true;
1940
1941 if (order >= pageblock_order / 2 ||
1942 start_mt == MIGRATE_RECLAIMABLE ||
1943 start_mt == MIGRATE_UNMOVABLE ||
1944 page_group_by_mobility_disabled)
1945 return true;
1946
1947 return false;
1948}
1949
1950/*
 1951 * This function implements the actual stealing behaviour. If the order is
 1952 * large enough, we can steal the whole pageblock. If not, we first move the
 1953 * free pages in this pageblock and check whether at least half of them were
 1954 * moved. If so, we change the migratetype of the pageblock and permanently
 1955 * use its pages as the requested migratetype in the future.
1956 */
1957static void steal_suitable_fallback(struct zone *zone, struct page *page,
1958 int start_type)
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07001959{
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08001960 unsigned int current_order = page_order(page);
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07001961 int pages;
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07001962
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07001963 /* Take ownership for orders >= pageblock_order */
1964 if (current_order >= pageblock_order) {
1965 change_pageblock_range(page, current_order, start_type);
Vlastimil Babka3a1086f2015-02-11 15:28:18 -08001966 return;
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07001967 }
1968
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07001969 pages = move_freepages_block(zone, page, start_type);
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07001970
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07001971 /* Claim the whole block if over half of it is free */
1972 if (pages >= (1 << (pageblock_order-1)) ||
1973 page_group_by_mobility_disabled)
1974 set_pageblock_migratetype(page, start_type);
1975}
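/*
 * Worked example (assuming pageblock_order == 9, i.e. 512-page blocks):
 * a fallback allocation below pageblock_order moves the free pages of
 * the foreign block with move_freepages_block(); if at least 256 pages
 * were moved (or page_group_by_mobility_disabled is set), the whole
 * pageblock is re-typed to start_type, otherwise only the moved free
 * pages change lists and the block keeps its old migratetype.
 */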
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07001976
Joonsoo Kim2149cda2015-04-14 15:45:21 -07001977/*
1978 * Check whether there is a suitable fallback freepage with requested order.
1979 * If only_stealable is true, this function returns fallback_mt only if
1980 * we can steal other freepages all together. This would help to reduce
1981 * fragmentation due to mixed migratetype pages in one pageblock.
1982 */
1983int find_suitable_fallback(struct free_area *area, unsigned int order,
1984 int migratetype, bool only_stealable, bool *can_steal)
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07001985{
1986 int i;
1987 int fallback_mt;
1988
1989 if (area->nr_free == 0)
1990 return -1;
1991
1992 *can_steal = false;
1993 for (i = 0;; i++) {
1994 fallback_mt = fallbacks[migratetype][i];
Mel Gorman974a7862015-11-06 16:28:34 -08001995 if (fallback_mt == MIGRATE_TYPES)
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07001996 break;
1997
1998 if (list_empty(&area->free_list[fallback_mt]))
1999 continue;
2000
2001 if (can_steal_fallback(order, migratetype))
2002 *can_steal = true;
2003
Joonsoo Kim2149cda2015-04-14 15:45:21 -07002004 if (!only_stealable)
2005 return fallback_mt;
2006
2007 if (*can_steal)
2008 return fallback_mt;
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07002009 }
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002010
2011 return -1;
Srivatsa S. Bhatfef903e2013-09-11 14:20:35 -07002012}
2013
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002014/*
2015 * Reserve a pageblock for exclusive use of high-order atomic allocations if
2016 * there are no empty page blocks that contain a page with a suitable order
2017 */
2018static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
2019 unsigned int alloc_order)
2020{
2021 int mt;
2022 unsigned long max_managed, flags;
2023
2024 /*
2025 * Limit the number reserved to 1 pageblock or roughly 1% of a zone.
2026 * Check is race-prone but harmless.
2027 */
2028 max_managed = (zone->managed_pages / 100) + pageblock_nr_pages;
2029 if (zone->nr_reserved_highatomic >= max_managed)
2030 return;
2031
2032 spin_lock_irqsave(&zone->lock, flags);
2033
2034 /* Recheck the nr_reserved_highatomic limit under the lock */
2035 if (zone->nr_reserved_highatomic >= max_managed)
2036 goto out_unlock;
2037
2038 /* Yoink! */
2039 mt = get_pageblock_migratetype(page);
2040 if (mt != MIGRATE_HIGHATOMIC &&
2041 !is_migrate_isolate(mt) && !is_migrate_cma(mt)) {
2042 zone->nr_reserved_highatomic += pageblock_nr_pages;
2043 set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
2044 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC);
2045 }
2046
2047out_unlock:
2048 spin_unlock_irqrestore(&zone->lock, flags);
2049}
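/*
 * Worked example (illustrative): for a zone with 1,048,576 managed
 * pages (4GB of 4K pages), max_managed is 10,485 + pageblock_nr_pages,
 * so a little over 1% of the zone can be held in MIGRATE_HIGHATOMIC
 * reserves; each successful reservation converts one pageblock and adds
 * pageblock_nr_pages to zone->nr_reserved_highatomic.
 */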
2050
2051/*
2052 * Used when an allocation is about to fail under memory pressure. This
2053 * potentially hurts the reliability of high-order allocations when under
2054 * intense memory pressure but failed atomic allocations should be easier
2055 * to recover from than an OOM.
2056 */
2057static void unreserve_highatomic_pageblock(const struct alloc_context *ac)
2058{
2059 struct zonelist *zonelist = ac->zonelist;
2060 unsigned long flags;
2061 struct zoneref *z;
2062 struct zone *zone;
2063 struct page *page;
2064 int order;
2065
2066 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
2067 ac->nodemask) {
2068 /* Preserve at least one pageblock */
2069 if (zone->nr_reserved_highatomic <= pageblock_nr_pages)
2070 continue;
2071
2072 spin_lock_irqsave(&zone->lock, flags);
2073 for (order = 0; order < MAX_ORDER; order++) {
2074 struct free_area *area = &(zone->free_area[order]);
2075
Geliang Tanga16601c2016-01-14 15:20:30 -08002076 page = list_first_entry_or_null(
2077 &area->free_list[MIGRATE_HIGHATOMIC],
2078 struct page, lru);
2079 if (!page)
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002080 continue;
2081
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002082 /*
2083 * It should never happen but changes to locking could
2084 * inadvertently allow a per-cpu drain to add pages
2085 * to MIGRATE_HIGHATOMIC while unreserving so be safe
2086 * and watch for underflows.
2087 */
2088 zone->nr_reserved_highatomic -= min(pageblock_nr_pages,
2089 zone->nr_reserved_highatomic);
2090
2091 /*
2092 * Convert to ac->migratetype and avoid the normal
2093 * pageblock stealing heuristics. Minimally, the caller
2094 * is doing the work and needs the pages. More
2095 * importantly, if the block was always converted to
2096 * MIGRATE_UNMOVABLE or another type then the number
2097 * of pageblocks that cannot be completely freed
2098 * may increase.
2099 */
2100 set_pageblock_migratetype(page, ac->migratetype);
2101 move_freepages_block(zone, page, ac->migratetype);
2102 spin_unlock_irqrestore(&zone->lock, flags);
2103 return;
2104 }
2105 spin_unlock_irqrestore(&zone->lock, flags);
2106 }
2107}
2108
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002109/* Remove an element from the buddy allocator from the fallback list */
Mel Gorman0ac3a402009-06-16 15:32:06 -07002110static inline struct page *
Mel Gorman7aeb09f2014-06-04 16:10:21 -07002111__rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002112{
Pintu Kumarb8af2942013-09-11 14:20:34 -07002113 struct free_area *area;
Mel Gorman7aeb09f2014-06-04 16:10:21 -07002114 unsigned int current_order;
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002115 struct page *page;
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002116 int fallback_mt;
2117 bool can_steal;
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002118
2119 /* Find the largest possible block of pages in the other list */
Mel Gorman7aeb09f2014-06-04 16:10:21 -07002120 for (current_order = MAX_ORDER-1;
2121 current_order >= order && current_order <= MAX_ORDER-1;
2122 --current_order) {
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002123 area = &(zone->free_area[current_order]);
2124 fallback_mt = find_suitable_fallback(area, current_order,
Joonsoo Kim2149cda2015-04-14 15:45:21 -07002125 start_migratetype, false, &can_steal);
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002126 if (fallback_mt == -1)
2127 continue;
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002128
Geliang Tanga16601c2016-01-14 15:20:30 -08002129 page = list_first_entry(&area->free_list[fallback_mt],
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002130 struct page, lru);
2131 if (can_steal)
2132 steal_suitable_fallback(zone, page, start_migratetype);
Mel Gormane0104872007-10-16 01:25:53 -07002133
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002134 /* Remove the page from the freelists */
2135 area->nr_free--;
2136 list_del(&page->lru);
2137 rmv_page_order(page);
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002138
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002139 expand(zone, page, order, current_order, area,
2140 start_migratetype);
2141 /*
Vlastimil Babkabb14c2c2015-09-08 15:01:25 -07002142 * The pcppage_migratetype may differ from pageblock's
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002143 * migratetype depending on the decisions in
Vlastimil Babkabb14c2c2015-09-08 15:01:25 -07002144 * find_suitable_fallback(). This is OK as long as it does not
2145 * differ for MIGRATE_CMA pageblocks. Those can be used as
2146 * fallback only via special __rmqueue_cma_fallback() function
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002147 */
Vlastimil Babkabb14c2c2015-09-08 15:01:25 -07002148 set_pcppage_migratetype(page, start_migratetype);
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002149
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002150 trace_mm_page_alloc_extfrag(page, order, current_order,
2151 start_migratetype, fallback_mt);
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002152
Joonsoo Kim4eb7dce2015-04-14 15:45:18 -07002153 return page;
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002154 }
2155
Mel Gorman728ec982009-06-16 15:32:04 -07002156 return NULL;
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002157}
2158
Mel Gorman56fd56b2007-10-16 01:25:58 -07002159/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002160 * Do the hard work of removing an element from the buddy allocator.
2161 * Call me with the zone->lock already held.
2162 */
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002163static struct page *__rmqueue(struct zone *zone, unsigned int order,
Mel Gorman6ac02062016-01-14 15:20:28 -08002164 int migratetype)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002165{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002166 struct page *page;
2167
Mel Gorman56fd56b2007-10-16 01:25:58 -07002168 page = __rmqueue_smallest(zone, order, migratetype);
Mel Gorman974a7862015-11-06 16:28:34 -08002169 if (unlikely(!page)) {
Joonsoo Kimdc676472015-04-14 15:45:15 -07002170 if (migratetype == MIGRATE_MOVABLE)
2171 page = __rmqueue_cma_fallback(zone, order);
2172
2173 if (!page)
2174 page = __rmqueue_fallback(zone, order, migratetype);
Mel Gorman728ec982009-06-16 15:32:04 -07002175 }
2176
Mel Gorman0d3d0622009-09-21 17:02:44 -07002177 trace_mm_page_alloc_zone_locked(page, order, migratetype);
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002178 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179}
2180
Michal Nazarewicz5f63b722012-01-11 15:16:11 +01002181/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002182 * Obtain a specified number of elements from the buddy allocator, all under
2183 * a single hold of the lock, for efficiency. Add them to the supplied list.
2184 * Returns the number of new pages which were placed at *list.
2185 */
Michal Nazarewicz5f63b722012-01-11 15:16:11 +01002186static int rmqueue_bulk(struct zone *zone, unsigned int order,
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002187 unsigned long count, struct list_head *list,
Mel Gormanb745bc82014-06-04 16:10:22 -07002188 int migratetype, bool cold)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002189{
Vlastimil Babka5bcc9f82014-06-04 16:07:22 -07002190 int i;
Michal Nazarewicz5f63b722012-01-11 15:16:11 +01002191
Nick Pigginc54ad302006-01-06 00:10:56 -08002192 spin_lock(&zone->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002193 for (i = 0; i < count; ++i) {
Mel Gorman6ac02062016-01-14 15:20:28 -08002194 struct page *page = __rmqueue(zone, order, migratetype);
Nick Piggin085cc7d2006-01-06 00:11:01 -08002195 if (unlikely(page == NULL))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002196 break;
Mel Gorman81eabcb2007-12-17 16:20:05 -08002197
Mel Gorman479f8542016-05-19 17:14:35 -07002198 if (unlikely(check_pcp_refill(page)))
2199 continue;
2200
Mel Gorman81eabcb2007-12-17 16:20:05 -08002201 /*
2202 * Split buddy pages returned by expand() are received here
 2203		 * in physical page order. The page is added to the caller's
 2204		 * list and the list head then moves forward. From the caller's
 2205		 * perspective, the linked list is ordered by page number under
 2206		 * some conditions. This is useful for IO devices that can
2207 * merge IO requests if the physical pages are ordered
2208 * properly.
2209 */
Mel Gormanb745bc82014-06-04 16:10:22 -07002210 if (likely(!cold))
Mel Gormane084b2d2009-07-29 15:02:04 -07002211 list_add(&page->lru, list);
2212 else
2213 list_add_tail(&page->lru, list);
Mel Gorman81eabcb2007-12-17 16:20:05 -08002214 list = &page->lru;
Vlastimil Babkabb14c2c2015-09-08 15:01:25 -07002215 if (is_migrate_cma(get_pcppage_migratetype(page)))
Bartlomiej Zolnierkiewiczd1ce7492012-10-08 16:32:02 -07002216 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
2217 -(1 << order));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002218 }
Mel Gormanf2260e62009-06-16 15:32:13 -07002219 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
Nick Pigginc54ad302006-01-06 00:10:56 -08002220 spin_unlock(&zone->lock);
Nick Piggin085cc7d2006-01-06 00:11:01 -08002221 return i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002222}
2223
Christoph Lameter4ae7c032005-06-21 17:14:57 -07002224#ifdef CONFIG_NUMA
Christoph Lameter8fce4d82006-03-09 17:33:54 -08002225/*
Christoph Lameter4037d452007-05-09 02:35:14 -07002226 * Called from the vmstat counter updater to drain pagesets of this
2227 * currently executing processor on remote nodes after they have
2228 * expired.
2229 *
Christoph Lameter879336c2006-03-22 00:09:08 -08002230 * Note that this function must be called with the thread pinned to
2231 * a single processor.
Christoph Lameter8fce4d82006-03-09 17:33:54 -08002232 */
Christoph Lameter4037d452007-05-09 02:35:14 -07002233void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
Christoph Lameter4ae7c032005-06-21 17:14:57 -07002234{
Christoph Lameter4ae7c032005-06-21 17:14:57 -07002235 unsigned long flags;
Michal Nazarewicz7be12fc2014-08-06 16:05:15 -07002236 int to_drain, batch;
Christoph Lameter4ae7c032005-06-21 17:14:57 -07002237
Christoph Lameter4037d452007-05-09 02:35:14 -07002238 local_irq_save(flags);
Jason Low4db0c3c2015-04-15 16:14:08 -07002239 batch = READ_ONCE(pcp->batch);
Michal Nazarewicz7be12fc2014-08-06 16:05:15 -07002240 to_drain = min(pcp->count, batch);
KOSAKI Motohiro2a135152012-07-31 16:42:53 -07002241 if (to_drain > 0) {
2242 free_pcppages_bulk(zone, to_drain, pcp);
2243 pcp->count -= to_drain;
2244 }
Christoph Lameter4037d452007-05-09 02:35:14 -07002245 local_irq_restore(flags);
Christoph Lameter4ae7c032005-06-21 17:14:57 -07002246}
2247#endif
2248
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002249/*
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002250 * Drain pcplists of the indicated processor and zone.
2251 *
2252 * The processor must either be the current processor and the
2253 * thread pinned to the current processor or a processor that
2254 * is not online.
2255 */
2256static void drain_pages_zone(unsigned int cpu, struct zone *zone)
2257{
2258 unsigned long flags;
2259 struct per_cpu_pageset *pset;
2260 struct per_cpu_pages *pcp;
2261
2262 local_irq_save(flags);
2263 pset = per_cpu_ptr(zone->pageset, cpu);
2264
2265 pcp = &pset->pcp;
2266 if (pcp->count) {
2267 free_pcppages_bulk(zone, pcp->count, pcp);
2268 pcp->count = 0;
2269 }
2270 local_irq_restore(flags);
2271}
2272
2273/*
2274 * Drain pcplists of all zones on the indicated processor.
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002275 *
2276 * The processor must either be the current processor and the
2277 * thread pinned to the current processor or a processor that
2278 * is not online.
2279 */
2280static void drain_pages(unsigned int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002281{
2282 struct zone *zone;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002283
KOSAKI Motohiroee99c712009-03-31 15:19:31 -07002284 for_each_populated_zone(zone) {
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002285 drain_pages_zone(cpu, zone);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002286 }
2287}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002288
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002289/*
2290 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002291 *
2292 * The CPU has to be pinned. When zone parameter is non-NULL, spill just
2293 * the single zone's pages.
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002294 */
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002295void drain_local_pages(struct zone *zone)
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002296{
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002297 int cpu = smp_processor_id();
2298
2299 if (zone)
2300 drain_pages_zone(cpu, zone);
2301 else
2302 drain_pages(cpu);
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002303}
2304
2305/*
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002306 * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
2307 *
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002308 * When zone parameter is non-NULL, spill just the single zone's pages.
2309 *
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002310 * Note that this code is protected against sending an IPI to an offline
2311 * CPU but does not guarantee sending an IPI to newly hotplugged CPUs:
2312 * on_each_cpu_mask() blocks hotplug and won't talk to offlined CPUs but
2313 * nothing keeps CPUs from showing up after we populated the cpumask and
2314 * before the call to on_each_cpu_mask().
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002315 */
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002316void drain_all_pages(struct zone *zone)
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002317{
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002318 int cpu;
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002319
2320 /*
 2321	 * Allocate in the BSS so we won't require allocation in
 2322	 * the direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
2323 */
2324 static cpumask_t cpus_with_pcps;
2325
2326 /*
 2327	 * We don't care about racing with CPU hotplug events:
 2328	 * an offline notification will cause the notified
 2329	 * CPU to drain its own pcps, and on_each_cpu_mask()
 2330	 * disables preemption as part of its processing
2331 */
2332 for_each_online_cpu(cpu) {
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002333 struct per_cpu_pageset *pcp;
2334 struct zone *z;
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002335 bool has_pcps = false;
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002336
2337 if (zone) {
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002338 pcp = per_cpu_ptr(zone->pageset, cpu);
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002339 if (pcp->pcp.count)
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002340 has_pcps = true;
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002341 } else {
2342 for_each_populated_zone(z) {
2343 pcp = per_cpu_ptr(z->pageset, cpu);
2344 if (pcp->pcp.count) {
2345 has_pcps = true;
2346 break;
2347 }
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002348 }
2349 }
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002350
Gilad Ben-Yossef74046492012-03-28 14:42:45 -07002351 if (has_pcps)
2352 cpumask_set_cpu(cpu, &cpus_with_pcps);
2353 else
2354 cpumask_clear_cpu(cpu, &cpus_with_pcps);
2355 }
Vlastimil Babka93481ff2014-12-10 15:43:01 -08002356 on_each_cpu_mask(&cpus_with_pcps, (smp_call_func_t) drain_local_pages,
2357 zone, 1);
Christoph Lameter9f8f2172008-02-04 22:29:11 -08002358}
2359
Rafael J. Wysocki296699d2007-07-29 23:27:18 +02002360#ifdef CONFIG_HIBERNATION
Linus Torvalds1da177e2005-04-16 15:20:36 -07002361
2362void mark_free_pages(struct zone *zone)
2363{
Rafael J. Wysockif623f0d2006-09-25 23:32:49 -07002364 unsigned long pfn, max_zone_pfn;
2365 unsigned long flags;
Mel Gorman7aeb09f2014-06-04 16:10:21 -07002366 unsigned int order, t;
Geliang Tang86760a22016-01-14 15:20:33 -08002367 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002368
Xishi Qiu8080fc02013-09-11 14:21:45 -07002369 if (zone_is_empty(zone))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002370 return;
2371
2372 spin_lock_irqsave(&zone->lock, flags);
Rafael J. Wysockif623f0d2006-09-25 23:32:49 -07002373
Cody P Schafer108bcc92013-02-22 16:35:23 -08002374 max_zone_pfn = zone_end_pfn(zone);
Rafael J. Wysockif623f0d2006-09-25 23:32:49 -07002375 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
2376 if (pfn_valid(pfn)) {
Geliang Tang86760a22016-01-14 15:20:33 -08002377 page = pfn_to_page(pfn);
Joonsoo Kimba6b0972016-05-19 17:12:16 -07002378
2379 if (page_zone(page) != zone)
2380 continue;
2381
Rafael J. Wysocki7be98232007-05-06 14:50:42 -07002382 if (!swsusp_page_is_forbidden(page))
2383 swsusp_unset_page_free(page);
Rafael J. Wysockif623f0d2006-09-25 23:32:49 -07002384 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002385
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002386 for_each_migratetype_order(order, t) {
Geliang Tang86760a22016-01-14 15:20:33 -08002387 list_for_each_entry(page,
2388 &zone->free_area[order].free_list[t], lru) {
Rafael J. Wysockif623f0d2006-09-25 23:32:49 -07002389 unsigned long i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002390
Geliang Tang86760a22016-01-14 15:20:33 -08002391 pfn = page_to_pfn(page);
Rafael J. Wysockif623f0d2006-09-25 23:32:49 -07002392 for (i = 0; i < (1UL << order); i++)
Rafael J. Wysocki7be98232007-05-06 14:50:42 -07002393 swsusp_set_page_free(pfn_to_page(pfn + i));
Rafael J. Wysockif623f0d2006-09-25 23:32:49 -07002394 }
Mel Gormanb2a0ac82007-10-16 01:25:48 -07002395 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002396 spin_unlock_irqrestore(&zone->lock, flags);
2397}
Mel Gormane2c55dc2007-10-16 01:25:50 -07002398#endif /* CONFIG_PM */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002399
2400/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002401 * Free a 0-order page
Mel Gormanb745bc82014-06-04 16:10:22 -07002402 * cold == true ? free a cold page : free a hot page
Linus Torvalds1da177e2005-04-16 15:20:36 -07002403 */
Mel Gormanb745bc82014-06-04 16:10:22 -07002404void free_hot_cold_page(struct page *page, bool cold)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002405{
2406 struct zone *zone = page_zone(page);
2407 struct per_cpu_pages *pcp;
2408 unsigned long flags;
Mel Gormandc4b0ca2014-06-04 16:10:17 -07002409 unsigned long pfn = page_to_pfn(page);
Mel Gorman5f8dcc22009-09-21 17:03:19 -07002410 int migratetype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002411
Mel Gorman4db75482016-05-19 17:14:32 -07002412 if (!free_pcp_prepare(page))
Hugh Dickins689bceb2005-11-21 21:32:20 -08002413 return;
2414
Mel Gormandc4b0ca2014-06-04 16:10:17 -07002415 migratetype = get_pfnblock_migratetype(page, pfn);
Vlastimil Babkabb14c2c2015-09-08 15:01:25 -07002416 set_pcppage_migratetype(page, migratetype);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002417 local_irq_save(flags);
Christoph Lameterf8891e52006-06-30 01:55:45 -07002418 __count_vm_event(PGFREE);
Mel Gormanda456f12009-06-16 15:32:08 -07002419
Mel Gorman5f8dcc22009-09-21 17:03:19 -07002420 /*
2421 * We only track unmovable, reclaimable and movable on pcp lists.
2422 * Free ISOLATE pages back to the allocator because they are being
2423 * offlined but treat RESERVE as movable pages so we can get those
2424 * areas back if necessary. Otherwise, we may have to free
2425 * excessively into the page allocator
2426 */
2427 if (migratetype >= MIGRATE_PCPTYPES) {
Minchan Kim194159f2013-02-22 16:33:58 -08002428 if (unlikely(is_migrate_isolate(migratetype))) {
Mel Gormandc4b0ca2014-06-04 16:10:17 -07002429 free_one_page(zone, page, pfn, 0, migratetype);
Mel Gorman5f8dcc22009-09-21 17:03:19 -07002430 goto out;
2431 }
2432 migratetype = MIGRATE_MOVABLE;
2433 }
2434
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09002435 pcp = &this_cpu_ptr(zone->pageset)->pcp;
Mel Gormanb745bc82014-06-04 16:10:22 -07002436 if (!cold)
Mel Gorman5f8dcc22009-09-21 17:03:19 -07002437 list_add(&page->lru, &pcp->lists[migratetype]);
Mel Gormanb745bc82014-06-04 16:10:22 -07002438 else
2439 list_add_tail(&page->lru, &pcp->lists[migratetype]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002440 pcp->count++;
Nick Piggin48db57f2006-01-08 01:00:42 -08002441 if (pcp->count >= pcp->high) {
Jason Low4db0c3c2015-04-15 16:14:08 -07002442 unsigned long batch = READ_ONCE(pcp->batch);
Cody P Schafer998d39c2013-07-03 15:01:32 -07002443 free_pcppages_bulk(zone, batch, pcp);
2444 pcp->count -= batch;
Nick Piggin48db57f2006-01-08 01:00:42 -08002445 }
Mel Gorman5f8dcc22009-09-21 17:03:19 -07002446
2447out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002448 local_irq_restore(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002449}
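/*
 * Illustrative sketch (not part of the upstream file): how an mm-internal
 * caller might hand an order-0 page back through this per-cpu path.
 * free_hot_cold_page() is only visible inside mm/ (mm/internal.h); the
 * helper name below is hypothetical.
 */
static void __maybe_unused free_hot_cold_page_example(void)
{
	struct page *page = alloc_pages(GFP_KERNEL, 0);

	if (!page)
		return;

	/*
	 * cold == false: the page was likely touched recently, so it goes to
	 * the head of the pcp list and will be handed out again soon.
	 */
	free_hot_cold_page(page, false);
}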
2450
Nick Piggin8dfcc9b2006-03-22 00:08:05 -08002451/*
Konstantin Khlebnikovcc598502012-01-10 15:07:04 -08002452 * Free a list of 0-order pages
2453 */
Mel Gormanb745bc82014-06-04 16:10:22 -07002454void free_hot_cold_page_list(struct list_head *list, bool cold)
Konstantin Khlebnikovcc598502012-01-10 15:07:04 -08002455{
2456 struct page *page, *next;
2457
2458 list_for_each_entry_safe(page, next, list, lru) {
Konstantin Khlebnikovb413d482012-01-10 15:07:09 -08002459 trace_mm_page_free_batched(page, cold);
Konstantin Khlebnikovcc598502012-01-10 15:07:04 -08002460 free_hot_cold_page(page, cold);
2461 }
2462}
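/*
 * Illustrative sketch (not part of the upstream file): batch-freeing a
 * private list of order-0 pages, in the style of release_pages()-like
 * callers.  Assumes an mm-internal context; the helper name is hypothetical.
 */
static void __maybe_unused free_page_list_example(void)
{
	LIST_HEAD(pages);
	struct page *page;
	int i;

	for (i = 0; i < 8; i++) {
		page = alloc_pages(GFP_KERNEL | __GFP_NOWARN, 0);
		if (!page)
			break;
		list_add(&page->lru, &pages);
	}

	/* One call drains the whole batch onto the "cold" end of the pcp lists. */
	free_hot_cold_page_list(&pages, true);
}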
2463
2464/*
Nick Piggin8dfcc9b2006-03-22 00:08:05 -08002465 * split_page takes a non-compound higher-order page, and splits it into
2466	 * n (1<<order) sub-pages: page[0..n-1].
2467 * Each sub-page must be freed individually.
2468 *
2469 * Note: this is probably too low level an operation for use in drivers.
2470 * Please consult with lkml before using this in your driver.
2471 */
2472void split_page(struct page *page, unsigned int order)
2473{
2474 int i;
2475
Sasha Levin309381fea2014-01-23 15:52:54 -08002476 VM_BUG_ON_PAGE(PageCompound(page), page);
2477 VM_BUG_ON_PAGE(!page_count(page), page);
Vegard Nossumb1eeab62008-11-25 16:55:53 +01002478
2479#ifdef CONFIG_KMEMCHECK
2480 /*
2481 * Split shadow pages too, because free(page[0]) would
2482 * otherwise free the whole shadow.
2483 */
2484 if (kmemcheck_page_is_tracked(page))
2485 split_page(virt_to_page(page[0].shadow), order);
2486#endif
2487
Joonsoo Kima9627bc2016-07-26 15:23:49 -07002488 for (i = 1; i < (1 << order); i++)
Nick Piggin7835e982006-03-22 00:08:40 -08002489 set_page_refcounted(page + i);
Joonsoo Kima9627bc2016-07-26 15:23:49 -07002490 split_page_owner(page, order);
Nick Piggin8dfcc9b2006-03-22 00:08:05 -08002491}
K. Y. Srinivasan5853ff22013-03-25 15:47:38 -07002492EXPORT_SYMBOL_GPL(split_page);
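/*
 * Illustrative sketch (not part of the upstream file): the split_page()
 * pattern a driver might use when it needs a physically contiguous run of
 * pages but wants to release them one at a time later.  The function name is
 * hypothetical; as the comment above says, real drivers should raise such
 * use on lkml first.
 */
static void __maybe_unused split_page_example(void)
{
	unsigned int order = 2;
	struct page *page = alloc_pages(GFP_KERNEL, order);
	int i;

	if (!page)
		return;

	/* One order-2 allocation becomes four independent order-0 pages. */
	split_page(page, order);

	for (i = 0; i < (1 << order); i++)
		__free_page(page + i);
}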
Nick Piggin8dfcc9b2006-03-22 00:08:05 -08002493
Joonsoo Kim3c605092014-11-13 15:19:21 -08002494int __isolate_free_page(struct page *page, unsigned int order)
Mel Gorman748446b2010-05-24 14:32:27 -07002495{
Mel Gorman748446b2010-05-24 14:32:27 -07002496 unsigned long watermark;
2497 struct zone *zone;
Bartlomiej Zolnierkiewicz2139cbe2012-10-08 16:32:00 -07002498 int mt;
Mel Gorman748446b2010-05-24 14:32:27 -07002499
2500 BUG_ON(!PageBuddy(page));
2501
2502 zone = page_zone(page);
Marek Szyprowski2e30abd2012-12-11 16:02:57 -08002503 mt = get_pageblock_migratetype(page);
Mel Gorman748446b2010-05-24 14:32:27 -07002504
Minchan Kim194159f2013-02-22 16:33:58 -08002505 if (!is_migrate_isolate(mt)) {
Vlastimil Babka8348faf2016-10-07 16:58:00 -07002506 /*
2507 * Obey watermarks as if the page was being allocated. We can
2508 * emulate a high-order watermark check with a raised order-0
2509 * watermark, because we already know our high-order page
2510 * exists.
2511 */
2512 watermark = min_wmark_pages(zone) + (1UL << order);
Vlastimil Babka984fdba2016-10-07 16:57:57 -07002513 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
Marek Szyprowski2e30abd2012-12-11 16:02:57 -08002514 return 0;
2515
Mel Gorman8fb74b92013-01-11 14:32:16 -08002516 __mod_zone_freepage_state(zone, -(1UL << order), mt);
Marek Szyprowski2e30abd2012-12-11 16:02:57 -08002517 }
Mel Gorman748446b2010-05-24 14:32:27 -07002518
2519 /* Remove page from free list */
2520 list_del(&page->lru);
2521 zone->free_area[order].nr_free--;
2522 rmv_page_order(page);
Bartlomiej Zolnierkiewicz2139cbe2012-10-08 16:32:00 -07002523
zhong jiang400bc7f2016-07-28 15:45:07 -07002524 /*
2525	 * Set the pageblock's migratetype to MIGRATE_MOVABLE if the isolated
2526	 * page is at least half of a pageblock.
2527 */
Mel Gorman748446b2010-05-24 14:32:27 -07002528 if (order >= pageblock_order - 1) {
2529 struct page *endpage = page + (1 << order) - 1;
Michal Nazarewicz47118af2011-12-29 13:09:50 +01002530 for (; page < endpage; page += pageblock_nr_pages) {
2531 int mt = get_pageblock_migratetype(page);
Minchan Kim194159f2013-02-22 16:33:58 -08002532 if (!is_migrate_isolate(mt) && !is_migrate_cma(mt))
Michal Nazarewicz47118af2011-12-29 13:09:50 +01002533 set_pageblock_migratetype(page,
2534 MIGRATE_MOVABLE);
2535 }
Mel Gorman748446b2010-05-24 14:32:27 -07002536 }
2537
Joonsoo Kimf3a14ce2015-07-17 16:24:15 -07002538
Mel Gorman8fb74b92013-01-11 14:32:16 -08002539 return 1UL << order;
Mel Gorman1fb3f8c2012-10-08 16:29:12 -07002540}
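/*
 * Worked example (illustrative, numbers assumed): isolating an order-3 buddy
 * page from a zone whose min watermark is 1024 pages means the order-0 check
 * above must pass against a raised mark of 1024 + (1 << 3) = 1032 free pages,
 * since the high-order page itself is already known to exist.
 */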
2541
2542/*
Mel Gorman060e7412016-05-19 17:13:27 -07002543 * Update NUMA hit/miss statistics
2544 *
2545 * Must be called with interrupts disabled.
2546 *
2547 * When __GFP_OTHER_NODE is set assume the node of the preferred
2548 * zone is the local node. This is useful for daemons who allocate
2549 * memory on behalf of other processes.
2550 */
2551static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
2552 gfp_t flags)
2553{
2554#ifdef CONFIG_NUMA
2555 int local_nid = numa_node_id();
2556 enum zone_stat_item local_stat = NUMA_LOCAL;
2557
2558 if (unlikely(flags & __GFP_OTHER_NODE)) {
2559 local_stat = NUMA_OTHER;
2560 local_nid = preferred_zone->node;
2561 }
2562
2563 if (z->node == local_nid) {
2564 __inc_zone_state(z, NUMA_HIT);
2565 __inc_zone_state(z, local_stat);
2566 } else {
2567 __inc_zone_state(z, NUMA_MISS);
2568 __inc_zone_state(preferred_zone, NUMA_FOREIGN);
2569 }
2570#endif
2571}
2572
2573/*
Vlastimil Babka75379192015-02-11 15:25:38 -08002574 * Allocate a page from the given zone. Use pcplists for order-0 allocations.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002575 */
Mel Gorman0a15c3e2009-06-16 15:32:05 -07002576static inline
2577struct page *buffered_rmqueue(struct zone *preferred_zone,
Mel Gorman7aeb09f2014-06-04 16:10:21 -07002578 struct zone *zone, unsigned int order,
Mel Gormanc6038442016-05-19 17:13:38 -07002579 gfp_t gfp_flags, unsigned int alloc_flags,
2580 int migratetype)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002581{
2582 unsigned long flags;
Hugh Dickins689bceb2005-11-21 21:32:20 -08002583 struct page *page;
Mel Gormanb745bc82014-06-04 16:10:22 -07002584 bool cold = ((gfp_flags & __GFP_COLD) != 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002585
Nick Piggin48db57f2006-01-08 01:00:42 -08002586 if (likely(order == 0)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002587 struct per_cpu_pages *pcp;
Mel Gorman5f8dcc22009-09-21 17:03:19 -07002588 struct list_head *list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002589
Linus Torvalds1da177e2005-04-16 15:20:36 -07002590 local_irq_save(flags);
Mel Gorman479f8542016-05-19 17:14:35 -07002591 do {
2592 pcp = &this_cpu_ptr(zone->pageset)->pcp;
2593 list = &pcp->lists[migratetype];
2594 if (list_empty(list)) {
2595 pcp->count += rmqueue_bulk(zone, 0,
2596 pcp->batch, list,
2597 migratetype, cold);
2598 if (unlikely(list_empty(list)))
2599 goto failed;
2600 }
Mel Gormanb92a6ed2007-10-16 01:25:50 -07002601
Mel Gorman479f8542016-05-19 17:14:35 -07002602 if (cold)
2603 page = list_last_entry(list, struct page, lru);
2604 else
2605 page = list_first_entry(list, struct page, lru);
Mel Gorman5f8dcc22009-09-21 17:03:19 -07002606
Vlastimil Babka83b93552016-06-03 14:55:52 -07002607 list_del(&page->lru);
2608 pcp->count--;
2609
2610 } while (check_new_pcp(page));
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08002611 } else {
Michal Hocko0f352e52016-03-17 14:19:32 -07002612 /*
2613 * We most definitely don't want callers attempting to
2614 * allocate greater than order-1 page units with __GFP_NOFAIL.
2615 */
2616 WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002617 spin_lock_irqsave(&zone->lock, flags);
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002618
Mel Gorman479f8542016-05-19 17:14:35 -07002619 do {
2620 page = NULL;
2621 if (alloc_flags & ALLOC_HARDER) {
2622 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
2623 if (page)
2624 trace_mm_page_alloc_zone_locked(page, order, migratetype);
2625 }
2626 if (!page)
2627 page = __rmqueue(zone, order, migratetype);
2628 } while (page && check_new_pages(page, order));
Nick Piggina74609f2006-01-06 00:11:20 -08002629 spin_unlock(&zone->lock);
2630 if (!page)
2631 goto failed;
Bartlomiej Zolnierkiewiczd1ce7492012-10-08 16:32:02 -07002632 __mod_zone_freepage_state(zone, -(1 << order),
Vlastimil Babkabb14c2c2015-09-08 15:01:25 -07002633 get_pcppage_migratetype(page));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002634 }
2635
Mel Gorman16709d12016-07-28 15:46:56 -07002636 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
Andi Kleen78afd562011-03-22 16:33:12 -07002637 zone_statistics(preferred_zone, zone, gfp_flags);
Nick Piggina74609f2006-01-06 00:11:20 -08002638 local_irq_restore(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002639
Sasha Levin309381fea2014-01-23 15:52:54 -08002640 VM_BUG_ON_PAGE(bad_range(zone, page), page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002641 return page;
Nick Piggina74609f2006-01-06 00:11:20 -08002642
2643failed:
2644 local_irq_restore(flags);
Nick Piggina74609f2006-01-06 00:11:20 -08002645 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002646}
2647
Akinobu Mita933e3122006-12-08 02:39:45 -08002648#ifdef CONFIG_FAIL_PAGE_ALLOC
2649
Akinobu Mitab2588c42011-07-26 16:09:03 -07002650static struct {
Akinobu Mita933e3122006-12-08 02:39:45 -08002651 struct fault_attr attr;
2652
Viresh Kumar621a5f72015-09-26 15:04:07 -07002653 bool ignore_gfp_highmem;
Mel Gorman71baba42015-11-06 16:28:28 -08002654 bool ignore_gfp_reclaim;
Akinobu Mita54114992007-07-15 23:40:23 -07002655 u32 min_order;
Akinobu Mita933e3122006-12-08 02:39:45 -08002656} fail_page_alloc = {
2657 .attr = FAULT_ATTR_INITIALIZER,
Mel Gorman71baba42015-11-06 16:28:28 -08002658 .ignore_gfp_reclaim = true,
Viresh Kumar621a5f72015-09-26 15:04:07 -07002659 .ignore_gfp_highmem = true,
Akinobu Mita54114992007-07-15 23:40:23 -07002660 .min_order = 1,
Akinobu Mita933e3122006-12-08 02:39:45 -08002661};
2662
2663static int __init setup_fail_page_alloc(char *str)
2664{
2665 return setup_fault_attr(&fail_page_alloc.attr, str);
2666}
2667__setup("fail_page_alloc=", setup_fail_page_alloc);
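/*
 * Usage note (illustrative, values assumed): the generic fault-attr format
 * accepted here is "fail_page_alloc=<interval>,<probability>,<space>,<times>",
 * e.g. booting with fail_page_alloc=1,10,0,-1 fails roughly 10% of eligible
 * allocations with no expiry; the remaining knobs appear under
 * /sys/kernel/debug/fail_page_alloc/ when CONFIG_FAULT_INJECTION_DEBUG_FS is
 * enabled (see fail_page_alloc_debugfs() below).
 */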
2668
Gavin Shandeaf3862012-07-31 16:41:51 -07002669static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
Akinobu Mita933e3122006-12-08 02:39:45 -08002670{
Akinobu Mita54114992007-07-15 23:40:23 -07002671 if (order < fail_page_alloc.min_order)
Gavin Shandeaf3862012-07-31 16:41:51 -07002672 return false;
Akinobu Mita933e3122006-12-08 02:39:45 -08002673 if (gfp_mask & __GFP_NOFAIL)
Gavin Shandeaf3862012-07-31 16:41:51 -07002674 return false;
Akinobu Mita933e3122006-12-08 02:39:45 -08002675 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
Gavin Shandeaf3862012-07-31 16:41:51 -07002676 return false;
Mel Gorman71baba42015-11-06 16:28:28 -08002677 if (fail_page_alloc.ignore_gfp_reclaim &&
2678 (gfp_mask & __GFP_DIRECT_RECLAIM))
Gavin Shandeaf3862012-07-31 16:41:51 -07002679 return false;
Akinobu Mita933e3122006-12-08 02:39:45 -08002680
2681 return should_fail(&fail_page_alloc.attr, 1 << order);
2682}
2683
2684#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
2685
2686static int __init fail_page_alloc_debugfs(void)
2687{
Al Virof4ae40a2011-07-24 04:33:43 -04002688 umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
Akinobu Mita933e3122006-12-08 02:39:45 -08002689 struct dentry *dir;
Akinobu Mita933e3122006-12-08 02:39:45 -08002690
Akinobu Mitadd48c082011-08-03 16:21:01 -07002691 dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
2692 &fail_page_alloc.attr);
2693 if (IS_ERR(dir))
2694 return PTR_ERR(dir);
Akinobu Mita933e3122006-12-08 02:39:45 -08002695
Akinobu Mitab2588c42011-07-26 16:09:03 -07002696 if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
Mel Gorman71baba42015-11-06 16:28:28 -08002697 &fail_page_alloc.ignore_gfp_reclaim))
Akinobu Mitab2588c42011-07-26 16:09:03 -07002698 goto fail;
2699 if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
2700 &fail_page_alloc.ignore_gfp_highmem))
2701 goto fail;
2702 if (!debugfs_create_u32("min-order", mode, dir,
2703 &fail_page_alloc.min_order))
2704 goto fail;
Akinobu Mita933e3122006-12-08 02:39:45 -08002705
Akinobu Mitab2588c42011-07-26 16:09:03 -07002706 return 0;
2707fail:
Akinobu Mitadd48c082011-08-03 16:21:01 -07002708 debugfs_remove_recursive(dir);
Akinobu Mita933e3122006-12-08 02:39:45 -08002709
Akinobu Mitab2588c42011-07-26 16:09:03 -07002710 return -ENOMEM;
Akinobu Mita933e3122006-12-08 02:39:45 -08002711}
2712
2713late_initcall(fail_page_alloc_debugfs);
2714
2715#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
2716
2717#else /* CONFIG_FAIL_PAGE_ALLOC */
2718
Gavin Shandeaf3862012-07-31 16:41:51 -07002719static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
Akinobu Mita933e3122006-12-08 02:39:45 -08002720{
Gavin Shandeaf3862012-07-31 16:41:51 -07002721 return false;
Akinobu Mita933e3122006-12-08 02:39:45 -08002722}
2723
2724#endif /* CONFIG_FAIL_PAGE_ALLOC */
2725
Linus Torvalds1da177e2005-04-16 15:20:36 -07002726/*
Mel Gorman97a16fc2015-11-06 16:28:40 -08002727 * Return true if free base pages are above 'mark'. For high-order checks it
2728	 * will return true if the order-0 watermark is reached and there is at least
2729 * one free page of a suitable size. Checking now avoids taking the zone lock
2730 * to check in the allocation paths if no pages are free.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002731 */
Michal Hocko86a294a2016-05-20 16:57:12 -07002732bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
2733 int classzone_idx, unsigned int alloc_flags,
2734 long free_pages)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002735{
Christoph Lameterd23ad422007-02-10 01:43:02 -08002736 long min = mark;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002737 int o;
Mel Gormanc6038442016-05-19 17:13:38 -07002738 const bool alloc_harder = (alloc_flags & ALLOC_HARDER);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002739
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002740 /* free_pages may go negative - that's OK */
Michal Hockodf0a6da2012-01-10 15:08:02 -08002741 free_pages -= (1 << order) - 1;
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002742
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08002743 if (alloc_flags & ALLOC_HIGH)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002744 min -= min / 2;
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002745
2746 /*
2747 * If the caller does not have rights to ALLOC_HARDER then subtract
2748 * the high-atomic reserves. This will over-estimate the size of the
2749 * atomic reserve but it avoids a search.
2750 */
Mel Gorman97a16fc2015-11-06 16:28:40 -08002751 if (likely(!alloc_harder))
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002752 free_pages -= z->nr_reserved_highatomic;
2753 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002754 min -= min / 4;
Mel Gormane2b19192015-11-06 16:28:09 -08002755
Bartlomiej Zolnierkiewiczd95ea5d2012-10-08 16:32:05 -07002756#ifdef CONFIG_CMA
2757 /* If allocation can't use CMA areas don't use free CMA pages */
2758 if (!(alloc_flags & ALLOC_CMA))
Mel Gorman97a16fc2015-11-06 16:28:40 -08002759 free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
Bartlomiej Zolnierkiewiczd95ea5d2012-10-08 16:32:05 -07002760#endif
Tomasz Stanislawski026b0812013-06-12 14:05:02 -07002761
Mel Gorman97a16fc2015-11-06 16:28:40 -08002762 /*
2763 * Check watermarks for an order-0 allocation request. If these
2764 * are not met, then a high-order request also cannot go ahead
2765 * even if a suitable page happened to be free.
2766 */
2767 if (free_pages <= min + z->lowmem_reserve[classzone_idx])
Mel Gorman88f5acf2011-01-13 15:45:41 -08002768 return false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002769
Mel Gorman97a16fc2015-11-06 16:28:40 -08002770 /* If this is an order-0 request then the watermark is fine */
2771 if (!order)
2772 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002773
Mel Gorman97a16fc2015-11-06 16:28:40 -08002774 /* For a high-order request, check at least one suitable page is free */
2775 for (o = order; o < MAX_ORDER; o++) {
2776 struct free_area *area = &z->free_area[o];
2777 int mt;
2778
2779 if (!area->nr_free)
2780 continue;
2781
2782 if (alloc_harder)
2783 return true;
2784
2785 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
2786 if (!list_empty(&area->free_list[mt]))
2787 return true;
2788 }
2789
2790#ifdef CONFIG_CMA
2791 if ((alloc_flags & ALLOC_CMA) &&
2792 !list_empty(&area->free_list[MIGRATE_CMA])) {
2793 return true;
2794 }
2795#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002796 }
Mel Gorman97a16fc2015-11-06 16:28:40 -08002797 return false;
Mel Gorman88f5acf2011-01-13 15:45:41 -08002798}
2799
Mel Gorman7aeb09f2014-06-04 16:10:21 -07002800bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
Mel Gormanc6038442016-05-19 17:13:38 -07002801 int classzone_idx, unsigned int alloc_flags)
Mel Gorman88f5acf2011-01-13 15:45:41 -08002802{
2803 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
2804 zone_page_state(z, NR_FREE_PAGES));
2805}
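/*
 * Illustrative sketch (not part of the upstream file): a kswapd-style sweep
 * asking whether each populated zone can still satisfy an order-0 request at
 * its low watermark with no special alloc_flags.  The helper name is
 * hypothetical and classzone_idx is simplified to 0.
 */
static void __maybe_unused zone_watermark_example(void)
{
	struct zone *zone;

	for_each_populated_zone(zone) {
		if (!zone_watermark_ok(zone, 0, low_wmark_pages(zone), 0, 0))
			pr_info("%s is below its low watermark\n", zone->name);
	}
}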
2806
Mel Gorman48ee5f32016-05-19 17:14:07 -07002807static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
2808 unsigned long mark, int classzone_idx, unsigned int alloc_flags)
2809{
2810 long free_pages = zone_page_state(z, NR_FREE_PAGES);
2811 long cma_pages = 0;
2812
2813#ifdef CONFIG_CMA
2814 /* If allocation can't use CMA areas don't use free CMA pages */
2815 if (!(alloc_flags & ALLOC_CMA))
2816 cma_pages = zone_page_state(z, NR_FREE_CMA_PAGES);
2817#endif
2818
2819 /*
2820 * Fast check for order-0 only. If this fails then the reserves
2821 * need to be calculated. There is a corner case where the check
2822	 * passes but only the high-order atomic reserves are free. If
2823 * the caller is !atomic then it'll uselessly search the free
2824 * list. That corner case is then slower but it is harmless.
2825 */
2826 if (!order && (free_pages - cma_pages) > mark + z->lowmem_reserve[classzone_idx])
2827 return true;
2828
2829 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
2830 free_pages);
2831}
2832
Mel Gorman7aeb09f2014-06-04 16:10:21 -07002833bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
Mel Gormane2b19192015-11-06 16:28:09 -08002834 unsigned long mark, int classzone_idx)
Mel Gorman88f5acf2011-01-13 15:45:41 -08002835{
2836 long free_pages = zone_page_state(z, NR_FREE_PAGES);
2837
2838 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
2839 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
2840
Mel Gormane2b19192015-11-06 16:28:09 -08002841 return __zone_watermark_ok(z, order, mark, classzone_idx, 0,
Mel Gorman88f5acf2011-01-13 15:45:41 -08002842 free_pages);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002843}
2844
Paul Jackson9276b1bc2006-12-06 20:31:48 -08002845#ifdef CONFIG_NUMA
David Rientjes957f8222012-10-08 16:33:24 -07002846static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
2847{
Mel Gorman5f7a75a2014-06-04 16:07:15 -07002848 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <
2849 RECLAIM_DISTANCE;
David Rientjes957f8222012-10-08 16:33:24 -07002850}
Paul Jackson9276b1bc2006-12-06 20:31:48 -08002851#else /* CONFIG_NUMA */
David Rientjes957f8222012-10-08 16:33:24 -07002852static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
2853{
2854 return true;
2855}
Paul Jackson9276b1bc2006-12-06 20:31:48 -08002856#endif /* CONFIG_NUMA */
2857
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08002858/*
Paul Jackson0798e512006-12-06 20:31:38 -08002859 * get_page_from_freelist goes through the zonelist trying to allocate
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08002860 * a page.
2861 */
2862static struct page *
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002863get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
2864 const struct alloc_context *ac)
Martin Hicks753ee722005-06-21 17:14:41 -07002865{
Mel Gormanc33d6c02016-05-19 17:14:10 -07002866 struct zoneref *z = ac->preferred_zoneref;
Mel Gorman5117f452009-06-16 15:31:59 -07002867 struct zone *zone;
Mel Gorman3b8c0be2016-07-28 15:46:53 -07002868 struct pglist_data *last_pgdat_dirty_limit = NULL;
2869
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08002870 /*
Paul Jackson9276b1bc2006-12-06 20:31:48 -08002871	 * Scan the zonelist, looking for a zone with enough free pages.
Vladimir Davydov344736f2014-10-20 15:50:30 +04002872 * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08002873 */
Mel Gormanc33d6c02016-05-19 17:14:10 -07002874 for_next_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
Vlastimil Babkaa9263752015-02-11 15:25:41 -08002875 ac->nodemask) {
Mel Gormanbe06af02016-05-19 17:13:47 -07002876 struct page *page;
Johannes Weinere085dbc2013-09-11 14:20:46 -07002877 unsigned long mark;
2878
Mel Gorman664eedd2014-06-04 16:10:08 -07002879 if (cpusets_enabled() &&
2880 (alloc_flags & ALLOC_CPUSET) &&
Vlastimil Babka002f2902016-05-19 17:14:30 -07002881 !__cpuset_zone_allowed(zone, gfp_mask))
Mel Gormancd38b112011-07-25 17:12:29 -07002882 continue;
Johannes Weinera756cf52012-01-10 15:07:49 -08002883 /*
2884 * When allocating a page cache page for writing, we
Mel Gorman281e3722016-07-28 15:46:11 -07002885 * want to get it from a node that is within its dirty
2886 * limit, such that no single node holds more than its
Johannes Weinera756cf52012-01-10 15:07:49 -08002887 * proportional share of globally allowed dirty pages.
Mel Gorman281e3722016-07-28 15:46:11 -07002888 * The dirty limits take into account the node's
Johannes Weinera756cf52012-01-10 15:07:49 -08002889 * lowmem reserves and high watermark so that kswapd
2890 * should be able to balance it without having to
2891 * write pages from its LRU list.
2892 *
Johannes Weinera756cf52012-01-10 15:07:49 -08002893 * XXX: For now, allow allocations to potentially
Mel Gorman281e3722016-07-28 15:46:11 -07002894 * exceed the per-node dirty limit in the slowpath
Mel Gormanc9ab0c42015-11-06 16:28:12 -08002895 * (spread_dirty_pages unset) before going into reclaim,
Johannes Weinera756cf52012-01-10 15:07:49 -08002896 * which is important when on a NUMA setup the allowed
Mel Gorman281e3722016-07-28 15:46:11 -07002897 * nodes are together not big enough to reach the
Johannes Weinera756cf52012-01-10 15:07:49 -08002898 * global limit. The proper fix for these situations
Mel Gorman281e3722016-07-28 15:46:11 -07002899 * will require awareness of nodes in the
Johannes Weinera756cf52012-01-10 15:07:49 -08002900 * dirty-throttling and the flusher threads.
2901 */
Mel Gorman3b8c0be2016-07-28 15:46:53 -07002902 if (ac->spread_dirty_pages) {
2903 if (last_pgdat_dirty_limit == zone->zone_pgdat)
2904 continue;
2905
2906 if (!node_dirty_ok(zone->zone_pgdat)) {
2907 last_pgdat_dirty_limit = zone->zone_pgdat;
2908 continue;
2909 }
2910 }
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08002911
Johannes Weinere085dbc2013-09-11 14:20:46 -07002912 mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
Mel Gorman48ee5f32016-05-19 17:14:07 -07002913 if (!zone_watermark_fast(zone, order, mark,
Mel Gorman93ea9962016-05-19 17:14:13 -07002914 ac_classzone_idx(ac), alloc_flags)) {
Mel Gormanfa5e0842009-06-16 15:33:22 -07002915 int ret;
2916
Mel Gorman5dab2912014-06-04 16:10:14 -07002917 /* Checked here to keep the fast path fast */
2918 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
2919 if (alloc_flags & ALLOC_NO_WATERMARKS)
2920 goto try_this_zone;
2921
Mel Gormana5f5f912016-07-28 15:46:32 -07002922 if (node_reclaim_mode == 0 ||
Mel Gormanc33d6c02016-05-19 17:14:10 -07002923 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
Mel Gormancd38b112011-07-25 17:12:29 -07002924 continue;
2925
Mel Gormana5f5f912016-07-28 15:46:32 -07002926 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
Mel Gormanfa5e0842009-06-16 15:33:22 -07002927 switch (ret) {
Mel Gormana5f5f912016-07-28 15:46:32 -07002928 case NODE_RECLAIM_NOSCAN:
Mel Gormanfa5e0842009-06-16 15:33:22 -07002929 /* did not scan */
Mel Gormancd38b112011-07-25 17:12:29 -07002930 continue;
Mel Gormana5f5f912016-07-28 15:46:32 -07002931 case NODE_RECLAIM_FULL:
Mel Gormanfa5e0842009-06-16 15:33:22 -07002932 /* scanned but unreclaimable */
Mel Gormancd38b112011-07-25 17:12:29 -07002933 continue;
Mel Gormanfa5e0842009-06-16 15:33:22 -07002934 default:
2935 /* did we reclaim enough */
Mel Gormanfed27192013-04-29 15:07:57 -07002936 if (zone_watermark_ok(zone, order, mark,
Mel Gorman93ea9962016-05-19 17:14:13 -07002937 ac_classzone_idx(ac), alloc_flags))
Mel Gormanfed27192013-04-29 15:07:57 -07002938 goto try_this_zone;
2939
Mel Gormanfed27192013-04-29 15:07:57 -07002940 continue;
Paul Jackson0798e512006-12-06 20:31:38 -08002941 }
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08002942 }
2943
Mel Gormanfa5e0842009-06-16 15:33:22 -07002944try_this_zone:
Mel Gormanc33d6c02016-05-19 17:14:10 -07002945 page = buffered_rmqueue(ac->preferred_zoneref->zone, zone, order,
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002946 gfp_mask, alloc_flags, ac->migratetype);
Vlastimil Babka75379192015-02-11 15:25:38 -08002947 if (page) {
Mel Gorman479f8542016-05-19 17:14:35 -07002948 prep_new_page(page, order, gfp_mask, alloc_flags);
Mel Gorman0aaa29a2015-11-06 16:28:37 -08002949
2950 /*
2951 * If this is a high-order atomic allocation then check
2952 * if the pageblock should be reserved for the future
2953 */
2954 if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
2955 reserve_highatomic_pageblock(page, zone, order);
2956
Vlastimil Babka75379192015-02-11 15:25:38 -08002957 return page;
2958 }
Mel Gorman54a6eb52008-04-28 02:12:16 -07002959 }
Paul Jackson9276b1bc2006-12-06 20:31:48 -08002960
Mel Gorman4ffeaf32014-08-06 16:07:22 -07002961 return NULL;
Martin Hicks753ee722005-06-21 17:14:41 -07002962}
2963
David Rientjes29423e772011-03-22 16:30:47 -07002964/*
2965 * Large machines with many possible nodes should not always dump per-node
2966 * meminfo in irq context.
2967 */
2968static inline bool should_suppress_show_mem(void)
2969{
2970 bool ret = false;
2971
2972#if NODES_SHIFT > 8
2973 ret = in_interrupt();
2974#endif
2975 return ret;
2976}
2977
Dave Hansena238ab52011-05-24 17:12:16 -07002978static DEFINE_RATELIMIT_STATE(nopage_rs,
2979 DEFAULT_RATELIMIT_INTERVAL,
2980 DEFAULT_RATELIMIT_BURST);
2981
Michal Hocko7877cdc2016-10-07 17:01:55 -07002982void warn_alloc(gfp_t gfp_mask, const char *fmt, ...)
Dave Hansena238ab52011-05-24 17:12:16 -07002983{
Dave Hansena238ab52011-05-24 17:12:16 -07002984 unsigned int filter = SHOW_MEM_FILTER_NODES;
Michal Hocko7877cdc2016-10-07 17:01:55 -07002985 struct va_format vaf;
2986 va_list args;
Dave Hansena238ab52011-05-24 17:12:16 -07002987
Stanislaw Gruszkac0a32fc2012-01-10 15:07:28 -08002988 if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) ||
2989 debug_guardpage_minorder() > 0)
Dave Hansena238ab52011-05-24 17:12:16 -07002990 return;
2991
2992 /*
2993 * This documents exceptions given to allocations in certain
2994 * contexts that are allowed to allocate outside current's set
2995 * of allowed nodes.
2996 */
2997 if (!(gfp_mask & __GFP_NOMEMALLOC))
2998 if (test_thread_flag(TIF_MEMDIE) ||
2999 (current->flags & (PF_MEMALLOC | PF_EXITING)))
3000 filter &= ~SHOW_MEM_FILTER_NODES;
Mel Gormand0164ad2015-11-06 16:28:21 -08003001 if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
Dave Hansena238ab52011-05-24 17:12:16 -07003002 filter &= ~SHOW_MEM_FILTER_NODES;
3003
Michal Hocko7877cdc2016-10-07 17:01:55 -07003004 pr_warn("%s: ", current->comm);
Joe Perches3ee9a4f2011-10-31 17:08:35 -07003005
Michal Hocko7877cdc2016-10-07 17:01:55 -07003006 va_start(args, fmt);
3007 vaf.fmt = fmt;
3008 vaf.va = &args;
3009 pr_cont("%pV", &vaf);
3010 va_end(args);
Joe Perches3ee9a4f2011-10-31 17:08:35 -07003011
Michal Hocko7877cdc2016-10-07 17:01:55 -07003012 pr_cont(", mode:%#x(%pGg)\n", gfp_mask, &gfp_mask);
Joe Perches3ee9a4f2011-10-31 17:08:35 -07003013
Dave Hansena238ab52011-05-24 17:12:16 -07003014 dump_stack();
3015 if (!should_suppress_show_mem())
3016 show_mem(filter);
3017}
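/*
 * Illustrative sketch (not part of the upstream file): how a failing
 * allocation path can emit a rate-limited report through warn_alloc().  The
 * wrapper name and message text are hypothetical.
 */
static void __maybe_unused warn_alloc_example(gfp_t gfp_mask, unsigned int order)
{
	warn_alloc(gfp_mask, "example: order-%u allocation failed", order);
}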
3018
Mel Gorman11e33f62009-06-16 15:31:57 -07003019static inline struct page *
3020__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003021 const struct alloc_context *ac, unsigned long *did_some_progress)
Mel Gorman11e33f62009-06-16 15:31:57 -07003022{
David Rientjes6e0fc462015-09-08 15:00:36 -07003023 struct oom_control oc = {
3024 .zonelist = ac->zonelist,
3025 .nodemask = ac->nodemask,
Vladimir Davydov2a966b72016-07-26 15:22:33 -07003026 .memcg = NULL,
David Rientjes6e0fc462015-09-08 15:00:36 -07003027 .gfp_mask = gfp_mask,
3028 .order = order,
David Rientjes6e0fc462015-09-08 15:00:36 -07003029 };
Linus Torvalds1da177e2005-04-16 15:20:36 -07003030 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003031
Johannes Weiner9879de72015-01-26 12:58:32 -08003032 *did_some_progress = 0;
3033
Johannes Weiner9879de72015-01-26 12:58:32 -08003034 /*
Johannes Weinerdc564012015-06-24 16:57:19 -07003035 * Acquire the oom lock. If that fails, somebody else is
3036 * making progress for us.
Johannes Weiner9879de72015-01-26 12:58:32 -08003037 */
Johannes Weinerdc564012015-06-24 16:57:19 -07003038 if (!mutex_trylock(&oom_lock)) {
Johannes Weiner9879de72015-01-26 12:58:32 -08003039 *did_some_progress = 1;
Mel Gorman11e33f62009-06-16 15:31:57 -07003040 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003041 return NULL;
3042 }
Jens Axboe6b1de912005-11-17 21:35:02 +01003043
Mel Gorman11e33f62009-06-16 15:31:57 -07003044 /*
3045 * Go through the zonelist yet one more time, keep very high watermark
3046 * here, this is only to catch a parallel oom killing, we must fail if
3047 * we're still under heavy pressure.
3048 */
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003049 page = get_page_from_freelist(gfp_mask | __GFP_HARDWALL, order,
3050 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08003051 if (page)
Mel Gorman11e33f62009-06-16 15:31:57 -07003052 goto out;
3053
KAMEZAWA Hiroyuki4365a562009-12-15 16:45:33 -08003054 if (!(gfp_mask & __GFP_NOFAIL)) {
Johannes Weiner9879de72015-01-26 12:58:32 -08003055 /* Coredumps can quickly deplete all memory reserves */
3056 if (current->flags & PF_DUMPCORE)
3057 goto out;
KAMEZAWA Hiroyuki4365a562009-12-15 16:45:33 -08003058 /* The OOM killer will not help higher order allocs */
3059 if (order > PAGE_ALLOC_COSTLY_ORDER)
3060 goto out;
David Rientjes03668b32010-08-09 17:18:54 -07003061 /* The OOM killer does not needlessly kill tasks for lowmem */
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003062 if (ac->high_zoneidx < ZONE_NORMAL)
David Rientjes03668b32010-08-09 17:18:54 -07003063 goto out;
Johannes Weiner90839052015-06-24 16:57:21 -07003064 if (pm_suspended_storage())
3065 goto out;
Michal Hocko3da88fb2016-05-19 17:13:09 -07003066 /*
3067 * XXX: GFP_NOFS allocations should rather fail than rely on
3068	 * other requests to make forward progress.
3069 * We are in an unfortunate situation where out_of_memory cannot
3070 * do much for this context but let's try it to at least get
3071 * access to memory reserved if the current task is killed (see
3072 * out_of_memory). Once filesystems are ready to handle allocation
3073 * failures more gracefully we should just bail out here.
3074 */
3075
David Rientjes4167e9b2015-04-14 15:46:55 -07003076 /* The OOM killer may not free memory on a specific node */
KAMEZAWA Hiroyuki4365a562009-12-15 16:45:33 -08003077 if (gfp_mask & __GFP_THISNODE)
3078 goto out;
3079 }
Mel Gorman11e33f62009-06-16 15:31:57 -07003080 /* Exhausted what can be done so it's blamo time */
Michal Hocko5020e282016-01-14 15:20:36 -08003081 if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
Michal Hockoc32b3cb2015-02-11 15:26:24 -08003082 *did_some_progress = 1;
Michal Hocko5020e282016-01-14 15:20:36 -08003083
3084 if (gfp_mask & __GFP_NOFAIL) {
3085 page = get_page_from_freelist(gfp_mask, order,
3086 ALLOC_NO_WATERMARKS|ALLOC_CPUSET, ac);
3087 /*
3088 * fallback to ignore cpuset restriction if our nodes
3089 * are depleted
3090 */
3091 if (!page)
3092 page = get_page_from_freelist(gfp_mask, order,
3093 ALLOC_NO_WATERMARKS, ac);
3094 }
3095 }
Mel Gorman11e33f62009-06-16 15:31:57 -07003096out:
Johannes Weinerdc564012015-06-24 16:57:19 -07003097 mutex_unlock(&oom_lock);
Mel Gorman11e33f62009-06-16 15:31:57 -07003098 return page;
3099}
3100
Michal Hocko33c2d212016-05-20 16:57:06 -07003101/*
3102 * Maximum number of compaction retries with progress before the OOM
3103 * killer is considered the only way to move forward.
3104 */
3105#define MAX_COMPACT_RETRIES 16
3106
Mel Gorman56de7262010-05-24 14:32:30 -07003107#ifdef CONFIG_COMPACTION
3108/* Try memory compaction for high-order allocations before reclaim */
3109static struct page *
3110__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
Mel Gormanc6038442016-05-19 17:13:38 -07003111 unsigned int alloc_flags, const struct alloc_context *ac,
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07003112 enum compact_priority prio, enum compact_result *compact_result)
Mel Gorman56de7262010-05-24 14:32:30 -07003113{
Vlastimil Babka98dd3b42014-10-09 15:27:04 -07003114 struct page *page;
Vlastimil Babka53853e22014-10-09 15:27:02 -07003115
Mel Gorman66199712012-01-12 17:19:41 -08003116 if (!order)
Mel Gorman56de7262010-05-24 14:32:30 -07003117 return NULL;
3118
Andrew Mortonc06b1fc2011-01-13 15:47:32 -08003119 current->flags |= PF_MEMALLOC;
Michal Hockoc5d01d02016-05-20 16:56:53 -07003120 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
Vlastimil Babkac3486f52016-07-28 15:49:30 -07003121 prio);
Andrew Mortonc06b1fc2011-01-13 15:47:32 -08003122 current->flags &= ~PF_MEMALLOC;
Mel Gorman56de7262010-05-24 14:32:30 -07003123
Michal Hockoc5d01d02016-05-20 16:56:53 -07003124 if (*compact_result <= COMPACT_INACTIVE)
Vlastimil Babka98dd3b42014-10-09 15:27:04 -07003125 return NULL;
Mel Gorman56de7262010-05-24 14:32:30 -07003126
Vlastimil Babka98dd3b42014-10-09 15:27:04 -07003127 /*
3128 * At least in one zone compaction wasn't deferred or skipped, so let's
3129 * count a compaction stall
3130 */
3131 count_vm_event(COMPACTSTALL);
3132
Vlastimil Babka31a6c192016-07-28 15:49:13 -07003133 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
Vlastimil Babka98dd3b42014-10-09 15:27:04 -07003134
3135 if (page) {
3136 struct zone *zone = page_zone(page);
3137
3138 zone->compact_blockskip_flush = false;
3139 compaction_defer_reset(zone, order, true);
3140 count_vm_event(COMPACTSUCCESS);
3141 return page;
3142 }
3143
3144 /*
Vlastimil Babka98dd3b42014-10-09 15:27:04 -07003145	 * It's bad if a compaction run occurs and fails. The most likely reason
3146 * is that pages exist, but not enough to satisfy watermarks.
3147 */
3148 count_vm_event(COMPACTFAIL);
3149
3150 cond_resched();
3151
Mel Gorman56de7262010-05-24 14:32:30 -07003152 return NULL;
3153}
Michal Hocko33c2d212016-05-20 16:57:06 -07003154
Vlastimil Babka32508452016-10-07 17:00:28 -07003155static inline bool
3156should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
3157 enum compact_result compact_result,
3158 enum compact_priority *compact_priority,
Vlastimil Babkad9436492016-10-07 17:00:31 -07003159 int *compaction_retries)
Vlastimil Babka32508452016-10-07 17:00:28 -07003160{
3161 int max_retries = MAX_COMPACT_RETRIES;
Vlastimil Babkac2033b02016-10-07 17:00:34 -07003162 int min_priority;
Vlastimil Babka32508452016-10-07 17:00:28 -07003163
3164 if (!order)
3165 return false;
3166
Vlastimil Babkad9436492016-10-07 17:00:31 -07003167 if (compaction_made_progress(compact_result))
3168 (*compaction_retries)++;
3169
Vlastimil Babka32508452016-10-07 17:00:28 -07003170 /*
3171	 * compaction considers all the zones as desperately out of memory,
3172 * so it doesn't really make much sense to retry except when the
3173 * failure could be caused by insufficient priority
3174 */
Vlastimil Babkad9436492016-10-07 17:00:31 -07003175 if (compaction_failed(compact_result))
3176 goto check_priority;
Vlastimil Babka32508452016-10-07 17:00:28 -07003177
3178 /*
3179 * make sure the compaction wasn't deferred or didn't bail out early
3180 * due to locks contention before we declare that we should give up.
3181 * But do not retry if the given zonelist is not suitable for
3182 * compaction.
3183 */
3184 if (compaction_withdrawn(compact_result))
3185 return compaction_zonelist_suitable(ac, order, alloc_flags);
3186
3187 /*
3188 * !costly requests are much more important than __GFP_REPEAT
3189	 * costly ones because they are de facto nofail and invoke the OOM
3190	 * killer to move on, while costly ones can fail and users are ready
3191 * to cope with that. 1/4 retries is rather arbitrary but we
3192 * would need much more detailed feedback from compaction to
3193 * make a better decision.
3194 */
3195 if (order > PAGE_ALLOC_COSTLY_ORDER)
3196 max_retries /= 4;
Vlastimil Babkad9436492016-10-07 17:00:31 -07003197 if (*compaction_retries <= max_retries)
Vlastimil Babka32508452016-10-07 17:00:28 -07003198 return true;
3199
Vlastimil Babkad9436492016-10-07 17:00:31 -07003200 /*
3201 * Make sure there are attempts at the highest priority if we exhausted
3202 * all retries or failed at the lower priorities.
3203 */
3204check_priority:
Vlastimil Babkac2033b02016-10-07 17:00:34 -07003205 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
3206 MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
3207 if (*compact_priority > min_priority) {
Vlastimil Babkad9436492016-10-07 17:00:31 -07003208 (*compact_priority)--;
3209 *compaction_retries = 0;
3210 return true;
3211 }
Vlastimil Babka32508452016-10-07 17:00:28 -07003212 return false;
3213}
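/*
 * Worked example (illustrative, numbers assumed): for an order-5 request
 * (above PAGE_ALLOC_COSTLY_ORDER) max_retries shrinks from 16 to 16/4 = 4, so
 * after four compaction rounds that made progress yet still failed, the
 * priority is raised one step (bounded by MIN_COMPACT_COSTLY_PRIORITY) and
 * the retry counter is reset before the allocator finally gives up.
 */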
Mel Gorman56de7262010-05-24 14:32:30 -07003214#else
3215static inline struct page *
3216__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
Mel Gormanc6038442016-05-19 17:13:38 -07003217 unsigned int alloc_flags, const struct alloc_context *ac,
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07003218 enum compact_priority prio, enum compact_result *compact_result)
Mel Gorman56de7262010-05-24 14:32:30 -07003219{
Michal Hocko33c2d212016-05-20 16:57:06 -07003220 *compact_result = COMPACT_SKIPPED;
Mel Gorman56de7262010-05-24 14:32:30 -07003221 return NULL;
3222}
Michal Hocko33c2d212016-05-20 16:57:06 -07003223
3224static inline bool
Michal Hocko86a294a2016-05-20 16:57:12 -07003225should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
3226 enum compact_result compact_result,
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07003227 enum compact_priority *compact_priority,
Vlastimil Babkad9436492016-10-07 17:00:31 -07003228 int *compaction_retries)
Michal Hocko33c2d212016-05-20 16:57:06 -07003229{
Michal Hocko31e49bf2016-05-20 16:57:15 -07003230 struct zone *zone;
3231 struct zoneref *z;
3232
3233 if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
3234 return false;
3235
3236 /*
3237 * There are setups with compaction disabled which would prefer to loop
3238 * inside the allocator rather than hit the oom killer prematurely.
3239	 * Let's give them some hope and keep retrying while the order-0
3240 * watermarks are OK.
3241 */
3242 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
3243 ac->nodemask) {
3244 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
3245 ac_classzone_idx(ac), alloc_flags))
3246 return true;
3247 }
Michal Hocko33c2d212016-05-20 16:57:06 -07003248 return false;
3249}
Vlastimil Babka32508452016-10-07 17:00:28 -07003250#endif /* CONFIG_COMPACTION */
Mel Gorman56de7262010-05-24 14:32:30 -07003251
Marek Szyprowskibba90712012-01-25 12:09:52 +01003252/* Perform direct synchronous page reclaim */
3253static int
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003254__perform_reclaim(gfp_t gfp_mask, unsigned int order,
3255 const struct alloc_context *ac)
Mel Gorman11e33f62009-06-16 15:31:57 -07003256{
Mel Gorman11e33f62009-06-16 15:31:57 -07003257 struct reclaim_state reclaim_state;
Marek Szyprowskibba90712012-01-25 12:09:52 +01003258 int progress;
Mel Gorman11e33f62009-06-16 15:31:57 -07003259
3260 cond_resched();
3261
3262 /* We now go into synchronous reclaim */
3263 cpuset_memory_pressure_bump();
Andrew Mortonc06b1fc2011-01-13 15:47:32 -08003264 current->flags |= PF_MEMALLOC;
Mel Gorman11e33f62009-06-16 15:31:57 -07003265 lockdep_set_current_reclaim_state(gfp_mask);
3266 reclaim_state.reclaimed_slab = 0;
Andrew Mortonc06b1fc2011-01-13 15:47:32 -08003267 current->reclaim_state = &reclaim_state;
Mel Gorman11e33f62009-06-16 15:31:57 -07003268
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003269 progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
3270 ac->nodemask);
Mel Gorman11e33f62009-06-16 15:31:57 -07003271
Andrew Mortonc06b1fc2011-01-13 15:47:32 -08003272 current->reclaim_state = NULL;
Mel Gorman11e33f62009-06-16 15:31:57 -07003273 lockdep_clear_current_reclaim_state();
Andrew Mortonc06b1fc2011-01-13 15:47:32 -08003274 current->flags &= ~PF_MEMALLOC;
Mel Gorman11e33f62009-06-16 15:31:57 -07003275
3276 cond_resched();
3277
Marek Szyprowskibba90712012-01-25 12:09:52 +01003278 return progress;
3279}
3280
3281/* The really slow allocator path where we enter direct reclaim */
3282static inline struct page *
3283__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
Mel Gormanc6038442016-05-19 17:13:38 -07003284 unsigned int alloc_flags, const struct alloc_context *ac,
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003285 unsigned long *did_some_progress)
Marek Szyprowskibba90712012-01-25 12:09:52 +01003286{
3287 struct page *page = NULL;
3288 bool drained = false;
3289
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003290 *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
Mel Gorman9ee493c2010-09-09 16:38:18 -07003291 if (unlikely(!(*did_some_progress)))
3292 return NULL;
Mel Gorman11e33f62009-06-16 15:31:57 -07003293
Mel Gorman9ee493c2010-09-09 16:38:18 -07003294retry:
Vlastimil Babka31a6c192016-07-28 15:49:13 -07003295 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
Mel Gorman9ee493c2010-09-09 16:38:18 -07003296
3297 /*
3298 * If an allocation failed after direct reclaim, it could be because
Mel Gorman0aaa29a2015-11-06 16:28:37 -08003299 * pages are pinned on the per-cpu lists or in high alloc reserves.
3300	 * Shrink them and try again.
Mel Gorman9ee493c2010-09-09 16:38:18 -07003301 */
3302 if (!page && !drained) {
Mel Gorman0aaa29a2015-11-06 16:28:37 -08003303 unreserve_highatomic_pageblock(ac);
Vlastimil Babka93481ff2014-12-10 15:43:01 -08003304 drain_all_pages(NULL);
Mel Gorman9ee493c2010-09-09 16:38:18 -07003305 drained = true;
3306 goto retry;
3307 }
3308
Mel Gorman11e33f62009-06-16 15:31:57 -07003309 return page;
3310}
3311
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003312static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac)
Mel Gorman11e33f62009-06-16 15:31:57 -07003313{
3314 struct zoneref *z;
3315 struct zone *zone;
Mel Gormane1a55632016-07-28 15:46:26 -07003316 pg_data_t *last_pgdat = NULL;
Mel Gorman11e33f62009-06-16 15:31:57 -07003317
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003318 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
Mel Gormane1a55632016-07-28 15:46:26 -07003319 ac->high_zoneidx, ac->nodemask) {
3320 if (last_pgdat != zone->zone_pgdat)
Mel Gorman52e9f872016-07-28 15:46:29 -07003321 wakeup_kswapd(zone, order, ac->high_zoneidx);
Mel Gormane1a55632016-07-28 15:46:26 -07003322 last_pgdat = zone->zone_pgdat;
3323 }
Mel Gorman11e33f62009-06-16 15:31:57 -07003324}
3325
Mel Gormanc6038442016-05-19 17:13:38 -07003326static inline unsigned int
Peter Zijlstra341ce062009-06-16 15:32:02 -07003327gfp_to_alloc_flags(gfp_t gfp_mask)
3328{
Mel Gormanc6038442016-05-19 17:13:38 -07003329 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
Peter Zijlstra341ce062009-06-16 15:32:02 -07003330
Mel Gormana56f57f2009-06-16 15:32:02 -07003331 /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
Namhyung Kime6223a32010-10-26 14:21:59 -07003332 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
Mel Gormana56f57f2009-06-16 15:32:02 -07003333
Peter Zijlstra341ce062009-06-16 15:32:02 -07003334 /*
3335 * The caller may dip into page reserves a bit more if the caller
3336 * cannot run direct reclaim, or if the caller has realtime scheduling
3337 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
Mel Gormand0164ad2015-11-06 16:28:21 -08003338 * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH).
Peter Zijlstra341ce062009-06-16 15:32:02 -07003339 */
Namhyung Kime6223a32010-10-26 14:21:59 -07003340 alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
Peter Zijlstra341ce062009-06-16 15:32:02 -07003341
Mel Gormand0164ad2015-11-06 16:28:21 -08003342 if (gfp_mask & __GFP_ATOMIC) {
Andrea Arcangeli5c3240d2011-01-13 15:46:49 -08003343 /*
David Rientjesb104a352014-07-30 16:08:24 -07003344 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
3345 * if it can't schedule.
Andrea Arcangeli5c3240d2011-01-13 15:46:49 -08003346 */
David Rientjesb104a352014-07-30 16:08:24 -07003347 if (!(gfp_mask & __GFP_NOMEMALLOC))
Andrea Arcangeli5c3240d2011-01-13 15:46:49 -08003348 alloc_flags |= ALLOC_HARDER;
Peter Zijlstra341ce062009-06-16 15:32:02 -07003349 /*
David Rientjesb104a352014-07-30 16:08:24 -07003350 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
Vladimir Davydov344736f2014-10-20 15:50:30 +04003351 * comment for __cpuset_node_allowed().
Peter Zijlstra341ce062009-06-16 15:32:02 -07003352 */
3353 alloc_flags &= ~ALLOC_CPUSET;
Andrew Mortonc06b1fc2011-01-13 15:47:32 -08003354 } else if (unlikely(rt_task(current)) && !in_interrupt())
Peter Zijlstra341ce062009-06-16 15:32:02 -07003355 alloc_flags |= ALLOC_HARDER;
3356
Bartlomiej Zolnierkiewiczd95ea5d2012-10-08 16:32:05 -07003357#ifdef CONFIG_CMA
David Rientjes43e7a342014-10-09 15:27:25 -07003358 if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
Bartlomiej Zolnierkiewiczd95ea5d2012-10-08 16:32:05 -07003359 alloc_flags |= ALLOC_CMA;
3360#endif
Peter Zijlstra341ce062009-06-16 15:32:02 -07003361 return alloc_flags;
3362}
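/*
 * Illustrative sketch (not part of the upstream file): what the mapping above
 * yields for a plain GFP_ATOMIC request - cpusets are ignored and the request
 * may dip below the min watermark.  The helper name is hypothetical.
 */
static void __maybe_unused gfp_to_alloc_flags_example(void)
{
	unsigned int flags = gfp_to_alloc_flags(GFP_ATOMIC);

	WARN_ON(!(flags & ALLOC_HIGH) || !(flags & ALLOC_HARDER) ||
		(flags & ALLOC_CPUSET));
}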
3363
Mel Gorman072bb0a2012-07-31 16:43:58 -07003364bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
3365{
Vlastimil Babka31a6c192016-07-28 15:49:13 -07003366 if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
3367 return false;
3368
3369 if (gfp_mask & __GFP_MEMALLOC)
3370 return true;
3371 if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
3372 return true;
3373 if (!in_interrupt() &&
3374 ((current->flags & PF_MEMALLOC) ||
3375 unlikely(test_thread_flag(TIF_MEMDIE))))
3376 return true;
3377
3378 return false;
Mel Gorman072bb0a2012-07-31 16:43:58 -07003379}
3380
Michal Hocko0a0337e2016-05-20 16:57:00 -07003381/*
3382 * Maximum number of reclaim retries without any progress before OOM killer
3383 * is considered the only way to move forward.
3384 */
3385#define MAX_RECLAIM_RETRIES 16
3386
3387/*
3388 * Checks whether it makes sense to retry the reclaim to make forward progress
3389 * for the given allocation request.
3390 * The reclaim feedback represented by did_some_progress (any progress during
Michal Hocko7854ea62016-05-20 16:57:09 -07003391 * the last reclaim round) and no_progress_loops (number of reclaim rounds without
3392 * any progress in a row) is considered as well as the reclaimable pages on the
3393 * applicable zone list (with a backoff mechanism which is a function of
3394 * no_progress_loops).
Michal Hocko0a0337e2016-05-20 16:57:00 -07003395 *
3396 * Returns true if a retry is viable or false to enter the oom path.
3397 */
3398static inline bool
3399should_reclaim_retry(gfp_t gfp_mask, unsigned order,
3400 struct alloc_context *ac, int alloc_flags,
Vlastimil Babka423b4522016-10-07 17:00:40 -07003401 bool did_some_progress, int *no_progress_loops)
Michal Hocko0a0337e2016-05-20 16:57:00 -07003402{
3403 struct zone *zone;
3404 struct zoneref *z;
3405
3406 /*
Vlastimil Babka423b4522016-10-07 17:00:40 -07003407	 * Costly allocations might have made progress, but this doesn't mean
3408	 * their order will become available due to high fragmentation, so
3409	 * always increment the no-progress counter for them.
3410 */
3411 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
3412 *no_progress_loops = 0;
3413 else
3414 (*no_progress_loops)++;
3415
3416 /*
Michal Hocko0a0337e2016-05-20 16:57:00 -07003417 * Make sure we converge to OOM if we cannot make any progress
3418 * several times in the row.
3419 */
Vlastimil Babka423b4522016-10-07 17:00:40 -07003420 if (*no_progress_loops > MAX_RECLAIM_RETRIES)
Michal Hocko0a0337e2016-05-20 16:57:00 -07003421 return false;
3422
Michal Hocko0a0337e2016-05-20 16:57:00 -07003423 /*
Mel Gormanbca67592016-07-28 15:47:05 -07003424 * Keep reclaiming pages while there is a chance this will lead
3425 * somewhere. If none of the target zones can satisfy our allocation
3426 * request even if all reclaimable pages are considered then we are
3427 * screwed and have to go OOM.
Michal Hocko0a0337e2016-05-20 16:57:00 -07003428 */
3429 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
3430 ac->nodemask) {
3431 unsigned long available;
Michal Hockoede37712016-05-20 16:57:03 -07003432 unsigned long reclaimable;
Michal Hocko0a0337e2016-05-20 16:57:00 -07003433
Mel Gorman5a1c84b2016-07-28 15:47:31 -07003434 available = reclaimable = zone_reclaimable_pages(zone);
Vlastimil Babka423b4522016-10-07 17:00:40 -07003435 available -= DIV_ROUND_UP((*no_progress_loops) * available,
Michal Hocko0a0337e2016-05-20 16:57:00 -07003436 MAX_RECLAIM_RETRIES);
Mel Gorman5a1c84b2016-07-28 15:47:31 -07003437 available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
Michal Hocko0a0337e2016-05-20 16:57:00 -07003438
3439 /*
3440 * Would the allocation succeed if we reclaimed the whole
Mel Gorman5a1c84b2016-07-28 15:47:31 -07003441 * available?
Michal Hocko0a0337e2016-05-20 16:57:00 -07003442 */
Mel Gorman5a1c84b2016-07-28 15:47:31 -07003443 if (__zone_watermark_ok(zone, order, min_wmark_pages(zone),
3444 ac_classzone_idx(ac), alloc_flags, available)) {
Michal Hockoede37712016-05-20 16:57:03 -07003445 /*
3446 * If we didn't make any progress and have a lot of
3447 * dirty + writeback pages then we should wait for
3448 * an IO to complete to slow down the reclaim and
3449	 * prevent a premature OOM.
3450 */
3451 if (!did_some_progress) {
Mel Gorman11fb9982016-07-28 15:46:20 -07003452 unsigned long write_pending;
Michal Hockoede37712016-05-20 16:57:03 -07003453
Mel Gorman5a1c84b2016-07-28 15:47:31 -07003454 write_pending = zone_page_state_snapshot(zone,
3455 NR_ZONE_WRITE_PENDING);
Michal Hockoede37712016-05-20 16:57:03 -07003456
Mel Gorman11fb9982016-07-28 15:46:20 -07003457 if (2 * write_pending > reclaimable) {
Michal Hockoede37712016-05-20 16:57:03 -07003458 congestion_wait(BLK_RW_ASYNC, HZ/10);
3459 return true;
3460 }
3461 }
Mel Gorman5a1c84b2016-07-28 15:47:31 -07003462
Michal Hockoede37712016-05-20 16:57:03 -07003463 /*
3464 * Memory allocation/reclaim might be called from a WQ
3465 * context and the current implementation of the WQ
3466 * concurrency control doesn't recognize that
3467 * a particular WQ is congested if the worker thread is
3468 * looping without ever sleeping. Therefore we have to
3469 * do a short sleep here rather than calling
3470 * cond_resched().
3471 */
3472 if (current->flags & PF_WQ_WORKER)
3473 schedule_timeout_uninterruptible(1);
3474 else
3475 cond_resched();
3476
Michal Hocko0a0337e2016-05-20 16:57:00 -07003477 return true;
3478 }
3479 }
3480
3481 return false;
3482}
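/*
 * Worked example (illustrative, numbers assumed): with MAX_RECLAIM_RETRIES
 * equal to 16, after eight no-progress rounds a zone with 64MB reclaimable is
 * only credited with 64MB - 8 * 64MB / 16 = 32MB (plus its free pages) when
 * the watermark is re-checked above, so the retry loop converges towards OOM
 * instead of spinning forever.
 */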
3483
Mel Gorman11e33f62009-06-16 15:31:57 -07003484static inline struct page *
3485__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003486 struct alloc_context *ac)
Mel Gorman11e33f62009-06-16 15:31:57 -07003487{
Mel Gormand0164ad2015-11-06 16:28:21 -08003488 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
Mel Gorman11e33f62009-06-16 15:31:57 -07003489 struct page *page = NULL;
Mel Gormanc6038442016-05-19 17:13:38 -07003490 unsigned int alloc_flags;
Mel Gorman11e33f62009-06-16 15:31:57 -07003491 unsigned long did_some_progress;
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07003492 enum compact_priority compact_priority = DEF_COMPACT_PRIORITY;
Michal Hockoc5d01d02016-05-20 16:56:53 -07003493 enum compact_result compact_result;
Michal Hocko33c2d212016-05-20 16:57:06 -07003494 int compaction_retries = 0;
Michal Hocko0a0337e2016-05-20 16:57:00 -07003495 int no_progress_loops = 0;
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08003496
Christoph Lameter952f3b52006-12-06 20:33:26 -08003497 /*
Mel Gorman72807a72009-06-16 15:32:18 -07003498 * In the slowpath, we sanity check order to avoid ever trying to
3499 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
3500 * be using allocators in order of preference for an area that is
3501 * too large.
3502 */
Mel Gorman1fc28b72009-07-29 15:04:08 -07003503 if (order >= MAX_ORDER) {
3504 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
Mel Gorman72807a72009-06-16 15:32:18 -07003505 return NULL;
Mel Gorman1fc28b72009-07-29 15:04:08 -07003506 }
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08003507
Christoph Lameter952f3b52006-12-06 20:33:26 -08003508 /*
Mel Gormand0164ad2015-11-06 16:28:21 -08003509 * We also sanity check to catch abuse of atomic reserves being used by
3510 * callers that are not in atomic context.
3511 */
3512 if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
3513 (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
3514 gfp_mask &= ~__GFP_ATOMIC;
3515
Vlastimil Babka31a6c192016-07-28 15:49:13 -07003516 /*
3517 * The fast path uses conservative alloc_flags to succeed only until
3518 * kswapd needs to be woken up, and to avoid the cost of setting up
3519 * alloc_flags precisely. So we do that now.
3520 */
3521 alloc_flags = gfp_to_alloc_flags(gfp_mask);
3522
Mel Gormand0164ad2015-11-06 16:28:21 -08003523 if (gfp_mask & __GFP_KSWAPD_RECLAIM)
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003524 wake_all_kswapds(order, ac);
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08003525
Paul Jackson9bf22292005-09-06 15:18:12 -07003526 /*
Vlastimil Babka23771232016-07-28 15:49:16 -07003527 * The adjusted alloc_flags might result in immediate success, so try
3528 * that first
3529 */
3530 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3531 if (page)
3532 goto got_pg;
3533
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07003534 /*
3535 * For costly allocations, try direct compaction first, as it's likely
3536 * that we have enough base pages and don't need to reclaim. Don't try
3537 * that for allocations that are allowed to ignore watermarks, as the
3538 * ALLOC_NO_WATERMARKS attempt didn't yet happen.
3539 */
3540 if (can_direct_reclaim && order > PAGE_ALLOC_COSTLY_ORDER &&
3541 !gfp_pfmemalloc_allowed(gfp_mask)) {
3542 page = __alloc_pages_direct_compact(gfp_mask, order,
3543 alloc_flags, ac,
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07003544 INIT_COMPACT_PRIORITY,
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07003545 &compact_result);
3546 if (page)
3547 goto got_pg;
3548
Vlastimil Babka3eb27712016-07-28 15:49:22 -07003549 /*
3550 * Checks for costly allocations with __GFP_NORETRY, which
3551 * includes THP page fault allocations
3552 */
3553 if (gfp_mask & __GFP_NORETRY) {
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07003554 /*
3555 * If compaction is deferred for high-order allocations,
3556 * it is because sync compaction recently failed. If
3557 * this is the case and the caller requested a THP
3558 * allocation, we do not want to heavily disrupt the
3559 * system, so we fail the allocation instead of entering
3560 * direct reclaim.
3561 */
3562 if (compact_result == COMPACT_DEFERRED)
3563 goto nopage;
3564
3565 /*
Vlastimil Babka3eb27712016-07-28 15:49:22 -07003566 * Looks like reclaim/compaction is worth trying, but
3567 * sync compaction could be very expensive, so keep
Vlastimil Babka25160352016-07-28 15:49:25 -07003568 * using async compaction.
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07003569 */
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07003570 compact_priority = INIT_COMPACT_PRIORITY;
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07003571 }
3572 }
Vlastimil Babka23771232016-07-28 15:49:16 -07003573
3574retry:
3575 /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
3576 if (gfp_mask & __GFP_KSWAPD_RECLAIM)
3577 wake_all_kswapds(order, ac);
3578
3579 if (gfp_pfmemalloc_allowed(gfp_mask))
3580 alloc_flags = ALLOC_NO_WATERMARKS;
3581
3582 /*
Mel Gormane46e7b72016-06-03 14:56:01 -07003583 * Reset the zonelist iterators if memory policies can be ignored.
3584 * These allocations are high priority and system rather than user
3585	 * oriented.
3586 */
Vlastimil Babka23771232016-07-28 15:49:16 -07003587 if (!(alloc_flags & ALLOC_CPUSET) || (alloc_flags & ALLOC_NO_WATERMARKS)) {
Mel Gormane46e7b72016-06-03 14:56:01 -07003588 ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
3589 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
3590 ac->high_zoneidx, ac->nodemask);
3591 }
3592
Vlastimil Babka23771232016-07-28 15:49:16 -07003593 /* Attempt with potentially adjusted zonelist and alloc_flags */
Vlastimil Babka31a6c192016-07-28 15:49:13 -07003594 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
Rohit Seth7fb1d9f2005-11-13 16:06:43 -08003595 if (page)
3596 goto got_pg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003597
Mel Gormand0164ad2015-11-06 16:28:21 -08003598 /* Caller is not willing to reclaim, we can't balance anything */
3599 if (!can_direct_reclaim) {
David Rientjesaed0a0e2014-01-21 15:51:12 -08003600 /*
Michal Hocko33d53102016-01-14 15:19:05 -08003601 * All existing users of __GFP_NOFAIL are blockable, so warn
3602 * about any new users that actually allow this type of allocation
3603 * to fail.
David Rientjesaed0a0e2014-01-21 15:51:12 -08003604 */
3605 WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003606 goto nopage;
David Rientjesaed0a0e2014-01-21 15:51:12 -08003607 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003608
Peter Zijlstra341ce062009-06-16 15:32:02 -07003609 /* Avoid recursion of direct reclaim */
Michal Hocko33d53102016-01-14 15:19:05 -08003610 if (current->flags & PF_MEMALLOC) {
3611 /*
3612 * __GFP_NOFAIL request from this context is rather bizarre
3613 * because we cannot reclaim anything and can only loop waiting
3614 * for somebody to do some work for us.
3615 */
3616 if (WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
3617 cond_resched();
3618 goto retry;
3619 }
Peter Zijlstra341ce062009-06-16 15:32:02 -07003620 goto nopage;
Michal Hocko33d53102016-01-14 15:19:05 -08003621 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003622
David Rientjes6583bb62009-07-29 15:02:06 -07003623 /* Avoid allocations with no watermarks from looping endlessly */
3624 if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
3625 goto nopage;
3626
David Rientjes8fe78042014-08-06 16:07:54 -07003627
Mel Gorman11e33f62009-06-16 15:31:57 -07003628 /* Try direct reclaim and then allocating */
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003629 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
3630 &did_some_progress);
Mel Gorman11e33f62009-06-16 15:31:57 -07003631 if (page)
3632 goto got_pg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003633
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07003634 /* Try direct compaction and then allocating */
3635 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07003636 compact_priority, &compact_result);
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07003637 if (page)
3638 goto got_pg;
3639
Johannes Weiner90839052015-06-24 16:57:21 -07003640 /* Do not loop if specifically requested */
3641 if (gfp_mask & __GFP_NORETRY)
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07003642 goto nopage;
Johannes Weiner90839052015-06-24 16:57:21 -07003643
Michal Hocko0a0337e2016-05-20 16:57:00 -07003644 /*
3645 * Do not retry costly high order allocations unless they are
3646 * __GFP_REPEAT
3647 */
3648 if (order > PAGE_ALLOC_COSTLY_ORDER && !(gfp_mask & __GFP_REPEAT))
Vlastimil Babkaa8161d12016-07-28 15:49:19 -07003649 goto nopage;
Michal Hocko0a0337e2016-05-20 16:57:00 -07003650
Michal Hocko0a0337e2016-05-20 16:57:00 -07003651 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
Vlastimil Babka423b4522016-10-07 17:00:40 -07003652 did_some_progress > 0, &no_progress_loops))
Michal Hocko0a0337e2016-05-20 16:57:00 -07003653 goto retry;
3654
Michal Hocko33c2d212016-05-20 16:57:06 -07003655 /*
3656 * It doesn't make any sense to retry for the compaction if the order-0
3657 * reclaim is not able to make any progress because the current
3658 * implementation of the compaction depends on the sufficient amount
3659 * of free memory (see __compaction_suitable)
3660 */
3661 if (did_some_progress > 0 &&
Michal Hocko86a294a2016-05-20 16:57:12 -07003662 should_compact_retry(ac, order, alloc_flags,
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07003663 compact_result, &compact_priority,
Vlastimil Babkad9436492016-10-07 17:00:31 -07003664 &compaction_retries))
Michal Hocko33c2d212016-05-20 16:57:06 -07003665 goto retry;
3666
Johannes Weiner90839052015-06-24 16:57:21 -07003667 /* Reclaim has failed us, start killing things */
3668 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
3669 if (page)
3670 goto got_pg;
3671
3672 /* Retry as long as the OOM killer is making progress */
Michal Hocko0a0337e2016-05-20 16:57:00 -07003673 if (did_some_progress) {
3674 no_progress_loops = 0;
Johannes Weiner90839052015-06-24 16:57:21 -07003675 goto retry;
Michal Hocko0a0337e2016-05-20 16:57:00 -07003676 }
Johannes Weiner90839052015-06-24 16:57:21 -07003677
Linus Torvalds1da177e2005-04-16 15:20:36 -07003678nopage:
Michal Hocko7877cdc2016-10-07 17:01:55 -07003679 warn_alloc(gfp_mask,
3680 "page allocation failure: order:%u", order);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003681got_pg:
Mel Gorman072bb0a2012-07-31 16:43:58 -07003682 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003683}
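/*
 * Editor's summary of the slow path above (descriptive only): wake kswapd,
 * retry the freelists with adjusted alloc_flags, try async compaction for
 * costly orders, then loop over direct reclaim, direct compaction and
 * finally the OOM killer until the allocation succeeds or the retry
 * heuristics (should_reclaim_retry()/should_compact_retry()) give up.
 */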
Mel Gorman11e33f62009-06-16 15:31:57 -07003684
3685/*
3686 * This is the 'heart' of the zoned buddy allocator.
3687 */
3688struct page *
3689__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
3690 struct zonelist *zonelist, nodemask_t *nodemask)
3691{
Mel Gorman5bb1b162016-05-19 17:13:50 -07003692 struct page *page;
Mel Gormancc9a6c82012-03-21 16:34:11 -07003693 unsigned int cpuset_mems_cookie;
Mel Gormane6cbd7f2016-07-28 15:46:50 -07003694 unsigned int alloc_flags = ALLOC_WMARK_LOW;
Mel Gorman83d4ca82016-05-19 17:13:56 -07003695 gfp_t alloc_mask = gfp_mask; /* The gfp_t that was actually used for allocation */
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003696 struct alloc_context ac = {
3697 .high_zoneidx = gfp_zone(gfp_mask),
Mel Gorman682a3382016-05-19 17:13:30 -07003698 .zonelist = zonelist,
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003699 .nodemask = nodemask,
3700 .migratetype = gfpflags_to_migratetype(gfp_mask),
3701 };
Mel Gorman11e33f62009-06-16 15:31:57 -07003702
Mel Gorman682a3382016-05-19 17:13:30 -07003703 if (cpusets_enabled()) {
Mel Gorman83d4ca82016-05-19 17:13:56 -07003704 alloc_mask |= __GFP_HARDWALL;
Mel Gorman682a3382016-05-19 17:13:30 -07003705 alloc_flags |= ALLOC_CPUSET;
3706 if (!ac.nodemask)
3707 ac.nodemask = &cpuset_current_mems_allowed;
3708 }
3709
Benjamin Herrenschmidtdcce2842009-06-18 13:24:12 +10003710 gfp_mask &= gfp_allowed_mask;
3711
Mel Gorman11e33f62009-06-16 15:31:57 -07003712 lockdep_trace_alloc(gfp_mask);
3713
Mel Gormand0164ad2015-11-06 16:28:21 -08003714 might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
Mel Gorman11e33f62009-06-16 15:31:57 -07003715
3716 if (should_fail_alloc_page(gfp_mask, order))
3717 return NULL;
3718
3719 /*
3720 * Check the zones suitable for the gfp_mask contain at least one
3721 * valid zone. It's possible to have an empty zonelist as a result
David Rientjes4167e9b2015-04-14 15:46:55 -07003722 * of __GFP_THISNODE and a memoryless node
Mel Gorman11e33f62009-06-16 15:31:57 -07003723 */
3724 if (unlikely(!zonelist->_zonerefs->zone))
3725 return NULL;
3726
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003727 if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE)
Vlastimil Babka21bb9bd2014-10-09 15:26:51 -07003728 alloc_flags |= ALLOC_CMA;
3729
Mel Gormancc9a6c82012-03-21 16:34:11 -07003730retry_cpuset:
Mel Gormand26914d2014-04-03 14:47:24 -07003731 cpuset_mems_cookie = read_mems_allowed_begin();
Mel Gormancc9a6c82012-03-21 16:34:11 -07003732
Mel Gormanc9ab0c42015-11-06 16:28:12 -08003733 /* Dirty zone balancing only done in the fast path */
3734 ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE);
3735
Mel Gormane46e7b72016-06-03 14:56:01 -07003736 /*
3737 * The preferred zone is used for statistics but crucially it is
3738 * also used as the starting point for the zonelist iterator. It
3739 * may get reset for allocations that ignore memory policies.
3740 */
Mel Gormanc33d6c02016-05-19 17:14:10 -07003741 ac.preferred_zoneref = first_zones_zonelist(ac.zonelist,
3742 ac.high_zoneidx, ac.nodemask);
3743 if (!ac.preferred_zoneref) {
Mel Gorman5bb1b162016-05-19 17:13:50 -07003744 page = NULL;
Mel Gorman4fcb0972016-05-19 17:14:01 -07003745 goto no_zone;
Mel Gorman5bb1b162016-05-19 17:13:50 -07003746 }
3747
Mel Gorman5117f452009-06-16 15:31:59 -07003748 /* First allocation attempt */
Vlastimil Babkaa9263752015-02-11 15:25:41 -08003749 page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
Mel Gorman4fcb0972016-05-19 17:14:01 -07003750 if (likely(page))
3751 goto out;
Andrew Morton91fbdc02015-02-11 15:25:04 -08003752
Mel Gorman4fcb0972016-05-19 17:14:01 -07003753 /*
3754 * Runtime PM, block IO and its error handling path can deadlock
3755 * because I/O on the device might not complete.
3756 */
3757 alloc_mask = memalloc_noio_flags(gfp_mask);
3758 ac.spread_dirty_pages = false;
Mel Gorman11e33f62009-06-16 15:31:57 -07003759
Mel Gorman47415262016-05-19 17:14:44 -07003760 /*
3761 * Restore the original nodemask if it was potentially replaced with
3762 * &cpuset_current_mems_allowed to optimize the fast-path attempt.
3763 */
3764 if (cpusets_enabled())
3765 ac.nodemask = nodemask;
Mel Gorman4fcb0972016-05-19 17:14:01 -07003766 page = __alloc_pages_slowpath(alloc_mask, order, &ac);
Xishi Qiu23f086f2015-02-11 15:25:07 -08003767
Mel Gorman4fcb0972016-05-19 17:14:01 -07003768no_zone:
Mel Gormancc9a6c82012-03-21 16:34:11 -07003769 /*
3770 * When updating a task's mems_allowed, it is possible to race with
3771 * parallel threads in such a way that an allocation can fail while
3772 * the mask is being updated. If a page allocation is about to fail,
3773 * check if the cpuset changed during allocation and if so, retry.
3774 */
Mel Gorman83d4ca82016-05-19 17:13:56 -07003775 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) {
3776 alloc_mask = gfp_mask;
Mel Gormancc9a6c82012-03-21 16:34:11 -07003777 goto retry_cpuset;
Mel Gorman83d4ca82016-05-19 17:13:56 -07003778 }
Mel Gormancc9a6c82012-03-21 16:34:11 -07003779
Mel Gorman4fcb0972016-05-19 17:14:01 -07003780out:
Vladimir Davydovc4159a72016-08-08 23:03:12 +03003781 if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
3782 unlikely(memcg_kmem_charge(page, gfp_mask, order) != 0)) {
3783 __free_pages(page, order);
3784 page = NULL;
Vladimir Davydov49491482016-07-26 15:24:24 -07003785 }
3786
Mel Gorman4fcb0972016-05-19 17:14:01 -07003787 if (kmemcheck_enabled && page)
3788 kmemcheck_pagealloc_alloc(page, order, gfp_mask);
3789
3790 trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
3791
Mel Gorman11e33f62009-06-16 15:31:57 -07003792 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003793}
Mel Gormand2391712009-06-16 15:31:52 -07003794EXPORT_SYMBOL(__alloc_pages_nodemask);
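/*
 * Illustrative sketch (editor's example, not part of the original source):
 * most callers do not invoke __alloc_pages_nodemask() directly but reach it
 * through wrappers such as alloc_pages(), which supply the zonelist of the
 * current node. Allocating and freeing four contiguous pages might look
 * like:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *
 *	if (page) {
 *		void *addr = page_address(page);
 *
 *		... use addr ...
 *		__free_pages(page, 2);
 *	}
 */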
Linus Torvalds1da177e2005-04-16 15:20:36 -07003795
3796/*
3797 * Common helper functions.
3798 */
Harvey Harrison920c7a52008-02-04 22:29:26 -08003799unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003800{
Akinobu Mita945a1112009-09-21 17:01:47 -07003801 struct page *page;
3802
3803 /*
3804 * __get_free_pages() returns a 32-bit address, which cannot represent
3805 * a highmem page
3806 */
3807 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
3808
Linus Torvalds1da177e2005-04-16 15:20:36 -07003809 page = alloc_pages(gfp_mask, order);
3810 if (!page)
3811 return 0;
3812 return (unsigned long) page_address(page);
3813}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003814EXPORT_SYMBOL(__get_free_pages);
3815
Harvey Harrison920c7a52008-02-04 22:29:26 -08003816unsigned long get_zeroed_page(gfp_t gfp_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003817{
Akinobu Mita945a1112009-09-21 17:01:47 -07003818 return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003819}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003820EXPORT_SYMBOL(get_zeroed_page);
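/*
 * Illustrative sketch (editor's example): the address-based helpers above
 * pair with free_pages() rather than __free_pages(), e.g. a single zeroed
 * page for a small lookup table:
 *
 *	unsigned long addr = get_zeroed_page(GFP_KERNEL);
 *
 *	if (addr) {
 *		... use (void *)addr ...
 *		free_pages(addr, 0);
 *	}
 */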
3821
Harvey Harrison920c7a52008-02-04 22:29:26 -08003822void __free_pages(struct page *page, unsigned int order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003823{
Nick Pigginb5810032005-10-29 18:16:12 -07003824 if (put_page_testzero(page)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003825 if (order == 0)
Mel Gormanb745bc82014-06-04 16:10:22 -07003826 free_hot_cold_page(page, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003827 else
3828 __free_pages_ok(page, order);
3829 }
3830}
3831
3832EXPORT_SYMBOL(__free_pages);
3833
Harvey Harrison920c7a52008-02-04 22:29:26 -08003834void free_pages(unsigned long addr, unsigned int order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003835{
3836 if (addr != 0) {
Nick Piggin725d7042006-09-25 23:30:55 -07003837 VM_BUG_ON(!virt_addr_valid((void *)addr));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003838 __free_pages(virt_to_page((void *)addr), order);
3839 }
3840}
3841
3842EXPORT_SYMBOL(free_pages);
3843
Glauber Costa6a1a0d32012-12-18 14:22:00 -08003844/*
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07003845 * Page Fragment:
3846 * An arbitrary-length arbitrary-offset area of memory which resides
3847 * within a 0 or higher order page. Multiple fragments within that page
3848 * are individually refcounted, in the page's reference counter.
3849 *
3850 * The page_frag functions below provide a simple allocation framework for
3851 * page fragments. This is used by the network stack and network device
3852 * drivers to provide a backing region of memory for use as either an
3853 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
3854 */
3855static struct page *__page_frag_refill(struct page_frag_cache *nc,
3856 gfp_t gfp_mask)
3857{
3858 struct page *page = NULL;
3859 gfp_t gfp = gfp_mask;
3860
3861#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
3862 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
3863 __GFP_NOMEMALLOC;
3864 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
3865 PAGE_FRAG_CACHE_MAX_ORDER);
3866 nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
3867#endif
3868 if (unlikely(!page))
3869 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
3870
3871 nc->va = page ? page_address(page) : NULL;
3872
3873 return page;
3874}
3875
3876void *__alloc_page_frag(struct page_frag_cache *nc,
3877 unsigned int fragsz, gfp_t gfp_mask)
3878{
3879 unsigned int size = PAGE_SIZE;
3880 struct page *page;
3881 int offset;
3882
3883 if (unlikely(!nc->va)) {
3884refill:
3885 page = __page_frag_refill(nc, gfp_mask);
3886 if (!page)
3887 return NULL;
3888
3889#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
3890 /* if size can vary use size else just use PAGE_SIZE */
3891 size = nc->size;
3892#endif
3893 /* Even if we own the page, we do not use atomic_set().
3894 * This would break get_page_unless_zero() users.
3895 */
Joonsoo Kimfe896d12016-03-17 14:19:26 -07003896 page_ref_add(page, size - 1);
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07003897
3898 /* reset page count bias and offset to start of new frag */
Michal Hocko2f064f32015-08-21 14:11:51 -07003899 nc->pfmemalloc = page_is_pfmemalloc(page);
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07003900 nc->pagecnt_bias = size;
3901 nc->offset = size;
3902 }
3903
3904 offset = nc->offset - fragsz;
3905 if (unlikely(offset < 0)) {
3906 page = virt_to_page(nc->va);
3907
Joonsoo Kimfe896d12016-03-17 14:19:26 -07003908 if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07003909 goto refill;
3910
3911#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
3912 /* if size can vary use size else just use PAGE_SIZE */
3913 size = nc->size;
3914#endif
3915 /* OK, page count is 0, we can safely set it */
Joonsoo Kimfe896d12016-03-17 14:19:26 -07003916 set_page_count(page, size);
Alexander Duyckb63ae8c2015-05-06 21:11:57 -07003917
3918 /* reset page count bias and offset to start of new frag */
3919 nc->pagecnt_bias = size;
3920 offset = size - fragsz;
3921 }
3922
3923 nc->pagecnt_bias--;
3924 nc->offset = offset;
3925
3926 return nc->va + offset;
3927}
3928EXPORT_SYMBOL(__alloc_page_frag);
3929
3930/*
3931 * Frees a page fragment allocated out of either a compound or order 0 page.
3932 */
3933void __free_page_frag(void *addr)
3934{
3935 struct page *page = virt_to_head_page(addr);
3936
3937 if (unlikely(put_page_testzero(page)))
3938 __free_pages_ok(page, compound_order(page));
3939}
3940EXPORT_SYMBOL(__free_page_frag);
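/*
 * Illustrative sketch (editor's example): a driver-private page_frag_cache
 * could carve out small receive buffers roughly as follows, assuming the
 * cache is only ever used from one context at a time:
 *
 *	static struct page_frag_cache frag_cache;
 *
 *	void *buf = __alloc_page_frag(&frag_cache, 256, GFP_ATOMIC);
 *
 *	if (buf) {
 *		... fill the 256-byte fragment ...
 *		__free_page_frag(buf);
 *	}
 */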
3941
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08003942static void *make_alloc_exact(unsigned long addr, unsigned int order,
3943 size_t size)
Andi Kleenee85c2e2011-05-11 15:13:34 -07003944{
3945 if (addr) {
3946 unsigned long alloc_end = addr + (PAGE_SIZE << order);
3947 unsigned long used = addr + PAGE_ALIGN(size);
3948
3949 split_page(virt_to_page((void *)addr), order);
3950 while (used < alloc_end) {
3951 free_page(used);
3952 used += PAGE_SIZE;
3953 }
3954 }
3955 return (void *)addr;
3956}
3957
Timur Tabi2be0ffe2008-07-23 21:28:11 -07003958/**
3959 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
3960 * @size: the number of bytes to allocate
3961 * @gfp_mask: GFP flags for the allocation
3962 *
3963 * This function is similar to alloc_pages(), except that it allocates the
3964 * minimum number of pages to satisfy the request. alloc_pages() can only
3965 * allocate memory in power-of-two pages.
3966 *
3967 * This function is also limited by MAX_ORDER.
3968 *
3969 * Memory allocated by this function must be released by free_pages_exact().
3970 */
3971void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
3972{
3973 unsigned int order = get_order(size);
3974 unsigned long addr;
3975
3976 addr = __get_free_pages(gfp_mask, order);
Andi Kleenee85c2e2011-05-11 15:13:34 -07003977 return make_alloc_exact(addr, order, size);
Timur Tabi2be0ffe2008-07-23 21:28:11 -07003978}
3979EXPORT_SYMBOL(alloc_pages_exact);
3980
3981/**
Andi Kleenee85c2e2011-05-11 15:13:34 -07003982 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
3983 * pages on a node.
Randy Dunlapb5e6ab52011-05-16 13:16:54 -07003984 * @nid: the preferred node ID where memory should be allocated
Andi Kleenee85c2e2011-05-11 15:13:34 -07003985 * @size: the number of bytes to allocate
3986 * @gfp_mask: GFP flags for the allocation
3987 *
3988 * Like alloc_pages_exact(), but try to allocate on node nid first before falling
3989 * back.
Andi Kleenee85c2e2011-05-11 15:13:34 -07003990 */
Fabian Fredericke1931812014-08-06 16:04:59 -07003991void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
Andi Kleenee85c2e2011-05-11 15:13:34 -07003992{
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08003993 unsigned int order = get_order(size);
Andi Kleenee85c2e2011-05-11 15:13:34 -07003994 struct page *p = alloc_pages_node(nid, gfp_mask, order);
3995 if (!p)
3996 return NULL;
3997 return make_alloc_exact((unsigned long)page_address(p), order, size);
3998}
Andi Kleenee85c2e2011-05-11 15:13:34 -07003999
4000/**
Timur Tabi2be0ffe2008-07-23 21:28:11 -07004001 * free_pages_exact - release memory allocated via alloc_pages_exact()
4002 * @virt: the value returned by alloc_pages_exact.
4003 * @size: size of allocation, same value as passed to alloc_pages_exact().
4004 *
4005 * Release the memory allocated by a previous call to alloc_pages_exact.
4006 */
4007void free_pages_exact(void *virt, size_t size)
4008{
4009 unsigned long addr = (unsigned long)virt;
4010 unsigned long end = addr + PAGE_ALIGN(size);
4011
4012 while (addr < end) {
4013 free_page(addr);
4014 addr += PAGE_SIZE;
4015 }
4016}
4017EXPORT_SYMBOL(free_pages_exact);
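/*
 * Illustrative sketch (editor's example): alloc_pages_exact() avoids the
 * power-of-two rounding of alloc_pages(), so a 20KB buffer consumes five
 * pages instead of eight (with 4KB pages):
 *
 *	void *buf = alloc_pages_exact(20 * 1024, GFP_KERNEL);
 *
 *	if (buf) {
 *		... use buf ...
 *		free_pages_exact(buf, 20 * 1024);
 *	}
 */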
4018
Zhang Yanfeie0fb5812013-02-22 16:35:54 -08004019/**
4020 * nr_free_zone_pages - count number of pages beyond high watermark
4021 * @offset: The zone index of the highest zone
4022 *
4023 * nr_free_zone_pages() counts the number of pages which are beyond the
4024 * high watermark within all zones at or below a given zone index. For each
4025 * zone, the number of pages is calculated as:
Jiang Liu834405c2013-07-03 15:03:04 -07004026 * managed_pages - high_pages
Zhang Yanfeie0fb5812013-02-22 16:35:54 -08004027 */
Zhang Yanfeiebec3862013-02-22 16:35:43 -08004028static unsigned long nr_free_zone_pages(int offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004029{
Mel Gormandd1a2392008-04-28 02:12:17 -07004030 struct zoneref *z;
Mel Gorman54a6eb52008-04-28 02:12:16 -07004031 struct zone *zone;
4032
Martin J. Blighe310fd42005-07-29 22:59:18 -07004033 /* Just pick one node, since fallback list is circular */
Zhang Yanfeiebec3862013-02-22 16:35:43 -08004034 unsigned long sum = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004035
Mel Gorman0e884602008-04-28 02:12:14 -07004036 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004037
Mel Gorman54a6eb52008-04-28 02:12:16 -07004038 for_each_zone_zonelist(zone, z, zonelist, offset) {
Jiang Liub40da042013-02-22 16:33:52 -08004039 unsigned long size = zone->managed_pages;
Mel Gorman41858962009-06-16 15:32:12 -07004040 unsigned long high = high_wmark_pages(zone);
Martin J. Blighe310fd42005-07-29 22:59:18 -07004041 if (size > high)
4042 sum += size - high;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004043 }
4044
4045 return sum;
4046}
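/*
 * Editor's note (worked example, hypothetical numbers): a zone with
 * 1,000,000 managed pages and a high watermark of 12,800 pages contributes
 * 1,000,000 - 12,800 = 987,200 pages to the sum above.
 */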
4047
Zhang Yanfeie0fb5812013-02-22 16:35:54 -08004048/**
4049 * nr_free_buffer_pages - count number of pages beyond high watermark
4050 *
4051 * nr_free_buffer_pages() counts the number of pages which are beyond the high
4052 * watermark within ZONE_DMA and ZONE_NORMAL.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004053 */
Zhang Yanfeiebec3862013-02-22 16:35:43 -08004054unsigned long nr_free_buffer_pages(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004055{
Al Viroaf4ca452005-10-21 02:55:38 -04004056 return nr_free_zone_pages(gfp_zone(GFP_USER));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004057}
Meelap Shahc2f1a552007-07-17 04:04:39 -07004058EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004059
Zhang Yanfeie0fb5812013-02-22 16:35:54 -08004060/**
4061 * nr_free_pagecache_pages - count number of pages beyond high watermark
4062 *
4063 * nr_free_pagecache_pages() counts the number of pages which are beyond the
4064 * high watermark within all zones.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004065 */
Zhang Yanfeiebec3862013-02-22 16:35:43 -08004066unsigned long nr_free_pagecache_pages(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004067{
Mel Gorman2a1e2742007-07-17 04:03:12 -07004068 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004069}
Christoph Lameter08e0f6a2006-09-27 01:50:06 -07004070
4071static inline void show_node(struct zone *zone)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004072{
Kirill A. Shutemove5adfff2012-12-11 16:00:29 -08004073 if (IS_ENABLED(CONFIG_NUMA))
Andy Whitcroft25ba77c2006-12-06 20:33:03 -08004074 printk("Node %d ", zone_to_nid(zone));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004075}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004076
Igor Redkod02bd272016-03-17 14:19:05 -07004077long si_mem_available(void)
4078{
4079 long available;
4080 unsigned long pagecache;
4081 unsigned long wmark_low = 0;
4082 unsigned long pages[NR_LRU_LISTS];
4083 struct zone *zone;
4084 int lru;
4085
4086 for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
Mel Gorman2f95ff92016-08-11 15:32:57 -07004087 pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
Igor Redkod02bd272016-03-17 14:19:05 -07004088
4089 for_each_zone(zone)
4090 wmark_low += zone->watermark[WMARK_LOW];
4091
4092 /*
4093 * Estimate the amount of memory available for userspace allocations,
4094 * without causing swapping.
4095 */
4096 available = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
4097
4098 /*
4099 * Not all the page cache can be freed, otherwise the system will
4100 * start swapping. Assume at least half of the page cache, or the
4101 * low watermark worth of cache, needs to stay.
4102 */
4103 pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
4104 pagecache -= min(pagecache / 2, wmark_low);
4105 available += pagecache;
4106
4107 /*
4108 * Part of the reclaimable slab consists of items that are in use,
4109 * and cannot be freed. Cap this estimate at the low watermark.
4110 */
4111 available += global_page_state(NR_SLAB_RECLAIMABLE) -
4112 min(global_page_state(NR_SLAB_RECLAIMABLE) / 2, wmark_low);
4113
4114 if (available < 0)
4115 available = 0;
4116 return available;
4117}
4118EXPORT_SYMBOL_GPL(si_mem_available);
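/*
 * Editor's summary of the estimate computed above (informal notation):
 *
 *	pagecache = active_file + inactive_file
 *	available = free - totalreserve
 *		  + pagecache - min(pagecache / 2, wmark_low)
 *		  + reclaimable_slab - min(reclaimable_slab / 2, wmark_low)
 *
 * where wmark_low is the sum of all zones' low watermarks, with the result
 * clamped to zero if it would go negative.
 */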
4119
Linus Torvalds1da177e2005-04-16 15:20:36 -07004120void si_meminfo(struct sysinfo *val)
4121{
4122 val->totalram = totalram_pages;
Mel Gorman11fb9982016-07-28 15:46:20 -07004123 val->sharedram = global_node_page_state(NR_SHMEM);
Christoph Lameterd23ad422007-02-10 01:43:02 -08004124 val->freeram = global_page_state(NR_FREE_PAGES);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004125 val->bufferram = nr_blockdev_pages();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004126 val->totalhigh = totalhigh_pages;
4127 val->freehigh = nr_free_highpages();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004128 val->mem_unit = PAGE_SIZE;
4129}
4130
4131EXPORT_SYMBOL(si_meminfo);
4132
4133#ifdef CONFIG_NUMA
4134void si_meminfo_node(struct sysinfo *val, int nid)
4135{
Jiang Liucdd91a72013-07-03 15:03:27 -07004136 int zone_type; /* needs to be signed */
4137 unsigned long managed_pages = 0;
Joonsoo Kimfc2bd792016-05-19 17:12:23 -07004138 unsigned long managed_highpages = 0;
4139 unsigned long free_highpages = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004140 pg_data_t *pgdat = NODE_DATA(nid);
4141
Jiang Liucdd91a72013-07-03 15:03:27 -07004142 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
4143 managed_pages += pgdat->node_zones[zone_type].managed_pages;
4144 val->totalram = managed_pages;
Mel Gorman11fb9982016-07-28 15:46:20 -07004145 val->sharedram = node_page_state(pgdat, NR_SHMEM);
Mel Gorman75ef7182016-07-28 15:45:24 -07004146 val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES);
Christoph Lameter98d2b0e2006-09-25 23:31:12 -07004147#ifdef CONFIG_HIGHMEM
Joonsoo Kimfc2bd792016-05-19 17:12:23 -07004148 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
4149 struct zone *zone = &pgdat->node_zones[zone_type];
4150
4151 if (is_highmem(zone)) {
4152 managed_highpages += zone->managed_pages;
4153 free_highpages += zone_page_state(zone, NR_FREE_PAGES);
4154 }
4155 }
4156 val->totalhigh = managed_highpages;
4157 val->freehigh = free_highpages;
Christoph Lameter98d2b0e2006-09-25 23:31:12 -07004158#else
Joonsoo Kimfc2bd792016-05-19 17:12:23 -07004159 val->totalhigh = managed_highpages;
4160 val->freehigh = free_highpages;
Christoph Lameter98d2b0e2006-09-25 23:31:12 -07004161#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07004162 val->mem_unit = PAGE_SIZE;
4163}
4164#endif
4165
David Rientjesddd588b2011-03-22 16:30:46 -07004166/*
David Rientjes7bf02ea2011-05-24 17:11:16 -07004167 * Determine whether the node should be displayed or not, depending on whether
4168 * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
David Rientjesddd588b2011-03-22 16:30:46 -07004169 */
David Rientjes7bf02ea2011-05-24 17:11:16 -07004170bool skip_free_areas_node(unsigned int flags, int nid)
David Rientjesddd588b2011-03-22 16:30:46 -07004171{
4172 bool ret = false;
Mel Gormancc9a6c82012-03-21 16:34:11 -07004173 unsigned int cpuset_mems_cookie;
David Rientjesddd588b2011-03-22 16:30:46 -07004174
4175 if (!(flags & SHOW_MEM_FILTER_NODES))
4176 goto out;
4177
Mel Gormancc9a6c82012-03-21 16:34:11 -07004178 do {
Mel Gormand26914d2014-04-03 14:47:24 -07004179 cpuset_mems_cookie = read_mems_allowed_begin();
Mel Gormancc9a6c82012-03-21 16:34:11 -07004180 ret = !node_isset(nid, cpuset_current_mems_allowed);
Mel Gormand26914d2014-04-03 14:47:24 -07004181 } while (read_mems_allowed_retry(cpuset_mems_cookie));
David Rientjesddd588b2011-03-22 16:30:46 -07004182out:
4183 return ret;
4184}
4185
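/* Convert a page count to kilobytes for the printouts below. */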
Linus Torvalds1da177e2005-04-16 15:20:36 -07004186#define K(x) ((x) << (PAGE_SHIFT-10))
4187
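/*
 * Editor's example: a free_area whose lists hold pages of the
 * MIGRATE_UNMOVABLE and MIGRATE_MOVABLE types (bits 'U' and 'M' set in
 * @type) is reported by the function below as "(UM) " in the
 * show_free_areas() order-by-order summary.
 */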
Rabin Vincent377e4f12012-12-11 16:00:24 -08004188static void show_migration_types(unsigned char type)
4189{
4190 static const char types[MIGRATE_TYPES] = {
4191 [MIGRATE_UNMOVABLE] = 'U',
Rabin Vincent377e4f12012-12-11 16:00:24 -08004192 [MIGRATE_MOVABLE] = 'M',
Vlastimil Babka475a2f92015-12-11 13:40:29 -08004193 [MIGRATE_RECLAIMABLE] = 'E',
4194 [MIGRATE_HIGHATOMIC] = 'H',
Rabin Vincent377e4f12012-12-11 16:00:24 -08004195#ifdef CONFIG_CMA
4196 [MIGRATE_CMA] = 'C',
4197#endif
Minchan Kim194159f2013-02-22 16:33:58 -08004198#ifdef CONFIG_MEMORY_ISOLATION
Rabin Vincent377e4f12012-12-11 16:00:24 -08004199 [MIGRATE_ISOLATE] = 'I',
Minchan Kim194159f2013-02-22 16:33:58 -08004200#endif
Rabin Vincent377e4f12012-12-11 16:00:24 -08004201 };
4202 char tmp[MIGRATE_TYPES + 1];
4203 char *p = tmp;
4204 int i;
4205
4206 for (i = 0; i < MIGRATE_TYPES; i++) {
4207 if (type & (1 << i))
4208 *p++ = types[i];
4209 }
4210
4211 *p = '\0';
4212 printk("(%s) ", tmp);
4213}
4214
Linus Torvalds1da177e2005-04-16 15:20:36 -07004215/*
4216 * Show free area list (used inside shift_scroll-lock stuff)
4217 * We also calculate the percentage fragmentation. We do this by counting the
4218 * memory on each free list with the exception of the first item on the list.
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07004219 *
4220 * Bits in @filter:
4221 * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
4222 * cpuset.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004223 */
David Rientjes7bf02ea2011-05-24 17:11:16 -07004224void show_free_areas(unsigned int filter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004225{
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07004226 unsigned long free_pcp = 0;
Jes Sorensenc7241912006-09-27 01:50:05 -07004227 int cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004228 struct zone *zone;
Mel Gorman599d0c92016-07-28 15:45:31 -07004229 pg_data_t *pgdat;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004230
KOSAKI Motohiroee99c712009-03-31 15:19:31 -07004231 for_each_populated_zone(zone) {
David Rientjes7bf02ea2011-05-24 17:11:16 -07004232 if (skip_free_areas_node(filter, zone_to_nid(zone)))
David Rientjesddd588b2011-03-22 16:30:46 -07004233 continue;
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07004234
Konstantin Khlebnikov761b0672015-04-14 15:45:32 -07004235 for_each_online_cpu(cpu)
4236 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004237 }
4238
KOSAKI Motohiroa7312862009-09-21 17:01:37 -07004239 printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
4240 " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07004241 " unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n"
4242 " slab_reclaimable:%lu slab_unreclaimable:%lu\n"
Bartlomiej Zolnierkiewiczd1ce7492012-10-08 16:32:02 -07004243 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07004244 " free:%lu free_pcp:%lu free_cma:%lu\n",
Mel Gorman599d0c92016-07-28 15:45:31 -07004245 global_node_page_state(NR_ACTIVE_ANON),
4246 global_node_page_state(NR_INACTIVE_ANON),
4247 global_node_page_state(NR_ISOLATED_ANON),
4248 global_node_page_state(NR_ACTIVE_FILE),
4249 global_node_page_state(NR_INACTIVE_FILE),
4250 global_node_page_state(NR_ISOLATED_FILE),
4251 global_node_page_state(NR_UNEVICTABLE),
Mel Gorman11fb9982016-07-28 15:46:20 -07004252 global_node_page_state(NR_FILE_DIRTY),
4253 global_node_page_state(NR_WRITEBACK),
4254 global_node_page_state(NR_UNSTABLE_NFS),
KOSAKI Motohiro3701b032009-09-21 17:01:29 -07004255 global_page_state(NR_SLAB_RECLAIMABLE),
4256 global_page_state(NR_SLAB_UNRECLAIMABLE),
Mel Gorman50658e22016-07-28 15:46:14 -07004257 global_node_page_state(NR_FILE_MAPPED),
Mel Gorman11fb9982016-07-28 15:46:20 -07004258 global_node_page_state(NR_SHMEM),
Andrew Mortona25700a2007-02-08 14:20:40 -08004259 global_page_state(NR_PAGETABLE),
Bartlomiej Zolnierkiewiczd1ce7492012-10-08 16:32:02 -07004260 global_page_state(NR_BOUNCE),
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07004261 global_page_state(NR_FREE_PAGES),
4262 free_pcp,
Bartlomiej Zolnierkiewiczd1ce7492012-10-08 16:32:02 -07004263 global_page_state(NR_FREE_CMA_PAGES));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004264
Mel Gorman599d0c92016-07-28 15:45:31 -07004265 for_each_online_pgdat(pgdat) {
4266 printk("Node %d"
4267 " active_anon:%lukB"
4268 " inactive_anon:%lukB"
4269 " active_file:%lukB"
4270 " inactive_file:%lukB"
4271 " unevictable:%lukB"
4272 " isolated(anon):%lukB"
4273 " isolated(file):%lukB"
Mel Gorman50658e22016-07-28 15:46:14 -07004274 " mapped:%lukB"
Mel Gorman11fb9982016-07-28 15:46:20 -07004275 " dirty:%lukB"
4276 " writeback:%lukB"
4277 " shmem:%lukB"
4278#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4279 " shmem_thp: %lukB"
4280 " shmem_pmdmapped: %lukB"
4281 " anon_thp: %lukB"
4282#endif
4283 " writeback_tmp:%lukB"
4284 " unstable:%lukB"
Minchan Kim33e077b2016-07-28 15:47:14 -07004285 " pages_scanned:%lu"
Mel Gorman599d0c92016-07-28 15:45:31 -07004286 " all_unreclaimable? %s"
4287 "\n",
4288 pgdat->node_id,
4289 K(node_page_state(pgdat, NR_ACTIVE_ANON)),
4290 K(node_page_state(pgdat, NR_INACTIVE_ANON)),
4291 K(node_page_state(pgdat, NR_ACTIVE_FILE)),
4292 K(node_page_state(pgdat, NR_INACTIVE_FILE)),
4293 K(node_page_state(pgdat, NR_UNEVICTABLE)),
4294 K(node_page_state(pgdat, NR_ISOLATED_ANON)),
4295 K(node_page_state(pgdat, NR_ISOLATED_FILE)),
Mel Gorman50658e22016-07-28 15:46:14 -07004296 K(node_page_state(pgdat, NR_FILE_MAPPED)),
Mel Gorman11fb9982016-07-28 15:46:20 -07004297 K(node_page_state(pgdat, NR_FILE_DIRTY)),
4298 K(node_page_state(pgdat, NR_WRITEBACK)),
4299#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4300 K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR),
4301 K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)
4302 * HPAGE_PMD_NR),
4303 K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR),
4304#endif
4305 K(node_page_state(pgdat, NR_SHMEM)),
4306 K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
4307 K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
Minchan Kim33e077b2016-07-28 15:47:14 -07004308 node_page_state(pgdat, NR_PAGES_SCANNED),
Mel Gorman599d0c92016-07-28 15:45:31 -07004309 !pgdat_reclaimable(pgdat) ? "yes" : "no");
4310 }
4311
KOSAKI Motohiroee99c712009-03-31 15:19:31 -07004312 for_each_populated_zone(zone) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004313 int i;
4314
David Rientjes7bf02ea2011-05-24 17:11:16 -07004315 if (skip_free_areas_node(filter, zone_to_nid(zone)))
David Rientjesddd588b2011-03-22 16:30:46 -07004316 continue;
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07004317
4318 free_pcp = 0;
4319 for_each_online_cpu(cpu)
4320 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
4321
Linus Torvalds1da177e2005-04-16 15:20:36 -07004322 show_node(zone);
4323 printk("%s"
4324 " free:%lukB"
4325 " min:%lukB"
4326 " low:%lukB"
4327 " high:%lukB"
Minchan Kim71c799f2016-07-28 15:47:26 -07004328 " active_anon:%lukB"
4329 " inactive_anon:%lukB"
4330 " active_file:%lukB"
4331 " inactive_file:%lukB"
4332 " unevictable:%lukB"
Mel Gorman5a1c84b2016-07-28 15:47:31 -07004333 " writepending:%lukB"
Linus Torvalds1da177e2005-04-16 15:20:36 -07004334 " present:%lukB"
Jiang Liu9feedc92012-12-12 13:52:12 -08004335 " managed:%lukB"
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07004336 " mlocked:%lukB"
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07004337 " slab_reclaimable:%lukB"
4338 " slab_unreclaimable:%lukB"
KOSAKI Motohiroc6a7f572009-09-21 17:01:32 -07004339 " kernel_stack:%lukB"
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07004340 " pagetables:%lukB"
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07004341 " bounce:%lukB"
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07004342 " free_pcp:%lukB"
4343 " local_pcp:%ukB"
Bartlomiej Zolnierkiewiczd1ce7492012-10-08 16:32:02 -07004344 " free_cma:%lukB"
Linus Torvalds1da177e2005-04-16 15:20:36 -07004345 "\n",
4346 zone->name,
Mel Gorman88f5acf2011-01-13 15:45:41 -08004347 K(zone_page_state(zone, NR_FREE_PAGES)),
Mel Gorman41858962009-06-16 15:32:12 -07004348 K(min_wmark_pages(zone)),
4349 K(low_wmark_pages(zone)),
4350 K(high_wmark_pages(zone)),
Minchan Kim71c799f2016-07-28 15:47:26 -07004351 K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
4352 K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
4353 K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
4354 K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
4355 K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
Mel Gorman5a1c84b2016-07-28 15:47:31 -07004356 K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
Linus Torvalds1da177e2005-04-16 15:20:36 -07004357 K(zone->present_pages),
Jiang Liu9feedc92012-12-12 13:52:12 -08004358 K(zone->managed_pages),
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07004359 K(zone_page_state(zone, NR_MLOCK)),
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07004360 K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
4361 K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
Andy Lutomirskid30dd8b2016-07-28 15:48:14 -07004362 zone_page_state(zone, NR_KERNEL_STACK_KB),
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07004363 K(zone_page_state(zone, NR_PAGETABLE)),
KOSAKI Motohiro4a0aa732009-09-21 17:01:30 -07004364 K(zone_page_state(zone, NR_BOUNCE)),
Konstantin Khlebnikovd1bfcdb2015-04-14 15:45:30 -07004365 K(free_pcp),
4366 K(this_cpu_read(zone->pageset->pcp.count)),
Minchan Kim33e077b2016-07-28 15:47:14 -07004367 K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004368 printk("lowmem_reserve[]:");
4369 for (i = 0; i < MAX_NR_ZONES; i++)
Mel Gorman3484b2d2014-08-06 16:07:14 -07004370 printk(" %ld", zone->lowmem_reserve[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004371 printk("\n");
4372 }
4373
KOSAKI Motohiroee99c712009-03-31 15:19:31 -07004374 for_each_populated_zone(zone) {
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08004375 unsigned int order;
4376 unsigned long nr[MAX_ORDER], flags, total = 0;
Rabin Vincent377e4f12012-12-11 16:00:24 -08004377 unsigned char types[MAX_ORDER];
Linus Torvalds1da177e2005-04-16 15:20:36 -07004378
David Rientjes7bf02ea2011-05-24 17:11:16 -07004379 if (skip_free_areas_node(filter, zone_to_nid(zone)))
David Rientjesddd588b2011-03-22 16:30:46 -07004380 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004381 show_node(zone);
4382 printk("%s: ", zone->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004383
4384 spin_lock_irqsave(&zone->lock, flags);
4385 for (order = 0; order < MAX_ORDER; order++) {
Rabin Vincent377e4f12012-12-11 16:00:24 -08004386 struct free_area *area = &zone->free_area[order];
4387 int type;
4388
4389 nr[order] = area->nr_free;
Kirill Korotaev8f9de512006-06-23 02:03:50 -07004390 total += nr[order] << order;
Rabin Vincent377e4f12012-12-11 16:00:24 -08004391
4392 types[order] = 0;
4393 for (type = 0; type < MIGRATE_TYPES; type++) {
4394 if (!list_empty(&area->free_list[type]))
4395 types[order] |= 1 << type;
4396 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004397 }
4398 spin_unlock_irqrestore(&zone->lock, flags);
Rabin Vincent377e4f12012-12-11 16:00:24 -08004399 for (order = 0; order < MAX_ORDER; order++) {
Kirill Korotaev8f9de512006-06-23 02:03:50 -07004400 printk("%lu*%lukB ", nr[order], K(1UL) << order);
Rabin Vincent377e4f12012-12-11 16:00:24 -08004401 if (nr[order])
4402 show_migration_types(types[order]);
4403 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004404 printk("= %lukB\n", K(total));
4405 }
4406
David Rientjes949f7ec2013-04-29 15:07:48 -07004407 hugetlb_show_meminfo();
4408
Mel Gorman11fb9982016-07-28 15:46:20 -07004409 printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES));
Larry Woodmane6f36022008-02-04 22:29:30 -08004410
Linus Torvalds1da177e2005-04-16 15:20:36 -07004411 show_swap_cache_info();
4412}
4413
Mel Gorman19770b32008-04-28 02:12:18 -07004414static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
4415{
4416 zoneref->zone = zone;
4417 zoneref->zone_idx = zone_idx(zone);
4418}
4419
Linus Torvalds1da177e2005-04-16 15:20:36 -07004420/*
4421 * Builds allocation fallback zone lists.
Christoph Lameter1a932052006-01-06 00:11:16 -08004422 *
4423 * Add all populated zones of a node to the zonelist.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004424 */
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004425static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
Zhang Yanfeibc732f12013-07-08 16:00:06 -07004426 int nr_zones)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004427{
Christoph Lameter1a932052006-01-06 00:11:16 -08004428 struct zone *zone;
Zhang Yanfeibc732f12013-07-08 16:00:06 -07004429 enum zone_type zone_type = MAX_NR_ZONES;
Christoph Lameter02a68a52006-01-06 00:11:18 -08004430
4431 do {
Christoph Lameter2f6726e2006-09-25 23:31:18 -07004432 zone_type--;
Christoph Lameter070f8032006-01-06 00:11:19 -08004433 zone = pgdat->node_zones + zone_type;
Mel Gorman6aa303d2016-09-01 16:14:55 -07004434 if (managed_zone(zone)) {
Mel Gormandd1a2392008-04-28 02:12:17 -07004435 zoneref_set_zone(zone,
4436 &zonelist->_zonerefs[nr_zones++]);
Christoph Lameter070f8032006-01-06 00:11:19 -08004437 check_highest_zone(zone_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004438 }
Christoph Lameter2f6726e2006-09-25 23:31:18 -07004439 } while (zone_type);
Zhang Yanfeibc732f12013-07-08 16:00:06 -07004440
Christoph Lameter070f8032006-01-06 00:11:19 -08004441 return nr_zones;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004442}
4443
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004444
4445/*
4446 * zonelist_order:
4447 * 0 = automatic detection of better ordering.
4448 * 1 = order by ([node] distance, -zonetype)
4449 * 2 = order by (-zonetype, [node] distance)
4450 *
4451 * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
4452 * the same zonelist. So only NUMA can configure this param.
4453 */
4454#define ZONELIST_ORDER_DEFAULT 0
4455#define ZONELIST_ORDER_NODE 1
4456#define ZONELIST_ORDER_ZONE 2
4457
4458/* zonelist order in the kernel.
4459 * set_zonelist_order() will set this to NODE or ZONE.
4460 */
4461static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
4462static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
4463
4464
Linus Torvalds1da177e2005-04-16 15:20:36 -07004465#ifdef CONFIG_NUMA
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004466/* The value user specified ....changed by config */
4467static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
4468/* string for sysctl */
4469#define NUMA_ZONELIST_ORDER_LEN 16
4470char numa_zonelist_order[16] = "default";
4471
4472/*
4473 * interface for configuring zonelist ordering.
4474 * command line option "numa_zonelist_order"
4475 * = "[dD]efault" - default, automatic configuration.
4476 * = "[nN]ode" - order by node locality, then by zone within node
4477 * = "[zZ]one" - order by zone, then by locality within zone
4478 */
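/*
 * Editor's example (assuming a NUMA kernel): the ordering can be chosen at
 * boot with "numa_zonelist_order=zone" on the kernel command line, or at
 * runtime through the sysctl handled below, e.g.:
 *
 *	echo zone > /proc/sys/vm/numa_zonelist_order
 */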
4479
4480static int __parse_numa_zonelist_order(char *s)
4481{
4482 if (*s == 'd' || *s == 'D') {
4483 user_zonelist_order = ZONELIST_ORDER_DEFAULT;
4484 } else if (*s == 'n' || *s == 'N') {
4485 user_zonelist_order = ZONELIST_ORDER_NODE;
4486 } else if (*s == 'z' || *s == 'Z') {
4487 user_zonelist_order = ZONELIST_ORDER_ZONE;
4488 } else {
Joe Perches11705322016-03-17 14:19:50 -07004489 pr_warn("Ignoring invalid numa_zonelist_order value: %s\n", s);
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004490 return -EINVAL;
4491 }
4492 return 0;
4493}
4494
4495static __init int setup_numa_zonelist_order(char *s)
4496{
Volodymyr G. Lukiianykecb256f2011-01-13 15:46:26 -08004497 int ret;
4498
4499 if (!s)
4500 return 0;
4501
4502 ret = __parse_numa_zonelist_order(s);
4503 if (ret == 0)
4504 strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);
4505
4506 return ret;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004507}
4508early_param("numa_zonelist_order", setup_numa_zonelist_order);
4509
4510/*
4511 * sysctl handler for numa_zonelist_order
4512 */
Joe Perchescccad5b2014-06-06 14:38:09 -07004513int numa_zonelist_order_handler(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07004514 void __user *buffer, size_t *length,
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004515 loff_t *ppos)
4516{
4517 char saved_string[NUMA_ZONELIST_ORDER_LEN];
4518 int ret;
Andi Kleen443c6f12009-12-23 21:00:47 +01004519 static DEFINE_MUTEX(zl_order_mutex);
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004520
Andi Kleen443c6f12009-12-23 21:00:47 +01004521 mutex_lock(&zl_order_mutex);
Chen Gangdacbde02013-07-03 15:02:35 -07004522 if (write) {
4523 if (strlen((char *)table->data) >= NUMA_ZONELIST_ORDER_LEN) {
4524 ret = -EINVAL;
4525 goto out;
4526 }
4527 strcpy(saved_string, (char *)table->data);
4528 }
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07004529 ret = proc_dostring(table, write, buffer, length, ppos);
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004530 if (ret)
Andi Kleen443c6f12009-12-23 21:00:47 +01004531 goto out;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004532 if (write) {
4533 int oldval = user_zonelist_order;
Chen Gangdacbde02013-07-03 15:02:35 -07004534
4535 ret = __parse_numa_zonelist_order((char *)table->data);
4536 if (ret) {
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004537 /*
4538 * bogus value. restore saved string
4539 */
Chen Gangdacbde02013-07-03 15:02:35 -07004540 strncpy((char *)table->data, saved_string,
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004541 NUMA_ZONELIST_ORDER_LEN);
4542 user_zonelist_order = oldval;
Haicheng Li4eaf3f62010-05-24 14:32:52 -07004543 } else if (oldval != user_zonelist_order) {
4544 mutex_lock(&zonelists_mutex);
Jiang Liu9adb62a2012-07-31 16:43:28 -07004545 build_all_zonelists(NULL, NULL);
Haicheng Li4eaf3f62010-05-24 14:32:52 -07004546 mutex_unlock(&zonelists_mutex);
4547 }
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004548 }
Andi Kleen443c6f12009-12-23 21:00:47 +01004549out:
4550 mutex_unlock(&zl_order_mutex);
4551 return ret;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004552}
4553
4554
Christoph Lameter62bc62a2009-06-16 15:32:15 -07004555#define MAX_NODE_LOAD (nr_online_nodes)
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004556static int node_load[MAX_NUMNODES];
4557
Linus Torvalds1da177e2005-04-16 15:20:36 -07004558/**
Pavel Pisa4dc3b162005-05-01 08:59:25 -07004559 * find_next_best_node - find the next node that should appear in a given node's fallback list
Linus Torvalds1da177e2005-04-16 15:20:36 -07004560 * @node: node whose fallback list we're appending
4561 * @used_node_mask: nodemask_t of already used nodes
4562 *
4563 * We use a number of factors to determine which is the next node that should
4564 * appear on a given node's fallback list. The node should not have appeared
4565 * already in @node's fallback list, and it should be the next closest node
4566 * according to the distance array (which contains arbitrary distance values
4567 * from each node to each node in the system), and should also prefer nodes
4568 * with no CPUs, since presumably they'll have very little allocation pressure
4569 * on them otherwise.
4570 * It returns -1 if no node is found.
4571 */
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004572static int find_next_best_node(int node, nodemask_t *used_node_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004573{
Linus Torvalds4cf808eb2006-02-17 20:38:21 +01004574 int n, val;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004575 int min_val = INT_MAX;
David Rientjes00ef2d22013-02-22 16:35:36 -08004576 int best_node = NUMA_NO_NODE;
Rusty Russella70f7302009-03-13 14:49:46 +10304577 const struct cpumask *tmp = cpumask_of_node(0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004578
Linus Torvalds4cf808eb2006-02-17 20:38:21 +01004579 /* Use the local node if we haven't already */
4580 if (!node_isset(node, *used_node_mask)) {
4581 node_set(node, *used_node_mask);
4582 return node;
4583 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004584
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08004585 for_each_node_state(n, N_MEMORY) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004586
4587 /* Don't want a node to appear more than once */
4588 if (node_isset(n, *used_node_mask))
4589 continue;
4590
Linus Torvalds1da177e2005-04-16 15:20:36 -07004591 /* Use the distance array to find the distance */
4592 val = node_distance(node, n);
4593
Linus Torvalds4cf808eb2006-02-17 20:38:21 +01004594 /* Penalize nodes under us ("prefer the next node") */
4595 val += (n < node);
4596
Linus Torvalds1da177e2005-04-16 15:20:36 -07004597 /* Give preference to headless and unused nodes */
Rusty Russella70f7302009-03-13 14:49:46 +10304598 tmp = cpumask_of_node(n);
4599 if (!cpumask_empty(tmp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004600 val += PENALTY_FOR_NODE_WITH_CPUS;
4601
4602 /* Slight preference for less loaded node */
4603 val *= (MAX_NODE_LOAD*MAX_NUMNODES);
4604 val += node_load[n];
4605
4606 if (val < min_val) {
4607 min_val = val;
4608 best_node = n;
4609 }
4610 }
4611
4612 if (best_node >= 0)
4613 node_set(best_node, *used_node_mask);
4614
4615 return best_node;
4616}
4617
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004618
4619/*
4620 * Build zonelists ordered by node and zones within node.
4621 * This results in maximum locality--normal zone overflows into local
4622 * DMA zone, if any--but risks exhausting DMA zone.
4623 */
4624static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004625{
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004626 int j;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004627 struct zonelist *zonelist;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004628
Aneesh Kumar K.Vc9634cf2016-10-07 16:59:12 -07004629 zonelist = &pgdat->node_zonelists[ZONELIST_FALLBACK];
Mel Gormandd1a2392008-04-28 02:12:17 -07004630 for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
Mel Gorman54a6eb52008-04-28 02:12:16 -07004631 ;
Zhang Yanfeibc732f12013-07-08 16:00:06 -07004632 j = build_zonelists_node(NODE_DATA(node), zonelist, j);
Mel Gormandd1a2392008-04-28 02:12:17 -07004633 zonelist->_zonerefs[j].zone = NULL;
4634 zonelist->_zonerefs[j].zone_idx = 0;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004635}
4636
4637/*
Christoph Lameter523b9452007-10-16 01:25:37 -07004638 * Build gfp_thisnode zonelists
4639 */
4640static void build_thisnode_zonelists(pg_data_t *pgdat)
4641{
Christoph Lameter523b9452007-10-16 01:25:37 -07004642 int j;
4643 struct zonelist *zonelist;
4644
Aneesh Kumar K.Vc9634cf2016-10-07 16:59:12 -07004645 zonelist = &pgdat->node_zonelists[ZONELIST_NOFALLBACK];
Zhang Yanfeibc732f12013-07-08 16:00:06 -07004646 j = build_zonelists_node(pgdat, zonelist, 0);
Mel Gormandd1a2392008-04-28 02:12:17 -07004647 zonelist->_zonerefs[j].zone = NULL;
4648 zonelist->_zonerefs[j].zone_idx = 0;
Christoph Lameter523b9452007-10-16 01:25:37 -07004649}
4650
4651/*
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004652 * Build zonelists ordered by zone and nodes within zones.
4653 * This results in conserving DMA zone[s] until all Normal memory is
4654 * exhausted, but results in overflowing to remote node while memory
4655 * may still exist in local DMA zone.
4656 */
4657static int node_order[MAX_NUMNODES];
4658
4659static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
4660{
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004661 int pos, j, node;
4662 int zone_type; /* needs to be signed */
4663 struct zone *z;
4664 struct zonelist *zonelist;
4665
Aneesh Kumar K.Vc9634cf2016-10-07 16:59:12 -07004666 zonelist = &pgdat->node_zonelists[ZONELIST_FALLBACK];
Mel Gorman54a6eb52008-04-28 02:12:16 -07004667 pos = 0;
4668 for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
4669 for (j = 0; j < nr_nodes; j++) {
4670 node = node_order[j];
4671 z = &NODE_DATA(node)->node_zones[zone_type];
Mel Gorman6aa303d2016-09-01 16:14:55 -07004672 if (managed_zone(z)) {
Mel Gormandd1a2392008-04-28 02:12:17 -07004673 zoneref_set_zone(z,
4674 &zonelist->_zonerefs[pos++]);
Mel Gorman54a6eb52008-04-28 02:12:16 -07004675 check_highest_zone(zone_type);
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004676 }
4677 }
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004678 }
Mel Gormandd1a2392008-04-28 02:12:17 -07004679 zonelist->_zonerefs[pos].zone = NULL;
4680 zonelist->_zonerefs[pos].zone_idx = 0;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004681}
4682
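/*
 * Editor's illustration (hypothetical machine): with node 0 holding DMA and
 * Normal zones, node 1 holding only Normal, and node 0 allocating, the
 * fallback zonelist built above would be ordered as
 *
 *	node order: Normal(0), DMA(0), Normal(1)
 *	zone order: Normal(0), Normal(1), DMA(0)
 *
 * i.e. node ordering favours locality at the risk of exhausting the DMA
 * zone, while zone ordering preserves DMA until all Normal memory is used.
 */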
Mel Gorman31939132014-10-09 15:28:30 -07004683#if defined(CONFIG_64BIT)
4684/*
4685 * Devices that require DMA32/DMA are relatively rare and do not justify a
4686 * penalty to every machine in case the specialised case applies. Default
4687 * to Node-ordering on 64-bit NUMA machines
4688 */
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004689static int default_zonelist_order(void)
4690{
Mel Gorman31939132014-10-09 15:28:30 -07004691 return ZONELIST_ORDER_NODE;
4692}
4693#else
4694/*
4695 * On 32-bit, the Normal zone needs to be preserved for allocations accessible
4696 * by the kernel. If processes running on node 0 deplete the low memory zone
4697 * then reclaim will occur more frequently, increasing stalls and potentially
4698 * be easier to OOM if a large percentage of the zone is under writeback or
4699 * dirty. The problem is significantly worse if CONFIG_HIGHPTE is not set.
4700 * Hence, default to zone ordering on 32-bit.
4701 */
4702static int default_zonelist_order(void)
4703{
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004704 return ZONELIST_ORDER_ZONE;
4705}
Mel Gorman31939132014-10-09 15:28:30 -07004706#endif /* CONFIG_64BIT */
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004707
4708static void set_zonelist_order(void)
4709{
4710 if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
4711 current_zonelist_order = default_zonelist_order();
4712 else
4713 current_zonelist_order = user_zonelist_order;
4714}
4715
4716static void build_zonelists(pg_data_t *pgdat)
4717{
Yaowei Baic00eb152016-01-14 15:19:00 -08004718 int i, node, load;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004719 nodemask_t used_mask;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004720 int local_node, prev_node;
4721 struct zonelist *zonelist;
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08004722 unsigned int order = current_zonelist_order;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004723
4724 /* initialize zonelists */
Christoph Lameter523b9452007-10-16 01:25:37 -07004725 for (i = 0; i < MAX_ZONELISTS; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004726 zonelist = pgdat->node_zonelists + i;
Mel Gormandd1a2392008-04-28 02:12:17 -07004727 zonelist->_zonerefs[0].zone = NULL;
4728 zonelist->_zonerefs[0].zone_idx = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004729 }
4730
4731 /* NUMA-aware ordering of nodes */
4732 local_node = pgdat->node_id;
Christoph Lameter62bc62a2009-06-16 15:32:15 -07004733 load = nr_online_nodes;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004734 prev_node = local_node;
4735 nodes_clear(used_mask);
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004736
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004737 memset(node_order, 0, sizeof(node_order));
Yaowei Baic00eb152016-01-14 15:19:00 -08004738 i = 0;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004739
Linus Torvalds1da177e2005-04-16 15:20:36 -07004740 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
4741 /*
4742 * We don't want to pressure a particular node.
4743 * So adding penalty to the first node in same
4744 * distance group to make it round-robin.
4745 */
David Rientjes957f8222012-10-08 16:33:24 -07004746 if (node_distance(local_node, node) !=
4747 node_distance(local_node, prev_node))
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004748 node_load[node] = load;
4749
Linus Torvalds1da177e2005-04-16 15:20:36 -07004750 prev_node = node;
4751 load--;
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004752 if (order == ZONELIST_ORDER_NODE)
4753 build_zonelists_in_node_order(pgdat, node);
4754 else
Yaowei Baic00eb152016-01-14 15:19:00 -08004755 node_order[i++] = node; /* remember order */
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004756 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004757
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004758 if (order == ZONELIST_ORDER_ZONE) {
4759 /* calculate node order -- i.e., DMA last! */
Yaowei Baic00eb152016-01-14 15:19:00 -08004760 build_zonelists_in_zone_order(pgdat, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004761 }
Christoph Lameter523b9452007-10-16 01:25:37 -07004762
4763 build_thisnode_zonelists(pgdat);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004764}
4765
Lee Schermerhorn7aac7892010-05-26 14:45:00 -07004766#ifdef CONFIG_HAVE_MEMORYLESS_NODES
4767/*
4768 * Return node id of node used for "local" allocations.
4769 * I.e., first node id of first zone in arg node's generic zonelist.
4770 * Used for initializing percpu 'numa_mem', which is used primarily
4771 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
4772 */
4773int local_memory_node(int node)
4774{
Mel Gormanc33d6c02016-05-19 17:14:10 -07004775 struct zoneref *z;
Lee Schermerhorn7aac7892010-05-26 14:45:00 -07004776
Mel Gormanc33d6c02016-05-19 17:14:10 -07004777 z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
Lee Schermerhorn7aac7892010-05-26 14:45:00 -07004778 gfp_zone(GFP_KERNEL),
Mel Gormanc33d6c02016-05-19 17:14:10 -07004779 NULL);
4780 return z->zone->node;
Lee Schermerhorn7aac7892010-05-26 14:45:00 -07004781}
4782#endif
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004783
Joonsoo Kim6423aa82016-08-10 16:27:49 -07004784static void setup_min_unmapped_ratio(void);
4785static void setup_min_slab_ratio(void);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004786#else /* CONFIG_NUMA */
4787
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004788static void set_zonelist_order(void)
4789{
4790 current_zonelist_order = ZONELIST_ORDER_ZONE;
4791}
4792
4793static void build_zonelists(pg_data_t *pgdat)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004794{
Christoph Lameter19655d32006-09-25 23:31:19 -07004795 int node, local_node;
Mel Gorman54a6eb52008-04-28 02:12:16 -07004796 enum zone_type j;
4797 struct zonelist *zonelist;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004798
4799 local_node = pgdat->node_id;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004800
Aneesh Kumar K.Vc9634cf2016-10-07 16:59:12 -07004801 zonelist = &pgdat->node_zonelists[ZONELIST_FALLBACK];
Zhang Yanfeibc732f12013-07-08 16:00:06 -07004802 j = build_zonelists_node(pgdat, zonelist, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004803
Mel Gorman54a6eb52008-04-28 02:12:16 -07004804 /*
4805 * Now we build the zonelist so that it contains the zones
4806 * of all the other nodes.
4807 * We don't want to pressure a particular node, so when
4808 * building the zones for node N, we make sure that the
4809 * zones coming right after the local ones are those from
4810 * node N+1 (modulo N)
4811 */
4812 for (node = local_node + 1; node < MAX_NUMNODES; node++) {
4813 if (!node_online(node))
4814 continue;
Zhang Yanfeibc732f12013-07-08 16:00:06 -07004815 j = build_zonelists_node(NODE_DATA(node), zonelist, j);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004816 }
Mel Gorman54a6eb52008-04-28 02:12:16 -07004817 for (node = 0; node < local_node; node++) {
4818 if (!node_online(node))
4819 continue;
Zhang Yanfeibc732f12013-07-08 16:00:06 -07004820 j = build_zonelists_node(NODE_DATA(node), zonelist, j);
Mel Gorman54a6eb52008-04-28 02:12:16 -07004821 }
4822
Mel Gormandd1a2392008-04-28 02:12:17 -07004823 zonelist->_zonerefs[j].zone = NULL;
4824 zonelist->_zonerefs[j].zone_idx = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004825}
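
/*
 * Illustrative note (hypothetical configuration): if four nodes were
 * online and local_node were 2, the two loops above would append zones
 * in node order 2, 3, 0, 1, i.e. node N is followed by node N+1 modulo
 * the number of nodes, exactly as the comment describes.
 */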
4826
4827#endif /* CONFIG_NUMA */
4828
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09004829/*
4830 * Boot pageset table. One per cpu which is going to be used for all
4831 * zones and all nodes. The parameters will be set in such a way
4832 * that an item put on a list will immediately be handed over to
4833 * the buddy list. This is safe since pageset manipulation is done
4834 * with interrupts disabled.
4835 *
4836 * The boot_pagesets must be kept even after bootup is complete for
4837 * unused processors and/or zones. They do play a role for bootstrapping
4838 * hotplugged processors.
4839 *
4840 * zoneinfo_show() and maybe other functions do
4841 * not check if the processor is online before following the pageset pointer.
4842 * Other parts of the kernel may not check if the zone is available.
4843 */
4844static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
4845static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
Haicheng Li1f522502010-05-24 14:32:51 -07004846static void setup_zone_pageset(struct zone *zone);
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09004847
Haicheng Li4eaf3f62010-05-24 14:32:52 -07004848/*
4849 * Global mutex to protect against size modification of zonelists
4850 * as well as to serialize pageset setup for the new populated zone.
4851 */
4852DEFINE_MUTEX(zonelists_mutex);
4853
Rusty Russell9b1a4d32008-07-28 12:16:30 -05004854/* The int return value is just for stop_machine() */
Jiang Liu4ed7e022012-07-31 16:43:35 -07004855static int __build_all_zonelists(void *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004856{
Yasunori Goto68113782006-06-23 02:03:11 -07004857 int nid;
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09004858 int cpu;
Jiang Liu9adb62a2012-07-31 16:43:28 -07004859 pg_data_t *self = data;
Paul Jackson9276b1bc2006-12-06 20:31:48 -08004860
Bo Liu7f9cfb32009-08-18 14:11:19 -07004861#ifdef CONFIG_NUMA
4862 memset(node_load, 0, sizeof(node_load));
4863#endif
Jiang Liu9adb62a2012-07-31 16:43:28 -07004864
4865 if (self && !node_online(self->node_id)) {
4866 build_zonelists(self);
Jiang Liu9adb62a2012-07-31 16:43:28 -07004867 }
4868
Paul Jackson9276b1bc2006-12-06 20:31:48 -08004869 for_each_online_node(nid) {
Christoph Lameter7ea15302007-10-16 01:25:29 -07004870 pg_data_t *pgdat = NODE_DATA(nid);
4871
4872 build_zonelists(pgdat);
Paul Jackson9276b1bc2006-12-06 20:31:48 -08004873 }
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09004874
4875 /*
4876 * Initialize the boot_pagesets that are going to be used
4877 * for bootstrapping processors. The real pagesets for
4878 * each zone will be allocated later when the per cpu
4879 * allocator is available.
4880 *
4881 * boot_pagesets are used also for bootstrapping offline
4882 * cpus if the system is already booted because the pagesets
4883 * are needed to initialize allocators on a specific cpu too.
4884 * F.e. the percpu allocator needs the page allocator which
4885 * needs the percpu allocator in order to allocate its pagesets
4886 * (a chicken-egg dilemma).
4887 */
Lee Schermerhorn7aac7892010-05-26 14:45:00 -07004888 for_each_possible_cpu(cpu) {
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09004889 setup_pageset(&per_cpu(boot_pageset, cpu), 0);
4890
Lee Schermerhorn7aac7892010-05-26 14:45:00 -07004891#ifdef CONFIG_HAVE_MEMORYLESS_NODES
4892 /*
4893 * We now know the "local memory node" for each node--
4894 * i.e., the node of the first zone in the generic zonelist.
4895 * Set up numa_mem percpu variable for on-line cpus. During
4896 * boot, only the boot cpu should be on-line; we'll init the
4897 * secondary cpus' numa_mem as they come on-line. During
4898 * node/memory hotplug, we'll fixup all on-line cpus.
4899 */
4900 if (cpu_online(cpu))
4901 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
4902#endif
4903 }
4904
Yasunori Goto68113782006-06-23 02:03:11 -07004905 return 0;
4906}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004907
Rasmus Villemoes061f67b2015-02-12 15:00:06 -08004908static noinline void __init
4909build_all_zonelists_init(void)
4910{
4911 __build_all_zonelists(NULL);
4912 mminit_verify_zonelist();
4913 cpuset_init_current_mems_allowed();
4914}
4915
Haicheng Li4eaf3f62010-05-24 14:32:52 -07004916/*
 4917 * Always called with zonelists_mutex held,
4918 * unless system_state == SYSTEM_BOOTING.
Rasmus Villemoes061f67b2015-02-12 15:00:06 -08004919 *
4920 * __ref due to (1) call of __meminit annotated setup_zone_pageset
4921 * [we're only called with non-NULL zone through __meminit paths] and
4922 * (2) call of __init annotated helper build_all_zonelists_init
4923 * [protected by SYSTEM_BOOTING].
Haicheng Li4eaf3f62010-05-24 14:32:52 -07004924 */
Jiang Liu9adb62a2012-07-31 16:43:28 -07004925void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone)
Yasunori Goto68113782006-06-23 02:03:11 -07004926{
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004927 set_zonelist_order();
4928
Yasunori Goto68113782006-06-23 02:03:11 -07004929 if (system_state == SYSTEM_BOOTING) {
Rasmus Villemoes061f67b2015-02-12 15:00:06 -08004930 build_all_zonelists_init();
Yasunori Goto68113782006-06-23 02:03:11 -07004931 } else {
KAMEZAWA Hiroyukie9959f02010-11-24 12:57:09 -08004932#ifdef CONFIG_MEMORY_HOTPLUG
Jiang Liu9adb62a2012-07-31 16:43:28 -07004933 if (zone)
4934 setup_zone_pageset(zone);
KAMEZAWA Hiroyukie9959f02010-11-24 12:57:09 -08004935#endif
Cody P Schaferdd1895e2013-07-03 15:01:36 -07004936 /* we have to stop all cpus to guarantee there is no user
4937 of zonelist */
Jiang Liu9adb62a2012-07-31 16:43:28 -07004938 stop_machine(__build_all_zonelists, pgdat, NULL);
Yasunori Goto68113782006-06-23 02:03:11 -07004939 /* cpuset refresh routine should be here */
4940 }
Andrew Mortonbd1e22b2006-06-23 02:03:47 -07004941 vm_total_pages = nr_free_pagecache_pages();
Mel Gorman9ef9acb2007-10-16 01:25:54 -07004942 /*
4943 * Disable grouping by mobility if the number of pages in the
4944 * system is too low to allow the mechanism to work. It would be
4945 * more accurate, but expensive to check per-zone. This check is
4946 * made on memory-hotadd so a system can start with mobility
4947 * disabled and enable it later
4948 */
Mel Gormand9c23402007-10-16 01:26:01 -07004949 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
Mel Gorman9ef9acb2007-10-16 01:25:54 -07004950 page_group_by_mobility_disabled = 1;
4951 else
4952 page_group_by_mobility_disabled = 0;
4953
Joe Perches756a0252016-03-17 14:19:47 -07004954 pr_info("Built %i zonelists in %s order, mobility grouping %s. Total pages: %ld\n",
4955 nr_online_nodes,
4956 zonelist_order_name[current_zonelist_order],
4957 page_group_by_mobility_disabled ? "off" : "on",
4958 vm_total_pages);
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004959#ifdef CONFIG_NUMA
Anton Blanchardf88dfff2014-12-10 15:42:53 -08004960 pr_info("Policy zone: %s\n", zone_names[policy_zone]);
KAMEZAWA Hiroyukif0c0b2b2007-07-15 23:38:01 -07004961#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07004962}
4963
4964/*
4965 * Helper functions to size the waitqueue hash table.
4966 * Essentially these want to choose hash table sizes sufficiently
4967 * large so that collisions trying to wait on pages are rare.
4968 * But in fact, the number of active page waitqueues on typical
4969 * systems is ridiculously low, less than 200. So this is even
4970 * conservative, even though it seems large.
4971 *
4972 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
4973 * waitqueues, i.e. the size of the waitq table given the number of pages.
4974 */
4975#define PAGES_PER_WAITQUEUE 256
4976
Yasunori Gotocca448f2006-06-23 02:03:10 -07004977#ifndef CONFIG_MEMORY_HOTPLUG
Yasunori Goto02b694d2006-06-23 02:03:08 -07004978static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004979{
4980 unsigned long size = 1;
4981
4982 pages /= PAGES_PER_WAITQUEUE;
4983
4984 while (size < pages)
4985 size <<= 1;
4986
4987 /*
4988 * Once we have dozens or even hundreds of threads sleeping
4989 * on IO we've got bigger problems than wait queue collision.
4990 * Limit the size of the wait table to a reasonable size.
4991 */
4992 size = min(size, 4096UL);
4993
4994 return max(size, 4UL);
4995}
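
/*
 * Worked example (illustrative, assuming 4KiB pages): a 1GiB zone has
 * 262144 pages, and 262144 / PAGES_PER_WAITQUEUE = 1024, which is
 * already a power of two, so the table gets 1024 entries, comfortably
 * inside the [4, 4096] clamp applied above.
 */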
Yasunori Gotocca448f2006-06-23 02:03:10 -07004996#else
4997/*
4998 * A zone's size might be changed by hot-add, so it is not possible to determine
4999 * a suitable size for its wait_table. So we use the maximum size now.
5000 *
5001 * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie:
5002 *
5003 * i386 (preemption config) : 4096 x 16 = 64Kbyte.
5004 * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
5005 * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte.
5006 *
5007 * The maximum entries are prepared when a zone's memory is (512K + 256) pages
5008 * or more by the traditional way. (See above). It equals:
5009 *
5010 * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte.
5011 * ia64(16K page size) : = ( 8G + 4M)byte.
5012 * powerpc (64K page size) : = (32G +16M)byte.
5013 */
5014static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
5015{
5016 return 4096UL;
5017}
5018#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07005019
5020/*
5021 * This is an integer logarithm so that shifts can be used later
5022 * to extract the more random high bits from the multiplicative
5023 * hash function before the remainder is taken.
5024 */
5025static inline unsigned long wait_table_bits(unsigned long size)
5026{
5027 return ffz(~size);
5028}
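
/*
 * Illustrative note: for the power-of-two table sizes produced above,
 * ffz(~size) is simply log2(size), e.g. ffz(~4096) == 12, so the
 * result can be used directly as a shift count when hashing.
 */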
5029
Mel Gorman56fd56b2007-10-16 01:25:58 -07005030/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07005031 * Initially all pages are reserved - free ones are freed
5032 * up by free_all_bootmem() once the early boot process is
5033 * done. Non-atomic initialization, single-pass.
5034 */
Matt Tolentinoc09b4242006-01-17 07:03:44 +01005035void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
Dave Hansena2f3aa022007-01-10 23:15:30 -08005036 unsigned long start_pfn, enum memmap_context context)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005037{
Dan Williams4b94ffd2016-01-15 16:56:22 -08005038 struct vmem_altmap *altmap = to_vmem_altmap(__pfn_to_phys(start_pfn));
Andy Whitcroft29751f62005-06-23 00:08:00 -07005039 unsigned long end_pfn = start_pfn + size;
Dan Williams4b94ffd2016-01-15 16:56:22 -08005040 pg_data_t *pgdat = NODE_DATA(nid);
Andy Whitcroft29751f62005-06-23 00:08:00 -07005041 unsigned long pfn;
Mel Gorman3a80a7f2015-06-30 14:57:02 -07005042 unsigned long nr_initialised = 0;
Taku Izumi342332e2016-03-15 14:55:22 -07005043#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5044 struct memblock_region *r = NULL, *tmp;
5045#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07005046
Hugh Dickins22b31ee2009-01-06 14:40:09 -08005047 if (highest_memmap_pfn < end_pfn - 1)
5048 highest_memmap_pfn = end_pfn - 1;
5049
Dan Williams4b94ffd2016-01-15 16:56:22 -08005050 /*
5051 * Honor reservation requested by the driver for this ZONE_DEVICE
5052 * memory
5053 */
5054 if (altmap && start_pfn == altmap->base_pfn)
5055 start_pfn += altmap->reserve;
5056
Greg Ungerercbe8dd42006-01-12 01:05:24 -08005057 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
Dave Hansena2f3aa022007-01-10 23:15:30 -08005058 /*
Andrew Mortonb72d0ff2016-03-15 14:55:25 -07005059 * There can be holes in boot-time mem_map[]s handed to this
5060 * function. They do not exist on hotplugged memory.
Dave Hansena2f3aa022007-01-10 23:15:30 -08005061 */
Andrew Mortonb72d0ff2016-03-15 14:55:25 -07005062 if (context != MEMMAP_EARLY)
5063 goto not_early;
5064
5065 if (!early_pfn_valid(pfn))
5066 continue;
5067 if (!early_pfn_in_nid(pfn, nid))
5068 continue;
5069 if (!update_defer_init(pgdat, pfn, end_pfn, &nr_initialised))
5070 break;
Taku Izumi342332e2016-03-15 14:55:22 -07005071
5072#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
Andrew Mortonb72d0ff2016-03-15 14:55:25 -07005073 /*
Andrew Mortonb72d0ff2016-03-15 14:55:25 -07005074 * Check the memblock attributes given by firmware, which can affect
 5075 * the kernel memory layout. If zone==ZONE_MOVABLE but the memory is
 5076 * mirrored, it's an overlapped memmap init; skip it.
5077 */
5078 if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
5079 if (!r || pfn >= memblock_region_memory_end_pfn(r)) {
5080 for_each_memblock(memory, tmp)
5081 if (pfn < memblock_region_memory_end_pfn(tmp))
5082 break;
5083 r = tmp;
Taku Izumi342332e2016-03-15 14:55:22 -07005084 }
Andrew Mortonb72d0ff2016-03-15 14:55:25 -07005085 if (pfn >= memblock_region_memory_base_pfn(r) &&
5086 memblock_is_mirror(r)) {
5087 /* already initialized as NORMAL */
5088 pfn = memblock_region_memory_end_pfn(r);
5089 continue;
5090 }
Dave Hansena2f3aa022007-01-10 23:15:30 -08005091 }
Andrew Mortonb72d0ff2016-03-15 14:55:25 -07005092#endif
Mel Gormanac5d2532015-06-30 14:57:20 -07005093
Andrew Mortonb72d0ff2016-03-15 14:55:25 -07005094not_early:
Mel Gormanac5d2532015-06-30 14:57:20 -07005095 /*
5096 * Mark the block movable so that blocks are reserved for
5097 * movable at startup. This will force kernel allocations
5098 * to reserve their blocks rather than leaking throughout
5099 * the address space during boot when many long-lived
Mel Gorman974a7862015-11-06 16:28:34 -08005100 * kernel allocations are made.
Mel Gormanac5d2532015-06-30 14:57:20 -07005101 *
 5102 * The bitmap is created for the zone's valid pfn range, but the memmap
 5103 * can be created for invalid pages (for alignment), so
 5104 * check here that set_pageblock_migratetype() is not called against
 5105 * a pfn outside the zone.
5106 */
5107 if (!(pfn & (pageblock_nr_pages - 1))) {
5108 struct page *page = pfn_to_page(pfn);
5109
5110 __init_single_page(page, pfn, zone, nid);
5111 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
5112 } else {
5113 __init_single_pfn(pfn, zone, nid);
5114 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005115 }
5116}
5117
Andi Kleen1e548de2008-02-04 22:29:26 -08005118static void __meminit zone_init_free_lists(struct zone *zone)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005119{
Mel Gorman7aeb09f2014-06-04 16:10:21 -07005120 unsigned int order, t;
Mel Gormanb2a0ac82007-10-16 01:25:48 -07005121 for_each_migratetype_order(order, t) {
5122 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005123 zone->free_area[order].nr_free = 0;
5124 }
5125}
5126
5127#ifndef __HAVE_ARCH_MEMMAP_INIT
5128#define memmap_init(size, nid, zone, start_pfn) \
Dave Hansena2f3aa022007-01-10 23:15:30 -08005129 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005130#endif
5131
David Rientjes7cd2b0a2014-06-23 13:22:04 -07005132static int zone_batchsize(struct zone *zone)
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07005133{
David Howells3a6be872009-05-06 16:03:03 -07005134#ifdef CONFIG_MMU
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07005135 int batch;
5136
5137 /*
5138 * The per-cpu-pages pools are set to around 1000th of the
Seth, Rohitba56e912005-10-29 18:15:47 -07005139 * size of the zone. But no more than 1/2 of a meg.
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07005140 *
5141 * OK, so we don't know how big the cache is. So guess.
5142 */
Jiang Liub40da042013-02-22 16:33:52 -08005143 batch = zone->managed_pages / 1024;
Seth, Rohitba56e912005-10-29 18:15:47 -07005144 if (batch * PAGE_SIZE > 512 * 1024)
5145 batch = (512 * 1024) / PAGE_SIZE;
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07005146 batch /= 4; /* We effectively *= 4 below */
5147 if (batch < 1)
5148 batch = 1;
5149
5150 /*
Nick Piggin0ceaacc2005-12-04 13:55:25 +11005151 * Clamp the batch to a 2^n - 1 value. Having a power
5152 * of 2 value was found to be more likely to have
5153 * suboptimal cache aliasing properties in some cases.
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07005154 *
Nick Piggin0ceaacc2005-12-04 13:55:25 +11005155 * For example if 2 tasks are alternately allocating
5156 * batches of pages, one task can end up with a lot
5157 * of pages of one half of the possible page colors
5158 * and the other with pages of the other colors.
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07005159 */
David Howells91552032009-05-06 16:03:02 -07005160 batch = rounddown_pow_of_two(batch + batch/2) - 1;
Seth, Rohitba56e912005-10-29 18:15:47 -07005161
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07005162 return batch;
David Howells3a6be872009-05-06 16:03:03 -07005163
5164#else
5165 /* The deferral and batching of frees should be suppressed under NOMMU
5166 * conditions.
5167 *
5168 * The problem is that NOMMU needs to be able to allocate large chunks
5169 * of contiguous memory as there's no hardware page translation to
5170 * assemble apparent contiguous memory from discontiguous pages.
5171 *
5172 * Queueing large contiguous runs of pages for batching, however,
5173 * causes the pages to actually be freed in smaller chunks. As there
5174 * can be a significant delay between the individual batches being
5175 * recycled, this leads to the once large chunks of space being
5176 * fragmented and becoming unavailable for high-order allocations.
5177 */
5178 return 0;
5179#endif
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07005180}
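
/*
 * Worked example (illustrative, assuming CONFIG_MMU and 4KiB pages):
 * a zone with 262144 managed pages (1GiB) starts with batch = 256;
 * that is more than 512KiB worth of pages, so it is capped to 128,
 * divided by 4 down to 32, and rounddown_pow_of_two(32 + 16) - 1
 * gives a final batch of 31.
 */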
5181
Cody P Schafer8d7a8fa2013-07-03 15:01:31 -07005182/*
5183 * pcp->high and pcp->batch values are related and dependent on one another:
5184 * ->batch must never be higher then ->high.
5185 * The following function updates them in a safe manner without read side
5186 * locking.
5187 *
5188 * Any new users of pcp->batch and pcp->high should ensure they can cope with
 5189 * those fields changing asynchronously (according to the above rule).
5190 *
5191 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
5192 * outside of boot time (or some other assurance that no concurrent updaters
5193 * exist).
5194 */
5195static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
5196 unsigned long batch)
5197{
5198 /* start with a fail safe value for batch */
5199 pcp->batch = 1;
5200 smp_wmb();
5201
5202 /* Update high, then batch, in order */
5203 pcp->high = high;
5204 smp_wmb();
5205
5206 pcp->batch = batch;
5207}
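
/*
 * Illustrative note on the ordering above: ->batch is first dropped to
 * the fail-safe value 1, the new ->high is published, and only then is
 * the new ->batch stored. With the smp_wmb() barriers in between, a
 * lock-free reader that sees the new batch value will, roughly
 * speaking, also see a high value at least as large, so the
 * ->batch <= ->high rule is never visibly broken mid-update.
 */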
5208
Cody P Schafer36640332013-07-03 15:01:40 -07005209/* a companion to pageset_set_high() */
Cody P Schafer4008bab2013-07-03 15:01:28 -07005210static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch)
5211{
Cody P Schafer8d7a8fa2013-07-03 15:01:31 -07005212 pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch));
Cody P Schafer4008bab2013-07-03 15:01:28 -07005213}
5214
Cody P Schafer88c90db2013-07-03 15:01:35 -07005215static void pageset_init(struct per_cpu_pageset *p)
Christoph Lameter2caaad42005-06-21 17:15:00 -07005216{
5217 struct per_cpu_pages *pcp;
Mel Gorman5f8dcc22009-09-21 17:03:19 -07005218 int migratetype;
Christoph Lameter2caaad42005-06-21 17:15:00 -07005219
Magnus Damm1c6fe942005-10-26 01:58:59 -07005220 memset(p, 0, sizeof(*p));
5221
Christoph Lameter3dfa5722008-02-04 22:29:19 -08005222 pcp = &p->pcp;
Christoph Lameter2caaad42005-06-21 17:15:00 -07005223 pcp->count = 0;
Mel Gorman5f8dcc22009-09-21 17:03:19 -07005224 for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
5225 INIT_LIST_HEAD(&pcp->lists[migratetype]);
Christoph Lameter2caaad42005-06-21 17:15:00 -07005226}
5227
Cody P Schafer88c90db2013-07-03 15:01:35 -07005228static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
5229{
5230 pageset_init(p);
5231 pageset_set_batch(p, batch);
5232}
5233
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08005234/*
Cody P Schafer36640332013-07-03 15:01:40 -07005235 * pageset_set_high() sets the high water mark for hot per_cpu_pagelist
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08005236 * to the value high for the pageset p.
5237 */
Cody P Schafer36640332013-07-03 15:01:40 -07005238static void pageset_set_high(struct per_cpu_pageset *p,
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08005239 unsigned long high)
5240{
Cody P Schafer8d7a8fa2013-07-03 15:01:31 -07005241 unsigned long batch = max(1UL, high / 4);
5242 if ((high / 4) > (PAGE_SHIFT * 8))
5243 batch = PAGE_SHIFT * 8;
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08005244
Cody P Schafer8d7a8fa2013-07-03 15:01:31 -07005245 pageset_update(&p->pcp, high, batch);
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08005246}
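
/*
 * Worked example (illustrative, assuming 4KiB pages, PAGE_SHIFT == 12):
 * a call with high = 32768 computes high / 4 = 8192, which exceeds
 * PAGE_SHIFT * 8 = 96, so the batch is capped at 96 before
 * pageset_update() publishes the pair.
 */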
5247
David Rientjes7cd2b0a2014-06-23 13:22:04 -07005248static void pageset_set_high_and_batch(struct zone *zone,
5249 struct per_cpu_pageset *pcp)
Cody P Schafer56cef2b2013-07-03 15:01:38 -07005250{
Cody P Schafer56cef2b2013-07-03 15:01:38 -07005251 if (percpu_pagelist_fraction)
Cody P Schafer36640332013-07-03 15:01:40 -07005252 pageset_set_high(pcp,
Cody P Schafer56cef2b2013-07-03 15:01:38 -07005253 (zone->managed_pages /
5254 percpu_pagelist_fraction));
5255 else
5256 pageset_set_batch(pcp, zone_batchsize(zone));
5257}
5258
Cody P Schafer169f6c12013-07-03 15:01:41 -07005259static void __meminit zone_pageset_init(struct zone *zone, int cpu)
5260{
5261 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
5262
5263 pageset_init(pcp);
5264 pageset_set_high_and_batch(zone, pcp);
5265}
5266
Jiang Liu4ed7e022012-07-31 16:43:35 -07005267static void __meminit setup_zone_pageset(struct zone *zone)
Wu Fengguang319774e2010-05-24 14:32:49 -07005268{
5269 int cpu;
Wu Fengguang319774e2010-05-24 14:32:49 -07005270 zone->pageset = alloc_percpu(struct per_cpu_pageset);
Cody P Schafer56cef2b2013-07-03 15:01:38 -07005271 for_each_possible_cpu(cpu)
5272 zone_pageset_init(zone, cpu);
Wu Fengguang319774e2010-05-24 14:32:49 -07005273}
5274
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07005275/*
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09005276 * Allocate per cpu pagesets and initialize them.
5277 * Before this call only boot pagesets were available.
Christoph Lameter2caaad42005-06-21 17:15:00 -07005278 */
Al Viro78d99552005-12-15 09:18:25 +00005279void __init setup_per_cpu_pageset(void)
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07005280{
Mel Gormanb4911ea2016-08-04 15:31:49 -07005281 struct pglist_data *pgdat;
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09005282 struct zone *zone;
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07005283
Wu Fengguang319774e2010-05-24 14:32:49 -07005284 for_each_populated_zone(zone)
5285 setup_zone_pageset(zone);
Mel Gormanb4911ea2016-08-04 15:31:49 -07005286
5287 for_each_online_pgdat(pgdat)
5288 pgdat->per_cpu_nodestats =
5289 alloc_percpu(struct per_cpu_nodestat);
Christoph Lametere7c8d5c2005-06-21 17:14:47 -07005290}
5291
Fabian Frederickbd721ea2016-08-02 14:03:33 -07005292static noinline __ref
Yasunori Gotocca448f2006-06-23 02:03:10 -07005293int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
Dave Hansened8ece22005-10-29 18:16:50 -07005294{
5295 int i;
Yasunori Gotocca448f2006-06-23 02:03:10 -07005296 size_t alloc_size;
Dave Hansened8ece22005-10-29 18:16:50 -07005297
5298 /*
5299 * The per-page waitqueue mechanism uses hashed waitqueues
5300 * per zone.
5301 */
Yasunori Goto02b694d2006-06-23 02:03:08 -07005302 zone->wait_table_hash_nr_entries =
5303 wait_table_hash_nr_entries(zone_size_pages);
5304 zone->wait_table_bits =
5305 wait_table_bits(zone->wait_table_hash_nr_entries);
Yasunori Gotocca448f2006-06-23 02:03:10 -07005306 alloc_size = zone->wait_table_hash_nr_entries
5307 * sizeof(wait_queue_head_t);
5308
Heiko Carstenscd94b9d2008-05-23 13:04:52 -07005309 if (!slab_is_available()) {
Yasunori Gotocca448f2006-06-23 02:03:10 -07005310 zone->wait_table = (wait_queue_head_t *)
Santosh Shilimkar67828322014-01-21 15:50:25 -08005311 memblock_virt_alloc_node_nopanic(
5312 alloc_size, zone->zone_pgdat->node_id);
Yasunori Gotocca448f2006-06-23 02:03:10 -07005313 } else {
5314 /*
5315 * This case means that a zone whose size was 0 gets new memory
5316 * via memory hot-add.
5317 * But it may be the case that a new node was hot-added. In
5318 * this case vmalloc() will not be able to use this new node's
5319 * memory - this wait_table must be initialized to use this new
5320 * node itself as well.
5321 * To use this new node's memory, further consideration will be
5322 * necessary.
5323 */
Jesper Juhl8691f3a2007-10-16 01:24:49 -07005324 zone->wait_table = vmalloc(alloc_size);
Yasunori Gotocca448f2006-06-23 02:03:10 -07005325 }
5326 if (!zone->wait_table)
5327 return -ENOMEM;
Dave Hansened8ece22005-10-29 18:16:50 -07005328
Pintu Kumarb8af2942013-09-11 14:20:34 -07005329 for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
Dave Hansened8ece22005-10-29 18:16:50 -07005330 init_waitqueue_head(zone->wait_table + i);
Yasunori Gotocca448f2006-06-23 02:03:10 -07005331
5332 return 0;
Dave Hansened8ece22005-10-29 18:16:50 -07005333}
5334
Matt Tolentinoc09b4242006-01-17 07:03:44 +01005335static __meminit void zone_pcp_init(struct zone *zone)
Dave Hansened8ece22005-10-29 18:16:50 -07005336{
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09005337 /*
5338 * per cpu subsystem is not up at this point. The following code
5339 * relies on the ability of the linker to provide the
5340 * offset of a (static) per cpu variable into the per cpu area.
5341 */
5342 zone->pageset = &boot_pageset;
Dave Hansened8ece22005-10-29 18:16:50 -07005343
Xishi Qiub38a8722013-11-12 15:07:20 -08005344 if (populated_zone(zone))
Christoph Lameter99dcc3e2010-01-05 15:34:51 +09005345 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n",
5346 zone->name, zone->present_pages,
5347 zone_batchsize(zone));
Dave Hansened8ece22005-10-29 18:16:50 -07005348}
5349
Jiang Liu4ed7e022012-07-31 16:43:35 -07005350int __meminit init_currently_empty_zone(struct zone *zone,
Yasunori Goto718127c2006-06-23 02:03:10 -07005351 unsigned long zone_start_pfn,
Yaowei Baib171e402015-11-05 18:47:06 -08005352 unsigned long size)
Dave Hansened8ece22005-10-29 18:16:50 -07005353{
5354 struct pglist_data *pgdat = zone->zone_pgdat;
Yasunori Gotocca448f2006-06-23 02:03:10 -07005355 int ret;
5356 ret = zone_wait_table_init(zone, size);
5357 if (ret)
5358 return ret;
Dave Hansened8ece22005-10-29 18:16:50 -07005359 pgdat->nr_zones = zone_idx(zone) + 1;
5360
Dave Hansened8ece22005-10-29 18:16:50 -07005361 zone->zone_start_pfn = zone_start_pfn;
5362
Mel Gorman708614e2008-07-23 21:26:51 -07005363 mminit_dprintk(MMINIT_TRACE, "memmap_init",
5364 "Initialising map node %d zone %lu pfns %lu -> %lu\n",
5365 pgdat->node_id,
5366 (unsigned long)zone_idx(zone),
5367 zone_start_pfn, (zone_start_pfn + size));
5368
Andi Kleen1e548de2008-02-04 22:29:26 -08005369 zone_init_free_lists(zone);
Yasunori Goto718127c2006-06-23 02:03:10 -07005370
5371 return 0;
Dave Hansened8ece22005-10-29 18:16:50 -07005372}
5373
Tejun Heo0ee332c2011-12-08 10:22:09 -08005374#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
Mel Gormanc7132162006-09-27 01:49:43 -07005375#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
Mel Gorman8a942fd2015-06-30 14:56:55 -07005376
Mel Gormanc7132162006-09-27 01:49:43 -07005377/*
5378 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
Mel Gormanc7132162006-09-27 01:49:43 -07005379 */
Mel Gorman8a942fd2015-06-30 14:56:55 -07005380int __meminit __early_pfn_to_nid(unsigned long pfn,
5381 struct mminit_pfnnid_cache *state)
Mel Gormanc7132162006-09-27 01:49:43 -07005382{
Tejun Heoc13291a2011-07-12 10:46:30 +02005383 unsigned long start_pfn, end_pfn;
Yinghai Lue76b63f2013-09-11 14:22:17 -07005384 int nid;
Russ Anderson7c243c72013-04-29 15:07:59 -07005385
Mel Gorman8a942fd2015-06-30 14:56:55 -07005386 if (state->last_start <= pfn && pfn < state->last_end)
5387 return state->last_nid;
Mel Gormanc7132162006-09-27 01:49:43 -07005388
Yinghai Lue76b63f2013-09-11 14:22:17 -07005389 nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
5390 if (nid != -1) {
Mel Gorman8a942fd2015-06-30 14:56:55 -07005391 state->last_start = start_pfn;
5392 state->last_end = end_pfn;
5393 state->last_nid = nid;
Yinghai Lue76b63f2013-09-11 14:22:17 -07005394 }
5395
5396 return nid;
Mel Gormanc7132162006-09-27 01:49:43 -07005397}
5398#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
5399
Mel Gormanc7132162006-09-27 01:49:43 -07005400/**
Santosh Shilimkar67828322014-01-21 15:50:25 -08005401 * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range
Randy Dunlap88ca3b92006-10-04 02:15:25 -07005402 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
Santosh Shilimkar67828322014-01-21 15:50:25 -08005403 * @max_low_pfn: The highest PFN that will be passed to memblock_free_early_nid
Mel Gormanc7132162006-09-27 01:49:43 -07005404 *
Zhang Zhen7d018172014-06-04 16:10:53 -07005405 * If an architecture guarantees that all ranges registered contain no holes
 5406 * and may be freed, this function may be used instead of calling
5407 * memblock_free_early_nid() manually.
Mel Gormanc7132162006-09-27 01:49:43 -07005408 */
Tejun Heoc13291a2011-07-12 10:46:30 +02005409void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
Mel Gormanc7132162006-09-27 01:49:43 -07005410{
Tejun Heoc13291a2011-07-12 10:46:30 +02005411 unsigned long start_pfn, end_pfn;
5412 int i, this_nid;
Mel Gormanc7132162006-09-27 01:49:43 -07005413
Tejun Heoc13291a2011-07-12 10:46:30 +02005414 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
5415 start_pfn = min(start_pfn, max_low_pfn);
5416 end_pfn = min(end_pfn, max_low_pfn);
Mel Gormanc7132162006-09-27 01:49:43 -07005417
Tejun Heoc13291a2011-07-12 10:46:30 +02005418 if (start_pfn < end_pfn)
Santosh Shilimkar67828322014-01-21 15:50:25 -08005419 memblock_free_early_nid(PFN_PHYS(start_pfn),
5420 (end_pfn - start_pfn) << PAGE_SHIFT,
5421 this_nid);
Mel Gormanc7132162006-09-27 01:49:43 -07005422 }
5423}
5424
5425/**
5426 * sparse_memory_present_with_active_regions - Call memory_present for each active range
Randy Dunlap88ca3b92006-10-04 02:15:25 -07005427 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
Mel Gormanc7132162006-09-27 01:49:43 -07005428 *
Zhang Zhen7d018172014-06-04 16:10:53 -07005429 * If an architecture guarantees that all ranges registered contain no holes and may
5430 * be freed, this function may be used instead of calling memory_present() manually.
Mel Gormanc7132162006-09-27 01:49:43 -07005431 */
5432void __init sparse_memory_present_with_active_regions(int nid)
5433{
Tejun Heoc13291a2011-07-12 10:46:30 +02005434 unsigned long start_pfn, end_pfn;
5435 int i, this_nid;
Mel Gormanc7132162006-09-27 01:49:43 -07005436
Tejun Heoc13291a2011-07-12 10:46:30 +02005437 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
5438 memory_present(this_nid, start_pfn, end_pfn);
Mel Gormanc7132162006-09-27 01:49:43 -07005439}
5440
5441/**
5442 * get_pfn_range_for_nid - Return the start and end page frames for a node
Randy Dunlap88ca3b92006-10-04 02:15:25 -07005443 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
5444 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
5445 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
Mel Gormanc7132162006-09-27 01:49:43 -07005446 *
5447 * It returns the start and end page frame of a node based on information
Zhang Zhen7d018172014-06-04 16:10:53 -07005448 * provided by memblock_set_node(). If called for a node
Mel Gormanc7132162006-09-27 01:49:43 -07005449 * with no available memory, a warning is printed and the start and end
Randy Dunlap88ca3b92006-10-04 02:15:25 -07005450 * PFNs will be 0.
Mel Gormanc7132162006-09-27 01:49:43 -07005451 */
Yasunori Gotoa3142c82007-05-08 00:23:07 -07005452void __meminit get_pfn_range_for_nid(unsigned int nid,
Mel Gormanc7132162006-09-27 01:49:43 -07005453 unsigned long *start_pfn, unsigned long *end_pfn)
5454{
Tejun Heoc13291a2011-07-12 10:46:30 +02005455 unsigned long this_start_pfn, this_end_pfn;
Mel Gormanc7132162006-09-27 01:49:43 -07005456 int i;
Tejun Heoc13291a2011-07-12 10:46:30 +02005457
Mel Gormanc7132162006-09-27 01:49:43 -07005458 *start_pfn = -1UL;
5459 *end_pfn = 0;
5460
Tejun Heoc13291a2011-07-12 10:46:30 +02005461 for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
5462 *start_pfn = min(*start_pfn, this_start_pfn);
5463 *end_pfn = max(*end_pfn, this_end_pfn);
Mel Gormanc7132162006-09-27 01:49:43 -07005464 }
5465
Christoph Lameter633c0662007-10-16 01:25:37 -07005466 if (*start_pfn == -1UL)
Mel Gormanc7132162006-09-27 01:49:43 -07005467 *start_pfn = 0;
Mel Gormanc7132162006-09-27 01:49:43 -07005468}
5469
5470/*
Mel Gorman2a1e2742007-07-17 04:03:12 -07005471 * This finds a zone that can be used for ZONE_MOVABLE pages. The
5472 * assumption is made that zones within a node are ordered in monotonic
5473 * increasing memory addresses so that the "highest" populated zone is used
5474 */
Adrian Bunkb69a7282008-07-23 21:28:12 -07005475static void __init find_usable_zone_for_movable(void)
Mel Gorman2a1e2742007-07-17 04:03:12 -07005476{
5477 int zone_index;
5478 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
5479 if (zone_index == ZONE_MOVABLE)
5480 continue;
5481
5482 if (arch_zone_highest_possible_pfn[zone_index] >
5483 arch_zone_lowest_possible_pfn[zone_index])
5484 break;
5485 }
5486
5487 VM_BUG_ON(zone_index == -1);
5488 movable_zone = zone_index;
5489}
5490
5491/*
5492 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005493 * because it is sized independently of architecture. Unlike the other zones,
Mel Gorman2a1e2742007-07-17 04:03:12 -07005494 * the starting point for ZONE_MOVABLE is not fixed. It may be different
5495 * in each node depending on the size of each node and how evenly kernelcore
5496 * is distributed. This helper function adjusts the zone ranges
5497 * provided by the architecture for a given node by using the end of the
5498 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
 5499 * zones within a node are in order of monotonically increasing memory addresses
5500 */
Adrian Bunkb69a7282008-07-23 21:28:12 -07005501static void __meminit adjust_zone_range_for_zone_movable(int nid,
Mel Gorman2a1e2742007-07-17 04:03:12 -07005502 unsigned long zone_type,
5503 unsigned long node_start_pfn,
5504 unsigned long node_end_pfn,
5505 unsigned long *zone_start_pfn,
5506 unsigned long *zone_end_pfn)
5507{
5508 /* Only adjust if ZONE_MOVABLE is on this node */
5509 if (zone_movable_pfn[nid]) {
5510 /* Size ZONE_MOVABLE */
5511 if (zone_type == ZONE_MOVABLE) {
5512 *zone_start_pfn = zone_movable_pfn[nid];
5513 *zone_end_pfn = min(node_end_pfn,
5514 arch_zone_highest_possible_pfn[movable_zone]);
5515
Xishi Qiue506b992016-10-07 16:58:06 -07005516 /* Adjust for ZONE_MOVABLE starting within this range */
5517 } else if (!mirrored_kernelcore &&
5518 *zone_start_pfn < zone_movable_pfn[nid] &&
5519 *zone_end_pfn > zone_movable_pfn[nid]) {
5520 *zone_end_pfn = zone_movable_pfn[nid];
5521
Mel Gorman2a1e2742007-07-17 04:03:12 -07005522 /* Check if this whole range is within ZONE_MOVABLE */
5523 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
5524 *zone_start_pfn = *zone_end_pfn;
5525 }
5526}
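
/*
 * Illustrative example (hypothetical PFNs): on a node spanning
 * [0x100000, 0x200000) with zone_movable_pfn[nid] = 0x180000 and no
 * mirrored kernelcore, ZONE_NORMAL is clipped to end at 0x180000 while
 * ZONE_MOVABLE is sized to cover [0x180000, 0x200000), assuming the
 * highest usable zone extends to the end of the node.
 */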
5527
5528/*
Mel Gormanc7132162006-09-27 01:49:43 -07005529 * Return the number of pages a zone spans in a node, including holes
5530 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
5531 */
Paul Mundt6ea6e682007-07-15 23:38:20 -07005532static unsigned long __meminit zone_spanned_pages_in_node(int nid,
Mel Gormanc7132162006-09-27 01:49:43 -07005533 unsigned long zone_type,
Zhang Yanfei7960aed2013-07-08 15:59:52 -07005534 unsigned long node_start_pfn,
5535 unsigned long node_end_pfn,
Taku Izumid91749c2016-03-15 14:55:18 -07005536 unsigned long *zone_start_pfn,
5537 unsigned long *zone_end_pfn,
Mel Gormanc7132162006-09-27 01:49:43 -07005538 unsigned long *ignored)
5539{
Xishi Qiub5685e92015-09-08 15:04:16 -07005540 /* When hotadding a new node from cpu_up(), the node should be empty */
Xishi Qiuf9126ab2015-08-14 15:35:16 -07005541 if (!node_start_pfn && !node_end_pfn)
5542 return 0;
5543
Zhang Yanfei7960aed2013-07-08 15:59:52 -07005544 /* Get the start and end of the zone */
Taku Izumid91749c2016-03-15 14:55:18 -07005545 *zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
5546 *zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
Mel Gorman2a1e2742007-07-17 04:03:12 -07005547 adjust_zone_range_for_zone_movable(nid, zone_type,
5548 node_start_pfn, node_end_pfn,
Taku Izumid91749c2016-03-15 14:55:18 -07005549 zone_start_pfn, zone_end_pfn);
Mel Gormanc7132162006-09-27 01:49:43 -07005550
5551 /* Check that this node has pages within the zone's required range */
Taku Izumid91749c2016-03-15 14:55:18 -07005552 if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
Mel Gormanc7132162006-09-27 01:49:43 -07005553 return 0;
5554
5555 /* Move the zone boundaries inside the node if necessary */
Taku Izumid91749c2016-03-15 14:55:18 -07005556 *zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
5557 *zone_start_pfn = max(*zone_start_pfn, node_start_pfn);
Mel Gormanc7132162006-09-27 01:49:43 -07005558
5559 /* Return the spanned pages */
Taku Izumid91749c2016-03-15 14:55:18 -07005560 return *zone_end_pfn - *zone_start_pfn;
Mel Gormanc7132162006-09-27 01:49:43 -07005561}
5562
5563/*
5564 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
Randy Dunlap88ca3b92006-10-04 02:15:25 -07005565 * then all holes in the requested range will be accounted for.
Mel Gormanc7132162006-09-27 01:49:43 -07005566 */
Yinghai Lu32996252009-12-15 17:59:02 -08005567unsigned long __meminit __absent_pages_in_range(int nid,
Mel Gormanc7132162006-09-27 01:49:43 -07005568 unsigned long range_start_pfn,
5569 unsigned long range_end_pfn)
5570{
Tejun Heo96e907d2011-07-12 10:46:29 +02005571 unsigned long nr_absent = range_end_pfn - range_start_pfn;
5572 unsigned long start_pfn, end_pfn;
5573 int i;
Mel Gormanc7132162006-09-27 01:49:43 -07005574
Tejun Heo96e907d2011-07-12 10:46:29 +02005575 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
5576 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
5577 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
5578 nr_absent -= end_pfn - start_pfn;
Mel Gormanc7132162006-09-27 01:49:43 -07005579 }
Tejun Heo96e907d2011-07-12 10:46:29 +02005580 return nr_absent;
Mel Gormanc7132162006-09-27 01:49:43 -07005581}
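
/*
 * Worked example (hypothetical ranges): for a requested range of
 * [0, 1000) pages where memblock reports memory at [0, 300) and
 * [600, 1000), nr_absent starts at 1000, has 300 and then 400 pages
 * subtracted, and a 300-page hole is returned.
 */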
5582
5583/**
5584 * absent_pages_in_range - Return number of page frames in holes within a range
5585 * @start_pfn: The start PFN to start searching for holes
5586 * @end_pfn: The end PFN to stop searching for holes
5587 *
Randy Dunlap88ca3b92006-10-04 02:15:25 -07005588 * It returns the number of page frames in memory holes within a range.
Mel Gormanc7132162006-09-27 01:49:43 -07005589 */
5590unsigned long __init absent_pages_in_range(unsigned long start_pfn,
5591 unsigned long end_pfn)
5592{
5593 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
5594}
5595
5596/* Return the number of page frames in holes in a zone on a node */
Paul Mundt6ea6e682007-07-15 23:38:20 -07005597static unsigned long __meminit zone_absent_pages_in_node(int nid,
Mel Gormanc7132162006-09-27 01:49:43 -07005598 unsigned long zone_type,
Zhang Yanfei7960aed2013-07-08 15:59:52 -07005599 unsigned long node_start_pfn,
5600 unsigned long node_end_pfn,
Mel Gormanc7132162006-09-27 01:49:43 -07005601 unsigned long *ignored)
5602{
Tejun Heo96e907d2011-07-12 10:46:29 +02005603 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
5604 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
Mel Gorman9c7cd682006-09-27 01:49:58 -07005605 unsigned long zone_start_pfn, zone_end_pfn;
Taku Izumi342332e2016-03-15 14:55:22 -07005606 unsigned long nr_absent;
Mel Gorman9c7cd682006-09-27 01:49:58 -07005607
Xishi Qiub5685e92015-09-08 15:04:16 -07005608 /* When hotadding a new node from cpu_up(), the node should be empty */
Xishi Qiuf9126ab2015-08-14 15:35:16 -07005609 if (!node_start_pfn && !node_end_pfn)
5610 return 0;
5611
Tejun Heo96e907d2011-07-12 10:46:29 +02005612 zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
5613 zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
Mel Gorman9c7cd682006-09-27 01:49:58 -07005614
Mel Gorman2a1e2742007-07-17 04:03:12 -07005615 adjust_zone_range_for_zone_movable(nid, zone_type,
5616 node_start_pfn, node_end_pfn,
5617 &zone_start_pfn, &zone_end_pfn);
Taku Izumi342332e2016-03-15 14:55:22 -07005618 nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
5619
5620 /*
5621 * ZONE_MOVABLE handling.
5622 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
5623 * and vice versa.
5624 */
Xishi Qiue506b992016-10-07 16:58:06 -07005625 if (mirrored_kernelcore && zone_movable_pfn[nid]) {
5626 unsigned long start_pfn, end_pfn;
5627 struct memblock_region *r;
Taku Izumi342332e2016-03-15 14:55:22 -07005628
Xishi Qiue506b992016-10-07 16:58:06 -07005629 for_each_memblock(memory, r) {
5630 start_pfn = clamp(memblock_region_memory_base_pfn(r),
5631 zone_start_pfn, zone_end_pfn);
5632 end_pfn = clamp(memblock_region_memory_end_pfn(r),
5633 zone_start_pfn, zone_end_pfn);
Taku Izumi342332e2016-03-15 14:55:22 -07005634
Xishi Qiue506b992016-10-07 16:58:06 -07005635 if (zone_type == ZONE_MOVABLE &&
5636 memblock_is_mirror(r))
5637 nr_absent += end_pfn - start_pfn;
Taku Izumi342332e2016-03-15 14:55:22 -07005638
Xishi Qiue506b992016-10-07 16:58:06 -07005639 if (zone_type == ZONE_NORMAL &&
5640 !memblock_is_mirror(r))
5641 nr_absent += end_pfn - start_pfn;
Taku Izumi342332e2016-03-15 14:55:22 -07005642 }
5643 }
5644
5645 return nr_absent;
Mel Gormanc7132162006-09-27 01:49:43 -07005646}
Mel Gorman0e0b8642006-09-27 01:49:56 -07005647
Tejun Heo0ee332c2011-12-08 10:22:09 -08005648#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
Paul Mundt6ea6e682007-07-15 23:38:20 -07005649static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
Mel Gormanc7132162006-09-27 01:49:43 -07005650 unsigned long zone_type,
Zhang Yanfei7960aed2013-07-08 15:59:52 -07005651 unsigned long node_start_pfn,
5652 unsigned long node_end_pfn,
Taku Izumid91749c2016-03-15 14:55:18 -07005653 unsigned long *zone_start_pfn,
5654 unsigned long *zone_end_pfn,
Mel Gormanc7132162006-09-27 01:49:43 -07005655 unsigned long *zones_size)
5656{
Taku Izumid91749c2016-03-15 14:55:18 -07005657 unsigned int zone;
5658
5659 *zone_start_pfn = node_start_pfn;
5660 for (zone = 0; zone < zone_type; zone++)
5661 *zone_start_pfn += zones_size[zone];
5662
5663 *zone_end_pfn = *zone_start_pfn + zones_size[zone_type];
5664
Mel Gormanc7132162006-09-27 01:49:43 -07005665 return zones_size[zone_type];
5666}
5667
Paul Mundt6ea6e682007-07-15 23:38:20 -07005668static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
Mel Gormanc7132162006-09-27 01:49:43 -07005669 unsigned long zone_type,
Zhang Yanfei7960aed2013-07-08 15:59:52 -07005670 unsigned long node_start_pfn,
5671 unsigned long node_end_pfn,
Mel Gormanc7132162006-09-27 01:49:43 -07005672 unsigned long *zholes_size)
5673{
5674 if (!zholes_size)
5675 return 0;
5676
5677 return zholes_size[zone_type];
5678}
Yinghai Lu20e69262013-03-01 14:51:27 -08005679
Tejun Heo0ee332c2011-12-08 10:22:09 -08005680#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
Mel Gormanc7132162006-09-27 01:49:43 -07005681
Yasunori Gotoa3142c82007-05-08 00:23:07 -07005682static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
Zhang Yanfei7960aed2013-07-08 15:59:52 -07005683 unsigned long node_start_pfn,
5684 unsigned long node_end_pfn,
5685 unsigned long *zones_size,
5686 unsigned long *zholes_size)
Mel Gormanc7132162006-09-27 01:49:43 -07005687{
Gu Zhengfebd5942015-06-24 16:57:02 -07005688 unsigned long realtotalpages = 0, totalpages = 0;
Mel Gormanc7132162006-09-27 01:49:43 -07005689 enum zone_type i;
5690
Gu Zhengfebd5942015-06-24 16:57:02 -07005691 for (i = 0; i < MAX_NR_ZONES; i++) {
5692 struct zone *zone = pgdat->node_zones + i;
Taku Izumid91749c2016-03-15 14:55:18 -07005693 unsigned long zone_start_pfn, zone_end_pfn;
Gu Zhengfebd5942015-06-24 16:57:02 -07005694 unsigned long size, real_size;
Mel Gormanc7132162006-09-27 01:49:43 -07005695
Gu Zhengfebd5942015-06-24 16:57:02 -07005696 size = zone_spanned_pages_in_node(pgdat->node_id, i,
5697 node_start_pfn,
5698 node_end_pfn,
Taku Izumid91749c2016-03-15 14:55:18 -07005699 &zone_start_pfn,
5700 &zone_end_pfn,
Gu Zhengfebd5942015-06-24 16:57:02 -07005701 zones_size);
5702 real_size = size - zone_absent_pages_in_node(pgdat->node_id, i,
Zhang Yanfei7960aed2013-07-08 15:59:52 -07005703 node_start_pfn, node_end_pfn,
5704 zholes_size);
Taku Izumid91749c2016-03-15 14:55:18 -07005705 if (size)
5706 zone->zone_start_pfn = zone_start_pfn;
5707 else
5708 zone->zone_start_pfn = 0;
Gu Zhengfebd5942015-06-24 16:57:02 -07005709 zone->spanned_pages = size;
5710 zone->present_pages = real_size;
5711
5712 totalpages += size;
5713 realtotalpages += real_size;
5714 }
5715
5716 pgdat->node_spanned_pages = totalpages;
Mel Gormanc7132162006-09-27 01:49:43 -07005717 pgdat->node_present_pages = realtotalpages;
5718 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
5719 realtotalpages);
5720}
5721
Mel Gorman835c1342007-10-16 01:25:47 -07005722#ifndef CONFIG_SPARSEMEM
5723/*
5724 * Calculate the size of the zone->blockflags rounded to an unsigned long
Mel Gormand9c23402007-10-16 01:26:01 -07005725 * Start by making sure zonesize is a multiple of pageblock_order by rounding
5726 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
Mel Gorman835c1342007-10-16 01:25:47 -07005727 * round what is now in bits to nearest long in bits, then return it in
5728 * bytes.
5729 */
Linus Torvalds7c455122013-02-18 09:58:02 -08005730static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
Mel Gorman835c1342007-10-16 01:25:47 -07005731{
5732 unsigned long usemapsize;
5733
Linus Torvalds7c455122013-02-18 09:58:02 -08005734 zonesize += zone_start_pfn & (pageblock_nr_pages-1);
Mel Gormand9c23402007-10-16 01:26:01 -07005735 usemapsize = roundup(zonesize, pageblock_nr_pages);
5736 usemapsize = usemapsize >> pageblock_order;
Mel Gorman835c1342007-10-16 01:25:47 -07005737 usemapsize *= NR_PAGEBLOCK_BITS;
5738 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
5739
5740 return usemapsize / 8;
5741}
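
/*
 * Worked example (illustrative, assuming pageblock_order == 9,
 * NR_PAGEBLOCK_BITS == 4 and 64-bit longs): a 262144-page zone that
 * starts on a pageblock boundary spans 512 pageblocks, needing
 * 512 * 4 = 2048 bits; that is already a multiple of 64, so the
 * usemap is 2048 / 8 = 256 bytes.
 */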
5742
5743static void __init setup_usemap(struct pglist_data *pgdat,
Linus Torvalds7c455122013-02-18 09:58:02 -08005744 struct zone *zone,
5745 unsigned long zone_start_pfn,
5746 unsigned long zonesize)
Mel Gorman835c1342007-10-16 01:25:47 -07005747{
Linus Torvalds7c455122013-02-18 09:58:02 -08005748 unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
Mel Gorman835c1342007-10-16 01:25:47 -07005749 zone->pageblock_flags = NULL;
Julia Lawall58a01a42009-01-06 14:39:28 -08005750 if (usemapsize)
Santosh Shilimkar67828322014-01-21 15:50:25 -08005751 zone->pageblock_flags =
5752 memblock_virt_alloc_node_nopanic(usemapsize,
5753 pgdat->node_id);
Mel Gorman835c1342007-10-16 01:25:47 -07005754}
5755#else
Linus Torvalds7c455122013-02-18 09:58:02 -08005756static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
5757 unsigned long zone_start_pfn, unsigned long zonesize) {}
Mel Gorman835c1342007-10-16 01:25:47 -07005758#endif /* CONFIG_SPARSEMEM */
5759
Mel Gormand9c23402007-10-16 01:26:01 -07005760#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
Mel Gormanba72cb82007-11-28 16:21:13 -08005761
Mel Gormand9c23402007-10-16 01:26:01 -07005762/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
Chen Gang15ca2202013-09-11 14:20:27 -07005763void __paginginit set_pageblock_order(void)
Mel Gormand9c23402007-10-16 01:26:01 -07005764{
Andrew Morton955c1cd2012-05-29 15:06:31 -07005765 unsigned int order;
5766
Mel Gormand9c23402007-10-16 01:26:01 -07005767 /* Check that pageblock_nr_pages has not already been setup */
5768 if (pageblock_order)
5769 return;
5770
Andrew Morton955c1cd2012-05-29 15:06:31 -07005771 if (HPAGE_SHIFT > PAGE_SHIFT)
5772 order = HUGETLB_PAGE_ORDER;
5773 else
5774 order = MAX_ORDER - 1;
5775
Mel Gormand9c23402007-10-16 01:26:01 -07005776 /*
5777 * Assume the largest contiguous order of interest is a huge page.
Andrew Morton955c1cd2012-05-29 15:06:31 -07005778 * This value may be variable depending on boot parameters on IA64 and
5779 * powerpc.
Mel Gormand9c23402007-10-16 01:26:01 -07005780 */
5781 pageblock_order = order;
5782}
5783#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
5784
Mel Gormanba72cb82007-11-28 16:21:13 -08005785/*
5786 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
Andrew Morton955c1cd2012-05-29 15:06:31 -07005787 * is unused as pageblock_order is set at compile-time. See
5788 * include/linux/pageblock-flags.h for the values of pageblock_order based on
5789 * the kernel config
Mel Gormanba72cb82007-11-28 16:21:13 -08005790 */
Chen Gang15ca2202013-09-11 14:20:27 -07005791void __paginginit set_pageblock_order(void)
Mel Gormanba72cb82007-11-28 16:21:13 -08005792{
Mel Gormanba72cb82007-11-28 16:21:13 -08005793}
Mel Gormand9c23402007-10-16 01:26:01 -07005794
5795#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
5796
Jiang Liu01cefae2012-12-12 13:52:19 -08005797static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages,
5798 unsigned long present_pages)
5799{
5800 unsigned long pages = spanned_pages;
5801
5802 /*
5803 * Provide a more accurate estimation if there are holes within
5804 * the zone and SPARSEMEM is in use. If there are holes within the
5805 * zone, each populated memory region may cost us one or two extra
5806 * memmap pages due to alignment because memmap pages for each
5807 * populated regions may not naturally algined on page boundary.
5808 * So the (present_pages >> 4) heuristic is a tradeoff for that.
5809 */
5810 if (spanned_pages > present_pages + (present_pages >> 4) &&
5811 IS_ENABLED(CONFIG_SPARSEMEM))
5812 pages = present_pages;
5813
5814 return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
5815}
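
/*
 * Worked example (illustrative, assuming 4KiB pages and a 64-byte
 * struct page): a zone spanning 262144 pages with no large holes needs
 * 262144 * 64 bytes = 16MiB of memmap, i.e. the helper returns 4096
 * pages.
 */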
5816
Linus Torvalds1da177e2005-04-16 15:20:36 -07005817/*
5818 * Set up the zone data structures:
5819 * - mark all pages reserved
5820 * - mark all memory queues empty
5821 * - clear the memory bitmaps
Minchan Kim6527af52012-07-31 16:46:16 -07005822 *
5823 * NOTE: pgdat should get zeroed by caller.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005824 */
Wei Yang7f3eb552015-09-08 14:59:50 -07005825static void __paginginit free_area_init_core(struct pglist_data *pgdat)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005826{
Christoph Lameter2f1b6242006-09-25 23:31:13 -07005827 enum zone_type j;
Dave Hansened8ece22005-10-29 18:16:50 -07005828 int nid = pgdat->node_id;
Yasunori Goto718127c2006-06-23 02:03:10 -07005829 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005830
Dave Hansen208d54e2005-10-29 18:16:52 -07005831 pgdat_resize_init(pgdat);
Andrea Arcangeli8177a422012-03-23 20:56:34 +01005832#ifdef CONFIG_NUMA_BALANCING
5833 spin_lock_init(&pgdat->numabalancing_migrate_lock);
5834 pgdat->numabalancing_migrate_nr_pages = 0;
5835 pgdat->numabalancing_migrate_next_window = jiffies;
5836#endif
Kirill A. Shutemova3d0a9182016-02-02 16:57:08 -08005837#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5838 spin_lock_init(&pgdat->split_queue_lock);
5839 INIT_LIST_HEAD(&pgdat->split_queue);
5840 pgdat->split_queue_len = 0;
5841#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07005842 init_waitqueue_head(&pgdat->kswapd_wait);
Mel Gorman55150612012-07-31 16:44:35 -07005843 init_waitqueue_head(&pgdat->pfmemalloc_wait);
Vlastimil Babka698b1b32016-03-17 14:18:08 -07005844#ifdef CONFIG_COMPACTION
5845 init_waitqueue_head(&pgdat->kcompactd_wait);
5846#endif
Joonsoo Kimeefa864b2014-12-12 16:55:46 -08005847 pgdat_page_ext_init(pgdat);
Mel Gormana52633d2016-07-28 15:45:28 -07005848 spin_lock_init(&pgdat->lru_lock);
Mel Gormana9dd0a82016-07-28 15:46:02 -07005849 lruvec_init(node_lruvec(pgdat));
Michal Nazarewicz5f63b722012-01-11 15:16:11 +01005850
Linus Torvalds1da177e2005-04-16 15:20:36 -07005851 for (j = 0; j < MAX_NR_ZONES; j++) {
5852 struct zone *zone = pgdat->node_zones + j;
Jiang Liu9feedc92012-12-12 13:52:12 -08005853 unsigned long size, realsize, freesize, memmap_pages;
Taku Izumid91749c2016-03-15 14:55:18 -07005854 unsigned long zone_start_pfn = zone->zone_start_pfn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005855
Gu Zhengfebd5942015-06-24 16:57:02 -07005856 size = zone->spanned_pages;
5857 realsize = freesize = zone->present_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005858
Mel Gorman0e0b8642006-09-27 01:49:56 -07005859 /*
Jiang Liu9feedc92012-12-12 13:52:12 -08005860 * Adjust freesize so that it accounts for how much memory
Mel Gorman0e0b8642006-09-27 01:49:56 -07005861 * is used by this zone for memmap. This affects the watermark
5862 * and per-cpu initialisations
5863 */
Jiang Liu01cefae2012-12-12 13:52:19 -08005864 memmap_pages = calc_memmap_size(size, realsize);
Zhong Hongboba914f42014-12-12 16:56:21 -08005865 if (!is_highmem_idx(j)) {
5866 if (freesize >= memmap_pages) {
5867 freesize -= memmap_pages;
5868 if (memmap_pages)
5869 printk(KERN_DEBUG
5870 " %s zone: %lu pages used for memmap\n",
5871 zone_names[j], memmap_pages);
5872 } else
Joe Perches11705322016-03-17 14:19:50 -07005873 pr_warn(" %s zone: %lu pages exceeds freesize %lu\n",
Zhong Hongboba914f42014-12-12 16:56:21 -08005874 zone_names[j], memmap_pages, freesize);
5875 }
Mel Gorman0e0b8642006-09-27 01:49:56 -07005876
Christoph Lameter62672762007-02-10 01:43:07 -08005877 /* Account for reserved pages */
Jiang Liu9feedc92012-12-12 13:52:12 -08005878 if (j == 0 && freesize > dma_reserve) {
5879 freesize -= dma_reserve;
Yinghai Lud903ef92008-10-18 20:27:06 -07005880 printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
Christoph Lameter62672762007-02-10 01:43:07 -08005881 zone_names[0], dma_reserve);
Mel Gorman0e0b8642006-09-27 01:49:56 -07005882 }
5883
Christoph Lameter98d2b0e2006-09-25 23:31:12 -07005884 if (!is_highmem_idx(j))
Jiang Liu9feedc92012-12-12 13:52:12 -08005885 nr_kernel_pages += freesize;
Jiang Liu01cefae2012-12-12 13:52:19 -08005886 /* Charge for highmem memmap if there are enough kernel pages */
5887 else if (nr_kernel_pages > memmap_pages * 2)
5888 nr_kernel_pages -= memmap_pages;
Jiang Liu9feedc92012-12-12 13:52:12 -08005889 nr_all_pages += freesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005890
Jiang Liu9feedc92012-12-12 13:52:12 -08005891 /*
5892		 * Set an approximate value for lowmem here; it will be adjusted
5893 * when the bootmem allocator frees pages into the buddy system.
5894 * And all highmem pages will be managed by the buddy system.
5895 */
5896 zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
Christoph Lameter96146342006-07-03 00:24:13 -07005897#ifdef CONFIG_NUMA
Christoph Lameterd5f541e2006-09-27 01:50:08 -07005898 zone->node = nid;
Christoph Lameter96146342006-07-03 00:24:13 -07005899#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07005900 zone->name = zone_names[j];
Linus Torvalds1da177e2005-04-16 15:20:36 -07005901 zone->zone_pgdat = pgdat;
Mel Gormana52633d2016-07-28 15:45:28 -07005902 spin_lock_init(&zone->lock);
5903 zone_seqlock_init(zone);
Dave Hansened8ece22005-10-29 18:16:50 -07005904 zone_pcp_init(zone);
Johannes Weiner81c0a2b2013-09-11 14:20:47 -07005905
Linus Torvalds1da177e2005-04-16 15:20:36 -07005906 if (!size)
5907 continue;
5908
Andrew Morton955c1cd2012-05-29 15:06:31 -07005909 set_pageblock_order();
Linus Torvalds7c455122013-02-18 09:58:02 -08005910 setup_usemap(pgdat, zone, zone_start_pfn, size);
Yaowei Baib171e402015-11-05 18:47:06 -08005911 ret = init_currently_empty_zone(zone, zone_start_pfn, size);
Yasunori Goto718127c2006-06-23 02:03:10 -07005912 BUG_ON(ret);
Heiko Carstens76cdd582008-05-14 16:05:52 -07005913 memmap_init(size, nid, j, zone_start_pfn);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005914 }
5915}
5916
Fabian Frederickbd721ea2016-08-02 14:03:33 -07005917static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005918{
Tony Luckb0aeba72015-11-10 10:09:47 -08005919 unsigned long __maybe_unused start = 0;
Laura Abbotta1c34a32015-11-05 18:48:46 -08005920 unsigned long __maybe_unused offset = 0;
5921
Linus Torvalds1da177e2005-04-16 15:20:36 -07005922 /* Skip empty nodes */
5923 if (!pgdat->node_spanned_pages)
5924 return;
5925
Andy Whitcroftd41dee32005-06-23 00:07:54 -07005926#ifdef CONFIG_FLAT_NODE_MEM_MAP
Tony Luckb0aeba72015-11-10 10:09:47 -08005927 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
5928 offset = pgdat->node_start_pfn - start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005929 /* ia64 gets its own node_mem_map, before this, without bootmem */
5930 if (!pgdat->node_mem_map) {
Tony Luckb0aeba72015-11-10 10:09:47 -08005931 unsigned long size, end;
Andy Whitcroftd41dee32005-06-23 00:07:54 -07005932 struct page *map;
5933
Bob Piccoe984bb42006-05-20 15:00:31 -07005934 /*
5935 * The zone's endpoints aren't required to be MAX_ORDER
5936		 * aligned, but the node_mem_map endpoints must be MAX_ORDER
5937		 * aligned in order for the buddy allocator to function correctly.
5938 */
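		/*
		 * Illustrative example (not part of the original source):
		 * with MAX_ORDER = 11, MAX_ORDER_NR_PAGES is 1024, so a
		 * node spanning pfns 1500-5000 gets a node_mem_map that
		 * covers pfns 1024-5120, i.e. 4096 struct page entries.
		 */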
Cody P Schafer108bcc92013-02-22 16:35:23 -08005939 end = pgdat_end_pfn(pgdat);
Bob Piccoe984bb42006-05-20 15:00:31 -07005940 end = ALIGN(end, MAX_ORDER_NR_PAGES);
5941 size = (end - start) * sizeof(struct page);
Dave Hansen6f167ec2005-06-23 00:07:39 -07005942 map = alloc_remap(pgdat->node_id, size);
5943 if (!map)
Santosh Shilimkar67828322014-01-21 15:50:25 -08005944 map = memblock_virt_alloc_node_nopanic(size,
5945 pgdat->node_id);
Laura Abbotta1c34a32015-11-05 18:48:46 -08005946 pgdat->node_mem_map = map + offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005947 }
Roman Zippel12d810c2007-05-31 00:40:54 -07005948#ifndef CONFIG_NEED_MULTIPLE_NODES
Linus Torvalds1da177e2005-04-16 15:20:36 -07005949 /*
5950 * With no DISCONTIG, the global mem_map is just set as node 0's
5951 */
Mel Gormanc7132162006-09-27 01:49:43 -07005952 if (pgdat == NODE_DATA(0)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005953 mem_map = NODE_DATA(0)->node_mem_map;
Laura Abbotta1c34a32015-11-05 18:48:46 -08005954#if defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) || defined(CONFIG_FLATMEM)
Mel Gormanc7132162006-09-27 01:49:43 -07005955 if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
Laura Abbotta1c34a32015-11-05 18:48:46 -08005956 mem_map -= offset;
Tejun Heo0ee332c2011-12-08 10:22:09 -08005957#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
Mel Gormanc7132162006-09-27 01:49:43 -07005958 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005959#endif
Andy Whitcroftd41dee32005-06-23 00:07:54 -07005960#endif /* CONFIG_FLAT_NODE_MEM_MAP */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005961}
5962
Johannes Weiner9109fb72008-07-23 21:27:20 -07005963void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
5964 unsigned long node_start_pfn, unsigned long *zholes_size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005965{
Johannes Weiner9109fb72008-07-23 21:27:20 -07005966 pg_data_t *pgdat = NODE_DATA(nid);
Zhang Yanfei7960aed2013-07-08 15:59:52 -07005967 unsigned long start_pfn = 0;
5968 unsigned long end_pfn = 0;
Johannes Weiner9109fb72008-07-23 21:27:20 -07005969
Minchan Kim88fdf752012-07-31 16:46:14 -07005970 /* pg_data_t should be reset to zero when it's allocated */
Mel Gorman38087d92016-07-28 15:45:49 -07005971 WARN_ON(pgdat->nr_zones || pgdat->kswapd_classzone_idx);
Minchan Kim88fdf752012-07-31 16:46:14 -07005972
Mel Gorman3a80a7f2015-06-30 14:57:02 -07005973 reset_deferred_meminit(pgdat);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005974 pgdat->node_id = nid;
5975 pgdat->node_start_pfn = node_start_pfn;
Mel Gorman75ef7182016-07-28 15:45:24 -07005976 pgdat->per_cpu_nodestats = NULL;
Zhang Yanfei7960aed2013-07-08 15:59:52 -07005977#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5978 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
Juergen Gross8d29e182015-02-11 15:26:01 -08005979 pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
Zhen Lei4ada0c52015-09-08 15:04:19 -07005980 (u64)start_pfn << PAGE_SHIFT,
5981 end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
Taku Izumid91749c2016-03-15 14:55:18 -07005982#else
5983 start_pfn = node_start_pfn;
Zhang Yanfei7960aed2013-07-08 15:59:52 -07005984#endif
5985 calculate_node_totalpages(pgdat, start_pfn, end_pfn,
5986 zones_size, zholes_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005987
5988 alloc_node_mem_map(pgdat);
Yinghai Lue8c27ac2008-06-01 13:15:22 -07005989#ifdef CONFIG_FLAT_NODE_MEM_MAP
5990 printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
5991 nid, (unsigned long)pgdat,
5992 (unsigned long)pgdat->node_mem_map);
5993#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07005994
Wei Yang7f3eb552015-09-08 14:59:50 -07005995 free_area_init_core(pgdat);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005996}
5997
Tejun Heo0ee332c2011-12-08 10:22:09 -08005998#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
Miklos Szeredi418508c2007-05-23 13:57:55 -07005999
6000#if MAX_NUMNODES > 1
6001/*
6002 * Figure out the number of possible node ids.
6003 */
Cody P Schaferf9872ca2013-04-29 15:08:01 -07006004void __init setup_nr_node_ids(void)
Miklos Szeredi418508c2007-05-23 13:57:55 -07006005{
Wei Yang904a9552015-09-08 14:59:48 -07006006 unsigned int highest;
Miklos Szeredi418508c2007-05-23 13:57:55 -07006007
Wei Yang904a9552015-09-08 14:59:48 -07006008 highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
Miklos Szeredi418508c2007-05-23 13:57:55 -07006009 nr_node_ids = highest + 1;
6010}
Miklos Szeredi418508c2007-05-23 13:57:55 -07006011#endif
6012
Mel Gormanc7132162006-09-27 01:49:43 -07006013/**
Tejun Heo1e019792011-07-12 09:45:34 +02006014 * node_map_pfn_alignment - determine the maximum internode alignment
6015 *
6016 * This function should be called after node map is populated and sorted.
6017 * It calculates the maximum power of two alignment which can distinguish
6018 * all the nodes.
6019 *
6020 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
6021 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the
6022 * nodes are shifted by 256MiB, the return value would indicate 256MiB alignment.
6023 * Note that if only the last node is shifted, 1GiB is enough and this function will indicate so.
6024 *
6025 * This is used to test whether pfn -> nid mapping of the chosen memory
6026 * model has fine enough granularity to avoid incorrect mapping for the
6027 * populated node map.
6028 *
6029 * Returns the determined alignment in pfn's. 0 if there is no alignment
6030 * requirement (single node).
6031 */
6032unsigned long __init node_map_pfn_alignment(void)
6033{
6034 unsigned long accl_mask = 0, last_end = 0;
Tejun Heoc13291a2011-07-12 10:46:30 +02006035 unsigned long start, end, mask;
Tejun Heo1e019792011-07-12 09:45:34 +02006036 int last_nid = -1;
Tejun Heoc13291a2011-07-12 10:46:30 +02006037 int i, nid;
Tejun Heo1e019792011-07-12 09:45:34 +02006038
Tejun Heoc13291a2011-07-12 10:46:30 +02006039 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
Tejun Heo1e019792011-07-12 09:45:34 +02006040 if (!start || last_nid < 0 || last_nid == nid) {
6041 last_nid = nid;
6042 last_end = end;
6043 continue;
6044 }
6045
6046 /*
6047 * Start with a mask granular enough to pin-point to the
6048 * start pfn and tick off bits one-by-one until it becomes
6049 * too coarse to separate the current node from the last.
6050 */
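		/*
		 * Illustrative trace (not part of the original source),
		 * assuming 4KiB pages: if the previous node ended at pfn
		 * 0x50000 and this node starts at pfn 0x50000 as well,
		 * __ffs(0x50000) is 16, giving an initial 256MiB mask;
		 * doubling it would mask the start down to pfn 0x40000,
		 * below last_end, so the loop stops and this pair of
		 * nodes needs 256MiB granularity.
		 */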
6051 mask = ~((1 << __ffs(start)) - 1);
6052 while (mask && last_end <= (start & (mask << 1)))
6053 mask <<= 1;
6054
6055 /* accumulate all internode masks */
6056 accl_mask |= mask;
6057 }
6058
6059 /* convert mask to number of pages */
6060 return ~accl_mask + 1;
6061}
6062
Mel Gormana6af2bc2007-02-10 01:42:57 -08006063/* Find the lowest pfn for a node */
Adrian Bunkb69a7282008-07-23 21:28:12 -07006064static unsigned long __init find_min_pfn_for_node(int nid)
Mel Gormanc7132162006-09-27 01:49:43 -07006065{
Mel Gormana6af2bc2007-02-10 01:42:57 -08006066 unsigned long min_pfn = ULONG_MAX;
Tejun Heoc13291a2011-07-12 10:46:30 +02006067 unsigned long start_pfn;
6068 int i;
Mel Gorman1abbfb42006-11-23 12:01:41 +00006069
Tejun Heoc13291a2011-07-12 10:46:30 +02006070 for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
6071 min_pfn = min(min_pfn, start_pfn);
Mel Gormanc7132162006-09-27 01:49:43 -07006072
Mel Gormana6af2bc2007-02-10 01:42:57 -08006073 if (min_pfn == ULONG_MAX) {
Joe Perches11705322016-03-17 14:19:50 -07006074 pr_warn("Could not find start_pfn for node %d\n", nid);
Mel Gormana6af2bc2007-02-10 01:42:57 -08006075 return 0;
6076 }
6077
6078 return min_pfn;
Mel Gormanc7132162006-09-27 01:49:43 -07006079}
6080
6081/**
6082 * find_min_pfn_with_active_regions - Find the minimum PFN registered
6083 *
6084 * It returns the minimum PFN based on information provided via
Zhang Zhen7d018172014-06-04 16:10:53 -07006085 * memblock_set_node().
Mel Gormanc7132162006-09-27 01:49:43 -07006086 */
6087unsigned long __init find_min_pfn_with_active_regions(void)
6088{
6089 return find_min_pfn_for_node(MAX_NUMNODES);
6090}
6091
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07006092/*
6093 * early_calculate_totalpages()
6094 * Sum pages in active regions for movable zone.
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08006095 * Populate N_MEMORY for calculating usable_nodes.
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07006096 */
Adrian Bunk484f51f2007-10-16 01:26:03 -07006097static unsigned long __init early_calculate_totalpages(void)
Mel Gorman7e63efe2007-07-17 04:03:15 -07006098{
Mel Gorman7e63efe2007-07-17 04:03:15 -07006099 unsigned long totalpages = 0;
Tejun Heoc13291a2011-07-12 10:46:30 +02006100 unsigned long start_pfn, end_pfn;
6101 int i, nid;
Mel Gorman7e63efe2007-07-17 04:03:15 -07006102
Tejun Heoc13291a2011-07-12 10:46:30 +02006103 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
6104 unsigned long pages = end_pfn - start_pfn;
6105
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07006106 totalpages += pages;
6107 if (pages)
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08006108 node_set_state(nid, N_MEMORY);
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07006109 }
Pintu Kumarb8af2942013-09-11 14:20:34 -07006110 return totalpages;
Mel Gorman7e63efe2007-07-17 04:03:15 -07006111}
6112
Mel Gorman2a1e2742007-07-17 04:03:12 -07006113/*
6114 * Find the PFN the Movable zone begins in each node. Kernel memory
6115 * is spread evenly between nodes as long as the nodes have enough
6116 * memory. When they don't, some nodes will have more kernelcore than
6117 * others
6118 */
Kautuk Consulb224ef82012-03-21 16:34:15 -07006119static void __init find_zone_movable_pfns_for_nodes(void)
Mel Gorman2a1e2742007-07-17 04:03:12 -07006120{
6121 int i, nid;
6122 unsigned long usable_startpfn;
6123 unsigned long kernelcore_node, kernelcore_remaining;
Yinghai Lu66918dc2009-06-30 11:41:37 -07006124 /* save the state before borrow the nodemask */
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08006125 nodemask_t saved_node_state = node_states[N_MEMORY];
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07006126 unsigned long totalpages = early_calculate_totalpages();
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08006127 int usable_nodes = nodes_weight(node_states[N_MEMORY]);
Emil Medve136199f2014-04-07 15:37:52 -07006128 struct memblock_region *r;
Tang Chenb2f3eeb2014-01-21 15:49:38 -08006129
6130 /* Need to find movable_zone earlier when movable_node is specified. */
6131 find_usable_zone_for_movable();
Mel Gorman2a1e2742007-07-17 04:03:12 -07006132
Mel Gorman7e63efe2007-07-17 04:03:15 -07006133 /*
Tang Chenb2f3eeb2014-01-21 15:49:38 -08006134 * If movable_node is specified, ignore kernelcore and movablecore
6135 * options.
6136 */
6137 if (movable_node_is_enabled()) {
Emil Medve136199f2014-04-07 15:37:52 -07006138 for_each_memblock(memory, r) {
6139 if (!memblock_is_hotpluggable(r))
Tang Chenb2f3eeb2014-01-21 15:49:38 -08006140 continue;
6141
Emil Medve136199f2014-04-07 15:37:52 -07006142 nid = r->nid;
Tang Chenb2f3eeb2014-01-21 15:49:38 -08006143
Emil Medve136199f2014-04-07 15:37:52 -07006144 usable_startpfn = PFN_DOWN(r->base);
Tang Chenb2f3eeb2014-01-21 15:49:38 -08006145 zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
6146 min(usable_startpfn, zone_movable_pfn[nid]) :
6147 usable_startpfn;
6148 }
6149
6150 goto out2;
6151 }
6152
6153 /*
Taku Izumi342332e2016-03-15 14:55:22 -07006154 * If kernelcore=mirror is specified, ignore movablecore option
6155 */
6156 if (mirrored_kernelcore) {
6157 bool mem_below_4gb_not_mirrored = false;
6158
6159 for_each_memblock(memory, r) {
6160 if (memblock_is_mirror(r))
6161 continue;
6162
6163 nid = r->nid;
6164
6165 usable_startpfn = memblock_region_memory_base_pfn(r);
6166
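			/* illustrative note: pfn 0x100000 is the 4GiB boundary with 4KiB pages */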
6167 if (usable_startpfn < 0x100000) {
6168 mem_below_4gb_not_mirrored = true;
6169 continue;
6170 }
6171
6172 zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
6173 min(usable_startpfn, zone_movable_pfn[nid]) :
6174 usable_startpfn;
6175 }
6176
6177 if (mem_below_4gb_not_mirrored)
6178			pr_warn("This configuration results in unmirrored kernel memory.\n");
6179
6180 goto out2;
6181 }
6182
6183 /*
Tang Chenb2f3eeb2014-01-21 15:49:38 -08006184 * If movablecore=nn[KMG] was specified, calculate what size of
Mel Gorman7e63efe2007-07-17 04:03:15 -07006185 * kernelcore that corresponds so that memory usable for
6186 * any allocation type is evenly spread. If both kernelcore
6187 * and movablecore are specified, then the value of kernelcore
6188 * will be used for required_kernelcore if it's greater than
6189 * what movablecore would have allowed.
6190 */
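	/*
	 * Illustrative example (not part of the original source), assuming
	 * 4KiB pages: booting an 8GiB machine with movablecore=2G leaves
	 * corepages at roughly 6GiB worth of pages, so required_kernelcore
	 * becomes at least that much and ZONE_MOVABLE is capped at 2GiB.
	 */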
6191 if (required_movablecore) {
Mel Gorman7e63efe2007-07-17 04:03:15 -07006192 unsigned long corepages;
6193
6194 /*
6195 * Round-up so that ZONE_MOVABLE is at least as large as what
6196 * was requested by the user
6197 */
6198 required_movablecore =
6199 roundup(required_movablecore, MAX_ORDER_NR_PAGES);
Xishi Qiu9fd745d2015-11-05 18:48:11 -08006200 required_movablecore = min(totalpages, required_movablecore);
Mel Gorman7e63efe2007-07-17 04:03:15 -07006201 corepages = totalpages - required_movablecore;
6202
6203 required_kernelcore = max(required_kernelcore, corepages);
6204 }
6205
Xishi Qiubde304b2015-11-05 18:48:56 -08006206 /*
6207 * If kernelcore was not specified or kernelcore size is larger
6208 * than totalpages, there is no ZONE_MOVABLE.
6209 */
6210 if (!required_kernelcore || required_kernelcore >= totalpages)
Yinghai Lu66918dc2009-06-30 11:41:37 -07006211 goto out;
Mel Gorman2a1e2742007-07-17 04:03:12 -07006212
6213 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
Mel Gorman2a1e2742007-07-17 04:03:12 -07006214 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
6215
6216restart:
6217 /* Spread kernelcore memory as evenly as possible throughout nodes */
6218 kernelcore_node = required_kernelcore / usable_nodes;
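	/*
	 * Illustrative example (not part of the original source): with
	 * kernelcore=4G and four nodes that have memory, each node is
	 * first asked for roughly 1GiB of kernelcore; nodes that cannot
	 * cover their share leave a remainder that the restart pass
	 * spreads over the remaining nodes.
	 */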
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08006219 for_each_node_state(nid, N_MEMORY) {
Tejun Heoc13291a2011-07-12 10:46:30 +02006220 unsigned long start_pfn, end_pfn;
6221
Mel Gorman2a1e2742007-07-17 04:03:12 -07006222 /*
6223 * Recalculate kernelcore_node if the division per node
6224 * now exceeds what is necessary to satisfy the requested
6225 * amount of memory for the kernel
6226 */
6227 if (required_kernelcore < kernelcore_node)
6228 kernelcore_node = required_kernelcore / usable_nodes;
6229
6230 /*
6231 * As the map is walked, we track how much memory is usable
6232 * by the kernel using kernelcore_remaining. When it is
6233 * 0, the rest of the node is usable by ZONE_MOVABLE
6234 */
6235 kernelcore_remaining = kernelcore_node;
6236
6237 /* Go through each range of PFNs within this node */
Tejun Heoc13291a2011-07-12 10:46:30 +02006238 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
Mel Gorman2a1e2742007-07-17 04:03:12 -07006239 unsigned long size_pages;
6240
Tejun Heoc13291a2011-07-12 10:46:30 +02006241 start_pfn = max(start_pfn, zone_movable_pfn[nid]);
Mel Gorman2a1e2742007-07-17 04:03:12 -07006242 if (start_pfn >= end_pfn)
6243 continue;
6244
6245 /* Account for what is only usable for kernelcore */
6246 if (start_pfn < usable_startpfn) {
6247 unsigned long kernel_pages;
6248 kernel_pages = min(end_pfn, usable_startpfn)
6249 - start_pfn;
6250
6251 kernelcore_remaining -= min(kernel_pages,
6252 kernelcore_remaining);
6253 required_kernelcore -= min(kernel_pages,
6254 required_kernelcore);
6255
6256 /* Continue if range is now fully accounted */
6257 if (end_pfn <= usable_startpfn) {
6258
6259 /*
6260 * Push zone_movable_pfn to the end so
6261 * that if we have to rebalance
6262 * kernelcore across nodes, we will
6263 * not double account here
6264 */
6265 zone_movable_pfn[nid] = end_pfn;
6266 continue;
6267 }
6268 start_pfn = usable_startpfn;
6269 }
6270
6271 /*
6272 * The usable PFN range for ZONE_MOVABLE is from
6273 * start_pfn->end_pfn. Calculate size_pages as the
6274 * number of pages used as kernelcore
6275 */
6276 size_pages = end_pfn - start_pfn;
6277 if (size_pages > kernelcore_remaining)
6278 size_pages = kernelcore_remaining;
6279 zone_movable_pfn[nid] = start_pfn + size_pages;
6280
6281 /*
6282 * Some kernelcore has been met, update counts and
6283 * break if the kernelcore for this node has been
Pintu Kumarb8af2942013-09-11 14:20:34 -07006284 * satisfied
Mel Gorman2a1e2742007-07-17 04:03:12 -07006285 */
6286 required_kernelcore -= min(required_kernelcore,
6287 size_pages);
6288 kernelcore_remaining -= size_pages;
6289 if (!kernelcore_remaining)
6290 break;
6291 }
6292 }
6293
6294 /*
6295 * If there is still required_kernelcore, we do another pass with one
6296 * less node in the count. This will push zone_movable_pfn[nid] further
6297 * along on the nodes that still have memory until kernelcore is
Pintu Kumarb8af2942013-09-11 14:20:34 -07006298 * satisfied
Mel Gorman2a1e2742007-07-17 04:03:12 -07006299 */
6300 usable_nodes--;
6301 if (usable_nodes && required_kernelcore > usable_nodes)
6302 goto restart;
6303
Tang Chenb2f3eeb2014-01-21 15:49:38 -08006304out2:
Mel Gorman2a1e2742007-07-17 04:03:12 -07006305 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
6306 for (nid = 0; nid < MAX_NUMNODES; nid++)
6307 zone_movable_pfn[nid] =
6308 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
Yinghai Lu66918dc2009-06-30 11:41:37 -07006309
Yinghai Lu20e69262013-03-01 14:51:27 -08006310out:
Yinghai Lu66918dc2009-06-30 11:41:37 -07006311 /* restore the node_state */
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08006312 node_states[N_MEMORY] = saved_node_state;
Mel Gorman2a1e2742007-07-17 04:03:12 -07006313}
6314
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08006315/* Any regular or high memory on that node ? */
6316static void check_for_memory(pg_data_t *pgdat, int nid)
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07006317{
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07006318 enum zone_type zone_type;
6319
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08006320 if (N_MEMORY == N_NORMAL_MEMORY)
6321 return;
6322
6323 for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07006324 struct zone *zone = &pgdat->node_zones[zone_type];
Xishi Qiub38a8722013-11-12 15:07:20 -08006325 if (populated_zone(zone)) {
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08006326 node_set_state(nid, N_HIGH_MEMORY);
6327 if (N_NORMAL_MEMORY != N_HIGH_MEMORY &&
6328 zone_type <= ZONE_NORMAL)
6329 node_set_state(nid, N_NORMAL_MEMORY);
Bob Liud0048b02012-01-12 17:19:07 -08006330 break;
6331 }
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07006332 }
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07006333}
6334
Mel Gormanc7132162006-09-27 01:49:43 -07006335/**
6336 * free_area_init_nodes - Initialise all pg_data_t and zone data
Randy Dunlap88ca3b92006-10-04 02:15:25 -07006337 * @max_zone_pfn: an array of max PFNs for each zone
Mel Gormanc7132162006-09-27 01:49:43 -07006338 *
6339 * This will call free_area_init_node() for each active node in the system.
Zhang Zhen7d018172014-06-04 16:10:53 -07006340 * Using the page ranges provided by memblock_set_node(), the size of each
Mel Gormanc7132162006-09-27 01:49:43 -07006341 * zone in each node and their holes is calculated. If the maximum PFN
6342 * between two adjacent zones match, it is assumed that the zone is empty.
6343 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
6344 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
6345 * starts where the previous one ended. For example, ZONE_DMA32 starts
6346 * at arch_max_dma_pfn.
6347 */
6348void __init free_area_init_nodes(unsigned long *max_zone_pfn)
6349{
Tejun Heoc13291a2011-07-12 10:46:30 +02006350 unsigned long start_pfn, end_pfn;
6351 int i, nid;
Mel Gormana6af2bc2007-02-10 01:42:57 -08006352
Mel Gormanc7132162006-09-27 01:49:43 -07006353 /* Record where the zone boundaries are */
6354 memset(arch_zone_lowest_possible_pfn, 0,
6355 sizeof(arch_zone_lowest_possible_pfn));
6356 memset(arch_zone_highest_possible_pfn, 0,
6357 sizeof(arch_zone_highest_possible_pfn));
Oliver O'Halloran90cae1f2016-07-26 15:22:17 -07006358
6359 start_pfn = find_min_pfn_with_active_regions();
6360
6361 for (i = 0; i < MAX_NR_ZONES; i++) {
Mel Gorman2a1e2742007-07-17 04:03:12 -07006362 if (i == ZONE_MOVABLE)
6363 continue;
Oliver O'Halloran90cae1f2016-07-26 15:22:17 -07006364
6365 end_pfn = max(max_zone_pfn[i], start_pfn);
6366 arch_zone_lowest_possible_pfn[i] = start_pfn;
6367 arch_zone_highest_possible_pfn[i] = end_pfn;
6368
6369 start_pfn = end_pfn;
Mel Gormanc7132162006-09-27 01:49:43 -07006370 }
Mel Gorman2a1e2742007-07-17 04:03:12 -07006371 arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
6372 arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
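	/*
	 * Illustrative example (not part of the original source): on an
	 * x86_64 box with 4GiB of RAM and 4KiB pages, max_zone_pfn would
	 * be roughly {4096, 1048576, 1048576} for DMA, DMA32 and NORMAL,
	 * giving ZONE_DMA up to 16MiB, ZONE_DMA32 up to 4GiB and an
	 * essentially empty ZONE_NORMAL.
	 */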
6373
6374 /* Find the PFNs that ZONE_MOVABLE begins at in each node */
6375 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
Kautuk Consulb224ef82012-03-21 16:34:15 -07006376 find_zone_movable_pfns_for_nodes();
Mel Gormanc7132162006-09-27 01:49:43 -07006377
Mel Gormanc7132162006-09-27 01:49:43 -07006378 /* Print out the zone ranges */
Anton Blanchardf88dfff2014-12-10 15:42:53 -08006379 pr_info("Zone ranges:\n");
Mel Gorman2a1e2742007-07-17 04:03:12 -07006380 for (i = 0; i < MAX_NR_ZONES; i++) {
6381 if (i == ZONE_MOVABLE)
6382 continue;
Anton Blanchardf88dfff2014-12-10 15:42:53 -08006383 pr_info(" %-8s ", zone_names[i]);
David Rientjes72f0ba02010-03-05 13:42:14 -08006384 if (arch_zone_lowest_possible_pfn[i] ==
6385 arch_zone_highest_possible_pfn[i])
Anton Blanchardf88dfff2014-12-10 15:42:53 -08006386 pr_cont("empty\n");
David Rientjes72f0ba02010-03-05 13:42:14 -08006387 else
Juergen Gross8d29e182015-02-11 15:26:01 -08006388 pr_cont("[mem %#018Lx-%#018Lx]\n",
6389 (u64)arch_zone_lowest_possible_pfn[i]
6390 << PAGE_SHIFT,
6391 ((u64)arch_zone_highest_possible_pfn[i]
Bjorn Helgaasa62e2f42012-05-29 15:06:30 -07006392 << PAGE_SHIFT) - 1);
Mel Gorman2a1e2742007-07-17 04:03:12 -07006393 }
6394
6395 /* Print out the PFNs ZONE_MOVABLE begins at in each node */
Anton Blanchardf88dfff2014-12-10 15:42:53 -08006396 pr_info("Movable zone start for each node\n");
Mel Gorman2a1e2742007-07-17 04:03:12 -07006397 for (i = 0; i < MAX_NUMNODES; i++) {
6398 if (zone_movable_pfn[i])
Juergen Gross8d29e182015-02-11 15:26:01 -08006399 pr_info(" Node %d: %#018Lx\n", i,
6400 (u64)zone_movable_pfn[i] << PAGE_SHIFT);
Mel Gorman2a1e2742007-07-17 04:03:12 -07006401 }
Mel Gormanc7132162006-09-27 01:49:43 -07006402
Wanpeng Lif2d52fe2012-10-08 16:32:24 -07006403 /* Print out the early node map */
Anton Blanchardf88dfff2014-12-10 15:42:53 -08006404 pr_info("Early memory node ranges\n");
Tejun Heoc13291a2011-07-12 10:46:30 +02006405 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
Juergen Gross8d29e182015-02-11 15:26:01 -08006406 pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid,
6407 (u64)start_pfn << PAGE_SHIFT,
6408 ((u64)end_pfn << PAGE_SHIFT) - 1);
Mel Gormanc7132162006-09-27 01:49:43 -07006409
6410 /* Initialise every node */
Mel Gorman708614e2008-07-23 21:26:51 -07006411 mminit_verify_pageflags_layout();
Christoph Lameter8ef82862007-02-20 13:57:52 -08006412 setup_nr_node_ids();
Mel Gormanc7132162006-09-27 01:49:43 -07006413 for_each_online_node(nid) {
6414 pg_data_t *pgdat = NODE_DATA(nid);
Johannes Weiner9109fb72008-07-23 21:27:20 -07006415 free_area_init_node(nid, NULL,
Mel Gormanc7132162006-09-27 01:49:43 -07006416 find_min_pfn_for_node(nid), NULL);
Lee Schermerhorn37b07e42007-10-16 01:25:39 -07006417
6418 /* Any memory on that node */
6419 if (pgdat->node_present_pages)
Lai Jiangshan4b0ef1fe2012-12-12 13:51:46 -08006420 node_set_state(nid, N_MEMORY);
6421 check_for_memory(pgdat, nid);
Mel Gormanc7132162006-09-27 01:49:43 -07006422 }
6423}
Mel Gorman2a1e2742007-07-17 04:03:12 -07006424
Mel Gorman7e63efe2007-07-17 04:03:15 -07006425static int __init cmdline_parse_core(char *p, unsigned long *core)
Mel Gorman2a1e2742007-07-17 04:03:12 -07006426{
6427 unsigned long long coremem;
6428 if (!p)
6429 return -EINVAL;
6430
6431 coremem = memparse(p, &p);
Mel Gorman7e63efe2007-07-17 04:03:15 -07006432 *core = coremem >> PAGE_SHIFT;
Mel Gorman2a1e2742007-07-17 04:03:12 -07006433
Mel Gorman7e63efe2007-07-17 04:03:15 -07006434 /* Paranoid check that UL is enough for the coremem value */
Mel Gorman2a1e2742007-07-17 04:03:12 -07006435 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
6436
6437 return 0;
6438}
Mel Gormaned7ed362007-07-17 04:03:14 -07006439
Mel Gorman7e63efe2007-07-17 04:03:15 -07006440/*
6441 * kernelcore=size sets the amount of memory to use for allocations that
6442 * cannot be reclaimed or migrated.
6443 */
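/*
 * Illustrative example (not part of the original source): booting with
 * kernelcore=512M stores 131072 pages (assuming 4KiB pages) in
 * required_kernelcore; kernelcore=mirror is handled separately below.
 */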
6444static int __init cmdline_parse_kernelcore(char *p)
6445{
Taku Izumi342332e2016-03-15 14:55:22 -07006446 /* parse kernelcore=mirror */
6447 if (parse_option_str(p, "mirror")) {
6448 mirrored_kernelcore = true;
6449 return 0;
6450 }
6451
Mel Gorman7e63efe2007-07-17 04:03:15 -07006452 return cmdline_parse_core(p, &required_kernelcore);
6453}
6454
6455/*
6456 * movablecore=size sets the amount of memory to use for allocations that
6457 * can be reclaimed or migrated.
6458 */
6459static int __init cmdline_parse_movablecore(char *p)
6460{
6461 return cmdline_parse_core(p, &required_movablecore);
6462}
6463
Mel Gormaned7ed362007-07-17 04:03:14 -07006464early_param("kernelcore", cmdline_parse_kernelcore);
Mel Gorman7e63efe2007-07-17 04:03:15 -07006465early_param("movablecore", cmdline_parse_movablecore);
Mel Gormaned7ed362007-07-17 04:03:14 -07006466
Tejun Heo0ee332c2011-12-08 10:22:09 -08006467#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
Mel Gormanc7132162006-09-27 01:49:43 -07006468
Jiang Liuc3d5f5f2013-07-03 15:03:14 -07006469void adjust_managed_page_count(struct page *page, long count)
6470{
6471 spin_lock(&managed_page_count_lock);
6472 page_zone(page)->managed_pages += count;
6473 totalram_pages += count;
Jiang Liu3dcc0572013-07-03 15:03:21 -07006474#ifdef CONFIG_HIGHMEM
6475 if (PageHighMem(page))
6476 totalhigh_pages += count;
6477#endif
Jiang Liuc3d5f5f2013-07-03 15:03:14 -07006478 spin_unlock(&managed_page_count_lock);
6479}
Jiang Liu3dcc0572013-07-03 15:03:21 -07006480EXPORT_SYMBOL(adjust_managed_page_count);
Jiang Liuc3d5f5f2013-07-03 15:03:14 -07006481
Jiang Liu11199692013-07-03 15:02:48 -07006482unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
Jiang Liu69afade2013-04-29 15:06:21 -07006483{
Jiang Liu11199692013-07-03 15:02:48 -07006484 void *pos;
6485 unsigned long pages = 0;
Jiang Liu69afade2013-04-29 15:06:21 -07006486
Jiang Liu11199692013-07-03 15:02:48 -07006487 start = (void *)PAGE_ALIGN((unsigned long)start);
6488 end = (void *)((unsigned long)end & PAGE_MASK);
6489 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
Jiang Liudbe67df2013-07-03 15:02:51 -07006490 if ((unsigned int)poison <= 0xFF)
Jiang Liu11199692013-07-03 15:02:48 -07006491 memset(pos, poison, PAGE_SIZE);
6492 free_reserved_page(virt_to_page(pos));
Jiang Liu69afade2013-04-29 15:06:21 -07006493 }
6494
6495 if (pages && s)
Jiang Liu11199692013-07-03 15:02:48 -07006496 pr_info("Freeing %s memory: %ldK (%p - %p)\n",
Jiang Liu69afade2013-04-29 15:06:21 -07006497 s, pages << (PAGE_SHIFT - 10), start, end);
6498
6499 return pages;
6500}
Jiang Liu11199692013-07-03 15:02:48 -07006501EXPORT_SYMBOL(free_reserved_area);
Jiang Liu69afade2013-04-29 15:06:21 -07006502
Jiang Liucfa11e02013-04-29 15:07:00 -07006503#ifdef CONFIG_HIGHMEM
6504void free_highmem_page(struct page *page)
6505{
6506 __free_reserved_page(page);
6507 totalram_pages++;
Jiang Liu7b4b2a02013-07-03 15:03:11 -07006508 page_zone(page)->managed_pages++;
Jiang Liucfa11e02013-04-29 15:07:00 -07006509 totalhigh_pages++;
6510}
6511#endif
6512
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07006513
6514void __init mem_init_print_info(const char *str)
6515{
6516 unsigned long physpages, codesize, datasize, rosize, bss_size;
6517 unsigned long init_code_size, init_data_size;
6518
6519 physpages = get_num_physpages();
6520 codesize = _etext - _stext;
6521 datasize = _edata - _sdata;
6522 rosize = __end_rodata - __start_rodata;
6523 bss_size = __bss_stop - __bss_start;
6524 init_data_size = __init_end - __init_begin;
6525 init_code_size = _einittext - _sinittext;
6526
6527 /*
6528 * Detect special cases and adjust section sizes accordingly:
6529 * 1) .init.* may be embedded into .data sections
6530 * 2) .init.text.* may be out of [__init_begin, __init_end],
6531 * please refer to arch/tile/kernel/vmlinux.lds.S.
6532 * 3) .rodata.* may be embedded into .text or .data sections.
6533 */
6534#define adj_init_size(start, end, size, pos, adj) \
Pintu Kumarb8af2942013-09-11 14:20:34 -07006535 do { \
6536 if (start <= pos && pos < end && size > adj) \
6537 size -= adj; \
6538 } while (0)
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07006539
6540 adj_init_size(__init_begin, __init_end, init_data_size,
6541 _sinittext, init_code_size);
6542 adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
6543 adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
6544 adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
6545 adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
6546
6547#undef adj_init_size
6548
Joe Perches756a0252016-03-17 14:19:47 -07006549 pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07006550#ifdef CONFIG_HIGHMEM
Joe Perches756a0252016-03-17 14:19:47 -07006551 ", %luK highmem"
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07006552#endif
Joe Perches756a0252016-03-17 14:19:47 -07006553 "%s%s)\n",
6554 nr_free_pages() << (PAGE_SHIFT - 10),
6555 physpages << (PAGE_SHIFT - 10),
6556 codesize >> 10, datasize >> 10, rosize >> 10,
6557 (init_data_size + init_code_size) >> 10, bss_size >> 10,
6558 (physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT - 10),
6559 totalcma_pages << (PAGE_SHIFT - 10),
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07006560#ifdef CONFIG_HIGHMEM
Joe Perches756a0252016-03-17 14:19:47 -07006561 totalhigh_pages << (PAGE_SHIFT - 10),
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07006562#endif
Joe Perches756a0252016-03-17 14:19:47 -07006563 str ? ", " : "", str ? str : "");
Jiang Liu7ee3d4e2013-07-03 15:03:41 -07006564}
6565
Mel Gorman0e0b8642006-09-27 01:49:56 -07006566/**
Randy Dunlap88ca3b92006-10-04 02:15:25 -07006567 * set_dma_reserve - set the specified number of pages reserved in the first zone
6568 * @new_dma_reserve: The number of pages to mark reserved
Mel Gorman0e0b8642006-09-27 01:49:56 -07006569 *
Yaowei Bai013110a2015-09-08 15:04:10 -07006570 * The per-cpu batchsize and zone watermarks are determined by managed_pages.
Mel Gorman0e0b8642006-09-27 01:49:56 -07006571 * In the DMA zone, a significant percentage may be consumed by kernel image
6572 * and other unfreeable allocations which can skew the watermarks badly. This
Randy Dunlap88ca3b92006-10-04 02:15:25 -07006573 * function may optionally be used to account for unfreeable pages in the
6574 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
6575 * smaller per-cpu batchsize.
Mel Gorman0e0b8642006-09-27 01:49:56 -07006576 */
6577void __init set_dma_reserve(unsigned long new_dma_reserve)
6578{
6579 dma_reserve = new_dma_reserve;
6580}
6581
Linus Torvalds1da177e2005-04-16 15:20:36 -07006582void __init free_area_init(unsigned long *zones_size)
6583{
Johannes Weiner9109fb72008-07-23 21:27:20 -07006584 free_area_init_node(0, zones_size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006585 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
6586}
Linus Torvalds1da177e2005-04-16 15:20:36 -07006587
Linus Torvalds1da177e2005-04-16 15:20:36 -07006588static int page_alloc_cpu_notify(struct notifier_block *self,
6589 unsigned long action, void *hcpu)
6590{
6591 int cpu = (unsigned long)hcpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006592
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07006593 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
Konstantin Khlebnikovf0cb3c72012-03-21 16:34:06 -07006594 lru_add_drain_cpu(cpu);
Christoph Lameter9f8f2172008-02-04 22:29:11 -08006595 drain_pages(cpu);
6596
6597 /*
6598 * Spill the event counters of the dead processor
6599 * into the current processors event counters.
6600 * This artificially elevates the count of the current
6601 * processor.
6602 */
Christoph Lameterf8891e52006-06-30 01:55:45 -07006603 vm_events_fold_cpu(cpu);
Christoph Lameter9f8f2172008-02-04 22:29:11 -08006604
6605 /*
6606 * Zero the differential counters of the dead processor
6607 * so that the vm statistics are consistent.
6608 *
6609 * This is only okay since the processor is dead and cannot
6610 * race with what we are doing.
6611 */
Christoph Lameter2bb921e2013-09-11 14:21:30 -07006612 cpu_vm_stats_fold(cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006613 }
6614 return NOTIFY_OK;
6615}
Linus Torvalds1da177e2005-04-16 15:20:36 -07006616
6617void __init page_alloc_init(void)
6618{
6619 hotcpu_notifier(page_alloc_cpu_notify, 0);
6620}
6621
6622/*
Yaowei Bai34b10062015-09-08 15:04:13 -07006623 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07006624 * or min_free_kbytes changes.
6625 */
6626static void calculate_totalreserve_pages(void)
6627{
6628 struct pglist_data *pgdat;
6629 unsigned long reserve_pages = 0;
Christoph Lameter2f6726e2006-09-25 23:31:18 -07006630 enum zone_type i, j;
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07006631
6632 for_each_online_pgdat(pgdat) {
Mel Gorman281e3722016-07-28 15:46:11 -07006633
6634 pgdat->totalreserve_pages = 0;
6635
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07006636 for (i = 0; i < MAX_NR_ZONES; i++) {
6637 struct zone *zone = pgdat->node_zones + i;
Mel Gorman3484b2d2014-08-06 16:07:14 -07006638 long max = 0;
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07006639
6640 /* Find valid and maximum lowmem_reserve in the zone */
6641 for (j = i; j < MAX_NR_ZONES; j++) {
6642 if (zone->lowmem_reserve[j] > max)
6643 max = zone->lowmem_reserve[j];
6644 }
6645
Mel Gorman41858962009-06-16 15:32:12 -07006646 /* we treat the high watermark as reserved pages. */
6647 max += high_wmark_pages(zone);
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07006648
Jiang Liub40da042013-02-22 16:33:52 -08006649 if (max > zone->managed_pages)
6650 max = zone->managed_pages;
Johannes Weinera8d01432016-01-14 15:20:15 -08006651
Mel Gorman281e3722016-07-28 15:46:11 -07006652 pgdat->totalreserve_pages += max;
Johannes Weinera8d01432016-01-14 15:20:15 -08006653
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07006654 reserve_pages += max;
6655 }
6656 }
6657 totalreserve_pages = reserve_pages;
6658}
6659
6660/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07006661 * setup_per_zone_lowmem_reserve - called whenever
Yaowei Bai34b10062015-09-08 15:04:13 -07006662 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
Linus Torvalds1da177e2005-04-16 15:20:36 -07006663 * has a correct pages reserved value, so an adequate number of
6664 * pages are left in the zone after a successful __alloc_pages().
6665 */
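/*
 * Illustrative example (not part of the original source): with the
 * default sysctl_lowmem_reserve_ratio of 256 for ZONE_DMA32, a node
 * whose ZONE_NORMAL manages 4194304 pages (16GiB with 4KiB pages) ends
 * up with lowmem_reserve[ZONE_NORMAL] = 4194304 / 256 = 16384 pages
 * (64MiB) held back in ZONE_DMA32 against allocations that could have
 * been satisfied from ZONE_NORMAL.
 */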
6666static void setup_per_zone_lowmem_reserve(void)
6667{
6668 struct pglist_data *pgdat;
Christoph Lameter2f6726e2006-09-25 23:31:18 -07006669 enum zone_type j, idx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006670
KAMEZAWA Hiroyukiec936fc2006-03-27 01:15:59 -08006671 for_each_online_pgdat(pgdat) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006672 for (j = 0; j < MAX_NR_ZONES; j++) {
6673 struct zone *zone = pgdat->node_zones + j;
Jiang Liub40da042013-02-22 16:33:52 -08006674 unsigned long managed_pages = zone->managed_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006675
6676 zone->lowmem_reserve[j] = 0;
6677
Christoph Lameter2f6726e2006-09-25 23:31:18 -07006678 idx = j;
6679 while (idx) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006680 struct zone *lower_zone;
6681
Christoph Lameter2f6726e2006-09-25 23:31:18 -07006682 idx--;
6683
Linus Torvalds1da177e2005-04-16 15:20:36 -07006684 if (sysctl_lowmem_reserve_ratio[idx] < 1)
6685 sysctl_lowmem_reserve_ratio[idx] = 1;
6686
6687 lower_zone = pgdat->node_zones + idx;
Jiang Liub40da042013-02-22 16:33:52 -08006688 lower_zone->lowmem_reserve[j] = managed_pages /
Linus Torvalds1da177e2005-04-16 15:20:36 -07006689 sysctl_lowmem_reserve_ratio[idx];
Jiang Liub40da042013-02-22 16:33:52 -08006690 managed_pages += lower_zone->managed_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006691 }
6692 }
6693 }
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07006694
6695 /* update totalreserve_pages */
6696 calculate_totalreserve_pages();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006697}
6698
Mel Gormancfd3da12011-04-25 21:36:42 +00006699static void __setup_per_zone_wmarks(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006700{
6701 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
6702 unsigned long lowmem_pages = 0;
6703 struct zone *zone;
6704 unsigned long flags;
6705
6706 /* Calculate total number of !ZONE_HIGHMEM pages */
6707 for_each_zone(zone) {
6708 if (!is_highmem(zone))
Jiang Liub40da042013-02-22 16:33:52 -08006709 lowmem_pages += zone->managed_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006710 }
6711
6712 for_each_zone(zone) {
Andrew Mortonac924c62006-05-15 09:43:59 -07006713 u64 tmp;
6714
Gerald Schaefer1125b4e2008-10-18 20:27:11 -07006715 spin_lock_irqsave(&zone->lock, flags);
Jiang Liub40da042013-02-22 16:33:52 -08006716 tmp = (u64)pages_min * zone->managed_pages;
Andrew Mortonac924c62006-05-15 09:43:59 -07006717 do_div(tmp, lowmem_pages);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006718 if (is_highmem(zone)) {
6719 /*
Nick Piggin669ed172005-11-13 16:06:45 -08006720 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
6721 * need highmem pages, so cap pages_min to a small
6722 * value here.
6723 *
Mel Gorman41858962009-06-16 15:32:12 -07006724 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
Yaowei Bai42ff2702015-04-14 15:47:14 -07006725 * deltas control asynch page reclaim, and so should
Nick Piggin669ed172005-11-13 16:06:45 -08006726 * not be capped for highmem.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006727 */
Andrew Morton90ae8d62013-02-22 16:32:22 -08006728 unsigned long min_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006729
Jiang Liub40da042013-02-22 16:33:52 -08006730 min_pages = zone->managed_pages / 1024;
Andrew Morton90ae8d62013-02-22 16:32:22 -08006731 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
Mel Gorman41858962009-06-16 15:32:12 -07006732 zone->watermark[WMARK_MIN] = min_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006733 } else {
Nick Piggin669ed172005-11-13 16:06:45 -08006734 /*
6735 * If it's a lowmem zone, reserve a number of pages
Linus Torvalds1da177e2005-04-16 15:20:36 -07006736 * proportionate to the zone's size.
6737 */
Mel Gorman41858962009-06-16 15:32:12 -07006738 zone->watermark[WMARK_MIN] = tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006739 }
6740
Johannes Weiner795ae7a2016-03-17 14:19:14 -07006741 /*
6742 * Set the kswapd watermarks distance according to the
6743 * scale factor in proportion to available memory, but
6744 * ensure a minimum size on small systems.
6745 */
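		/*
		 * Illustrative example (not part of the original source):
		 * with the default watermark_scale_factor of 10 the gap is
		 * at least 0.1% of the zone's managed pages, so the low
		 * and high watermarks sit roughly 0.1% and 0.2% above the
		 * min watermark.
		 */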
6746 tmp = max_t(u64, tmp >> 2,
6747 mult_frac(zone->managed_pages,
6748 watermark_scale_factor, 10000));
6749
6750 zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp;
6751 zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2;
Marek Szyprowski49f223a2012-01-25 12:49:24 +01006752
Gerald Schaefer1125b4e2008-10-18 20:27:11 -07006753 spin_unlock_irqrestore(&zone->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006754 }
Hideo AOKIcb45b0e2006-04-10 22:52:59 -07006755
6756 /* update totalreserve_pages */
6757 calculate_totalreserve_pages();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006758}
6759
Mel Gormancfd3da12011-04-25 21:36:42 +00006760/**
6761 * setup_per_zone_wmarks - called when min_free_kbytes changes
6762 * or when memory is hot-{added|removed}
6763 *
6764 * Ensures that the watermark[min,low,high] values for each zone are set
6765 * correctly with respect to min_free_kbytes.
6766 */
6767void setup_per_zone_wmarks(void)
6768{
6769 mutex_lock(&zonelists_mutex);
6770 __setup_per_zone_wmarks();
6771 mutex_unlock(&zonelists_mutex);
6772}
6773
Randy Dunlap55a44622009-09-21 17:01:20 -07006774/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07006775 * Initialise min_free_kbytes.
6776 *
6777 * For small machines we want it small (128k min). For large machines
6778 * we want it large (64MB max). But it is not linear, because network
6779 * bandwidth does not increase linearly with machine size. We use
6780 *
Pintu Kumarb8af2942013-09-11 14:20:34 -07006781 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
Linus Torvalds1da177e2005-04-16 15:20:36 -07006782 * min_free_kbytes = sqrt(lowmem_kbytes * 16)
6783 *
6784 * which yields
6785 *
6786 * 16MB: 512k
6787 * 32MB: 724k
6788 * 64MB: 1024k
6789 * 128MB: 1448k
6790 * 256MB: 2048k
6791 * 512MB: 2896k
6792 * 1024MB: 4096k
6793 * 2048MB: 5792k
6794 * 4096MB: 8192k
6795 * 8192MB: 11584k
6796 * 16384MB: 16384k
6797 */
KOSAKI Motohiro1b79acc2011-05-24 17:11:32 -07006798int __meminit init_per_zone_wmark_min(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006799{
6800 unsigned long lowmem_kbytes;
Michal Hocko5f127332013-07-08 16:00:40 -07006801 int new_min_free_kbytes;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006802
6803 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
Michal Hocko5f127332013-07-08 16:00:40 -07006804 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006805
Michal Hocko5f127332013-07-08 16:00:40 -07006806 if (new_min_free_kbytes > user_min_free_kbytes) {
6807 min_free_kbytes = new_min_free_kbytes;
6808 if (min_free_kbytes < 128)
6809 min_free_kbytes = 128;
6810 if (min_free_kbytes > 65536)
6811 min_free_kbytes = 65536;
6812 } else {
6813 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
6814 new_min_free_kbytes, user_min_free_kbytes);
6815 }
Minchan Kimbc75d332009-06-16 15:32:48 -07006816 setup_per_zone_wmarks();
KOSAKI Motohiroa6cccdc2011-05-24 17:11:33 -07006817 refresh_zone_stat_thresholds();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006818 setup_per_zone_lowmem_reserve();
Joonsoo Kim6423aa82016-08-10 16:27:49 -07006819
6820#ifdef CONFIG_NUMA
6821 setup_min_unmapped_ratio();
6822 setup_min_slab_ratio();
6823#endif
6824
Linus Torvalds1da177e2005-04-16 15:20:36 -07006825 return 0;
6826}
Jason Baronbc22af742016-05-05 16:22:12 -07006827core_initcall(init_per_zone_wmark_min)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006828
6829/*
Pintu Kumarb8af2942013-09-11 14:20:34 -07006830 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
Linus Torvalds1da177e2005-04-16 15:20:36 -07006831 * that we can call two helper functions whenever min_free_kbytes
6832 * changes.
6833 */
Joe Perchescccad5b2014-06-06 14:38:09 -07006834int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07006835 void __user *buffer, size_t *length, loff_t *ppos)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006836{
Han Pingtianda8c7572014-01-23 15:53:17 -08006837 int rc;
6838
6839 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6840 if (rc)
6841 return rc;
6842
Michal Hocko5f127332013-07-08 16:00:40 -07006843 if (write) {
6844 user_min_free_kbytes = min_free_kbytes;
Minchan Kimbc75d332009-06-16 15:32:48 -07006845 setup_per_zone_wmarks();
Michal Hocko5f127332013-07-08 16:00:40 -07006846 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006847 return 0;
6848}
6849
Johannes Weiner795ae7a2016-03-17 14:19:14 -07006850int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
6851 void __user *buffer, size_t *length, loff_t *ppos)
6852{
6853 int rc;
6854
6855 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6856 if (rc)
6857 return rc;
6858
6859 if (write)
6860 setup_per_zone_wmarks();
6861
6862 return 0;
6863}
6864
Christoph Lameter96146342006-07-03 00:24:13 -07006865#ifdef CONFIG_NUMA
Joonsoo Kim6423aa82016-08-10 16:27:49 -07006866static void setup_min_unmapped_ratio(void)
Christoph Lameter96146342006-07-03 00:24:13 -07006867{
Joonsoo Kim6423aa82016-08-10 16:27:49 -07006868 pg_data_t *pgdat;
Christoph Lameter96146342006-07-03 00:24:13 -07006869 struct zone *zone;
Christoph Lameter96146342006-07-03 00:24:13 -07006870
Mel Gormana5f5f912016-07-28 15:46:32 -07006871 for_each_online_pgdat(pgdat)
Joonsoo Kim81cbcbc2016-08-10 16:27:46 -07006872 pgdat->min_unmapped_pages = 0;
Mel Gormana5f5f912016-07-28 15:46:32 -07006873
Christoph Lameter96146342006-07-03 00:24:13 -07006874 for_each_zone(zone)
Mel Gormana5f5f912016-07-28 15:46:32 -07006875 zone->zone_pgdat->min_unmapped_pages += (zone->managed_pages *
Christoph Lameter96146342006-07-03 00:24:13 -07006876 sysctl_min_unmapped_ratio) / 100;
Christoph Lameter96146342006-07-03 00:24:13 -07006877}
Christoph Lameter0ff38492006-09-25 23:31:52 -07006878
Joonsoo Kim6423aa82016-08-10 16:27:49 -07006879
6880int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07006881 void __user *buffer, size_t *length, loff_t *ppos)
Christoph Lameter0ff38492006-09-25 23:31:52 -07006882{
Christoph Lameter0ff38492006-09-25 23:31:52 -07006883 int rc;
6884
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07006885 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
Christoph Lameter0ff38492006-09-25 23:31:52 -07006886 if (rc)
6887 return rc;
6888
Joonsoo Kim6423aa82016-08-10 16:27:49 -07006889 setup_min_unmapped_ratio();
6890
6891 return 0;
6892}
6893
6894static void setup_min_slab_ratio(void)
6895{
6896 pg_data_t *pgdat;
6897 struct zone *zone;
6898
Mel Gormana5f5f912016-07-28 15:46:32 -07006899 for_each_online_pgdat(pgdat)
6900 pgdat->min_slab_pages = 0;
6901
Christoph Lameter0ff38492006-09-25 23:31:52 -07006902 for_each_zone(zone)
Mel Gormana5f5f912016-07-28 15:46:32 -07006903 zone->zone_pgdat->min_slab_pages += (zone->managed_pages *
Christoph Lameter0ff38492006-09-25 23:31:52 -07006904 sysctl_min_slab_ratio) / 100;
Joonsoo Kim6423aa82016-08-10 16:27:49 -07006905}
6906
6907int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
6908 void __user *buffer, size_t *length, loff_t *ppos)
6909{
6910 int rc;
6911
6912 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6913 if (rc)
6914 return rc;
6915
6916 setup_min_slab_ratio();
6917
Christoph Lameter0ff38492006-09-25 23:31:52 -07006918 return 0;
6919}
Christoph Lameter96146342006-07-03 00:24:13 -07006920#endif
6921
Linus Torvalds1da177e2005-04-16 15:20:36 -07006922/*
6923 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
6924 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
6925 * whenever sysctl_lowmem_reserve_ratio changes.
6926 *
6927 * The reserve ratio obviously has absolutely no relation with the
Mel Gorman41858962009-06-16 15:32:12 -07006928 * minimum watermarks. The lowmem reserve ratio can only make sense
Linus Torvalds1da177e2005-04-16 15:20:36 -07006929 * as a function of the boot-time zone sizes.
6930 */
Joe Perchescccad5b2014-06-06 14:38:09 -07006931int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07006932 void __user *buffer, size_t *length, loff_t *ppos)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006933{
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07006934 proc_dointvec_minmax(table, write, buffer, length, ppos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006935 setup_per_zone_lowmem_reserve();
6936 return 0;
6937}
6938
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08006939/*
6940 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
Pintu Kumarb8af2942013-09-11 14:20:34 -07006941 * cpu. It is the fraction of total pages in each zone that a hot per cpu
6942 * pagelist can have before it gets flushed back to the buddy allocator.
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08006943 */
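/*
 * Illustrative example (not part of the original source): writing 8 to
 * /proc/sys/vm/percpu_pagelist_fraction lets each CPU's pagelist for a
 * zone managing 1048576 pages grow to 1048576 / 8 = 131072 pages before
 * it is drained back to the buddy allocator.
 */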
Joe Perchescccad5b2014-06-06 14:38:09 -07006944int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07006945 void __user *buffer, size_t *length, loff_t *ppos)
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08006946{
6947 struct zone *zone;
David Rientjes7cd2b0a2014-06-23 13:22:04 -07006948 int old_percpu_pagelist_fraction;
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08006949 int ret;
6950
Cody P Schaferc8e251f2013-07-03 15:01:29 -07006951 mutex_lock(&pcp_batch_high_lock);
David Rientjes7cd2b0a2014-06-23 13:22:04 -07006952 old_percpu_pagelist_fraction = percpu_pagelist_fraction;
6953
6954 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
6955 if (!write || ret < 0)
6956 goto out;
6957
6958 /* Sanity checking to avoid pcp imbalance */
6959 if (percpu_pagelist_fraction &&
6960 percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
6961 percpu_pagelist_fraction = old_percpu_pagelist_fraction;
6962 ret = -EINVAL;
6963 goto out;
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08006964 }
David Rientjes7cd2b0a2014-06-23 13:22:04 -07006965
6966 /* No change? */
6967 if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
6968 goto out;
6969
6970 for_each_populated_zone(zone) {
6971 unsigned int cpu;
6972
6973 for_each_possible_cpu(cpu)
6974 pageset_set_high_and_batch(zone,
6975 per_cpu_ptr(zone->pageset, cpu));
6976 }
6977out:
Cody P Schaferc8e251f2013-07-03 15:01:29 -07006978 mutex_unlock(&pcp_batch_high_lock);
David Rientjes7cd2b0a2014-06-23 13:22:04 -07006979 return ret;
Rohit Seth8ad4b1f2006-01-08 01:00:40 -08006980}
6981
Rasmus Villemoesa9919c72015-06-24 16:56:28 -07006982#ifdef CONFIG_NUMA
David S. Millerf034b5d2006-08-24 03:08:07 -07006983int hashdist = HASHDIST_DEFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006984
Linus Torvalds1da177e2005-04-16 15:20:36 -07006985static int __init set_hashdist(char *str)
6986{
6987 if (!str)
6988 return 0;
6989 hashdist = simple_strtoul(str, &str, 0);
6990 return 1;
6991}
6992__setup("hashdist=", set_hashdist);
6993#endif
6994
Srikar Dronamrajuf6f34b42016-10-07 16:59:15 -07006995#ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
6996/*
6997 * Returns the number of pages that arch has reserved but
6998 * is not known to alloc_large_system_hash().
6999 */
7000static unsigned long __init arch_reserved_kernel_pages(void)
7001{
7002 return 0;
7003}
7004#endif
7005
Linus Torvalds1da177e2005-04-16 15:20:36 -07007006/*
7007 * allocate a large system hash table from bootmem
7008 * - it is assumed that the hash table must contain an exact power-of-2
7009 * quantity of entries
7010 * - limit is the number of hash buckets, not the total allocation size
7011 */
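/*
 * Illustrative example (not part of the original source): called with
 * scale = 14 on a machine with about a million 4KiB kernel pages (4GiB)
 * and no command-line override, numentries works out to roughly
 * 1048576 >> (14 - 12) = 262144 buckets, rounded to a power of two.
 */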
7012void *__init alloc_large_system_hash(const char *tablename,
7013 unsigned long bucketsize,
7014 unsigned long numentries,
7015 int scale,
7016 int flags,
7017 unsigned int *_hash_shift,
7018 unsigned int *_hash_mask,
Tim Bird31fe62b2012-05-23 13:33:35 +00007019 unsigned long low_limit,
7020 unsigned long high_limit)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007021{
Tim Bird31fe62b2012-05-23 13:33:35 +00007022 unsigned long long max = high_limit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007023 unsigned long log2qty, size;
7024 void *table = NULL;
7025
7026 /* allow the kernel cmdline to have a say */
7027 if (!numentries) {
7028 /* round applicable memory size up to nearest megabyte */
Andrew Morton04903662006-12-06 20:37:33 -08007029 numentries = nr_kernel_pages;
Srikar Dronamrajuf6f34b42016-10-07 16:59:15 -07007030 numentries -= arch_reserved_kernel_pages();
Jerry Zhoua7e83312013-09-11 14:20:26 -07007031
7032 /* It isn't necessary when PAGE_SIZE >= 1MB */
7033 if (PAGE_SHIFT < 20)
7034 numentries = round_up(numentries, (1<<20)/PAGE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007035
7036 /* limit to 1 bucket per 2^scale bytes of low memory */
7037 if (scale > PAGE_SHIFT)
7038 numentries >>= (scale - PAGE_SHIFT);
7039 else
7040 numentries <<= (PAGE_SHIFT - scale);
Paul Mundt9ab37b82007-01-05 16:36:30 -08007041
7042 /* Make sure we've got at least a 0-order allocation.. */
Jan Beulich2c85f512009-09-21 17:03:07 -07007043 if (unlikely(flags & HASH_SMALL)) {
7044 /* Makes no sense without HASH_EARLY */
7045 WARN_ON(!(flags & HASH_EARLY));
7046 if (!(numentries >> *_hash_shift)) {
7047 numentries = 1UL << *_hash_shift;
7048 BUG_ON(!numentries);
7049 }
7050 } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
Paul Mundt9ab37b82007-01-05 16:36:30 -08007051 numentries = PAGE_SIZE / bucketsize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007052 }
John Hawkes6e692ed2006-03-25 03:08:02 -08007053 numentries = roundup_pow_of_two(numentries);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007054
7055 /* limit allocation size to 1/16 total memory by default */
7056 if (max == 0) {
7057 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
7058 do_div(max, bucketsize);
7059 }
Dimitri Sivanich074b8512012-02-08 12:39:07 -08007060 max = min(max, 0x80000000ULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007061
Tim Bird31fe62b2012-05-23 13:33:35 +00007062 if (numentries < low_limit)
7063 numentries = low_limit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007064 if (numentries > max)
7065 numentries = max;
7066
David Howellsf0d1b0b2006-12-08 02:37:49 -08007067 log2qty = ilog2(numentries);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007068
7069 do {
7070 size = bucketsize << log2qty;
7071 if (flags & HASH_EARLY)
Santosh Shilimkar67828322014-01-21 15:50:25 -08007072 table = memblock_virt_alloc_nopanic(size, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007073 else if (hashdist)
7074 table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
7075 else {
Eric Dumazet1037b832007-07-15 23:38:05 -07007076 /*
7077 * If bucketsize is not a power-of-two, we may free
Mel Gormana1dd2682009-06-16 15:32:19 -07007078 * some pages at the end of hash table which
7079 * alloc_pages_exact() automatically does
Eric Dumazet1037b832007-07-15 23:38:05 -07007080 */
Catalin Marinas264ef8a2009-07-07 10:33:01 +01007081 if (get_order(size) < MAX_ORDER) {
Mel Gormana1dd2682009-06-16 15:32:19 -07007082 table = alloc_pages_exact(size, GFP_ATOMIC);
Catalin Marinas264ef8a2009-07-07 10:33:01 +01007083 kmemleak_alloc(table, size, 1, GFP_ATOMIC);
7084 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007085 }
7086 } while (!table && size > PAGE_SIZE && --log2qty);
7087
7088 if (!table)
7089 panic("Failed to allocate %s hash table\n", tablename);
7090
Joe Perches11705322016-03-17 14:19:50 -07007091 pr_info("%s hash table entries: %ld (order: %d, %lu bytes)\n",
7092 tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007093
7094 if (_hash_shift)
7095 *_hash_shift = log2qty;
7096 if (_hash_mask)
7097 *_hash_mask = (1 << log2qty) - 1;
7098
7099 return table;
7100}
KAMEZAWA Hiroyukia117e662006-03-27 01:15:25 -08007101
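/*
 * Example (illustrative sketch, not part of the original file): a typical
 * boot-time user sizes its hash table roughly as below. All identifiers
 * (the guard macro, example_hashtable, example_hash_shift, example_hash_mask
 * and example_hash_init) are made up for illustration; the guard keeps the
 * sketch out of the build.
 */
#ifdef ALLOC_LARGE_SYSTEM_HASH_EXAMPLE
static struct hlist_head *example_hashtable __read_mostly;
static unsigned int example_hash_shift __read_mostly;
static unsigned int example_hash_mask __read_mostly;

static void __init example_hash_init(void)
{
	example_hashtable =
		alloc_large_system_hash("Example-cache",
					sizeof(struct hlist_head),
					0,	/* 0: let the kernel pick a size */
					14,	/* one bucket per 2^14 bytes */
					0,	/* no HASH_EARLY/HASH_SMALL flags */
					&example_hash_shift,
					&example_hash_mask,
					0,	/* no lower limit */
					0);	/* no upper limit */
}
#endif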
KAMEZAWA Hiroyukia5d76b542007-10-16 01:26:11 -07007102/*
Minchan Kim80934512012-07-31 16:43:01 -07007103 * This function checks whether the pageblock contains unmovable pages.
7104 * If @count is not zero, up to @count unmovable pages are tolerated.
7105 *
Pintu Kumarb8af2942013-09-11 14:20:34 -07007106 * The PageLRU check without isolation or lru_lock could race, so a
Minchan Kim80934512012-07-31 16:43:01 -07007107 * MIGRATE_MOVABLE block might include unmovable pages. This means the
7108 * result is not exact and callers must not rely on it being so.
KAMEZAWA Hiroyukia5d76b542007-10-16 01:26:11 -07007109 */
Wen Congyangb023f462012-12-11 16:00:45 -08007110bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
7111 bool skip_hwpoisoned_pages)
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07007112{
7113 unsigned long pfn, iter, found;
Michal Nazarewicz47118af2011-12-29 13:09:50 +01007114 int mt;
7115
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07007116 /*
7117	 * To avoid noisy data, lru_add_drain_all() should be called beforehand.
Minchan Kim80934512012-07-31 16:43:01 -07007118	 * If the zone is ZONE_MOVABLE, it never contains unmovable pages.
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07007119 */
7120 if (zone_idx(zone) == ZONE_MOVABLE)
Minchan Kim80934512012-07-31 16:43:01 -07007121 return false;
Michal Nazarewicz47118af2011-12-29 13:09:50 +01007122 mt = get_pageblock_migratetype(page);
7123 if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
Minchan Kim80934512012-07-31 16:43:01 -07007124 return false;
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07007125
7126 pfn = page_to_pfn(page);
7127 for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
7128 unsigned long check = pfn + iter;
7129
Namhyung Kim29723fc2011-02-25 14:44:25 -08007130 if (!pfn_valid_within(check))
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07007131 continue;
Namhyung Kim29723fc2011-02-25 14:44:25 -08007132
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07007133 page = pfn_to_page(check);
Naoya Horiguchic8721bb2013-09-11 14:22:09 -07007134
7135 /*
7136 * Hugepages are not in LRU lists, but they're movable.
7137		 * We need not scan over tail pages because we don't
7138 * handle each tail page individually in migration.
7139 */
7140 if (PageHuge(page)) {
7141 iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
7142 continue;
7143 }
7144
Minchan Kim97d255c2012-07-31 16:42:59 -07007145 /*
7146		 * We can't use page_count() without pinning the page
7147		 * because another CPU can free the compound page.
7148		 * This check already skips compound tails of THP
Joonsoo Kim0139aa72016-05-19 17:10:49 -07007149		 * because their page->_refcount is zero at all times.
Minchan Kim97d255c2012-07-31 16:42:59 -07007150 */
Joonsoo Kimfe896d12016-03-17 14:19:26 -07007151 if (!page_ref_count(page)) {
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07007152 if (PageBuddy(page))
7153 iter += (1 << page_order(page)) - 1;
7154 continue;
7155 }
Minchan Kim97d255c2012-07-31 16:42:59 -07007156
Wen Congyangb023f462012-12-11 16:00:45 -08007157 /*
7158		 * A HWPoisoned page may not be in the buddy system, and
7159		 * its page_count() may not be 0.
7160 */
7161 if (skip_hwpoisoned_pages && PageHWPoison(page))
7162 continue;
7163
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07007164 if (!PageLRU(page))
7165 found++;
7166 /*
Johannes Weiner6b4f7792014-12-12 16:56:13 -08007167		 * If there are RECLAIMABLE pages, we need to check
7168		 * them too. But for now memory offlining itself doesn't call
7169		 * shrink_node_slabs(), so this still needs to be fixed.
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07007170 */
7171 /*
7172		 * If the page is not RAM, page_count() should be 0, so
7173		 * no further check is needed. This is a _used_, non-movable page.
7174		 *
7175		 * The problematic thing here is PG_reserved pages. PG_reserved
7176		 * is set on both memory hole pages and _used_ kernel
7177		 * pages at boot.
7178 */
7179 if (found > count)
Minchan Kim80934512012-07-31 16:43:01 -07007180 return true;
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07007181 }
Minchan Kim80934512012-07-31 16:43:01 -07007182 return false;
KAMEZAWA Hiroyuki49ac8252010-10-26 14:21:30 -07007183}
7184
7185bool is_pageblock_removable_nolock(struct page *page)
7186{
Michal Hocko656a0702012-01-20 14:33:58 -08007187 struct zone *zone;
7188 unsigned long pfn;
Michal Hocko687875f2012-01-20 14:33:55 -08007189
7190 /*
7191 * We have to be careful here because we are iterating over memory
7192	 * sections which are not zone aware, so we might end up outside of
7193	 * the zone but still within the section.
Michal Hocko656a0702012-01-20 14:33:58 -08007194	 * We also have to take the node into account: if the node is offline,
7195	 * its NODE_DATA will be NULL - see page_zone().
Michal Hocko687875f2012-01-20 14:33:55 -08007196 */
Michal Hocko656a0702012-01-20 14:33:58 -08007197 if (!node_online(page_to_nid(page)))
7198 return false;
7199
7200 zone = page_zone(page);
7201 pfn = page_to_pfn(page);
Cody P Schafer108bcc92013-02-22 16:35:23 -08007202 if (!zone_spans_pfn(zone, pfn))
Michal Hocko687875f2012-01-20 14:33:55 -08007203 return false;
7204
Wen Congyangb023f462012-12-11 16:00:45 -08007205 return !has_unmovable_pages(zone, page, 0, true);
KAMEZAWA Hiroyukia5d76b542007-10-16 01:26:11 -07007206}
KAMEZAWA Hiroyuki0c0e6192007-10-16 01:26:12 -07007207
Vlastimil Babka080fe202016-02-05 15:36:41 -08007208#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007209
7210static unsigned long pfn_max_align_down(unsigned long pfn)
7211{
7212 return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
7213 pageblock_nr_pages) - 1);
7214}
7215
7216static unsigned long pfn_max_align_up(unsigned long pfn)
7217{
7218 return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
7219 pageblock_nr_pages));
7220}
7221
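/*
 * Illustration (not from the original source, assuming typical x86_64
 * defaults): with MAX_ORDER == 11 (MAX_ORDER_NR_PAGES == 1024) and
 * pageblock_nr_pages == 512, the alignment is 1024 pages, so
 * pfn_max_align_down(5000) == 4096 and pfn_max_align_up(5000) == 5120.
 */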
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007222/* [start, end) must belong to a single zone. */
Mel Gormanbb13ffe2012-10-08 16:32:41 -07007223static int __alloc_contig_migrate_range(struct compact_control *cc,
7224 unsigned long start, unsigned long end)
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007225{
7226 /* This function is based on compact_zone() from compaction.c. */
Minchan Kimbeb51ea2012-10-08 16:33:51 -07007227 unsigned long nr_reclaimed;
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007228 unsigned long pfn = start;
7229 unsigned int tries = 0;
7230 int ret = 0;
7231
Marek Szyprowskibe49a6e2012-12-12 13:51:19 -08007232 migrate_prep();
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007233
Mel Gormanbb13ffe2012-10-08 16:32:41 -07007234 while (pfn < end || !list_empty(&cc->migratepages)) {
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007235 if (fatal_signal_pending(current)) {
7236 ret = -EINTR;
7237 break;
7238 }
7239
Mel Gormanbb13ffe2012-10-08 16:32:41 -07007240 if (list_empty(&cc->migratepages)) {
7241 cc->nr_migratepages = 0;
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07007242 pfn = isolate_migratepages_range(cc, pfn, end);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007243 if (!pfn) {
7244 ret = -EINTR;
7245 break;
7246 }
7247 tries = 0;
7248 } else if (++tries == 5) {
7249 ret = ret < 0 ? ret : -EBUSY;
7250 break;
7251 }
7252
Minchan Kimbeb51ea2012-10-08 16:33:51 -07007253 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
7254 &cc->migratepages);
7255 cc->nr_migratepages -= nr_reclaimed;
Minchan Kim02c6de82012-10-08 16:31:55 -07007256
Hugh Dickins9c620e22013-02-22 16:35:14 -08007257 ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
David Rientjese0b9dae2014-06-04 16:08:28 -07007258 NULL, 0, cc->mode, MR_CMA);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007259 }
Srinivas Pandruvada2a6f5122013-02-22 16:32:09 -08007260 if (ret < 0) {
7261 putback_movable_pages(&cc->migratepages);
7262 return ret;
7263 }
7264 return 0;
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007265}
7266
7267/**
7268 * alloc_contig_range() -- tries to allocate given range of pages
7269 * @start: start PFN to allocate
7270 * @end: one-past-the-last PFN to allocate
Michal Nazarewicz0815f3d2012-04-03 15:06:15 +02007271 * @migratetype: migratetype of the underlying pageblocks (either
7272 * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
7273 * in range must have the same migratetype and it must
7274 * be either of the two.
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007275 *
7276 * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
7277 * aligned, however it's the caller's responsibility to guarantee that
7278 * we are the only thread that changes migrate type of pageblocks the
7279 * pages fall in.
7280 *
7281 * The PFN range must belong to a single zone.
7282 *
7283 * Returns zero on success or a negative error code. On success all
7284 * pages whose PFN is in [start, end) are allocated for the caller and
7285 * need to be freed with free_contig_range().
7286 */
Michal Nazarewicz0815f3d2012-04-03 15:06:15 +02007287int alloc_contig_range(unsigned long start, unsigned long end,
7288 unsigned migratetype)
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007289{
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007290 unsigned long outer_start, outer_end;
Kirill A. Shutemovd00181b2015-11-06 16:29:57 -08007291 unsigned int order;
7292 int ret = 0;
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007293
Mel Gormanbb13ffe2012-10-08 16:32:41 -07007294 struct compact_control cc = {
7295 .nr_migratepages = 0,
7296 .order = -1,
7297 .zone = page_zone(pfn_to_page(start)),
David Rientjese0b9dae2014-06-04 16:08:28 -07007298 .mode = MIGRATE_SYNC,
Mel Gormanbb13ffe2012-10-08 16:32:41 -07007299 .ignore_skip_hint = true,
7300 };
7301 INIT_LIST_HEAD(&cc.migratepages);
7302
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007303 /*
7304	 * What we do here is mark all pageblocks in the range as
7305	 * MIGRATE_ISOLATE. Because pageblock and max-order pages may
7306	 * have different sizes, and due to the way the page allocator
7307	 * works, we align the range to the bigger of the two so
7308	 * that the page allocator won't try to merge buddies from
7309	 * different pageblocks and change MIGRATE_ISOLATE to some
7310	 * other migration type.
7311	 *
7312	 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
7313	 * migrate the pages from the unaligned range (i.e. the pages
7314	 * we are interested in). This will put all the pages in the
7315	 * range back into the page allocator as MIGRATE_ISOLATE.
7316	 *
7317	 * When this is done, we take the pages in the range from the
7318	 * page allocator, removing them from the buddy system. This way
7319	 * the page allocator will never consider using them.
7320	 *
7321	 * This lets us mark the pageblocks back as
7322	 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
7323	 * aligned range but not in the unaligned, original range are
7324	 * put back into the page allocator so that buddy can use them.
7325 */
7326
7327 ret = start_isolate_page_range(pfn_max_align_down(start),
Wen Congyangb023f462012-12-11 16:00:45 -08007328 pfn_max_align_up(end), migratetype,
7329 false);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007330 if (ret)
Bob Liu86a595f2012-10-25 13:37:56 -07007331 return ret;
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007332
Joonsoo Kim8ef58492016-01-14 15:18:45 -08007333 /*
7334	 * In case of -EBUSY, we'd like to know which page causes the problem.
7335 * So, just fall through. We will check it in test_pages_isolated().
7336 */
Mel Gormanbb13ffe2012-10-08 16:32:41 -07007337 ret = __alloc_contig_migrate_range(&cc, start, end);
Joonsoo Kim8ef58492016-01-14 15:18:45 -08007338 if (ret && ret != -EBUSY)
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007339 goto done;
7340
7341 /*
7342	 * Pages from [start, end) are within MAX_ORDER_NR_PAGES-aligned
7343	 * blocks that are marked as MIGRATE_ISOLATE. What's
7344	 * more, all pages in [start, end) are free in the page allocator.
7345	 * What we are going to do is allocate all pages from
7346	 * [start, end) (that is, remove them from the page allocator).
7347	 *
7348	 * The only problem is that pages at the beginning and at the
7349	 * end of the range of interest may not be aligned with pages that
7350	 * the page allocator holds, i.e. they can be part of higher-order
7351	 * pages. Because of this, we reserve the bigger range and,
7352	 * once this is done, free the pages we are not interested in.
7353	 *
7354	 * We don't have to hold zone->lock here because the pages are
7355	 * isolated and thus won't get removed from the buddy system.
7356 */
7357
7358 lru_add_drain_all();
Vlastimil Babka510f5502014-12-10 15:43:07 -08007359 drain_all_pages(cc.zone);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007360
7361 order = 0;
7362 outer_start = start;
7363 while (!PageBuddy(pfn_to_page(outer_start))) {
7364 if (++order >= MAX_ORDER) {
Joonsoo Kim8ef58492016-01-14 15:18:45 -08007365 outer_start = start;
7366 break;
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007367 }
7368 outer_start &= ~0UL << order;
7369 }
7370
Joonsoo Kim8ef58492016-01-14 15:18:45 -08007371 if (outer_start != start) {
7372 order = page_order(pfn_to_page(outer_start));
7373
7374 /*
7375		 * The outer_start page could be a small-order buddy page that
7376		 * doesn't include the start page. Adjust outer_start
7377		 * in this case so the failing page is reported properly
7378		 * by the tracepoint in test_pages_isolated().
7379 */
7380 if (outer_start + (1UL << order) <= start)
7381 outer_start = start;
7382 }
7383
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007384 /* Make sure the range is really isolated. */
Wen Congyangb023f462012-12-11 16:00:45 -08007385 if (test_pages_isolated(outer_start, end, false)) {
Michal Nazarewiczdae803e2014-11-13 15:19:27 -08007386 pr_info("%s: [%lx, %lx) PFNs busy\n",
7387 __func__, outer_start, end);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007388 ret = -EBUSY;
7389 goto done;
7390 }
7391
Marek Szyprowski49f223a2012-01-25 12:49:24 +01007392 /* Grab isolated pages from freelists. */
Mel Gormanbb13ffe2012-10-08 16:32:41 -07007393 outer_end = isolate_freepages_range(&cc, outer_start, end);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007394 if (!outer_end) {
7395 ret = -EBUSY;
7396 goto done;
7397 }
7398
7399 /* Free head and tail (if any) */
7400 if (start != outer_start)
7401 free_contig_range(outer_start, start - outer_start);
7402 if (end != outer_end)
7403 free_contig_range(end, outer_end - end);
7404
7405done:
7406 undo_isolate_page_range(pfn_max_align_down(start),
Michal Nazarewicz0815f3d2012-04-03 15:06:15 +02007407 pfn_max_align_up(end), migratetype);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007408 return ret;
7409}
7410
7411void free_contig_range(unsigned long pfn, unsigned nr_pages)
7412{
Marek Szyprowskibcc2b022012-12-20 15:05:18 -08007413 unsigned int count = 0;
7414
7415 for (; nr_pages--; pfn++) {
7416 struct page *page = pfn_to_page(pfn);
7417
7418 count += page_count(page) != 1;
7419 __free_page(page);
7420 }
7421 WARN(count != 0, "%d pages are still in use!\n", count);
Michal Nazarewicz041d3a82011-12-29 13:09:50 +01007422}
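/*
 * Example (illustrative sketch, not part of the original file): a minimal
 * hypothetical caller that grabs nr_pages contiguous MIGRATE_MOVABLE pages
 * starting at start_pfn and then releases them again. The guard macro,
 * function name and error handling are made up for illustration; the guard
 * keeps the sketch out of the build.
 */
#ifdef ALLOC_CONTIG_RANGE_EXAMPLE
static int example_grab_contig(unsigned long start_pfn, unsigned nr_pages)
{
	int ret;

	ret = alloc_contig_range(start_pfn, start_pfn + nr_pages,
				 MIGRATE_MOVABLE);
	if (ret)
		return ret;	/* e.g. -EBUSY or -EINTR */

	/* ... use pfn_to_page(start_pfn) .. pfn_to_page(start_pfn + nr_pages - 1) ... */

	free_contig_range(start_pfn, nr_pages);
	return 0;
}
#endif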
7423#endif
7424
Jiang Liu4ed7e022012-07-31 16:43:35 -07007425#ifdef CONFIG_MEMORY_HOTPLUG
Cody P Schafer0a647f32013-07-03 15:01:33 -07007426/*
7427 * The zone indicated has a new number of managed_pages; batch sizes and percpu
7428 * page high values need to be recalculated.
7429 */
Jiang Liu4ed7e022012-07-31 16:43:35 -07007430void __meminit zone_pcp_update(struct zone *zone)
7431{
Cody P Schafer0a647f32013-07-03 15:01:33 -07007432 unsigned cpu;
Cody P Schaferc8e251f2013-07-03 15:01:29 -07007433 mutex_lock(&pcp_batch_high_lock);
Cody P Schafer0a647f32013-07-03 15:01:33 -07007434 for_each_possible_cpu(cpu)
Cody P Schafer169f6c12013-07-03 15:01:41 -07007435 pageset_set_high_and_batch(zone,
7436 per_cpu_ptr(zone->pageset, cpu));
Cody P Schaferc8e251f2013-07-03 15:01:29 -07007437 mutex_unlock(&pcp_batch_high_lock);
Jiang Liu4ed7e022012-07-31 16:43:35 -07007438}
7439#endif
7440
Jiang Liu340175b2012-07-31 16:43:32 -07007441void zone_pcp_reset(struct zone *zone)
7442{
7443 unsigned long flags;
Minchan Kim5a883812012-10-08 16:33:39 -07007444 int cpu;
7445 struct per_cpu_pageset *pset;
Jiang Liu340175b2012-07-31 16:43:32 -07007446
7447 /* avoid races with drain_pages() */
7448 local_irq_save(flags);
7449 if (zone->pageset != &boot_pageset) {
Minchan Kim5a883812012-10-08 16:33:39 -07007450 for_each_online_cpu(cpu) {
7451 pset = per_cpu_ptr(zone->pageset, cpu);
7452 drain_zonestat(zone, pset);
7453 }
Jiang Liu340175b2012-07-31 16:43:32 -07007454 free_percpu(zone->pageset);
7455 zone->pageset = &boot_pageset;
7456 }
7457 local_irq_restore(flags);
7458}
7459
Wen Congyang6dcd73d2012-12-11 16:01:01 -08007460#ifdef CONFIG_MEMORY_HOTREMOVE
KAMEZAWA Hiroyuki0c0e6192007-10-16 01:26:12 -07007461/*
Joonsoo Kimb9eb6312016-05-19 17:12:06 -07007462 * All pages in the range must be in a single zone and isolated
7463 * before calling this.
KAMEZAWA Hiroyuki0c0e6192007-10-16 01:26:12 -07007464 */
7465void
7466__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
7467{
7468 struct page *page;
7469 struct zone *zone;
Mel Gorman7aeb09f2014-06-04 16:10:21 -07007470 unsigned int order, i;
KAMEZAWA Hiroyuki0c0e6192007-10-16 01:26:12 -07007471 unsigned long pfn;
7472 unsigned long flags;
7473 /* find the first valid pfn */
7474 for (pfn = start_pfn; pfn < end_pfn; pfn++)
7475 if (pfn_valid(pfn))
7476 break;
7477 if (pfn == end_pfn)
7478 return;
7479 zone = page_zone(pfn_to_page(pfn));
7480 spin_lock_irqsave(&zone->lock, flags);
7481 pfn = start_pfn;
7482 while (pfn < end_pfn) {
7483 if (!pfn_valid(pfn)) {
7484 pfn++;
7485 continue;
7486 }
7487 page = pfn_to_page(pfn);
Wen Congyangb023f462012-12-11 16:00:45 -08007488 /*
7489 * The HWPoisoned page may be not in buddy system, and
7490 * page_count() is not 0.
7491 */
7492 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
7493 pfn++;
7494 SetPageReserved(page);
7495 continue;
7496 }
7497
KAMEZAWA Hiroyuki0c0e6192007-10-16 01:26:12 -07007498 BUG_ON(page_count(page));
7499 BUG_ON(!PageBuddy(page));
7500 order = page_order(page);
7501#ifdef CONFIG_DEBUG_VM
Joe Perches11705322016-03-17 14:19:50 -07007502 pr_info("remove from free list %lx %d %lx\n",
7503 pfn, 1 << order, end_pfn);
KAMEZAWA Hiroyuki0c0e6192007-10-16 01:26:12 -07007504#endif
7505 list_del(&page->lru);
7506 rmv_page_order(page);
7507 zone->free_area[order].nr_free--;
KAMEZAWA Hiroyuki0c0e6192007-10-16 01:26:12 -07007508 for (i = 0; i < (1 << order); i++)
7509 SetPageReserved((page+i));
7510 pfn += (1 << order);
7511 }
7512 spin_unlock_irqrestore(&zone->lock, flags);
7513}
7514#endif
Wu Fengguang8d22ba12009-12-16 12:19:58 +01007515
Wu Fengguang8d22ba12009-12-16 12:19:58 +01007516bool is_free_buddy_page(struct page *page)
7517{
7518 struct zone *zone = page_zone(page);
7519 unsigned long pfn = page_to_pfn(page);
7520 unsigned long flags;
Mel Gorman7aeb09f2014-06-04 16:10:21 -07007521 unsigned int order;
Wu Fengguang8d22ba12009-12-16 12:19:58 +01007522
7523 spin_lock_irqsave(&zone->lock, flags);
7524 for (order = 0; order < MAX_ORDER; order++) {
7525 struct page *page_head = page - (pfn & ((1 << order) - 1));
7526
7527 if (PageBuddy(page_head) && page_order(page_head) >= order)
7528 break;
7529 }
7530 spin_unlock_irqrestore(&zone->lock, flags);
7531
7532 return order < MAX_ORDER;
7533}