/*
 *  linux/mm/mmzone.c
 *
 * Management code for pgdats, zones and page flags
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/mmzone.h>

struct pglist_data *first_online_pgdat(void)
{
        return NODE_DATA(first_online_node);
}

struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
{
        int nid = next_online_node(pgdat->node_id);

        if (nid == MAX_NUMNODES)
                return NULL;
        return NODE_DATA(nid);
}
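
/*
 * Usage sketch (illustrative, not part of the original file): the two
 * helpers above back the for_each_online_pgdat() iterator in
 * <linux/mmzone.h>, which visits the pg_data_t of every online node:
 *
 *      struct pglist_data *pgdat;
 *
 *      for_each_online_pgdat(pgdat)
 *              pr_info("node %d\n", pgdat->node_id);
 */
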
/*
 * next_zone - helper magic for for_each_zone()
 */
struct zone *next_zone(struct zone *zone)
{
        pg_data_t *pgdat = zone->zone_pgdat;

        if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)
                zone++;
        else {
                pgdat = next_online_pgdat(pgdat);
                if (pgdat)
                        zone = pgdat->node_zones;
                else
                        zone = NULL;
        }
        return zone;
}
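
/*
 * Usage sketch (illustrative, not part of the original file): next_zone()
 * is the stepping function behind the for_each_zone() iterator in
 * <linux/mmzone.h>, which visits every zone of every online node:
 *
 *      struct zone *zone;
 *
 *      for_each_zone(zone)
 *              pr_info("zone %s on node %d\n",
 *                      zone->name, zone->zone_pgdat->node_id);
 */
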
static inline int zref_in_nodemask(struct zoneref *zref, nodemask_t *nodes)
{
#ifdef CONFIG_NUMA
        return node_isset(zonelist_node_idx(zref), *nodes);
#else
        return 1;
#endif /* CONFIG_NUMA */
}

/* Returns the next zone at or below highest_zoneidx in a zonelist */
struct zoneref *next_zones_zonelist(struct zoneref *z,
                                    enum zone_type highest_zoneidx,
                                    nodemask_t *nodes,
                                    struct zone **zone)
{
        /*
         * Find the next suitable zone to use for the allocation.
         * Only filter based on nodemask if it's set.
         */
        if (likely(nodes == NULL))
                while (zonelist_zone_idx(z) > highest_zoneidx)
                        z++;
        else
                while (zonelist_zone_idx(z) > highest_zoneidx ||
                                (z->zone && !zref_in_nodemask(z, nodes)))
                        z++;

        *zone = zonelist_zone(z);
        return z;
}
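
/*
 * Usage sketch (illustrative, not part of the original file):
 * next_zones_zonelist() is normally reached via first_zones_zonelist()
 * and the for_each_zone_zonelist_nodemask() iterator in <linux/mmzone.h>;
 * the page allocator walks candidate zones roughly like this:
 *
 *      struct zoneref *z;
 *      struct zone *zone;
 *
 *      for_each_zone_zonelist_nodemask(zone, z, zonelist,
 *                                      high_zoneidx, nodemask) {
 *              ... consider 'zone' for the allocation ...
 *      }
 */
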
#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
/*
 * Check that the memmap entry for @pfn really describes @pfn and lies in
 * @zone; within memmap holes neither is guaranteed.
 */
int memmap_valid_within(unsigned long pfn,
                        struct page *page, struct zone *zone)
{
        if (page_to_pfn(page) != pfn)
                return 0;

        if (page_zone(page) != zone)
                return 0;

        return 1;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
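
/*
 * Usage sketch (illustrative, not part of the original file): PFN walkers
 * such as the /proc/pagetypeinfo code use this on configurations whose
 * memmap may contain holes, validating each page before touching it,
 * roughly:
 *
 *      for (pfn = start_pfn; pfn < end_pfn; pfn++) {
 *              struct page *page;
 *
 *              if (!pfn_valid(pfn))
 *                      continue;
 *              page = pfn_to_page(pfn);
 *              if (!memmap_valid_within(pfn, page, zone))
 *                      continue;
 *              ... inspect page ...
 *      }
 */
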
/* Zero a lruvec and initialise its per-LRU list heads. */
void lruvec_init(struct lruvec *lruvec)
{
        enum lru_list lru;

        memset(lruvec, 0, sizeof(struct lruvec));

        for_each_lru(lru)
                INIT_LIST_HEAD(&lruvec->lists[lru]);
}
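
/*
 * Usage sketch (illustrative; exact call sites vary by kernel version):
 * zone and memcg setup call this on each freshly allocated lruvec before
 * pages can be added to its LRU lists, e.g.:
 *
 *      lruvec_init(&zone->lruvec);
 */
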
#if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS)
/*
 * Atomically exchange the last-cpupid tracking bits held in page->flags,
 * returning the previous value.  The cmpxchg loop is needed because other
 * bits of page->flags may be updated concurrently.
 */
int page_cpupid_xchg_last(struct page *page, int cpupid)
{
        unsigned long old_flags, flags;
        int last_cpupid;

        do {
                old_flags = flags = page->flags;
                last_cpupid = page_cpupid_last(page);

                /* Clear the old cpupid and install the new one. */
                flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
                flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
        } while (unlikely(cmpxchg(&page->flags, old_flags, flags) != old_flags));

        return last_cpupid;
}
#endif
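
/*
 * Usage sketch (illustrative, not part of the original file): the NUMA
 * hinting fault path tags a page with the CPU+PID that just touched it
 * and retrieves the previous owner in one atomic step, roughly:
 *
 *      this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);
 *      last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
 */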