/*
 *  linux/mm/mmzone.c
 *
 * management code for pgdats, zones and page flags
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/mmzone.h>

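/*
 * first_online_pgdat()/next_online_pgdat() back the for_each_online_pgdat()
 * iterator: they walk the pgdats of all online nodes in node-id order.
 */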
struct pglist_data *first_online_pgdat(void)
{
	return NODE_DATA(first_online_node);
}

struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
{
	int nid = next_online_node(pgdat->node_id);

	if (nid == MAX_NUMNODES)
		return NULL;
	return NODE_DATA(nid);
}

/*
 * next_zone - helper magic for for_each_zone(): advance to the next zone
 * of the current pgdat, or to the first zone of the next online pgdat.
 */
struct zone *next_zone(struct zone *zone)
{
	pg_data_t *pgdat = zone->zone_pgdat;

	if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)
		zone++;
	else {
		pgdat = next_online_pgdat(pgdat);
		if (pgdat)
			zone = pgdat->node_zones;
		else
			zone = NULL;
	}
	return zone;
}

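/*
 * zref_in_nodemask - does the zone referenced by @zref sit on a node that
 * is set in @nodes?  Without CONFIG_NUMA there is only one node, so every
 * zone trivially qualifies.
 */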
static inline int zref_in_nodemask(struct zoneref *zref, nodemask_t *nodes)
{
#ifdef CONFIG_NUMA
	return node_isset(zonelist_node_idx(zref), *nodes);
#else
	return 1;
#endif /* CONFIG_NUMA */
}

/* Returns the next zone at or below highest_zoneidx in a zonelist */
struct zoneref *next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes,
					struct zone **zone)
{
	/*
	 * Find the next suitable zone to use for the allocation.
	 * Only filter based on nodemask if it's set
	 */
	if (likely(nodes == NULL))
		while (zonelist_zone_idx(z) > highest_zoneidx)
			z++;
	else
		while (zonelist_zone_idx(z) > highest_zoneidx ||
				(z->zone && !zref_in_nodemask(z, nodes)))
			z++;

	*zone = zonelist_zone(z);
	return z;
}

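/*
 * Callers rarely use next_zones_zonelist() directly; it underpins the
 * zonelist iterators in <linux/mmzone.h>, roughly:
 *
 *	for (z = first_zones_zonelist(zonelist, highidx, nodemask, &zone);
 *	     zone;
 *	     z = next_zones_zonelist(++z, highidx, nodemask, &zone))
 *		visit each suitable zone in priority order
 */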

#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
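/*
 * memmap_valid_within - check that @page really is the memmap entry for @pfn
 * @pfn:  page frame number being examined
 * @page: the struct page expected to back @pfn
 * @zone: the zone @pfn is expected to belong to
 *
 * On architectures whose memmap may contain holes, a pfn that looks valid
 * can map to a stale or foreign struct page.  Returns 1 only when @page
 * maps back to @pfn and lies within @zone, 0 otherwise.
 */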
int memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone)
{
	if (page_to_pfn(page) != pfn)
		return 0;

	if (page_zone(page) != zone)
		return 0;

	return 1;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */

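/*
 * lruvec_init - zero @lruvec and initialise the list head of each of its
 * per-type LRU lists so pages can subsequently be added to them.
 */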
void lruvec_init(struct lruvec *lruvec)
{
	enum lru_list lru;

	memset(lruvec, 0, sizeof(struct lruvec));

	for_each_lru(lru)
		INIT_LIST_HEAD(&lruvec->lists[lru]);
}

#if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS)
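/*
 * page_cpupid_xchg_last - record a new NUMA-balancing "last cpupid" value
 * in the spare high bits of page->flags and return the previous value.
 * The cmpxchg() loop ensures concurrent updates to page->flags are not
 * lost.  This path is only compiled when the cpupid is kept in page->flags
 * rather than in a field of its own in struct page.
 */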
int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	unsigned long old_flags, flags;
	int last_cpupid;

	do {
		old_flags = flags = page->flags;
		last_cpupid = page_cpupid_last(page);

		flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
		flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
	} while (unlikely(cmpxchg(&page->flags, old_flags, flags) != old_flags));

	return last_cpupid;
}
#endif