/*
 * Written by Pat Gaughen (gone@us.ibm.com) Mar 2002
 *
 */

#ifndef _ASM_MMZONE_H_
#define _ASM_MMZONE_H_

#include <asm/smp.h>

#ifdef CONFIG_NUMA
extern struct pglist_data *node_data[];
#define NODE_DATA(nid)	(node_data[nid])

#ifdef CONFIG_NUMA
	#ifdef CONFIG_X86_NUMAQ
		#include <asm/numaq.h>
	#else	/* summit or generic arch */
		#include <asm/srat.h>
	#endif
#else /* !CONFIG_NUMA */
	#define get_memcfg_numa get_memcfg_numa_flat
	#define get_zholes_size(n) (0)
#endif /* CONFIG_NUMA */

extern int get_memcfg_numa_flat(void);
/*
 * This allows the kernel to be compiled for any one NUMA
 * architecture while still falling back to the flat function
 * if that setup fails.
 */
static inline void get_memcfg_numa(void)
{
#ifdef CONFIG_X86_NUMAQ
	if (get_memcfg_numaq())
		return;
#elif CONFIG_ACPI_SRAT
	if (get_memcfg_from_srat())
		return;
#endif

	get_memcfg_numa_flat();
}

#endif /* CONFIG_NUMA */

#ifdef CONFIG_DISCONTIGMEM

/*
 * Generic node memory support; the following assumptions apply:
 *
 * 1) memory comes in 256MB contiguous chunks which are either present or not
 * 2) we will not have more than 64GB in total
 *
 * for now assume that 64GB is the maximum amount of RAM for the whole system
 * 64GB / 4096 bytes/page = 16777216 pages
 */
#define MAX_NR_PAGES 16777216
#define MAX_ELEMENTS 256
#define PAGES_PER_ELEMENT (MAX_NR_PAGES/MAX_ELEMENTS)
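
/*
 * Worked arithmetic (from the assumptions above): 16777216 pages /
 * 256 elements = 65536 pages per element, and 65536 pages * 4096
 * bytes/page = 256MB, so each entry of physnode_map[] below covers
 * one 256MB chunk of physical memory.
 */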

extern s8 physnode_map[];

static inline int pfn_to_nid(unsigned long pfn)
{
#ifdef CONFIG_NUMA
	return((int) physnode_map[(pfn) / PAGES_PER_ELEMENT]);
#else
	return 0;
#endif
}
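
/*
 * Example (assuming the 4KB page size noted above): pfn 0x18000 lies
 * 384MB into physical memory, so it maps to physnode_map[1]; the
 * pfn_valid() helper further down treats a negative entry as a hole.
 */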

#define node_localnr(pfn, nid)	((pfn) - node_data[nid]->node_start_pfn)

/*
 * Following are macros that each NUMA implementation must define.
 */

/*
 * Given a kernel address, find the home node of the underlying memory.
 */
#define kvaddr_to_nid(kaddr)	pfn_to_nid(__pa(kaddr) >> PAGE_SHIFT)

#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) \
({ \
	pg_data_t *__pgdat = NODE_DATA(nid); \
	__pgdat->node_start_pfn + __pgdat->node_spanned_pages; \
})

#define local_mapnr(kvaddr) \
({ \
	unsigned long __pfn = __pa(kvaddr) >> PAGE_SHIFT; \
	(__pfn - node_start_pfn(pfn_to_nid(__pfn))); \
})

/* XXX: FIXME -- wli */
#define kern_addr_valid(kaddr)	(0)

#define pfn_to_page(pfn) \
({ \
	unsigned long __pfn = pfn; \
	int __node = pfn_to_nid(__pfn); \
	&NODE_DATA(__node)->node_mem_map[node_localnr(__pfn,__node)]; \
})

#define page_to_pfn(pg) \
({ \
	struct page *__page = pg; \
	struct zone *__zone = page_zone(__page); \
	(unsigned long)(__page - __zone->zone_mem_map) \
	 + __zone->zone_start_pfn; \
})

#ifdef CONFIG_X86_NUMAQ	/* we have contiguous memory on NUMA-Q */
#define pfn_valid(pfn)	((pfn) < num_physpages)
#else
static inline int pfn_valid(int pfn)
{
	int nid = pfn_to_nid(pfn);

	if (nid >= 0)
		return (pfn < node_end_pfn(nid));
	return 0;
}
#endif /* CONFIG_X86_NUMAQ */

#endif /* CONFIG_DISCONTIGMEM */

#ifdef CONFIG_NEED_MULTIPLE_NODES

/*
 * Following are macros that are specific to this numa platform.
 */
#define reserve_bootmem(addr, size) \
	reserve_bootmem_node(NODE_DATA(0), (addr), (size))
#define alloc_bootmem(x) \
	__alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_low(x) \
	__alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, 0)
#define alloc_bootmem_pages(x) \
	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_low_pages(x) \
	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0)
#define alloc_bootmem_node(ignore, x) \
	__alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_pages_node(ignore, x) \
	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_low_pages_node(ignore, x) \
	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0)
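
/*
 * Note: each wrapper above passes NODE_DATA(0), and the *_node()
 * variants ignore their node argument, so every boot-time allocation
 * on this platform is satisfied from node 0's bootmem data.
 */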

#endif /* CONFIG_NEED_MULTIPLE_NODES */

extern int early_pfn_to_nid(unsigned long pfn);

#endif /* _ASM_MMZONE_H_ */