/*
 * Written by Pat Gaughen (gone@us.ibm.com) Mar 2002
 */

#ifndef _ASM_X86_MMZONE_32_H
#define _ASM_X86_MMZONE_32_H

#include <asm/smp.h>

#ifdef CONFIG_NUMA
extern struct pglist_data *node_data[];
#define NODE_DATA(nid)		(node_data[nid])

#include <asm/numaq.h>
/* summit or generic arch */
#include <asm/srat.h>

extern int get_memcfg_numa_flat(void);
/*
 * This allows any one NUMA architecture to be compiled in, while still
 * falling back to the flat function if the architecture-specific setup
 * fails.
 */
static inline void get_memcfg_numa(void)
{
	if (get_memcfg_numaq())
		return;
	if (get_memcfg_from_srat())
		return;
	get_memcfg_numa_flat();
}

extern void resume_map_numa_kva(pgd_t *pgd);

#else /* !CONFIG_NUMA */

#define get_memcfg_numa get_memcfg_numa_flat

static inline void resume_map_numa_kva(pgd_t *pgd) {}

#endif /* CONFIG_NUMA */

#ifdef CONFIG_DISCONTIGMEM

/*
 * Generic node memory support; the following assumptions apply:
 *
 * 1) memory comes in 64 MB contiguous chunks which are either present or not
 * 2) we will not have more than 64 GB in total
 *
 * For now assume that 64 GB is the maximum amount of RAM for the whole system:
 *    64 GB / 4096 bytes/page = 16777216 pages
 */
#define MAX_NR_PAGES		16777216
#define MAX_ELEMENTS		1024
#define PAGES_PER_ELEMENT	(MAX_NR_PAGES/MAX_ELEMENTS)
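
/*
 * Each physnode_map element therefore covers PAGES_PER_ELEMENT =
 * 16777216 / 1024 = 16384 pages, i.e. 16384 * 4096 bytes = 64 MB of
 * physical address space, matching the 64 MB chunk granularity
 * assumed above.
 */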

extern s8 physnode_map[];

static inline int pfn_to_nid(unsigned long pfn)
{
#ifdef CONFIG_NUMA
	return (int)physnode_map[pfn / PAGES_PER_ELEMENT];
#else
	return 0;
#endif
}
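
/*
 * For example, pfn 0x20000 (the page at physical address 512 MB) falls
 * in element 0x20000 / 16384 = 8, so its node id is physnode_map[8].
 */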

/*
 * The following are macros that each NUMA implementation must define.
 */

#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid)					\
({								\
	pg_data_t *__pgdat = NODE_DATA(nid);			\
	__pgdat->node_start_pfn + __pgdat->node_spanned_pages;	\
})
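
/*
 * Note that node_end_pfn() is an exclusive bound: it evaluates to the
 * first pfn past the node's spanned range, which is why pfn_valid()
 * below uses a strict '<' comparison.
 */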

static inline int pfn_valid(int pfn)
{
	int nid = pfn_to_nid(pfn);

	if (nid >= 0)
		return pfn < node_end_pfn(nid);
	return 0;
}
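
/*
 * Chunks not assigned to any node are assumed to be marked with a
 * negative value in physnode_map by the arch NUMA setup code, so
 * pfn_valid() also returns 0 for pfns falling into such holes.
 */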

#endif /* CONFIG_DISCONTIGMEM */

#ifdef CONFIG_NEED_MULTIPLE_NODES
/* always use node 0 for bootmem on this numa platform */
#define bootmem_arch_preferred_node(__bdata, size, align, goal, limit)	\
	(NODE_DATA(0)->bdata)
#endif /* CONFIG_NEED_MULTIPLE_NODES */

#endif /* _ASM_X86_MMZONE_32_H */