/*
 * Written by Pat Gaughen (gone@us.ibm.com) Mar 2002
 *
 */

#ifndef _ASM_MMZONE_H_
#define _ASM_MMZONE_H_

#include <asm/smp.h>

#ifdef CONFIG_DISCONTIGMEM

extern struct pglist_data *node_data[];
#define NODE_DATA(nid)		(node_data[nid])

#define node_localnr(pfn, nid)	((pfn) - NODE_DATA(nid)->node_start_pfn)
#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid)						\
({									\
	pg_data_t *__pgdat = NODE_DATA(nid);				\
	__pgdat->node_start_pfn + __pgdat->node_spanned_pages - 1;	\
})

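/*
 * Illustrative example (not part of the original header): for a
 * hypothetical node 1 whose pg_data_t has node_start_pfn = 0x10000
 * and node_spanned_pages = 0x4000, the macros above evaluate to:
 *
 *	node_start_pfn(1)	 -> 0x10000
 *	node_end_pfn(1)		 -> 0x13fff	(start + spanned - 1)
 *	node_localnr(0x10004, 1) -> 0x4		(offset within the node)
 */
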
#define pmd_page(pmd)		(pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
/*
 * pfn_valid should be made as fast as possible, and the current definition
 * is valid for machines that are NUMA, but still contiguous, which is what
 * is currently supported. A more generalised, but slower definition would
 * be something like this - mbligh:
 * ( pfn_to_pgdat(pfn) && ((pfn) < node_end_pfn(pfn_to_nid(pfn))) )
 */
#if 1	/* M32R_FIXME */
#define pfn_valid(pfn)	(1)
#else
#define pfn_valid(pfn)	((pfn) < num_physpages)
#endif

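/*
 * A minimal sketch of the generalised pfn_valid() hinted at above, not
 * part of the original header; __pfn_valid is a hypothetical name. It
 * leans on pfn_to_nid() below, which only matches a node whose span
 * contains the pfn and falls through to MAX_NUMNODES otherwise:
 *
 *	static __inline__ int __pfn_valid(unsigned long pfn)
 *	{
 *		return pfn_to_nid(pfn) < MAX_NUMNODES;
 *	}
 */
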
/*
 * Generic node memory support. The following assumptions apply:
 *
 *  - each node covers a single contiguous pfn range,
 *    [node_start_pfn(nid), node_end_pfn(nid)]
 *  - the pfn ranges of different nodes do not overlap
 */

static __inline__ int pfn_to_nid(unsigned long pfn)
{
	int node;

	/* Linear scan; falls through to MAX_NUMNODES when no node spans pfn. */
	for (node = 0 ; node < MAX_NUMNODES ; node++)
		if (pfn >= node_start_pfn(node) && pfn <= node_end_pfn(node))
			break;

	return node;
}

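/*
 * Behaviour sketch for pfn_to_nid() (hypothetical layout): with node 0
 * spanning pfns [0x0, 0xffff] and node 1 spanning [0x10000, 0x13fff],
 * pfn_to_nid(0x10004) misses node 0, matches node 1 and returns 1,
 * while pfn_to_nid(0x20000) matches nothing and returns MAX_NUMNODES.
 */
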
static __inline__ struct pglist_data *pfn_to_pgdat(unsigned long pfn)
{
	return NODE_DATA(pfn_to_nid(pfn));
}

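/*
 * Usage sketch (illustrative only, every name local to the example;
 * node_mem_map indexing follows the classic discontigmem pfn_to_page
 * scheme):
 *
 *	unsigned long pfn = 0x10004;
 *	int nid = pfn_to_nid(pfn);
 *
 *	if (nid < MAX_NUMNODES) {
 *		pg_data_t *pgdat = pfn_to_pgdat(pfn);
 *		unsigned long local = node_localnr(pfn, nid);
 *		struct page *page = pgdat->node_mem_map + local;
 *	}
 */
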
#endif /* CONFIG_DISCONTIGMEM */
#endif /* _ASM_MMZONE_H_ */