#ifndef __ASM_MEMORY_MODEL_H
#define __ASM_MEMORY_MODEL_H

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#if defined(CONFIG_FLATMEM)

/*
 * First valid pfn of the flat mem_map[].  Architectures whose usable
 * memory does not start at pfn 0 override this before including us.
 */
#ifndef ARCH_PFN_OFFSET
#define ARCH_PFN_OFFSET	(0UL)
#endif

#elif defined(CONFIG_DISCONTIGMEM)

/*
 * Map a pfn to the NUMA node owning it.  The default assumes the
 * architecture provides a global pfn_to_nid(); arches with a cheaper
 * scheme override arch_pfn_to_nid before including us.
 */
#ifndef arch_pfn_to_nid
#define arch_pfn_to_nid(pfn)	pfn_to_nid(pfn)
#endif

/* Offset of a pfn within its node's mem_map[] (relative to node_start_pfn). */
#ifndef arch_local_page_offset
#define arch_local_page_offset(pfn, nid)	\
	((pfn) - NODE_DATA(nid)->node_start_pfn)
#endif

#endif /* CONFIG_DISCONTIGMEM */
| 25 | |
/*
 * supports 3 memory models.
 */
#if defined(CONFIG_FLATMEM)

/*
 * FLATMEM: a single global mem_map[] covers all memory; a pfn is just
 * an index into it, shifted by ARCH_PFN_OFFSET.
 */
#define __pfn_to_page(pfn)	(mem_map + ((pfn) - ARCH_PFN_OFFSET))
#define __page_to_pfn(page)	((unsigned long)((page) - mem_map) + \
				 ARCH_PFN_OFFSET)
| 34 | #elif defined(CONFIG_DISCONTIGMEM) |
| 35 | |
Andy Whitcroft | 67de648 | 2006-06-23 02:03:12 -0700 | [diff] [blame] | 36 | #define __pfn_to_page(pfn) \ |
KAMEZAWA Hiroyuki | a117e66 | 2006-03-27 01:15:25 -0800 | [diff] [blame] | 37 | ({ unsigned long __pfn = (pfn); \ |
| 38 | unsigned long __nid = arch_pfn_to_nid(pfn); \ |
| 39 | NODE_DATA(__nid)->node_mem_map + arch_local_page_offset(__pfn, __nid);\ |
| 40 | }) |
| 41 | |
Andy Whitcroft | 67de648 | 2006-06-23 02:03:12 -0700 | [diff] [blame] | 42 | #define __page_to_pfn(pg) \ |
KAMEZAWA Hiroyuki | a0140c1 | 2006-03-27 01:15:55 -0800 | [diff] [blame] | 43 | ({ struct page *__pg = (pg); \ |
| 44 | struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg)); \ |
| 45 | (unsigned long)(__pg - __pgdat->node_mem_map) + \ |
| 46 | __pgdat->node_start_pfn; \ |
KAMEZAWA Hiroyuki | a117e66 | 2006-03-27 01:15:25 -0800 | [diff] [blame] | 47 | }) |
| 48 | |
| 49 | #elif defined(CONFIG_SPARSEMEM) |
| 50 | /* |
| 51 | * Note: section's mem_map is encorded to reflect its start_pfn. |
| 52 | * section[i].section_mem_map == mem_map's address - start_pfn; |
| 53 | */ |
Andy Whitcroft | 67de648 | 2006-06-23 02:03:12 -0700 | [diff] [blame] | 54 | #define __page_to_pfn(pg) \ |
KAMEZAWA Hiroyuki | a117e66 | 2006-03-27 01:15:25 -0800 | [diff] [blame] | 55 | ({ struct page *__pg = (pg); \ |
| 56 | int __sec = page_to_section(__pg); \ |
Randy Dunlap | f05b628 | 2007-02-10 01:42:59 -0800 | [diff] [blame] | 57 | (unsigned long)(__pg - __section_mem_map_addr(__nr_to_section(__sec))); \ |
KAMEZAWA Hiroyuki | a117e66 | 2006-03-27 01:15:25 -0800 | [diff] [blame] | 58 | }) |
| 59 | |
Andy Whitcroft | 67de648 | 2006-06-23 02:03:12 -0700 | [diff] [blame] | 60 | #define __pfn_to_page(pfn) \ |
KAMEZAWA Hiroyuki | a117e66 | 2006-03-27 01:15:25 -0800 | [diff] [blame] | 61 | ({ unsigned long __pfn = (pfn); \ |
| 62 | struct mem_section *__sec = __pfn_to_section(__pfn); \ |
| 63 | __section_mem_map_addr(__sec) + __pfn; \ |
| 64 | }) |
| 65 | #endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */ |

#ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE
struct page;
/* this is useful when inlined pfn_to_page is too big */
extern struct page *pfn_to_page(unsigned long pfn);
extern unsigned long page_to_pfn(struct page *page);
#else
/* Default: the public names are just the model-specific macros above. */
#define page_to_pfn __page_to_pfn
#define pfn_to_page __pfn_to_page
#endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */

#endif