#ifndef __ASM_MEMORY_MODEL_H
#define __ASM_MEMORY_MODEL_H

#ifndef __ASSEMBLY__

#if defined(CONFIG_FLATMEM)

#ifndef ARCH_PFN_OFFSET
#define ARCH_PFN_OFFSET		(0UL)
#endif

#elif defined(CONFIG_DISCONTIGMEM)

#ifndef arch_pfn_to_nid
#define arch_pfn_to_nid(pfn)	pfn_to_nid(pfn)
#endif

#ifndef arch_local_page_offset
#define arch_local_page_offset(pfn, nid)	\
	((pfn) - NODE_DATA(nid)->node_start_pfn)
#endif

#endif /* CONFIG_DISCONTIGMEM */
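/*
 * Worked example (illustration only, with made-up numbers): on a
 * DISCONTIGMEM system where NODE_DATA(1)->node_start_pfn == 0x80000,
 * arch_local_page_offset(0x80010, 1) evaluates to 0x10, i.e. index
 * 0x10 into that node's node_mem_map[].
 */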

/*
 * supports 3 memory models: FLATMEM, DISCONTIGMEM and SPARSEMEM
 * (the latter optionally with a virtually mapped memmap, VMEMMAP).
 */
#if defined(CONFIG_FLATMEM)

#define __pfn_to_page(pfn)	(mem_map + ((pfn) - ARCH_PFN_OFFSET))
#define __page_to_pfn(page)	((unsigned long)((page) - mem_map) + \
				 ARCH_PFN_OFFSET)
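/*
 * FLATMEM round-trip sketch (illustration only; assumes the single
 * global mem_map[] array starts at pfn ARCH_PFN_OFFSET):
 *
 *	struct page *page = __pfn_to_page(pfn);	== &mem_map[pfn - ARCH_PFN_OFFSET]
 *	__page_to_pfn(page)			== pfn
 */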
#elif defined(CONFIG_DISCONTIGMEM)

#define __pfn_to_page(pfn)			\
({	unsigned long __pfn = (pfn);		\
	unsigned long __nid = arch_pfn_to_nid(__pfn);  \
	NODE_DATA(__nid)->node_mem_map + arch_local_page_offset(__pfn, __nid);\
})

#define __page_to_pfn(pg)						\
({	const struct page *__pg = (pg);					\
	struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg));	\
	(unsigned long)(__pg - __pgdat->node_mem_map) +			\
	 __pgdat->node_start_pfn;					\
})
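/*
 * DISCONTIGMEM keeps one mem_map[] per node, so both conversions above
 * first find the owning node (arch_pfn_to_nid() or page_to_nid()) and
 * then add or subtract that node's node_start_pfn.
 */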

#elif defined(CONFIG_SPARSEMEM_VMEMMAP)

/* memmap is virtually contiguous.  */
#define __pfn_to_page(pfn)	(vmemmap + (pfn))
#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)

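/*
 * With VMEMMAP the whole struct page array is mapped at the virtual
 * address vmemmap and indexed directly by pfn, so both conversions
 * collapse to plain pointer arithmetic with no node or section lookup.
 */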
#elif defined(CONFIG_SPARSEMEM)
/*
 * Note: section's mem_map is encoded to reflect its start_pfn.
 * section[i].section_mem_map == mem_map's address - start_pfn;
 */
#define __page_to_pfn(pg)					\
({	const struct page *__pg = (pg);				\
	int __sec = page_to_section(__pg);			\
	(unsigned long)(__pg - __section_mem_map_addr(__nr_to_section(__sec)));	\
})

#define __pfn_to_page(pfn)				\
({	unsigned long __pfn = (pfn);			\
	struct mem_section *__sec = __pfn_to_section(__pfn);	\
	__section_mem_map_addr(__sec) + __pfn;	\
})
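/*
 * Because __section_mem_map_addr() returns the section's mem_map
 * already biased by -start_pfn (see the note above), __pfn_to_page()
 * can add the full pfn rather than a section-local offset, and
 * __page_to_pfn() can subtract the biased base directly.
 */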
#endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */

/*
 * Convert a physical address to a Page Frame Number and back
 */
#define	__phys_to_pfn(paddr)	((unsigned long)((paddr) >> PAGE_SHIFT))
#define	__pfn_to_phys(pfn)	PFN_PHYS(pfn)
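/*
 * Example (illustration only, assuming 4 KiB pages, i.e. PAGE_SHIFT == 12):
 * __phys_to_pfn(0x12345678) == 0x12345, and __pfn_to_phys(0x12345)
 * gives back the page-aligned address 0x12345000.
 */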

#define page_to_pfn __page_to_pfn
#define pfn_to_page __pfn_to_page

#endif /* __ASSEMBLY__ */

#endif