#ifndef _PPC_PAGE_H
#define _PPC_PAGE_H

#include <asm/asm-compat.h>

/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(ASM_CONST(1) << PAGE_SHIFT)
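/*
 * With PAGE_SHIFT = 12 this gives 4 KiB pages (PAGE_SIZE == 0x1000).
 * ASM_CONST() lets the same constant be used from both C and assembler
 * sources (in C it carries the UL suffix, in assembly it does not).
 */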
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9 | |
| 10 | /* |
| 11 | * Subtle: this is an int (not an unsigned long) and so it |
| 12 | * gets extended to 64 bits the way want (i.e. with 1s). -- paulus |
| 13 | */ |
| 14 | #define PAGE_MASK (~((1 << PAGE_SHIFT) - 1)) |
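/*
 * Example: with PAGE_SHIFT = 12, PAGE_MASK is 0xfffff000 as a 32-bit
 * int; because it is a signed int, using it against a 64-bit quantity
 * sign-extends it to 0xfffffffffffff000, which is the behaviour the
 * comment above relies on.
 */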

#ifdef __KERNEL__

/* This must match what is in arch/ppc/Makefile */
#define PAGE_OFFSET	CONFIG_KERNEL_START
#define KERNELBASE	PAGE_OFFSET
#define is_kernel_addr(x)	((x) >= PAGE_OFFSET)

#ifndef __ASSEMBLY__

/*
 * The basic type of a PTE - 64 bits for those CPUs with > 32 bit
 * physical addressing.  For now this is just the IBM PPC440.
 */
#ifdef CONFIG_PTE_64BIT
typedef unsigned long long pte_basic_t;
#define PTE_SHIFT	(PAGE_SHIFT - 3)	/* 512 ptes per page */
#define PTE_FMT		"%16Lx"
#else
typedef unsigned long pte_basic_t;
#define PTE_SHIFT	(PAGE_SHIFT - 2)	/* 1024 ptes per page */
#define PTE_FMT		"%.8lx"
#endif
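/*
 * Either way a page table fills exactly one page: 2^PTE_SHIFT entries,
 * i.e. 512 eight-byte PTEs in the 64-bit case or 1024 four-byte PTEs
 * in the 32-bit case, times the entry size equals PAGE_SIZE.
 */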

/* align addr on a size boundary - adjust address up/down if needed */
#define _ALIGN_UP(addr,size)	(((addr)+((size)-1))&(~((size)-1)))
#define _ALIGN_DOWN(addr,size)	((addr)&(~((size)-1)))

/* align addr on a size boundary - adjust address up if needed */
#define _ALIGN(addr,size)	_ALIGN_UP(addr,size)

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr)	_ALIGN(addr, PAGE_SIZE)
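/*
 * Examples (size must be a power of two):
 *	_ALIGN_UP(0x1234, 0x1000)   == 0x2000
 *	_ALIGN_DOWN(0x1234, 0x1000) == 0x1000
 *	PAGE_ALIGN(0x1000)          == 0x1000	(already aligned)
 */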


#undef STRICT_MM_TYPECHECKS

#ifdef STRICT_MM_TYPECHECKS
/*
 * These are used to make use of C type-checking..
 */
typedef struct { pte_basic_t pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;

#define pte_val(x)	((x).pte)
#define pmd_val(x)	((x).pmd)
#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define __pte(x)	((pte_t) { (x) } )
#define __pmd(x)	((pmd_t) { (x) } )
#define __pgd(x)	((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )

#else
/*
 * .. while these make it easier on the compiler
 */
typedef pte_basic_t pte_t;
typedef unsigned long pmd_t;
typedef unsigned long pgd_t;
typedef unsigned long pgprot_t;

#define pte_val(x)	(x)
#define pmd_val(x)	(x)
#define pgd_val(x)	(x)
#define pgprot_val(x)	(x)

#define __pte(x)	(x)
#define __pmd(x)	(x)
#define __pgd(x)	(x)
#define __pgprot(x)	(x)

#endif
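/*
 * With STRICT_MM_TYPECHECKS the struct wrappers turn, for example,
 * "pte_t pte = pgd" into a compile error instead of a silent mixup;
 * the plain-scalar variants in the #else branch generate identical
 * code but give up that checking.
 */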

struct page;
extern void clear_pages(void *page, int order);
static inline void clear_page(void *page) { clear_pages(page, 0); }
extern void copy_page(void *to, void *from);
extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
extern void copy_user_page(void *to, void *from, unsigned long vaddr,
		struct page *pg);
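/*
 * clear_pages() zeroes a block of 2^order contiguous pages, so
 * clear_page() is simply the order-0 case.  The cache-aware
 * implementations are provided out of line elsewhere in arch/ppc.
 */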

#ifndef CONFIG_APUS
#define PPC_MEMSTART	0
#define PPC_PGSTART	0
#define PPC_MEMOFFSET	PAGE_OFFSET
#else
extern unsigned long ppc_memstart;
extern unsigned long ppc_pgstart;
extern unsigned long ppc_memoffset;
#define PPC_MEMSTART	ppc_memstart
#define PPC_PGSTART	ppc_pgstart
#define PPC_MEMOFFSET	ppc_memoffset
#endif
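/*
 * On everything except APUS (the Amiga PowerUP port) RAM starts at
 * physical address 0, so these offsets are compile-time constants;
 * APUS discovers them at boot and exports them as variables instead.
 */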

#if defined(CONFIG_APUS) && !defined(MODULE)
/* map phys->virtual and virtual->phys for RAM pages */
static inline unsigned long ___pa(unsigned long v)
{
	unsigned long p;
	asm volatile ("1: addis %0, %1, %2;"
		      ".section \".vtop_fixup\",\"aw\";"
		      ".align  1;"
		      ".long   1b;"
		      ".previous;"
		      : "=r" (p)
		      : "b" (v), "K" (((-PAGE_OFFSET) >> 16) & 0xffff));

	return p;
}
static inline void* ___va(unsigned long p)
{
	unsigned long v;
	asm volatile ("1: addis %0, %1, %2;"
		      ".section \".ptov_fixup\",\"aw\";"
		      ".align  1;"
		      ".long   1b;"
		      ".previous;"
		      : "=r" (v)
		      : "b" (p), "K" (((PAGE_OFFSET) >> 16) & 0xffff));

	return (void*) v;
}
#else
#define ___pa(vaddr) ((vaddr)-PPC_MEMOFFSET)
#define ___va(paddr) ((paddr)+PPC_MEMOFFSET)
#endif
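/*
 * The APUS variants above apply a 16-bit high-part offset with a single
 * addis; the .vtop_fixup/.ptov_fixup sections record the address of each
 * such instruction (the "1b" label) so the offset can be patched once
 * the real memory layout is known at boot.
 */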

extern int page_is_ram(unsigned long pfn);

#define __pa(x) ___pa((unsigned long)(x))
#define __va(x) ((void *)(___va((unsigned long)(x))))
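/*
 * Example for the common (non-APUS) case, where PPC_MEMOFFSET is
 * PAGE_OFFSET (typically 0xc0000000):
 *	__pa(0xc0001000) == 0x00001000
 *	__va(0x00001000) == (void *)0xc0001000
 */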

#define ARCH_PFN_OFFSET		(PPC_PGSTART)
#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_virt(page)	__va(page_to_pfn(page) << PAGE_SHIFT)

#define pfn_valid(pfn)		(((pfn) - PPC_PGSTART) < max_mapnr)
#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)

/* Pure 2^n version of get_order */
extern __inline__ int get_order(unsigned long size)
{
	int lz;

	size = (size-1) >> PAGE_SHIFT;
	asm ("cntlzw %0,%1" : "=r" (lz) : "r" (size));
	return 32 - lz;
}
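/*
 * Examples: get_order(PAGE_SIZE) == 0, get_order(2 * PAGE_SIZE) == 1,
 * get_order(3 * PAGE_SIZE) == 2 (the size is rounded up to the next
 * power-of-two number of pages).  cntlzw counts the leading zeros of
 * the 32-bit page count.
 */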

#endif /* __ASSEMBLY__ */

#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
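/*
 * Data/heap mappings are readable, writable and executable by default;
 * the VM_MAY* bits additionally allow mprotect() to re-enable any of
 * those permissions later.
 */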

/* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */
#define __HAVE_ARCH_GATE_AREA		1

#include <asm-generic/memory_model.h>
#endif /* __KERNEL__ */
#endif /* _PPC_PAGE_H */