#ifndef _PARISC_PAGE_H
| 2 | #define _PARISC_PAGE_H |
| 3 | |
| 4 | /* PAGE_SHIFT determines the page size */ |
| 5 | #define PAGE_SHIFT 12 |
| 6 | #define PAGE_SIZE (1UL << PAGE_SHIFT) |
| 7 | #define PAGE_MASK (~(PAGE_SIZE-1)) |
| 8 | |
| 9 | #ifdef __KERNEL__ |
| 10 | #include <linux/config.h> |
| 11 | #ifndef __ASSEMBLY__ |
| 12 | |
| 13 | #include <asm/types.h> |
| 14 | #include <asm/cache.h> |
| 15 | |
| 16 | #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) |
| 17 | #define copy_page(to,from) copy_user_page_asm((void *)(to), (void *)(from)) |
| 18 | |
| 19 | struct page; |
| 20 | |
| 21 | extern void purge_kernel_dcache_page(unsigned long); |
| 22 | extern void copy_user_page_asm(void *to, void *from); |
| 23 | extern void clear_user_page_asm(void *page, unsigned long vaddr); |
| 24 | |
/*
 * Copy one user page and keep the data cache coherent.
 *
 * Copies PAGE_SIZE bytes from kernel mapping 'vfrom' to 'vto' via the
 * assembly helper, then flushes the kernel dcache for the destination so
 * the user mapping (a different virtual alias on parisc's VIPT caches)
 * sees the new data.  'vaddr' and 'pg' identify the user-space mapping
 * but are unused here — TODO confirm whether the alias needs them.
 */
static inline void
copy_user_page(void *vto, void *vfrom, unsigned long vaddr, struct page *pg)
{
	copy_user_page_asm(vto, vfrom);
	/* Flush AFTER the copy so the just-written lines reach memory. */
	flush_kernel_dcache_page(vto);
	/* XXX: ppc flushes icache too, should we? */
}
| 32 | |
/*
 * Zero one user page.
 *
 * Purges any stale kernel-alias dcache lines for 'page' first, then
 * clears the page through the assembly helper, which takes the user
 * virtual address 'vaddr' (presumably to pick the correct cache alias
 * on parisc's virtually-indexed caches — confirm against the asm).
 * 'pg' is accepted for interface compatibility but unused.
 */
static inline void
clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	/* Purge BEFORE clearing so no stale kernel-alias lines survive. */
	purge_kernel_dcache_page((unsigned long)page);
	clear_user_page_asm(page, vaddr);
}
| 39 | |
/*
 * These are used to make use of C type-checking..
 *
 * Page-table entry types are wrapped in single-member structs so that
 * pte/pmd/pgd/pgprot values cannot be silently mixed up or used in
 * plain arithmetic; access goes through the *_val() accessors below.
 */
#ifdef __LP64__
/* 64-bit: one word holds both the PFN and the flag bits. */
typedef struct { unsigned long pte; } pte_t;
#else
/* 32-bit: a second word carries the software flag bits. */
typedef struct {
	unsigned long pte;
	unsigned long flags;
} pte_t;
#endif
/* NOTE: even on 64 bits, these entries are __u32 because we allocate
 * the pmd and pgd in ZONE_DMA (i.e. under 4GB) */
typedef struct { __u32 pmd; } pmd_t;
typedef struct { __u32 pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
| 56 | |
| 57 | #define pte_val(x) ((x).pte) |
| 58 | #ifdef __LP64__ |
| 59 | #define pte_flags(x) (*(__u32 *)&((x).pte)) |
| 60 | #else |
| 61 | #define pte_flags(x) ((x).flags) |
| 62 | #endif |
| 63 | |
| 64 | /* These do not work lvalues, so make sure we don't use them as such. */ |
| 65 | #define pmd_val(x) ((x).pmd + 0) |
| 66 | #define pgd_val(x) ((x).pgd + 0) |
| 67 | #define pgprot_val(x) ((x).pgprot) |
| 68 | |
| 69 | #define __pmd_val_set(x,n) (x).pmd = (n) |
| 70 | #define __pgd_val_set(x,n) (x).pgd = (n) |
| 71 | |
| 72 | #define __pte(x) ((pte_t) { (x) } ) |
| 73 | #define __pmd(x) ((pmd_t) { (x) } ) |
| 74 | #define __pgd(x) ((pgd_t) { (x) } ) |
| 75 | #define __pgprot(x) ((pgprot_t) { (x) } ) |
| 76 | |
| 77 | /* Pure 2^n version of get_order */ |
| 78 | extern __inline__ int get_order(unsigned long size) |
| 79 | { |
| 80 | int order; |
| 81 | |
| 82 | size = (size-1) >> (PAGE_SHIFT-1); |
| 83 | order = -1; |
| 84 | do { |
| 85 | size >>= 1; |
| 86 | order++; |
| 87 | } while (size); |
| 88 | return order; |
| 89 | } |
| 90 | |
| 91 | typedef struct __physmem_range { |
| 92 | unsigned long start_pfn; |
| 93 | unsigned long pages; /* PAGE_SIZE pages */ |
| 94 | } physmem_range_t; |
| 95 | |
| 96 | extern physmem_range_t pmem_ranges[]; |
| 97 | extern int npmem_ranges; |
| 98 | |
| 99 | #endif /* !__ASSEMBLY__ */ |
| 100 | |
| 101 | /* WARNING: The definitions below must match exactly to sizeof(pte_t) |
| 102 | * etc |
| 103 | */ |
| 104 | #ifdef __LP64__ |
| 105 | #define BITS_PER_PTE_ENTRY 3 |
| 106 | #define BITS_PER_PMD_ENTRY 2 |
| 107 | #define BITS_PER_PGD_ENTRY 2 |
| 108 | #else |
| 109 | #define BITS_PER_PTE_ENTRY 3 |
| 110 | #define BITS_PER_PMD_ENTRY 2 |
| 111 | #define BITS_PER_PGD_ENTRY BITS_PER_PMD_ENTRY |
| 112 | #endif |
| 113 | #define PGD_ENTRY_SIZE (1UL << BITS_PER_PGD_ENTRY) |
| 114 | #define PMD_ENTRY_SIZE (1UL << BITS_PER_PMD_ENTRY) |
| 115 | #define PTE_ENTRY_SIZE (1UL << BITS_PER_PTE_ENTRY) |
| 116 | |
| 117 | /* to align the pointer to the (next) page boundary */ |
| 118 | #define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) |
| 119 | |
| 120 | |
| 121 | #define LINUX_GATEWAY_SPACE 0 |
| 122 | |
| 123 | /* This governs the relationship between virtual and physical addresses. |
| 124 | * If you alter it, make sure to take care of our various fixed mapping |
| 125 | * segments in fixmap.h */ |
| 126 | #define __PAGE_OFFSET (0x10000000) |
| 127 | |
| 128 | #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) |
| 129 | |
| 130 | /* The size of the gateway page (we leave lots of room for expansion) */ |
| 131 | #define GATEWAY_PAGE_SIZE 0x4000 |
| 132 | |
| 133 | /* The start of the actual kernel binary---used in vmlinux.lds.S |
| 134 | * Leave some space after __PAGE_OFFSET for detecting kernel null |
| 135 | * ptr derefs */ |
| 136 | #define KERNEL_BINARY_TEXT_START (__PAGE_OFFSET + 0x100000) |
| 137 | |
| 138 | /* These macros don't work for 64-bit C code -- don't allow in C at all */ |
| 139 | #ifdef __ASSEMBLY__ |
| 140 | # define PA(x) ((x)-__PAGE_OFFSET) |
| 141 | # define VA(x) ((x)+__PAGE_OFFSET) |
| 142 | #endif |
| 143 | #define __pa(x) ((unsigned long)(x)-PAGE_OFFSET) |
| 144 | #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) |
| 145 | |
| 146 | #ifndef CONFIG_DISCONTIGMEM |
| 147 | #define pfn_to_page(pfn) (mem_map + (pfn)) |
| 148 | #define page_to_pfn(page) ((unsigned long)((page) - mem_map)) |
| 149 | #define pfn_valid(pfn) ((pfn) < max_mapnr) |
| 150 | #endif /* CONFIG_DISCONTIGMEM */ |
| 151 | |
| 152 | #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) |
| 153 | |
| 154 | #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) |
| 155 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) |
| 156 | |
| 157 | #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ |
| 158 | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) |
| 159 | |
| 160 | #endif /* __KERNEL__ */ |
| 161 | |
| 162 | #endif /* _PARISC_PAGE_H */ |