/* $Id: page.h,v 1.39 2002/02/09 19:49:31 davem Exp $ */

#ifndef _SPARC64_PAGE_H
#define _SPARC64_PAGE_H

#include <linux/config.h>
#include <asm/const.h>

#if defined(CONFIG_SPARC64_PAGE_SIZE_8KB)
#define PAGE_SHIFT 13
#elif defined(CONFIG_SPARC64_PAGE_SIZE_64KB)
#define PAGE_SHIFT 16
#elif defined(CONFIG_SPARC64_PAGE_SIZE_512KB)
#define PAGE_SHIFT 19
#elif defined(CONFIG_SPARC64_PAGE_SIZE_4MB)
#define PAGE_SHIFT 22
#else
#error No page size specified in kernel configuration
#endif

#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))

#ifdef __KERNEL__

#ifndef __ASSEMBLY__

extern void _clear_page(void *page);
#define clear_page(X) _clear_page((void *)(X))
struct page;
extern void clear_user_page(void *addr, unsigned long vaddr, struct page *page);
#define copy_page(X,Y) memcpy((void *)(X), (void *)(Y), PAGE_SIZE)
extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *topage);

/* Unlike sparc32, sparc64's parameter passing API is more
 * sane in that structures which are small enough are passed
 * in registers instead of on the stack.  Thus, setting
 * STRICT_MM_TYPECHECKS does not generate worse code, so
 * let's enable it to get the type checking.  (An illustrative,
 * non-compiled usage sketch follows the matching #endif below.)
 */

#define STRICT_MM_TYPECHECKS

#ifdef STRICT_MM_TYPECHECKS
/* These are used to make use of C type-checking.. */
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long iopte; } iopte_t;
typedef struct { unsigned int pmd; } pmd_t;
typedef struct { unsigned int pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;

#define pte_val(x) ((x).pte)
#define iopte_val(x) ((x).iopte)
#define pmd_val(x) ((x).pmd)
#define pgd_val(x) ((x).pgd)
#define pgprot_val(x) ((x).pgprot)

#define __pte(x) ((pte_t) { (x) } )
#define __iopte(x) ((iopte_t) { (x) } )
#define __pmd(x) ((pmd_t) { (x) } )
#define __pgd(x) ((pgd_t) { (x) } )
#define __pgprot(x) ((pgprot_t) { (x) } )

#else
/* .. while these make it easier on the compiler */
typedef unsigned long pte_t;
typedef unsigned long iopte_t;
typedef unsigned int pmd_t;
typedef unsigned int pgd_t;
typedef unsigned long pgprot_t;

#define pte_val(x) (x)
#define iopte_val(x) (x)
#define pmd_val(x) (x)
#define pgd_val(x) (x)
#define pgprot_val(x) (x)

#define __pte(x) (x)
#define __iopte(x) (x)
#define __pmd(x) (x)
#define __pgd(x) (x)
#define __pgprot(x) (x)

#endif /* (STRICT_MM_TYPECHECKS) */
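
/* Illustrative usage sketch (not part of the original header): with
 * STRICT_MM_TYPECHECKS the page table types above are distinct structs,
 * so mixing them with plain integers is a compile-time error and the
 * conversion macros must be used explicitly.  The function name is
 * hypothetical and the block is never compiled.
 */
#if 0
static void example_strict_typechecks(void)
{
	pte_t pte = __pte(0xdeadbeefUL);	/* wrap a raw value */
	unsigned long raw = pte_val(pte);	/* unwrap it explicitly */

	/* unsigned long bad = pte; */		/* would not compile */
	pte = __pte(raw | 0x1UL);		/* rewrap a modified value */
}
#endif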

#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
#define HPAGE_SHIFT 22
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
#define HPAGE_SHIFT 19
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#define HPAGE_SHIFT 16
#endif

#ifdef CONFIG_HUGETLB_PAGE
#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1UL))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
#define ARCH_HAS_SETCLEAR_HUGE_PTE
#define ARCH_HAS_HUGETLB_PREFAULT_HOOK
#endif

#define TASK_UNMAPPED_BASE (test_thread_flag(TIF_32BIT) ? \
	(_AC(0x0000000070000000,UL)) : (PAGE_OFFSET))

#endif /* !(__ASSEMBLY__) */

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
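/* Worked example (added for illustration): with 8KB pages
 * (PAGE_SHIFT == 13, PAGE_SIZE == 0x2000),
 * PAGE_ALIGN(0x2000) == 0x2000 and PAGE_ALIGN(0x2001) == 0x4000.
 */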

/* We used to stick this into a hard-coded global register (%g4)
 * but that does not make sense anymore.
 */
#define PAGE_OFFSET _AC(0xFFFFF80000000000,UL)

#ifndef __ASSEMBLY__

#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET))

/* PFNs are real physical page numbers. However, mem_map only begins to record
 * per-page information starting at pfn_base. This is to handle systems where
 * the first physical page in the machine is at some huge physical address,
 * such as 4GB. This is common on a partitioned E10000, for example.
 */
extern struct page *pfn_to_page(unsigned long pfn);
extern unsigned long page_to_pfn(struct page *);

#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr)>>PAGE_SHIFT)

#define pfn_valid(pfn) (((pfn)-(pfn_base)) < max_mapnr)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)

#define virt_to_phys __pa
#define phys_to_virt __va
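
/* Illustrative usage sketch (not part of the original header): a typical
 * way the helpers above combine to go from a kernel virtual address to
 * its struct page, guarding against pfns that fall outside mem_map.
 * The function name is hypothetical and the block is never compiled.
 */
#if 0
static struct page *example_kaddr_to_page(void *kaddr)
{
	unsigned long pfn = __pa(kaddr) >> PAGE_SHIFT;

	if (!pfn_valid(pfn))	/* i.e. pfn - pfn_base >= max_mapnr */
		return NULL;
	return pfn_to_page(pfn);
}
#endif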

/* The following structure is used to hold the physical
 * memory configuration of the machine. This is filled in
 * probe_memory() and is later used by mem_init() to set up
 * mem_map[]. We statically allocate SPARC_PHYS_BANKS of
 * these structs; this is arbitrary. The entry after the
 * last valid one has num_bytes==0.
 */

struct sparc_phys_banks {
	unsigned long base_addr;
	unsigned long num_bytes;
};

#define SPARC_PHYS_BANKS 32

extern struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
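
/* Illustrative usage sketch (not part of the original header): sp_banks
 * is terminated by an entry whose num_bytes is zero, so it is typically
 * walked like this.  The function name is hypothetical and the block is
 * never compiled.
 */
#if 0
static unsigned long example_total_phys_memory(void)
{
	unsigned long total = 0;
	int i;

	for (i = 0; i < SPARC_PHYS_BANKS && sp_banks[i].num_bytes != 0; i++)
		total += sp_banks[i].num_bytes;
	return total;
}
#endif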

/* Pure 2^n version of get_order */
static __inline__ int get_order(unsigned long size)
{
	int order;

	size = (size-1) >> (PAGE_SHIFT-1);
	order = -1;
	do {
		size >>= 1;
		order++;
	} while (size);
	return order;
}
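
/* Worked examples (added for illustration):
 * get_order(1) == 0, get_order(PAGE_SIZE) == 0,
 * get_order(PAGE_SIZE + 1) == 1, get_order(4 * PAGE_SIZE) == 2.
 */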

#endif /* !(__ASSEMBLY__) */

#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
	VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#endif /* !(__KERNEL__) */

#endif /* !(_SPARC64_PAGE_H) */