Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | #ifndef _ASM_IA64_PAGE_H |
| 2 | #define _ASM_IA64_PAGE_H |
| 3 | /* |
| 4 | * Pagetable related stuff. |
| 5 | * |
| 6 | * Copyright (C) 1998, 1999, 2002 Hewlett-Packard Co |
| 7 | * David Mosberger-Tang <davidm@hpl.hp.com> |
| 8 | */ |
| 9 | |
David Woodhouse | d575964 | 2006-09-16 12:15:47 -0700 | [diff] [blame] | 10 | # ifdef __KERNEL__ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11 | |
| 12 | #include <asm/intrinsics.h> |
| 13 | #include <asm/types.h> |
| 14 | |
| 15 | /* |
Peter Chubb | 0a41e25 | 2005-08-16 19:54:00 -0700 | [diff] [blame] | 16 | * The top three bits of an IA64 address are its Region Number. |
| 17 | * Different regions are assigned to different purposes. |
| 18 | */ |
| 19 | #define RGN_SHIFT (61) |
Greg Edwards | 1b66776 | 2005-08-22 09:57:00 -0700 | [diff] [blame] | 20 | #define RGN_BASE(r) (__IA64_UL_CONST(r)<<RGN_SHIFT) |
| 21 | #define RGN_BITS (RGN_BASE(-1)) |
Peter Chubb | 0a41e25 | 2005-08-16 19:54:00 -0700 | [diff] [blame] | 22 | |
Peter Chubb | 0a41e25 | 2005-08-16 19:54:00 -0700 | [diff] [blame] | 23 | #define RGN_KERNEL 7 /* Identity mapped region */ |
| 24 | #define RGN_UNCACHED 6 /* Identity mapped I/O region */ |
| 25 | #define RGN_GATE 5 /* Gate page, Kernel text, etc */ |
| 26 | #define RGN_HPAGE 4 /* For Huge TLB pages */ |
| 27 | |
| 28 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 29 | * PAGE_SHIFT determines the actual kernel page size. |
| 30 | */ |
| 31 | #if defined(CONFIG_IA64_PAGE_SIZE_4KB) |
| 32 | # define PAGE_SHIFT 12 |
| 33 | #elif defined(CONFIG_IA64_PAGE_SIZE_8KB) |
| 34 | # define PAGE_SHIFT 13 |
| 35 | #elif defined(CONFIG_IA64_PAGE_SIZE_16KB) |
| 36 | # define PAGE_SHIFT 14 |
| 37 | #elif defined(CONFIG_IA64_PAGE_SIZE_64KB) |
| 38 | # define PAGE_SHIFT 16 |
| 39 | #else |
| 40 | # error Unsupported page size! |
| 41 | #endif |
| 42 | |
| 43 | #define PAGE_SIZE (__IA64_UL_CONST(1) << PAGE_SHIFT) |
| 44 | #define PAGE_MASK (~(PAGE_SIZE - 1)) |
| 45 | #define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK) |
| 46 | |
| 47 | #define PERCPU_PAGE_SHIFT 16 /* log2() of max. size of per-CPU area */ |
| 48 | #define PERCPU_PAGE_SIZE (__IA64_UL_CONST(1) << PERCPU_PAGE_SHIFT) |
| 49 | |
Peter Chubb | 0a41e25 | 2005-08-16 19:54:00 -0700 | [diff] [blame] | 50 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 51 | #ifdef CONFIG_HUGETLB_PAGE |
Peter Chubb | 0a41e25 | 2005-08-16 19:54:00 -0700 | [diff] [blame] | 52 | # define HPAGE_REGION_BASE RGN_BASE(RGN_HPAGE) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 53 | # define HPAGE_SHIFT hpage_shift |
| 54 | # define HPAGE_SHIFT_DEFAULT 28 /* check ia64 SDM for architecture supported size */ |
| 55 | # define HPAGE_SIZE (__IA64_UL_CONST(1) << HPAGE_SHIFT) |
| 56 | # define HPAGE_MASK (~(HPAGE_SIZE - 1)) |
| 57 | |
| 58 | # define HAVE_ARCH_HUGETLB_UNMAPPED_AREA |
| 59 | # define ARCH_HAS_HUGEPAGE_ONLY_RANGE |
David Gibson | 42b88be | 2006-03-22 00:09:01 -0800 | [diff] [blame] | 60 | # define ARCH_HAS_PREPARE_HUGEPAGE_RANGE |
David Gibson | 9da61ae | 2006-03-22 00:08:57 -0800 | [diff] [blame] | 61 | # define ARCH_HAS_HUGETLB_FREE_PGD_RANGE |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 62 | #endif /* CONFIG_HUGETLB_PAGE */ |
| 63 | |
| 64 | #ifdef __ASSEMBLY__ |
| 65 | # define __pa(x) ((x) - PAGE_OFFSET) |
| 66 | # define __va(x) ((x) + PAGE_OFFSET) |
| 67 | #else /* !__ASSEMBLY */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 68 | # define STRICT_MM_TYPECHECKS |
| 69 | |
| 70 | extern void clear_page (void *page); |
| 71 | extern void copy_page (void *to, void *from); |
| 72 | |
| 73 | /* |
| 74 | * clear_user_page() and copy_user_page() can't be inline functions because |
| 75 | * flush_dcache_page() can't be defined until later... |
| 76 | */ |
| 77 | #define clear_user_page(addr, vaddr, page) \ |
| 78 | do { \ |
| 79 | clear_page(addr); \ |
| 80 | flush_dcache_page(page); \ |
| 81 | } while (0) |
| 82 | |
| 83 | #define copy_user_page(to, from, vaddr, page) \ |
| 84 | do { \ |
| 85 | copy_page((to), (from)); \ |
| 86 | flush_dcache_page(page); \ |
| 87 | } while (0) |
| 88 | |
| 89 | |
Mel Gorman | 769848c | 2007-07-17 04:03:05 -0700 | [diff] [blame] | 90 | #define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \ |
| 91 | ({ \ |
| 92 | struct page *page = alloc_page_vma( \ |
| 93 | GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr); \ |
| 94 | if (page) \ |
| 95 | flush_dcache_page(page); \ |
| 96 | page; \ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 97 | }) |
| 98 | |
| 99 | #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE |
| 100 | |
| 101 | #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) |
| 102 | |
| 103 | #ifdef CONFIG_VIRTUAL_MEM_MAP |
| 104 | extern int ia64_pfn_valid (unsigned long pfn); |
Matthew Wilcox | b0f40ea | 2006-11-16 13:40:53 -0700 | [diff] [blame] | 105 | #else |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 106 | # define ia64_pfn_valid(pfn) 1 |
| 107 | #endif |
| 108 | |
KAMEZAWA Hiroyuki | 0ecd702 | 2006-03-27 01:15:53 -0800 | [diff] [blame] | 109 | #ifdef CONFIG_VIRTUAL_MEM_MAP |
| 110 | extern struct page *vmem_map; |
| 111 | #ifdef CONFIG_DISCONTIGMEM |
| 112 | # define page_to_pfn(page) ((unsigned long) (page - vmem_map)) |
| 113 | # define pfn_to_page(pfn) (vmem_map + (pfn)) |
Matthew Wilcox | b0f40ea | 2006-11-16 13:40:53 -0700 | [diff] [blame] | 114 | #else |
| 115 | # include <asm-generic/memory_model.h> |
KAMEZAWA Hiroyuki | 0ecd702 | 2006-03-27 01:15:53 -0800 | [diff] [blame] | 116 | #endif |
Matthew Wilcox | b0f40ea | 2006-11-16 13:40:53 -0700 | [diff] [blame] | 117 | #else |
| 118 | # include <asm-generic/memory_model.h> |
KAMEZAWA Hiroyuki | 0ecd702 | 2006-03-27 01:15:53 -0800 | [diff] [blame] | 119 | #endif |
| 120 | |
Bob Picco | 1be7d99 | 2005-10-04 15:13:50 -0400 | [diff] [blame] | 121 | #ifdef CONFIG_FLATMEM |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 122 | # define pfn_valid(pfn) (((pfn) < max_mapnr) && ia64_pfn_valid(pfn)) |
Bob Picco | 1be7d99 | 2005-10-04 15:13:50 -0400 | [diff] [blame] | 123 | #elif defined(CONFIG_DISCONTIGMEM) |
Dean Roe | b77dae5 | 2005-11-09 14:25:06 -0600 | [diff] [blame] | 124 | extern unsigned long min_low_pfn; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 125 | extern unsigned long max_low_pfn; |
Dean Roe | b77dae5 | 2005-11-09 14:25:06 -0600 | [diff] [blame] | 126 | # define pfn_valid(pfn) (((pfn) >= min_low_pfn) && ((pfn) < max_low_pfn) && ia64_pfn_valid(pfn)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 127 | #endif |
| 128 | |
| 129 | #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) |
| 130 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) |
Bob Picco | 631bb0e | 2005-10-31 13:25:25 -0500 | [diff] [blame] | 131 | #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 132 | |
/*
 * Convenience union for picking apart an IA-64 virtual address.
 * The hardware splits a 64-bit VA into a 3-bit region number
 * (bits 63..61, see RGN_SHIFT above) and a 61-bit intra-region
 * offset; the bitfields below mirror that split exactly.
 */
typedef union ia64_va {
	struct {
		unsigned long off : 61;		/* intra-region offset */
		unsigned long reg : 3;		/* region number */
	} f;
	unsigned long l;			/* whole address as an integer */
	void *p;				/* whole address as a pointer */
} ia64_va;
| 141 | |
| 142 | /* |
| 143 | * Note: These macros depend on the fact that PAGE_OFFSET has all |
| 144 | * region bits set to 1 and all other bits set to zero. They are |
| 145 | * expressed in this way to ensure they result in a single "dep" |
| 146 | * instruction. |
| 147 | */ |
| 148 | #define __pa(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;}) |
| 149 | #define __va(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;}) |
| 150 | |
| 151 | #define REGION_NUMBER(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg;}) |
| 152 | #define REGION_OFFSET(x) ({ia64_va _v; _v.l = (long) (x); _v.f.off;}) |
| 153 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 154 | #ifdef CONFIG_HUGETLB_PAGE |
| 155 | # define htlbpage_to_page(x) (((unsigned long) REGION_NUMBER(x) << 61) \ |
| 156 | | (REGION_OFFSET(x) >> (HPAGE_SHIFT-PAGE_SHIFT))) |
| 157 | # define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) |
| 158 | # define is_hugepage_only_range(mm, addr, len) \ |
Chen, Kenneth W | 2332c9a | 2006-03-22 10:49:00 -0800 | [diff] [blame] | 159 | (REGION_NUMBER(addr) == RGN_HPAGE || \ |
Peter Chubb | 0a41e25 | 2005-08-16 19:54:00 -0700 | [diff] [blame] | 160 | REGION_NUMBER((addr)+(len)-1) == RGN_HPAGE) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 161 | extern unsigned int hpage_shift; |
| 162 | #endif |
| 163 | |
/*
 * Return the allocation order (log2 of the number of pages) needed to
 * hold 'size' bytes, i.e. the smallest 'order' with
 * (PAGE_SIZE << order) >= size.
 *
 * Instead of a shift loop this uses the IA-64 FP unit: converting
 * (size - 1) to long double and reading the exponent field with
 * getf.exp yields floor(log2(size - 1)) plus the 82-bit register
 * format's exponent bias of 0xffff; subtracting the bias and
 * PAGE_SHIFT, then adding 1, gives the order.
 *
 * NOTE(review): for size <= 1 the intermediate (size - 1) is 0 or
 * wraps to ULONG_MAX; the final clamp to 0 handles the size == 1
 * case, and callers are presumably expected not to pass 0 — confirm.
 */
static __inline__ int
get_order (unsigned long size)
{
	long double d = size - 1;	/* -1 so exact powers of two round down */
	long order;

	order = ia64_getf_exp(d);	/* biased binary exponent of d */
	order = order - PAGE_SHIFT - 0xffff + 1;	/* strip bias, convert bytes->pages */
	if (order < 0)
		order = 0;		/* never return a negative order */
	return order;
}
| 176 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 177 | #endif /* !__ASSEMBLY__ */ |
| 178 | |
| 179 | #ifdef STRICT_MM_TYPECHECKS |
| 180 | /* |
| 181 | * These are used to make use of C type-checking.. |
| 182 | */ |
| 183 | typedef struct { unsigned long pte; } pte_t; |
| 184 | typedef struct { unsigned long pmd; } pmd_t; |
Robin Holt | 837cd0b | 2005-11-11 09:35:43 -0600 | [diff] [blame] | 185 | #ifdef CONFIG_PGTABLE_4 |
| 186 | typedef struct { unsigned long pud; } pud_t; |
| 187 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 188 | typedef struct { unsigned long pgd; } pgd_t; |
| 189 | typedef struct { unsigned long pgprot; } pgprot_t; |
| 190 | |
| 191 | # define pte_val(x) ((x).pte) |
| 192 | # define pmd_val(x) ((x).pmd) |
Robin Holt | 837cd0b | 2005-11-11 09:35:43 -0600 | [diff] [blame] | 193 | #ifdef CONFIG_PGTABLE_4 |
| 194 | # define pud_val(x) ((x).pud) |
| 195 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 196 | # define pgd_val(x) ((x).pgd) |
| 197 | # define pgprot_val(x) ((x).pgprot) |
| 198 | |
| 199 | # define __pte(x) ((pte_t) { (x) } ) |
| 200 | # define __pgprot(x) ((pgprot_t) { (x) } ) |
| 201 | |
| 202 | #else /* !STRICT_MM_TYPECHECKS */ |
| 203 | /* |
| 204 | * .. while these make it easier on the compiler |
| 205 | */ |
| 206 | # ifndef __ASSEMBLY__ |
| 207 | typedef unsigned long pte_t; |
| 208 | typedef unsigned long pmd_t; |
| 209 | typedef unsigned long pgd_t; |
| 210 | typedef unsigned long pgprot_t; |
| 211 | # endif |
| 212 | |
| 213 | # define pte_val(x) (x) |
| 214 | # define pmd_val(x) (x) |
| 215 | # define pgd_val(x) (x) |
| 216 | # define pgprot_val(x) (x) |
| 217 | |
| 218 | # define __pte(x) (x) |
| 219 | # define __pgd(x) (x) |
| 220 | # define __pgprot(x) (x) |
| 221 | #endif /* !STRICT_MM_TYPECHECKS */ |
| 222 | |
Peter Chubb | 0a41e25 | 2005-08-16 19:54:00 -0700 | [diff] [blame] | 223 | #define PAGE_OFFSET RGN_BASE(RGN_KERNEL) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 224 | |
| 225 | #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \ |
| 226 | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC | \ |
| 227 | (((current->personality & READ_IMPLIES_EXEC) != 0) \ |
| 228 | ? VM_EXEC : 0)) |
| 229 | |
David Woodhouse | d575964 | 2006-09-16 12:15:47 -0700 | [diff] [blame] | 230 | # endif /* __KERNEL__ */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 231 | #endif /* _ASM_IA64_PAGE_H */ |