Sam Ravnborg | f5e706a | 2008-07-17 21:55:51 -0700 | [diff] [blame] | 1 | #ifndef _SPARC64_PAGE_H |
| 2 | #define _SPARC64_PAGE_H |
| 3 | |
| 4 | #include <linux/const.h> |
| 5 | |
/* Base page size: sparc64 uses 8KB pages (1 << 13 = 8192). */
#define PAGE_SHIFT   13

#define PAGE_SIZE    (_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK    (~(PAGE_SIZE-1))

/* Flushing for D-cache alias handling is only needed if
 * the page size is smaller than 16K.
 */
#if PAGE_SHIFT < 14
#define DCACHE_ALIASING_POSSIBLE
#endif
| 17 | |
/* Huge pages are 8MB (1 << 23); each is composed of two 4MB
 * (1 << 22) "real" hardware huge pages.
 */
#define HPAGE_SHIFT		23
#define REAL_HPAGE_SHIFT	22

#define REAL_HPAGE_SIZE		(_AC(1,UL) << REAL_HPAGE_SHIFT)

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
#define HPAGE_SIZE		(_AC(1,UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1UL))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif
| 29 | |
| 30 | #ifndef __ASSEMBLY__ |
| 31 | |
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
struct pt_regs;
void hugetlb_setup(struct pt_regs *regs);
#endif

/* Ask the core VM to keep a 'virtual' field in struct page. */
#define WANT_PAGE_VIRTUAL

/* Page clearing/copying primitives.  _clear_page is implemented
 * elsewhere (not in this header); copy_page is a plain memcpy of
 * PAGE_SIZE bytes.
 */
void _clear_page(void *page);
#define clear_page(X)	_clear_page((void *)(X))
struct page;
void clear_user_page(void *addr, unsigned long vaddr, struct page *page);
#define copy_page(X,Y)	memcpy((void *)(X), (void *)(Y), PAGE_SIZE)
void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *topage);
Sam Ravnborg | f5e706a | 2008-07-17 21:55:51 -0700 | [diff] [blame] | 45 | |
/* Unlike sparc32, sparc64's parameter passing API is more
 * sane in that structures which are small enough are passed
 * in registers instead of on the stack.  Thus, setting
 * STRICT_MM_TYPECHECKS does not generate worse code so
 * let's enable it to get the type checking.
 */

#define STRICT_MM_TYPECHECKS

#ifdef STRICT_MM_TYPECHECKS
/* These are used to make use of C type-checking: each page-table
 * level gets a distinct one-member struct so the levels cannot be
 * mixed up accidentally.
 */
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long iopte; } iopte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pud; } pud_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;

/* Extract the raw value from a typed page-table entry. */
#define pte_val(x)	((x).pte)
#define iopte_val(x)	((x).iopte)
#define pmd_val(x)	((x).pmd)
#define pud_val(x)	((x).pud)
#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

/* Wrap a raw value into the corresponding typed entry. */
#define __pte(x)	((pte_t) { (x) } )
#define __iopte(x)	((iopte_t) { (x) } )
#define __pmd(x)	((pmd_t) { (x) } )
#define __pud(x)	((pud_t) { (x) } )
#define __pgd(x)	((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )

#else
/* .. while these make it easier on the compiler: plain integers,
 * no type distinction between the levels.
 */
typedef unsigned long pte_t;
typedef unsigned long iopte_t;
typedef unsigned long pmd_t;
typedef unsigned long pud_t;
typedef unsigned long pgd_t;
typedef unsigned long pgprot_t;

#define pte_val(x)	(x)
#define iopte_val(x)	(x)
#define pmd_val(x)	(x)
#define pud_val(x)	(x)
#define pgd_val(x)	(x)
#define pgprot_val(x)	(x)

#define __pte(x)	(x)
#define __iopte(x)	(x)
#define __pmd(x)	(x)
#define __pud(x)	(x)
#define __pgd(x)	(x)
#define __pgprot(x)	(x)

#endif /* (STRICT_MM_TYPECHECKS) */

/* A page-table page is addressed as an array of PTEs. */
typedef pte_t *pgtable_t;
Sam Ravnborg | f5e706a | 2008-07-17 21:55:51 -0700 | [diff] [blame] | 104 | |
/* These two values define the virtual address space range in which we
 * must forbid 64-bit user processes from making mappings.  It used to
 * represent precisely the virtual address space hole present in most
 * early sparc64 chips including UltraSPARC-I.  But now it also is
 * further constrained by the limits of our page tables, which is
 * 43-bits of virtual address.
 */
#define SPARC64_VA_HOLE_TOP	_AC(0xfffffc0000000000,UL)
#define SPARC64_VA_HOLE_BOTTOM	_AC(0x0000040000000000,UL)

/* The next two defines specify the actual exclusion region we
 * enforce, wherein we use a 4GB red zone on each side of the VA hole.
 */
#define VA_EXCLUDE_START (SPARC64_VA_HOLE_BOTTOM - (1UL << 32UL))
#define VA_EXCLUDE_END   (SPARC64_VA_HOLE_TOP + (1UL << 32UL))

/* Default mmap search base: a low address for 32-bit tasks, or just
 * above the excluded hole for 64-bit tasks.
 */
#define TASK_UNMAPPED_BASE	(test_thread_flag(TIF_32BIT) ? \
				 _AC(0x0000000070000000,UL) : \
				 VA_EXCLUDE_END)
Sam Ravnborg | f5e706a | 2008-07-17 21:55:51 -0700 | [diff] [blame] | 124 | |
| 125 | #include <asm-generic/memory_model.h> |
| 126 | |
/* PAGE_OFFSET_BY_BITS(X) computes the linear-map base address covering
 * the top 2^X bytes of the virtual address space.  The effective
 * PAGE_OFFSET is a variable, set elsewhere at runtime rather than a
 * compile-time constant.
 */
#define PAGE_OFFSET_BY_BITS(X)	(-(_AC(1,UL) << (X)))
extern unsigned long PAGE_OFFSET;
| 129 | |
Sam Ravnborg | f5e706a | 2008-07-17 21:55:51 -0700 | [diff] [blame] | 130 | #endif /* !(__ASSEMBLY__) */ |
| 131 | |
/* The maximum number of physical memory address bits we support, this
 * is used to size various tables used to manage kernel TLB misses and
 * also the sparsemem code.
 */
#define MAX_PHYS_ADDRESS_BITS	47

/* These two shift counts are used when indexing sparc64_valid_addr_bitmap
 * and kpte_linear_bitmap.  (22 = log2(4MB), 28 = log2(256MB).)
 */
#define ILOG2_4MB		22
#define ILOG2_256MB		28
| 143 | |
#ifndef __ASSEMBLY__

/* Linear-map address conversion: kernel virtual <-> physical is a
 * simple offset by PAGE_OFFSET.
 */
#define __pa(x)			((unsigned long)(x) - PAGE_OFFSET)
#define __va(x)			((void *)((unsigned long) (x) + PAGE_OFFSET))

#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)

#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr)>>PAGE_SHIFT)

#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)

#define virt_to_phys __pa
#define phys_to_virt __va

#endif /* !(__ASSEMBLY__) */
| 159 | |
/* Default VMA flags for new user data mappings: read/write/exec
 * permitted and allowed to be granted via mprotect.
 */
#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
| 162 | |
Arnd Bergmann | 5b17e1c | 2009-05-13 22:56:30 +0000 | [diff] [blame] | 163 | #include <asm-generic/getorder.h> |
Sam Ravnborg | f5e706a | 2008-07-17 21:55:51 -0700 | [diff] [blame] | 164 | |
| 165 | #endif /* _SPARC64_PAGE_H */ |