/* SPDX-License-Identifier: GPL-2.0 */
/*
 * sparc64 page definitions: base/huge page sizes, page-table entry
 * types, and the virtual-address-hole layout constants.
 */
#ifndef _SPARC64_PAGE_H
#define _SPARC64_PAGE_H

#include <linux/const.h>

/* Base page size is 8KB (1 << 13). */
#define PAGE_SHIFT   13

#define PAGE_SIZE    (_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK    (~(PAGE_SIZE-1))

/* Flushing for D-cache alias handling is only needed if
 * the page size is smaller than 16K.
 */
#if PAGE_SHIFT < 14
#define DCACHE_ALIASING_POSSIBLE
#endif

/* The default huge page is 8MB (1 << 23), composed of two hardware
 * 4MB (1 << 22) "real" huge pages.  The remaining shifts name the
 * other supported huge page sizes: 16GB, 2GB, 256MB and 64KB.
 */
#define HPAGE_SHIFT		23
#define REAL_HPAGE_SHIFT	22
#define HPAGE_16GB_SHIFT	34
#define HPAGE_2GB_SHIFT		31
#define HPAGE_256MB_SHIFT	28
#define HPAGE_64K_SHIFT		16
#define REAL_HPAGE_SIZE		(_AC(1,UL) << REAL_HPAGE_SHIFT)

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
#define HPAGE_SIZE		(_AC(1,UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1UL))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
/* Number of REAL_HPAGE_SIZE mappings backing one HPAGE_SIZE page (== 2). */
#define REAL_HPAGE_PER_HPAGE	(_AC(1,UL) << (HPAGE_SHIFT - REAL_HPAGE_SHIFT))
/* Five hstates, matching the five shifts above: 64K, 8M, 256M, 2G, 16G. */
#define HUGE_MAX_HSTATE		5
#endif

#ifndef __ASSEMBLY__

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
struct pt_regs;
void hugetlb_setup(struct pt_regs *regs);
#endif

#define WANT_PAGE_VIRTUAL

/* Page clear/copy primitives; the user-page variants take the user
 * virtual address so implementations can handle D-cache aliasing.
 */
void _clear_page(void *page);
#define clear_page(X)	_clear_page((void *)(X))
struct page;
void clear_user_page(void *addr, unsigned long vaddr, struct page *page);
#define copy_page(X,Y)	memcpy((void *)(X), (void *)(Y), PAGE_SIZE)
void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *topage);
#define __HAVE_ARCH_COPY_USER_HIGHPAGE
struct vm_area_struct;
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma);
#define __HAVE_ARCH_COPY_HIGHPAGE
void copy_highpage(struct page *to, struct page *from);

/* Unlike sparc32, sparc64's parameter passing API is more
 * sane in that structures which as small enough are passed
 * in registers instead of on the stack.  Thus, setting
 * STRICT_MM_TYPECHECKS does not generate worse code so
 * let's enable it to get the type checking.
 */

#define STRICT_MM_TYPECHECKS

#ifdef STRICT_MM_TYPECHECKS
/* These are used to make use of C type-checking.. */
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long iopte; } iopte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pud; } pud_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;

#define pte_val(x)	((x).pte)
#define iopte_val(x)	((x).iopte)
#define pmd_val(x)	((x).pmd)
#define pud_val(x)	((x).pud)
#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define __pte(x)	((pte_t) { (x) } )
#define __iopte(x)	((iopte_t) { (x) } )
#define __pmd(x)	((pmd_t) { (x) } )
#define __pud(x)	((pud_t) { (x) } )
#define __pgd(x)	((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )

#else
/* .. while these make it easier on the compiler */
typedef unsigned long pte_t;
typedef unsigned long iopte_t;
typedef unsigned long pmd_t;
typedef unsigned long pud_t;
typedef unsigned long pgd_t;
typedef unsigned long pgprot_t;

#define pte_val(x)	(x)
#define iopte_val(x)	(x)
#define pmd_val(x)	(x)
#define pud_val(x)	(x)
#define pgd_val(x)	(x)
#define pgprot_val(x)	(x)

#define __pte(x)	(x)
#define __iopte(x)	(x)
#define __pmd(x)	(x)
#define __pud(x)	(x)
#define __pgd(x)	(x)
#define __pgprot(x)	(x)

#endif /* (STRICT_MM_TYPECHECKS) */

typedef pte_t *pgtable_t;

/* Bounds of the unmappable hole in the 64-bit virtual address space;
 * presumably established at boot from the CPU's implemented VA bits
 * (defined elsewhere — confirm against the arch setup code).
 */
extern unsigned long sparc64_va_hole_top;
extern unsigned long sparc64_va_hole_bottom;

/* The next two defines specify the actual exclusion region we
 * enforce, wherein we use a 4GB red zone on each side of the VA hole.
 */
#define VA_EXCLUDE_START (sparc64_va_hole_bottom - (1UL << 32UL))
#define VA_EXCLUDE_END   (sparc64_va_hole_top + (1UL << 32UL))

#define TASK_UNMAPPED_BASE	(test_thread_flag(TIF_32BIT) ? \
				 _AC(0x0000000070000000,UL) : \
				 VA_EXCLUDE_END)

#include <asm-generic/memory_model.h>

/* Start of the kernel linear mapping; runtime-chosen, see __pa/__va below. */
extern unsigned long PAGE_OFFSET;

#endif /* !(__ASSEMBLY__) */

/* The maximum number of physical memory address bits we support.  The
 * largest value we can support is whatever "KPGD_SHIFT + KPTE_BITS"
 * evaluates to.
 */
#define MAX_PHYS_ADDRESS_BITS	53

/* log2 of 4MB and 256MB; defined outside !__ASSEMBLY__ so assembly
 * code can use them too.
 */
#define ILOG2_4MB		22
#define ILOG2_256MB		28

#ifndef __ASSEMBLY__

/* Linear-mapping conversions: kernel virtual <-> physical via PAGE_OFFSET. */
#define __pa(x)			((unsigned long)(x) - PAGE_OFFSET)
#define __va(x)			((void *)((unsigned long) (x) + PAGE_OFFSET))

#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)

#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr)>>PAGE_SHIFT)

#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)

#define virt_to_phys __pa
#define phys_to_virt __va

#endif /* !(__ASSEMBLY__) */

#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#include <asm-generic/getorder.h>

#endif /* _SPARC64_PAGE_H */