#ifndef _ASM_POWERPC_BOOK3S_64_HASH_H
#define _ASM_POWERPC_BOOK3S_64_HASH_H
#ifdef __KERNEL__

/*
 * Common bits between 4K and 64K pages in a linux-style PTE.
 * Additional bits may be defined in pgtable-hash64-*.h
 *
 * Note: We only support user read/write permissions. The supervisor
 * always has full read/write access to pages above PAGE_OFFSET (pages
 * below that always use the user access permissions).
 *
 * We could create a separate kernel read-only permission by using the
 * 3 PP bit combinations that newer processors provide, but we
 * currently don't.
 */
#define H_PAGE_BUSY		0x00800	/* software: PTE & hash are busy */
#define H_PTE_NONE_MASK		_PAGE_HPTEFLAGS
#define H_PAGE_F_GIX_SHIFT	57
#define H_PAGE_F_GIX		(7ul << 57)	/* HPTE index within HPTEG */
#define H_PAGE_F_SECOND		(1ul << 60)	/* HPTE is in 2ndary HPTEG */
#define H_PAGE_HASHPTE		(1ul << 61)	/* PTE has associated HPTE */
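
/*
 * Worked bit-layout example (illustrative, not part of the original
 * header): a software PTE carrying (5ul << 57) | H_PAGE_HASHPTE says
 * "this PTE has a hash PTE in slot 5 of its primary HPTE group";
 * OR-ing in H_PAGE_F_SECOND would place that slot in the secondary
 * group instead.
 */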

#ifdef CONFIG_PPC_64K_PAGES
#include <asm/book3s/64/hash-64k.h>
#else
#include <asm/book3s/64/hash-4k.h>
#endif

/*
 * Size of EA range mapped by our pagetables.
 */
#define H_PGTABLE_EADDR_SIZE	(H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE + \
				 H_PUD_INDEX_SIZE + H_PGD_INDEX_SIZE + PAGE_SHIFT)
#define H_PGTABLE_RANGE		(ASM_CONST(1) << H_PGTABLE_EADDR_SIZE)
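
/*
 * Worked example (hypothetical 4K-page index sizes; see hash-4k.h for
 * the real values): with 9 (PTE) + 7 (PMD) + 9 (PUD) + 9 (PGD) index
 * bits and PAGE_SHIFT = 12, H_PGTABLE_EADDR_SIZE = 46, i.e. the page
 * tables can map a 2^46-byte (64TB) effective address range.
 */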

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * Only with the hash MMU do we need to use the second half of the PMD
 * page table to store the pointer to the deposited pgtable_t.
 */
#define H_PMD_CACHE_INDEX	(H_PMD_INDEX_SIZE + 1)
#else
#define H_PMD_CACHE_INDEX	H_PMD_INDEX_SIZE
#endif
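
/*
 * Illustrative arithmetic (the H_PMD_INDEX_SIZE value here is
 * hypothetical): with H_PMD_INDEX_SIZE = 7, H_PMD_CACHE_INDEX = 8, so
 * the PMD cache object holds 256 pointer slots: the first 128 are PMD
 * entries and the second 128 hold the deposited pgtable_t pointers.
 */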
/*
 * Define the address range of the kernel non-linear virtual area
 */
#define H_KERN_VIRT_START	ASM_CONST(0xD000000000000000)
#define H_KERN_VIRT_SIZE	ASM_CONST(0x0000100000000000)

/*
 * The vmalloc space starts at the beginning of that region and
 * occupies half of it on hash CPUs; on Book3E it occupies only a
 * quarter, with a further quarter kept for the virtual memmap.
 */
#define H_VMALLOC_START	H_KERN_VIRT_START
#define H_VMALLOC_SIZE	(H_KERN_VIRT_SIZE >> 1)
#define H_VMALLOC_END	(H_VMALLOC_START + H_VMALLOC_SIZE)
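
/*
 * Plugging in the constants above (illustrative arithmetic only):
 *
 *	H_VMALLOC_START = 0xD000000000000000
 *	H_VMALLOC_SIZE  = 0x0000100000000000 >> 1 = 0x0000080000000000
 *	H_VMALLOC_END   = 0xD000080000000000
 */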

/*
 * Region IDs
 */
#define REGION_SHIFT		60UL
#define REGION_MASK		(0xfUL << REGION_SHIFT)
#define REGION_ID(ea)		(((unsigned long)(ea)) >> REGION_SHIFT)

#define VMALLOC_REGION_ID	(REGION_ID(H_VMALLOC_START))
#define KERNEL_REGION_ID	(REGION_ID(PAGE_OFFSET))
#define VMEMMAP_REGION_ID	(0xfUL)	/* Server only */
#define USER_REGION_ID		(0UL)
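
/*
 * Worked example (illustrative): REGION_ID(0xD000000000001000) = 0xD,
 * which equals VMALLOC_REGION_ID given H_VMALLOC_START above;
 * PAGE_OFFSET (0xC000000000000000 on book3s/64) likewise yields
 * KERNEL_REGION_ID = 0xC.
 */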

/*
 * Defines the address of the vmemmap area, in its own region on
 * hash table CPUs.
 */
#define H_VMEMMAP_BASE		(VMEMMAP_REGION_ID << REGION_SHIFT)

#ifdef CONFIG_PPC_MM_SLICES
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#endif /* CONFIG_PPC_MM_SLICES */

/* PTEIDX nibble */
#define _PTEIDX_SECONDARY	0x8
#define _PTEIDX_GROUP_IX	0x7
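
/*
 * Illustrative sketch (the helper name is hypothetical, not part of
 * the original header): fold a raw PTE value's HPTE hints into a
 * _PTEIDX-style nibble, with the secondary-group flag in bit 3 and
 * the group index in bits 0-2.
 */
#ifndef __ASSEMBLY__
static inline unsigned long hash__pte_hidx_example(unsigned long pteval)
{
	unsigned long hidx;

	hidx = (pteval & H_PAGE_F_GIX) >> H_PAGE_F_GIX_SHIFT;
	if (pteval & H_PAGE_F_SECOND)
		hidx |= _PTEIDX_SECONDARY;
	return hidx;
}
#endif /* !__ASSEMBLY__ */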

#define H_PMD_BAD_BITS		(PTE_TABLE_SIZE-1)
#define H_PUD_BAD_BITS		(PMD_TABLE_SIZE-1)

#ifndef __ASSEMBLY__
#define hash__pmd_bad(pmd)	(pmd_val(pmd) & H_PMD_BAD_BITS)
#define hash__pud_bad(pud)	(pud_val(pud) & H_PUD_BAD_BITS)
static inline int hash__pgd_bad(pgd_t pgd)
{
	return (pgd_val(pgd) == 0);
}

extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, unsigned long pte, int huge);
extern unsigned long htab_convert_pte_flags(unsigned long pteflags);
/* Atomic PTE updates */
static inline unsigned long hash__pte_update(struct mm_struct *mm,
					     unsigned long addr,
					     pte_t *ptep, unsigned long clr,
					     unsigned long set,
					     int huge)
{
	__be64 old_be, tmp_be;
	unsigned long old;

	/*
	 * ldarx/stdcx. loop: spin while H_PAGE_BUSY is set, then
	 * atomically clear the 'clr' bits and set the 'set' bits,
	 * working on the big-endian raw PTE image.
	 */
	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3		# pte_update\n\
	and.	%1,%0,%6\n\
	bne-	1b \n\
	andc	%1,%0,%4 \n\
	or	%1,%1,%7\n\
	stdcx.	%1,0,%3 \n\
	bne-	1b"
	: "=&r" (old_be), "=&r" (tmp_be), "=m" (*ptep)
	: "r" (ptep), "r" (cpu_to_be64(clr)), "m" (*ptep),
	  "r" (cpu_to_be64(H_PAGE_BUSY)), "r" (cpu_to_be64(set))
	: "cc" );
	/* huge pages use the old page table lock */
	if (!huge)
		assert_pte_locked(mm, addr);

	old = be64_to_cpu(old_be);
	if (old & H_PAGE_HASHPTE)
		hpte_need_flush(mm, addr, ptep, old, huge);

	return old;
}
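
/*
 * Hedged usage sketch (the helper name is hypothetical, not part of
 * the original header): a caller in the style of
 * ptep_test_and_clear_young() that atomically clears _PAGE_ACCESSED
 * and reports whether it was set.
 */
static inline int hash__test_and_clear_young_example(struct mm_struct *mm,
						     unsigned long addr,
						     pte_t *ptep)
{
	unsigned long old;

	old = hash__pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
	return (old & _PAGE_ACCESSED) != 0;
}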

/*
 * Set the dirty and/or accessed bits atomically in a linux PTE; this
 * function doesn't need to flush the hash entry.
 */
static inline void hash__ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
	__be64 old, tmp, val, mask;

	mask = cpu_to_be64(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_READ | _PAGE_WRITE |
			   _PAGE_EXEC | _PAGE_SOFT_DIRTY);

	val = pte_raw(entry) & mask;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%4\n\
		and.	%1,%0,%6\n\
		bne-	1b \n\
		or	%0,%3,%0\n\
		stdcx.	%0,0,%4\n\
		bne-	1b"
	:"=&r" (old), "=&r" (tmp), "=m" (*ptep)
	:"r" (val), "r" (ptep), "m" (*ptep), "r" (cpu_to_be64(H_PAGE_BUSY))
	:"cc");
}

static inline int hash__pte_same(pte_t pte_a, pte_t pte_b)
{
	return (((pte_raw(pte_a) ^ pte_raw(pte_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
}
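
/*
 * Illustrative note: two PTEs that differ only in _PAGE_HPTEFLAGS
 * (e.g. one has H_PAGE_HASHPTE set and the other doesn't) compare
 * equal here, since the hash bookkeeping bits say nothing about the
 * mapping itself.
 */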

static inline int hash__pte_none(pte_t pte)
{
	return (pte_val(pte) & ~H_PTE_NONE_MASK) == 0;
}

/*
 * This low-level function performs the actual PTE insertion. Setting
 * the PTE depends on the MMU type and other factors; it's a horrible
 * mess that I'm not going to try to clean up now, but I'm keeping it
 * in one place rather than spread around.
 */
static inline void hash__set_pte_at(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep, pte_t pte, int percpu)
{
	/*
	 * On book3s/64 this is always a plain store; the hash page
	 * table entry, if any, is managed separately.
	 */
	*ptep = pte;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
				   pmd_t *pmdp, unsigned long old_pmd);
#else
static inline void hpte_do_hugepage_flush(struct mm_struct *mm,
					  unsigned long addr, pmd_t *pmdp,
					  unsigned long old_pmd)
{
	WARN(1, "%s called with THP disabled\n", __func__);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */


extern int hash__map_kernel_page(unsigned long ea, unsigned long pa,
				 unsigned long flags);
extern int __meminit hash__vmemmap_create_mapping(unsigned long start,
						  unsigned long page_size,
						  unsigned long phys);
extern void hash__vmemmap_remove_mapping(unsigned long start,
					 unsigned long page_size);
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */