/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 */

#ifndef _ASM_TILE_PGTABLE_64_H
#define _ASM_TILE_PGTABLE_64_H

/* The level-0 page table breaks the address space into 32-bit chunks. */
#define PGDIR_SHIFT	HV_LOG2_L1_SPAN
#define PGDIR_SIZE	HV_L1_SPAN
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
#define PTRS_PER_PGD	HV_L0_ENTRIES
#define PGD_INDEX(va)	HV_L0_INDEX(va)
#define SIZEOF_PGD	HV_L0_SIZE
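
/*
 * For example: with PGDIR_SHIFT == 32 (per the comment above), each
 * level-0 entry spans 2^32 bytes (4 GB) of virtual address space, so
 * a chip with, say, a 42-bit VA width (CHIP_VA_WIDTH() is used below)
 * would need 2^(42 - 32) == 1024 level-0 entries.
 */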

/*
 * The level-1 index is defined by the huge page size. A PMD table is
 * composed of PTRS_PER_PMD pmd_t's (on tile these are identical to
 * pgd_t's; see the no-op conversions below) and is the middle level
 * of the page table.
 */
#define PMD_SHIFT	HPAGE_SHIFT
#define PMD_SIZE	HPAGE_SIZE
#define PMD_MASK	(~(PMD_SIZE-1))
#define PTRS_PER_PMD	_HV_L1_ENTRIES(HPAGE_SHIFT)
#define PMD_INDEX(va)	_HV_L1_INDEX(va, HPAGE_SHIFT)
#define SIZEOF_PMD	_HV_L1_SIZE(HPAGE_SHIFT)
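
/*
 * For example: assuming 16 MB huge pages (HPAGE_SHIFT == 24) under a
 * 32-bit level-1 span, PTRS_PER_PMD would be 2^(32 - 24) == 256
 * entries, each either mapping one huge page or pointing at one
 * level-2 page table.
 */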

/*
 * The level-2 index is defined by the difference between the huge
 * page size and the normal page size. A level-2 page table is
 * composed of PTRS_PER_PTE pte_t's and is the bottom level of the
 * page table. Note that the hypervisor docs use PTE for what we call
 * pte_t, so this nomenclature is somewhat confusing.
 */
#define PTRS_PER_PTE	_HV_L2_ENTRIES(HPAGE_SHIFT, PAGE_SHIFT)
#define PTE_INDEX(va)	_HV_L2_INDEX(va, HPAGE_SHIFT, PAGE_SHIFT)
#define SIZEOF_PTE	_HV_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)
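
/*
 * Continuing the example above: with HPAGE_SHIFT == 24 and 64 KB base
 * pages (PAGE_SHIFT == 16), PTRS_PER_PTE would be 2^(24 - 16) == 256
 * pte_t's per level-2 table.
 */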
Chris Metcalf18aecc22011-05-04 14:38:26 -040048
49/*
Chris Metcalfd5d14ed2012-03-29 13:58:43 -040050 * Align the vmalloc area to an L2 page table. Omit guard pages at
51 * the beginning and end for simplicity (particularly in the per-cpu
52 * memory allocation code). The vmalloc code puts in an internal
Chris Metcalf18aecc22011-05-04 14:38:26 -040053 * guard page between each allocation.
54 */
Chris Metcalf4b129092013-09-11 13:57:15 -040055#define _VMALLOC_END MEM_SV_START
Chris Metcalfd5d14ed2012-03-29 13:58:43 -040056#define VMALLOC_END _VMALLOC_END
57#define VMALLOC_START _VMALLOC_START

#ifndef __ASSEMBLY__

/* We have no pud since we are a three-level page table. */
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>
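
/*
 * With the pud level folded by the header above, a pud_t is just a
 * one-member wrapper struct around a pgd_t, which is why the pud
 * accessors below reach through the ".pgd" member.
 */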

/*
 * pmds are the same as pgds and ptes, so converting is a no-op.
 */
#define pmd_pte(pmd)		(pmd)
#define pmdp_ptep(pmdp)		(pmdp)
#define pte_pmd(pte)		(pte)

#define pud_pte(pud)		((pud).pgd)

static inline int pud_none(pud_t pud)
{
	return pud_val(pud) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_val(pud) & _PAGE_PRESENT;
}

static inline int pud_huge_page(pud_t pud)
{
	return pud_val(pud) & _PAGE_HUGE_PAGE;
}

#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd 0x%016llx\n", __FILE__, __LINE__, pmd_val(e))

static inline void pud_clear(pud_t *pudp)
{
	__pte_clear(&pudp->pgd);
}

static inline int pud_bad(pud_t pud)
{
	return ((pud_val(pud) & _PAGE_ALL) != _PAGE_TABLE);
}

/* Return the page-table frame number (ptfn) that a pud_t points at. */
#define pud_ptfn(pud)	hv_pte_get_ptfn((pud).pgd)

/* Return the page frame number (pfn) that a pud_t points at. */
#define pud_pfn(pud)	pte_pfn(pud_pte(pud))

/*
 * A given kernel pud_t maps to a kernel pmd_t table at a specific
 * virtual address. Since kernel pmd_t tables can be aligned at
 * sub-page granularity, this macro can return non-page-aligned
 * pointers, despite its name.
 */
#define pud_page_vaddr(pud) \
	(__va((phys_addr_t)pud_ptfn(pud) << HV_LOG2_PAGE_TABLE_ALIGN))
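
/*
 * Note that a ptfn counts units of the page-table alignment
 * (1 << HV_LOG2_PAGE_TABLE_ALIGN bytes) rather than full pages, which
 * is why the conversion above shifts by HV_LOG2_PAGE_TABLE_ALIGN
 * instead of PAGE_SHIFT.
 */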

/*
 * A pud_t points to a pmd_t array. Since we can have multiple per
 * page, we don't have a one-to-one mapping of pud_t's to pages.
 */
#define pud_page(pud)	pfn_to_page(PFN_DOWN(HV_PTFN_TO_CPA(pud_ptfn(pud))))

static inline unsigned long pud_index(unsigned long address)
{
	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

#define pmd_offset(pud, address) \
	((pmd_t *)pud_page_vaddr(*(pud)) + pmd_index(address))
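
/*
 * A sketch of a software walk down to the pmd level for some kernel
 * address "addr" (illustrative only; with the pud level folded, the
 * pud step selects the same entry as the pgd step):
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 */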

/* Normalize an address to having the correct high bits set. */
#define pgd_addr_normalize pgd_addr_normalize
static inline unsigned long pgd_addr_normalize(unsigned long addr)
{
	return ((long)addr << (CHIP_WORD_SIZE() - CHIP_VA_WIDTH())) >>
		(CHIP_WORD_SIZE() - CHIP_VA_WIDTH());
}
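
/*
 * For example, assuming a 64-bit word and a 42-bit VA width, this
 * shifts the address left by 22 bits and arithmetically shifts it
 * back, replicating bit 41 into the upper 22 bits: 0x20000000000
 * would normalize to 0xfffffe0000000000.
 */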

/* We don't define any pgds for these addresses. */
static inline int pgd_addr_invalid(unsigned long addr)
{
	return addr >= KERNEL_HIGH_VADDR || addr != pgd_addr_normalize(addr);
}

/*
 * Use atomic instructions to provide atomicity against the hypervisor,
 * which may also be updating these PTEs (e.g. their accessed and dirty
 * bits) concurrently with the kernel.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	return (__insn_fetchand(&ptep->val, ~HV_PTE_ACCESSED) >>
		HV_PTE_INDEX_ACCESSED) & 0x1;
}
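
/*
 * The semantics assumed of __insn_fetchand above: atomically perform
 * { old = *p; *p = old & mask; return old; }. The returned word thus
 * still carries the pre-clear ACCESSED bit, which the shift-and-mask
 * extracts.
 */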

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	__insn_fetchand(&ptep->val, ~HV_PTE_WRITABLE);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	return hv_pte(__insn_exch(&ptep->val, 0UL));
}
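
/*
 * __insn_exch atomically swaps the new value (here 0, a cleared PTE)
 * into memory and returns the old contents, so a concurrent update by
 * the hypervisor cannot be lost between the read and the clear.
 */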
169
Chris Metcalf18aecc22011-05-04 14:38:26 -0400170#endif /* __ASSEMBLY__ */
171
172#endif /* _ASM_TILE_PGTABLE_64_H */