/*
 * include/asm-xtensa/pgtable.h
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Copyright (C) 2001 - 2013 Tensilica Inc.
 */

#ifndef _XTENSA_PGTABLE_H
#define _XTENSA_PGTABLE_H

#include <asm-generic/pgtable-nopmd.h>
#include <asm/page.h>

/*
 * We only use two ring levels, user and kernel space.
 */

#define USER_RING	1	/* user ring level */
#define KERNEL_RING	0	/* kernel ring level */

/*
 * The Xtensa architecture port of Linux has a two-level page table system,
 * i.e. the logical three-level Linux page table layout is folded.
 * Each task has the following memory page tables:
 *
 *   PGD table (page directory), i.e. 3rd-level page table:
 *	One page (4 kB) of 1024 (PTRS_PER_PGD) pointers to PTE tables
 *	(Architectures that don't have the PMD folded point to the PMD tables)
 *
 *	The pointer to the PGD table for a given task can be retrieved from
 *	the task structure (struct task_struct*) t, e.g. current():
 *	  (t->mm ? t->mm : t->active_mm)->pgd
 *
 *   PMD tables (page middle-directory), i.e. 2nd-level page tables:
 *	Absent for the Xtensa architecture (folded, PTRS_PER_PMD == 1).
 *
 *   PTE tables (page table entry), i.e. 1st-level page tables:
 *	One page (4 kB) of 1024 (PTRS_PER_PTE) PTEs with a special PTE
 *	invalid_pte_table for absent mappings.
 *
 * All individual pages are 4 kB in size; the special empty_zero_page is
 * used as a shared, zero-filled page.
 */
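
/*
 * Illustrative example (not in the original header): with PGDIR_SHIFT
 * of 22 and 4 kB pages, a 32-bit virtual address decomposes into a PGD
 * index (bits 31-22), a PTE index (bits 21-12) and a page offset
 * (bits 11-0). For the hypothetical address 0x12345678:
 *
 *	pgd index = 0x12345678 >> 22		= 0x048
 *	pte index = (0x12345678 >> 12) & 0x3ff	= 0x345
 *	offset	  = 0x12345678 & 0xfff		= 0x678
 */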

#define PGDIR_SHIFT	22
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * Entries per page directory level: we use a two-level layout, so
 * we don't really have any PMD directory physically.
 */
#define PTRS_PER_PTE		1024
#define PTRS_PER_PTE_SHIFT	10
#define PTRS_PER_PGD		1024
#define PGD_ORDER		0
#define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0
#define FIRST_USER_PGD_NR	(FIRST_USER_ADDRESS >> PGDIR_SHIFT)

/*
 * Virtual memory area. We keep some distance from other memory regions
 * to be on the safe side. We also use this area for cache aliasing.
 */
#define VMALLOC_START	0xC0000000
#define VMALLOC_END	0xC7FEFFFF
#define TLBTEMP_BASE_1	0xC7FF0000
#define TLBTEMP_BASE_2	0xC7FF8000

/*
 * For the Xtensa architecture, the PTE layout is as follows:
 *
 *		31------12  11  10-9   8-6   5-4   3-2   1-0
 *		+-----------------------------------------+
 *		|           |   Software   |   HARDWARE   |
 *		|    PPN    |          ADW | RI |Attribute|
 *		+-----------------------------------------+
 *   pte_none	|             MBZ          | 01 | 11 | 00 |
 *		+-----------------------------------------+
 *   present	|    PPN    | 0 | 00 | ADW | RI | CA | wx |
 *		+- - - - - - - - - - - - - - - - - - - - -+
 *   (PAGE_NONE)|    PPN    | 0 | 00 | ADW | 01 | 11 | 11 |
 *		+-----------------------------------------+
 *   swap	|     index     |   type   | 01 | 11 | 00 |
 *		+- - - - - - - - - - - - - - - - - - - - -+
 *   file	|        file offset       | 01 | 11 | 10 |
 *		+-----------------------------------------+
 *
 * For T1050 hardware and earlier, the layout differs for present and
 * (PAGE_NONE):
 *
 *		+-----------------------------------------+
 *   present	|    PPN    | 0 | 00 | ADW | RI | CA | w1 |
 *		+-----------------------------------------+
 *   (PAGE_NONE)|    PPN    | 0 | 00 | ADW | 01 | 01 | 00 |
 *		+-----------------------------------------+
 *
 * Legend:
 *   PPN	 Physical Page Number
 *   ADW	 software: accessed (young) / dirty / writable
 *   RI		 ring (0=privileged, 1=user, 2 and 3 are unused)
 *   CA		 cache attribute: 00 bypass, 01 writeback, 10 writethrough
 *		 (11 is invalid and used to mark pages that are not present)
 *   w		 page is writable (hw)
 *   x		 page is executable (hw)
 *   index	 swap offset / PAGE_SIZE (bits 11-31: 21 bits -> 8 GB)
 *		 (note that the index is always non-zero)
 *   type	 swap type (5 bits -> 32 types)
 *   file offset 26-bit offset into the file, in increments of PAGE_SIZE
 *
 * Notes:
 *   - (PROT_NONE) is a special case of 'present' but causes an exception for
 *     any access (read, write, and execute).
 *   - 'multihit-exception' has the highest priority of all MMU exceptions,
 *     so the ring must be set to 'RING_USER' even for 'non-present' pages.
 *   - on older hardware, the executable flag was not supported and its bit
 *     was reused as a 'valid' flag, so it needs to be always set.
 *   - we need to keep track of certain flags in software (dirty and young);
 *     to do this, we use write exceptions and keep a separate software w-flag.
 *   - attribute value 1101 (and 1111 on T1050 and earlier) is reserved
 */
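
/*
 * Illustrative example (assuming post-T1050 hardware, where
 * _PAGE_HW_VALID is 0): a present, user, write-back cached, accessed,
 * dirty and writable PTE for the hypothetical PFN 0x12345 would be
 *
 *	(0x12345 << 12) | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_WRITABLE
 *			| _PAGE_USER | _PAGE_CA_WB | _PAGE_HW_WRITE
 *	= 0x12345000 | 0x100 | 0x80 | 0x40 | 0x10 | 0x4 | 0x2
 *	= 0x123451d6
 *
 * (_PAGE_HW_WRITE is shown set because the page is both writable and
 * dirty; see pte_wrprotect()/pte_mkclean() below, which clear it.)
 */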

#define _PAGE_ATTRIB_MASK	0xf

#define _PAGE_HW_EXEC		(1<<0)	/* hardware: page is executable */
#define _PAGE_HW_WRITE		(1<<1)	/* hardware: page is writable */

#define _PAGE_CA_BYPASS		(0<<2)	/* bypass, non-speculative */
#define _PAGE_CA_WB		(1<<2)	/* write-back */
#define _PAGE_CA_WT		(2<<2)	/* write-through */
#define _PAGE_CA_MASK		(3<<2)
#define _PAGE_CA_INVALID	(3<<2)

/* We use invalid attribute values to distinguish special pte entries */
#if XCHAL_HW_VERSION_MAJOR < 2000
#define _PAGE_HW_VALID		0x01	/* older HW needed this bit set */
#define _PAGE_NONE		0x04
#else
#define _PAGE_HW_VALID		0x00
#define _PAGE_NONE		0x0f
#endif
#define _PAGE_FILE		(1<<1)	/* file mapped page, only if !present */

#define _PAGE_USER		(1<<4)	/* user access (ring=1) */

/* Software */
#define _PAGE_WRITABLE_BIT	6
#define _PAGE_WRITABLE		(1<<6)	/* software: page writable */
#define _PAGE_DIRTY		(1<<7)	/* software: page dirty */
#define _PAGE_ACCESSED		(1<<8)	/* software: page accessed (read) */

#ifdef CONFIG_MMU

#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_PRESENT	(_PAGE_HW_VALID | _PAGE_CA_WB | _PAGE_ACCESSED)

#define PAGE_NONE	__pgprot(_PAGE_NONE | _PAGE_USER)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_USER)
#define PAGE_COPY_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_HW_EXEC)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER)
#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_HW_EXEC)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE)
#define PAGE_SHARED_EXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE | _PAGE_HW_EXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_HW_WRITE)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT|_PAGE_HW_WRITE|_PAGE_HW_EXEC)

#if (DCACHE_WAY_SIZE > PAGE_SIZE)
# define _PAGE_DIRECTORY (_PAGE_HW_VALID | _PAGE_ACCESSED | _PAGE_CA_BYPASS)
#else
# define _PAGE_DIRECTORY (_PAGE_HW_VALID | _PAGE_ACCESSED | _PAGE_CA_WB)
#endif
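
/*
 * Explanatory note (an assumption, not from the original header): with
 * DCACHE_WAY_SIZE > PAGE_SIZE the data cache can hold aliases of the
 * same physical page, so page-directory entries are mapped
 * cache-bypass; together with the explicit writeback in update_pte()
 * below, this presumably keeps the TLB-refill path's view of the page
 * tables coherent with the kernel's cached view.
 */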

#else /* no mmu */

# define PAGE_NONE	__pgprot(0)
# define PAGE_SHARED	__pgprot(0)
# define PAGE_COPY	__pgprot(0)
# define PAGE_READONLY	__pgprot(0)
# define PAGE_KERNEL	__pgprot(0)

#endif

/*
 * On certain configurations of the Xtensa MMU (e.g. the initial Linux
 * config), the MMU can't do page protection for execute, and considers
 * that the same as read. Also, write permissions may imply read
 * permissions. What follows is the closest we can get by reasonable
 * means. See the protection_map[] array in linux/mm/mmap.c, which uses
 * these definitions.
 */
#define __P000	PAGE_NONE		/* private --- */
#define __P001	PAGE_READONLY		/* private --r */
#define __P010	PAGE_COPY		/* private -w- */
#define __P011	PAGE_COPY		/* private -wr */
#define __P100	PAGE_READONLY_EXEC	/* private x-- */
#define __P101	PAGE_READONLY_EXEC	/* private x-r */
#define __P110	PAGE_COPY_EXEC		/* private xw- */
#define __P111	PAGE_COPY_EXEC		/* private xwr */

#define __S000	PAGE_NONE		/* shared  --- */
#define __S001	PAGE_READONLY		/* shared  --r */
#define __S010	PAGE_SHARED		/* shared  -w- */
#define __S011	PAGE_SHARED		/* shared  -wr */
#define __S100	PAGE_READONLY_EXEC	/* shared  x-- */
#define __S101	PAGE_READONLY_EXEC	/* shared  x-r */
#define __S110	PAGE_SHARED_EXEC	/* shared  xw- */
#define __S111	PAGE_SHARED_EXEC	/* shared  xwr */

#ifndef __ASSEMBLY__

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd entry %08lx.\n", __FILE__, __LINE__, pgd_val(e))

extern unsigned long empty_zero_page[1024];

#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#ifdef CONFIG_MMU
extern pgd_t swapper_pg_dir[PAGE_SIZE/sizeof(pgd_t)];
extern void paging_init(void);
#else
# define swapper_pg_dir NULL
static inline void paging_init(void) { }
#endif
static inline void pgtable_cache_init(void) { }

/*
 * The pmd contains the kernel virtual address of the pte page.
 */
#define pmd_page_vaddr(pmd) ((unsigned long)(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd) virt_to_page(pmd_val(pmd))

/*
 * pte status.
 */
# define pte_none(pte)	 (pte_val(pte) == (_PAGE_CA_INVALID | _PAGE_USER))
#if XCHAL_HW_VERSION_MAJOR < 2000
# define pte_present(pte) ((pte_val(pte) & _PAGE_CA_MASK) != _PAGE_CA_INVALID)
#else
# define pte_present(pte)						\
	(((pte_val(pte) & _PAGE_CA_MASK) != _PAGE_CA_INVALID)		\
	 || ((pte_val(pte) & _PAGE_ATTRIB_MASK) == _PAGE_NONE))
#endif
#define pte_clear(mm,addr,ptep)						\
	do { update_pte(ptep, __pte(_PAGE_CA_INVALID | _PAGE_USER)); } while (0)

#define pmd_none(pmd)	 (!pmd_val(pmd))
#define pmd_present(pmd) (pmd_val(pmd) & PAGE_MASK)
#define pmd_bad(pmd)	 (pmd_val(pmd) & ~PAGE_MASK)
#define pmd_clear(pmdp)	 do { set_pmd(pmdp, __pmd(0)); } while (0)

static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITABLE; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
static inline int pte_special(pte_t pte) { return 0; }

static inline pte_t pte_wrprotect(pte_t pte)
	{ pte_val(pte) &= ~(_PAGE_WRITABLE | _PAGE_HW_WRITE); return pte; }
static inline pte_t pte_mkclean(pte_t pte)
	{ pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HW_WRITE); return pte; }
static inline pte_t pte_mkold(pte_t pte)
	{ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)
	{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)
	{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)
	{ pte_val(pte) |= _PAGE_WRITABLE; return pte; }
static inline pte_t pte_mkspecial(pte_t pte)
	{ return pte; }

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
#define pte_same(a,b)		(pte_val(a) == pte_val(b))
#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void update_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
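	/*
	 * "dhwb" (data-cache hit writeback) writes the cache line holding
	 * the PTE back to memory, so a later access through a different
	 * cache alias sees the updated entry.
	 */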
	__asm__ __volatile__ ("dhwb %0, 0" :: "a" (ptep));
#endif
}

struct mm_struct;

static inline void
set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval)
{
	update_pte(ptep, pteval);
}

static inline void
set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
	*pmdp = pmdval;
}

struct vm_area_struct;

static inline int
ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr,
			  pte_t *ptep)
{
	pte_t pte = *ptep;
	if (!pte_young(pte))
		return 0;
	update_pte(ptep, pte_mkold(pte));
	return 1;
}

static inline pte_t
ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;
	pte_clear(mm, addr, ptep);
	return pte;
}

static inline void
ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;
	update_pte(ptep, pte_wrprotect(pte));
}

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_offset(mm,address)	((mm)->pgd + pgd_index(address))

#define pgd_index(address)	((address) >> PGDIR_SHIFT)

/* Find an entry in the second-level page table. */
#define pmd_offset(dir,address) ((pmd_t*)(dir))

/* Find an entry in the third-level page table. */
#define pte_index(address)	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir,addr) \
	((pte_t*) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir,addr)	pte_offset_kernel((dir),(addr))
#define pte_unmap(pte)		do { } while (0)
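
/*
 * Illustrative walk (a sketch, not part of the original header): to find
 * the PTE that maps a kernel virtual address, descend through the folded
 * levels like so:
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pmd_t *pmd = pmd_offset(pgd, addr);	// folded level: just a cast
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */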

/*
 * Encode and decode a swap and file entry.
 */
#define SWP_TYPE_BITS		5
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)

#define __swp_type(entry)	(((entry).val >> 6) & 0x1f)
#define __swp_offset(entry)	((entry).val >> 11)
#define __swp_entry(type,offs)	\
	((swp_entry_t){((type) << 6) | ((offs) << 11) | \
	 _PAGE_CA_INVALID | _PAGE_USER})
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#define PTE_FILE_MAX_BITS	26
#define pte_to_pgoff(pte)	(pte_val(pte) >> 6)
#define pgoff_to_pte(off)	\
	((pte_t) { ((off) << 6) | _PAGE_CA_INVALID | _PAGE_FILE | _PAGE_USER })
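
/*
 * Illustrative example (hypothetical values): __swp_entry(2, 0x100)
 * packs swap type 2 and swap offset 0x100 as
 *
 *	(2 << 6) | (0x100 << 11) | _PAGE_CA_INVALID | _PAGE_USER
 *	= 0x80 | 0x80000 | 0xc | 0x10
 *	= 0x8009c
 *
 * The cache-attribute field holds the invalid value (11), so neither the
 * hardware nor pte_present() ever treats such an entry as a mapping.
 */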

#endif /* !defined (__ASSEMBLY__) */


#ifdef __ASSEMBLY__

/* Assembly macro _PGD_INDEX is the same as C pgd_index(unsigned long),
 * _PGD_OFFSET as C pgd_offset(struct mm_struct*, unsigned long),
 * _PTE_OFFSET as C pte_offset_kernel(pmd_t*, unsigned long).
 *
 * Note: We require an additional temporary register which can be the same
 * as the register that holds the address.
 *
 * ((pte_t*) ((unsigned long)(pmd_val(*pmd) & PAGE_MASK)) + pte_index(addr))
 */
#define _PGD_INDEX(rt,rs)	extui rt, rs, PGDIR_SHIFT, 32-PGDIR_SHIFT
#define _PTE_INDEX(rt,rs)	extui rt, rs, PAGE_SHIFT, PTRS_PER_PTE_SHIFT

#define _PGD_OFFSET(mm,adr,tmp)		l32i	mm, mm, MM_PGD;		\
					_PGD_INDEX(tmp, adr);		\
					addx4	mm, tmp, mm

#define _PTE_OFFSET(pmd,adr,tmp)	_PTE_INDEX(tmp, adr);		\
					srli	pmd, pmd, PAGE_SHIFT;	\
					slli	pmd, pmd, PAGE_SHIFT;	\
					addx4	pmd, tmp, pmd
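
/*
 * Illustrative use (a sketch; the register assignments are hypothetical):
 * with a pointer to the current mm in a2 and the faulting virtual address
 * in a3, the address of the PTE can be computed as
 *
 *	_PGD_OFFSET(a2, a3, a4)		// a2 = &pgd[pgd_index(a3)]
 *	l32i	a2, a2, 0		// a2 = pmd value (PTE page address)
 *	_PTE_OFFSET(a2, a3, a4)		// a2 = &pte[pte_index(a3)]
 */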

#else

#define kern_addr_valid(addr)	(1)

extern void update_mmu_cache(struct vm_area_struct * vma,
			     unsigned long address, pte_t *ptep);

typedef pte_t *pte_addr_t;

#endif /* !defined (__ASSEMBLY__) */

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTEP_MKDIRTY
#define __HAVE_ARCH_PTE_SAME
/* We provide our own get_unmapped_area to cope with
 * SHM area cache aliasing for userland.
 */
#define HAVE_ARCH_UNMAPPED_AREA

#include <asm-generic/pgtable.h>

#endif /* _XTENSA_PGTABLE_H */