#ifndef _ASM_IA64_PGTABLE_H
#define _ASM_IA64_PGTABLE_H

/*
 * This file contains the functions and defines necessary to modify and use
 * the IA-64 page table tree.
 *
 * This hopefully works with any (fixed) IA-64 page-size, as defined
 * in <asm/page.h>.
 *
 * Copyright (C) 1998-2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */


#include <asm/mman.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/types.h>

#define IA64_MAX_PHYS_BITS	50	/* max. number of physical address bits (architected) */

/*
 * First, define the various bits in a PTE.  Note that the PTE format
 * matches the VHPT short format, the first doubleword of the VHPT long
 * format, and the first doubleword of the TLB insertion format.
 */
#define _PAGE_P_BIT		0
#define _PAGE_A_BIT		5
#define _PAGE_D_BIT		6

#define _PAGE_P			(1 << _PAGE_P_BIT)	/* page present bit */
#define _PAGE_MA_WB		(0x0 << 2)	/* write back memory attribute */
#define _PAGE_MA_UC		(0x4 << 2)	/* uncacheable memory attribute */
#define _PAGE_MA_UCE		(0x5 << 2)	/* UC exported attribute */
#define _PAGE_MA_WC		(0x6 << 2)	/* write coalescing memory attribute */
#define _PAGE_MA_NAT		(0x7 << 2)	/* not-a-thing attribute */
#define _PAGE_MA_MASK		(0x7 << 2)
#define _PAGE_PL_0		(0 << 7)	/* privilege level 0 (kernel) */
#define _PAGE_PL_1		(1 << 7)	/* privilege level 1 (unused) */
#define _PAGE_PL_2		(2 << 7)	/* privilege level 2 (unused) */
#define _PAGE_PL_3		(3 << 7)	/* privilege level 3 (user) */
#define _PAGE_PL_MASK		(3 << 7)
#define _PAGE_AR_R		(0 << 9)	/* read only */
#define _PAGE_AR_RX		(1 << 9)	/* read & execute */
#define _PAGE_AR_RW		(2 << 9)	/* read & write */
#define _PAGE_AR_RWX		(3 << 9)	/* read, write & execute */
#define _PAGE_AR_R_RW		(4 << 9)	/* read / read & write */
#define _PAGE_AR_RX_RWX		(5 << 9)	/* read & exec / read, write & exec */
#define _PAGE_AR_RWX_RW		(6 << 9)	/* read, write & exec / read & write */
#define _PAGE_AR_X_RX		(7 << 9)	/* exec & promote / read & exec */
#define _PAGE_AR_MASK		(7 << 9)
#define _PAGE_AR_SHIFT		9
#define _PAGE_A			(1 << _PAGE_A_BIT)	/* page accessed bit */
#define _PAGE_D			(1 << _PAGE_D_BIT)	/* page dirty bit */
#define _PAGE_PPN_MASK		(((__IA64_UL(1) << IA64_MAX_PHYS_BITS) - 1) & ~0xfffUL)
#define _PAGE_ED		(__IA64_UL(1) << 52)	/* exception deferral */
#define _PAGE_PROTNONE		(__IA64_UL(1) << 63)

/* Valid only for a PTE with the present bit cleared: */
#define _PAGE_FILE		(1 << 1)	/* see swap & file pte remarks below */

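/*
 * Illustrative sketch (not used by the kernel): a present, accessed,
 * dirty, cacheable, user-level, read-write PTE for physical frame
 * 0x1234 would be composed as
 *
 *	_PAGE_P | _PAGE_A | _PAGE_D | _PAGE_MA_WB | _PAGE_PL_3 |
 *	_PAGE_AR_RW | (0x1234UL << PAGE_SHIFT)
 *
 * i.e. bit 0 (present), bits 2-4 memory attribute, bit 5 (accessed),
 * bit 6 (dirty), bits 7-8 privilege level, bits 9-11 access rights,
 * and the physical page number in bits PAGE_SHIFT..49.
 */
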
#define _PFN_MASK		_PAGE_PPN_MASK
/* Mask of bits which may be changed by pte_modify(); the odd bits are there for _PAGE_PROTNONE */
#define _PAGE_CHG_MASK	(_PAGE_P | _PAGE_PROTNONE | _PAGE_PL_MASK | _PAGE_AR_MASK | _PAGE_ED)

#define _PAGE_SIZE_4K	12
#define _PAGE_SIZE_8K	13
#define _PAGE_SIZE_16K	14
#define _PAGE_SIZE_64K	16
#define _PAGE_SIZE_256K	18
#define _PAGE_SIZE_1M	20
#define _PAGE_SIZE_4M	22
#define _PAGE_SIZE_16M	24
#define _PAGE_SIZE_64M	26
#define _PAGE_SIZE_256M	28
#define _PAGE_SIZE_1G	30
#define _PAGE_SIZE_4G	32

#define __ACCESS_BITS		_PAGE_ED | _PAGE_A | _PAGE_P | _PAGE_MA_WB
#define __DIRTY_BITS_NO_ED	_PAGE_A | _PAGE_P | _PAGE_D | _PAGE_MA_WB
#define __DIRTY_BITS		_PAGE_ED | __DIRTY_BITS_NO_ED

/*
 * How many pointers a page-table level can hold, expressed as a shift:
 */
#define PTRS_PER_PTD_SHIFT	(PAGE_SHIFT-3)

/*
 * Definitions for fourth level:
 */
#define PTRS_PER_PTE	(__IA64_UL(1) << (PTRS_PER_PTD_SHIFT))

/*
 * Definitions for third level:
 *
 * PMD_SHIFT determines the size of the area a third-level page table
 * can map.
 */
#define PMD_SHIFT	(PAGE_SHIFT + (PTRS_PER_PTD_SHIFT))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PTRS_PER_PMD	(1UL << (PTRS_PER_PTD_SHIFT))

#ifdef CONFIG_PGTABLE_4
/*
 * Definitions for second level:
 *
 * PUD_SHIFT determines the size of the area a second-level page table
 * can map.
 */
#define PUD_SHIFT	(PMD_SHIFT + (PTRS_PER_PTD_SHIFT))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PTRS_PER_PUD	(1UL << (PTRS_PER_PTD_SHIFT))
#endif

/*
 * Definitions for first level:
 *
 * PGDIR_SHIFT determines what a first-level page table entry can map.
 */
#ifdef CONFIG_PGTABLE_4
#define PGDIR_SHIFT		(PUD_SHIFT + (PTRS_PER_PTD_SHIFT))
#else
#define PGDIR_SHIFT		(PMD_SHIFT + (PTRS_PER_PTD_SHIFT))
#endif
#define PGDIR_SIZE		(__IA64_UL(1) << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE-1))
#define PTRS_PER_PGD_SHIFT	PTRS_PER_PTD_SHIFT
#define PTRS_PER_PGD		(1UL << PTRS_PER_PGD_SHIFT)
#define USER_PTRS_PER_PGD	(5*PTRS_PER_PGD/8)	/* regions 0-4 are user regions */
#define FIRST_USER_ADDRESS	0

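/*
 * Worked example (illustrative only), assuming the default 16KB page
 * size (PAGE_SHIFT = 14): PTRS_PER_PTD_SHIFT = 11, so each level holds
 * 2048 8-byte entries.  Then PMD_SHIFT = 14 + 11 = 25 (a PMD maps
 * 32MB), PUD_SHIFT = 36 with CONFIG_PGTABLE_4 (a PUD maps 64GB), and
 * PGDIR_SHIFT = 36 with three levels or 47 with four.
 */
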
/*
 * All the normal masks have the "page accessed" bits on, as any time
 * they are used, the page is accessed.  They are cleared only by the
 * page-out routines.
 */
#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_A)
#define PAGE_SHARED	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
#define PAGE_READONLY	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
#define PAGE_COPY	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
#define PAGE_COPY_EXEC	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
#define PAGE_GATE	__pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
#define PAGE_KERNEL	__pgprot(__DIRTY_BITS  | _PAGE_PL_0 | _PAGE_AR_RWX)
#define PAGE_KERNELRX	__pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
#define PAGE_KERNEL_UC	__pgprot(__DIRTY_BITS  | _PAGE_PL_0 | _PAGE_AR_RWX | \
				 _PAGE_MA_UC)

# ifndef __ASSEMBLY__

#include <linux/sched.h>	/* for mm_struct */
#include <linux/bitops.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>

/*
 * Next come the mappings that determine how mmap() protection bits
 * (PROT_EXEC, PROT_READ, PROT_WRITE, PROT_NONE) get implemented.  The
 * _P version gets used for a private (copy-on-write) mapping, the _S
 * version for a shared mapping (MAP_SHARED).  For a private mapping,
 * we do a copy-on-write if a task attempts to write to the page.
 */
	/* xwr */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_READONLY	/* write to priv pg -> copy & make writable */
#define __P011	PAGE_READONLY	/* ditto */
#define __P100	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
#define __P101	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED	/* we don't have (and don't need) write-only */
#define __S011	PAGE_SHARED
#define __S100	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
#define __S101	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
#define __S110	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
#define __S111	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)

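/*
 * Reading the table above (illustrative): a PROT_READ|PROT_WRITE
 * private mapping selects __P011 == PAGE_READONLY, so the first store
 * faults and the fault handler performs the copy-on-write; the same
 * bits on a MAP_SHARED mapping select __S011 == PAGE_SHARED, which is
 * writable outright.
 */
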
#define pgd_ERROR(e)	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
#ifdef CONFIG_PGTABLE_4
#define pud_ERROR(e)	printk("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
#endif
#define pmd_ERROR(e)	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pte_ERROR(e)	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))


/*
 * Some definitions to translate between mem_map, PTEs, and page addresses:
 */


/* Quick test to see if ADDR is a (potentially) valid physical address. */
static inline long
ia64_phys_addr_valid (unsigned long addr)
{
	return (addr & (local_cpu_data->unimpl_pa_mask)) == 0;
}

/*
 * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
 * memory.  For the return value to be meaningful, ADDR must be >=
 * PAGE_OFFSET.  This operation can be relatively expensive (e.g.,
 * require a hash-, or multi-level tree-lookup or something of that
 * sort) but it guarantees to return TRUE only if accessing the page
 * at that address does not cause an error.  Note that there may be
 * addresses for which kern_addr_valid() returns FALSE even though an
 * access would not cause an error (e.g., this is typically true for
 * memory-mapped I/O regions).
 *
 * XXX Need to implement this for IA-64.
 */
#define kern_addr_valid(addr)	(1)


/*
 * Now come the defines and routines to manage and access the
 * three-level (or, with CONFIG_PGTABLE_4, four-level) page table.
 */


#define VMALLOC_START		(RGN_BASE(RGN_GATE) + 0x200000000UL)
#ifdef CONFIG_VIRTUAL_MEM_MAP
# define VMALLOC_END_INIT	(RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
# define VMALLOC_END		vmalloc_end
  extern unsigned long vmalloc_end;
#else
#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_SPARSEMEM_VMEMMAP)
/* SPARSEMEM_VMEMMAP uses half of vmalloc... */
# define VMALLOC_END		(RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 10)))
# define vmemmap		((struct page *)VMALLOC_END)
#else
# define VMALLOC_END		(RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
#endif
#endif

/* fs/proc/kcore.c */
#define	kc_vaddr_to_offset(v) ((v) - RGN_BASE(RGN_GATE))
#define	kc_offset_to_vaddr(o) ((o) + RGN_BASE(RGN_GATE))

#define RGN_MAP_SHIFT (PGDIR_SHIFT + PTRS_PER_PGD_SHIFT - 3)
#define RGN_MAP_LIMIT	((1UL << RGN_MAP_SHIFT) - PAGE_SIZE)	/* per region addr limit */

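/*
 * Worked example (illustrative): with 16KB pages and three levels,
 * RGN_MAP_SHIFT = 36 + 11 - 3 = 44, i.e. each of the eight virtual
 * regions can map at most 2^44 bytes (16TB) minus one page.  The "- 3"
 * reflects that every region owns only 1/8 of the pgd.
 */
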
/*
 * Conversion functions: convert page frame number (pfn) and a protection value to a page
 * table entry (pte).
 */
#define pfn_pte(pfn, pgprot) \
({ pte_t __pte; pte_val(__pte) = ((pfn) << PAGE_SHIFT) | pgprot_val(pgprot); __pte; })

/* Extract pfn from pte. */
#define pte_pfn(_pte)		((pte_val(_pte) & _PFN_MASK) >> PAGE_SHIFT)

#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

/* This takes a physical page address that is used by the remapping functions */
#define mk_pte_phys(physpage, pgprot) \
({ pte_t __pte; pte_val(__pte) = physpage + pgprot_val(pgprot); __pte; })

#define pte_modify(_pte, newprot) \
	(__pte((pte_val(_pte) & ~_PAGE_CHG_MASK) | (pgprot_val(newprot) & _PAGE_CHG_MASK)))

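/*
 * Illustrative round trip (not kernel code): pte_pfn(pfn_pte(pfn,
 * PAGE_SHARED)) == pfn for any valid pfn, and pte_modify(pte,
 * PAGE_READONLY) replaces only the _PAGE_CHG_MASK bits (present,
 * protection-none, privilege level, access rights, exception
 * deferral) while leaving the physical page number intact.
 */
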
#define pte_none(pte)			(!pte_val(pte))
#define pte_present(pte)		(pte_val(pte) & (_PAGE_P | _PAGE_PROTNONE))
#define pte_clear(mm,addr,pte)		(pte_val(*(pte)) = 0UL)
/* pte_page() returns the "struct page *" corresponding to the PTE: */
#define pte_page(pte)			virt_to_page(((pte_val(pte) & _PFN_MASK) + PAGE_OFFSET))

#define pmd_none(pmd)			(!pmd_val(pmd))
#define pmd_bad(pmd)			(!ia64_phys_addr_valid(pmd_val(pmd)))
#define pmd_present(pmd)		(pmd_val(pmd) != 0UL)
#define pmd_clear(pmdp)			(pmd_val(*(pmdp)) = 0UL)
#define pmd_page_vaddr(pmd)		((unsigned long) __va(pmd_val(pmd) & _PFN_MASK))
#define pmd_page(pmd)			virt_to_page((pmd_val(pmd) + PAGE_OFFSET))

#define pud_none(pud)			(!pud_val(pud))
#define pud_bad(pud)			(!ia64_phys_addr_valid(pud_val(pud)))
#define pud_present(pud)		(pud_val(pud) != 0UL)
#define pud_clear(pudp)			(pud_val(*(pudp)) = 0UL)
#define pud_page_vaddr(pud)		((unsigned long) __va(pud_val(pud) & _PFN_MASK))
#define pud_page(pud)			virt_to_page((pud_val(pud) + PAGE_OFFSET))

#ifdef CONFIG_PGTABLE_4
#define pgd_none(pgd)			(!pgd_val(pgd))
#define pgd_bad(pgd)			(!ia64_phys_addr_valid(pgd_val(pgd)))
#define pgd_present(pgd)		(pgd_val(pgd) != 0UL)
#define pgd_clear(pgdp)			(pgd_val(*(pgdp)) = 0UL)
#define pgd_page_vaddr(pgd)		((unsigned long) __va(pgd_val(pgd) & _PFN_MASK))
#define pgd_page(pgd)			virt_to_page((pgd_val(pgd) + PAGE_OFFSET))
#endif

/*
 * The following have defined behavior only if pte_present() is true.
 */
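/*
 * pte_write() below relies on the encoding of the access-rights
 * values: AR values 2 (RW), 3 (RWX), 4 (R/RW), 5 (RX/RWX), and
 * 6 (RWX/RW) all permit writes at some privilege level, so the
 * unsigned comparison "(ar - 2) <= 4" tests for exactly that range.
 */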
#define pte_write(pte)	((unsigned) (((pte_val(pte) & _PAGE_AR_MASK) >> _PAGE_AR_SHIFT) - 2) <= 4)
#define pte_exec(pte)		((pte_val(pte) & _PAGE_AR_RX) != 0)
#define pte_dirty(pte)		((pte_val(pte) & _PAGE_D) != 0)
#define pte_young(pte)		((pte_val(pte) & _PAGE_A) != 0)
#define pte_file(pte)		((pte_val(pte) & _PAGE_FILE) != 0)
#define pte_special(pte)	0

/*
 * Note: we convert AR_RWX to AR_RX and AR_RW to AR_R by clearing the 2nd bit in the
 * access rights:
 */
#define pte_wrprotect(pte)	(__pte(pte_val(pte) & ~_PAGE_AR_RW))
#define pte_mkwrite(pte)	(__pte(pte_val(pte) | _PAGE_AR_RW))
#define pte_mkold(pte)		(__pte(pte_val(pte) & ~_PAGE_A))
#define pte_mkyoung(pte)	(__pte(pte_val(pte) | _PAGE_A))
#define pte_mkclean(pte)	(__pte(pte_val(pte) & ~_PAGE_D))
#define pte_mkdirty(pte)	(__pte(pte_val(pte) | _PAGE_D))
#define pte_mkhuge(pte)		(__pte(pte_val(pte)))
#define pte_mkspecial(pte)	(pte)

/*
 * Because ia64's Icache and Dcache are not coherent (on a cpu), we need to
 * sync the icache and dcache when we insert a *new* executable page.
 * __ia64_sync_icache_dcache() checks the PG_arch_1 bit and flushes the
 * icache if necessary.
 *
 * set_pte() is also called by the kernel, but we can expect that the kernel
 * flushes the icache explicitly if necessary.
 */
#define pte_present_exec_user(pte)\
	((pte_val(pte) & (_PAGE_P | _PAGE_PL_MASK | _PAGE_AR_RX)) == \
		(_PAGE_P | _PAGE_PL_3 | _PAGE_AR_RX))

extern void __ia64_sync_icache_dcache(pte_t pteval);
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	/* page is present && page is user && page is executable
	 * && (page swapin or new page or page migration
	 *	|| copy_on_write with page copying.)
	 */
	if (pte_present_exec_user(pteval) &&
	    (!pte_present(*ptep) ||
	     pte_pfn(*ptep) != pte_pfn(pteval)))
		/* load_module() calls flush_icache_range() explicitly */
		__ia64_sync_icache_dcache(pteval);
	*ptep = pteval;
}

#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

/*
 * Make page protection values cacheable, uncacheable, or write-
 * combining.  Note that "protection" is really a misnomer here as the
 * protection value contains the memory attribute bits, dirty bits, and
 * various other bits as well.
 */
#define pgprot_cacheable(prot)		__pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WB)
#define pgprot_noncached(prot)		__pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_UC)
#define pgprot_writecombine(prot)	__pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WC)

struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

static inline unsigned long
pgd_index (unsigned long address)
{
	unsigned long region = address >> 61;
	unsigned long l1index = (address >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1);

	return (region << (PAGE_SHIFT - 6)) | l1index;
}

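/*
 * Worked example (illustrative), again assuming 16KB pages: PTRS_PER_PGD
 * is 2048, so each of the eight regions owns 2048/8 = 256 consecutive
 * pgd slots.  For a region-5 (kernel) address, region = 5 and the
 * result is (5 << 8) | l1index, i.e. an index in [1280, 1536).
 */
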
/* The offset in the 1-level directory is given by the 3 region bits
   (61..63) and the level-1 bits.  */
static inline pgd_t*
pgd_offset (const struct mm_struct *mm, unsigned long address)
{
	return mm->pgd + pgd_index(address);
}

/* In the kernel's mapped region we completely ignore the region number
   (since we know it's in region number 5). */
#define pgd_offset_k(addr) \
	(init_mm.pgd + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)))

/* Look up a pgd entry in the gate area.  On IA-64, the gate-area
   resides in the kernel-mapped segment, hence we use pgd_offset_k()
   here.  */
#define pgd_offset_gate(mm, addr)	pgd_offset_k(addr)

#ifdef CONFIG_PGTABLE_4
/* Find an entry in the second-level page table.. */
#define pud_offset(dir,addr) \
	((pud_t *) pgd_page_vaddr(*(dir)) + (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
#endif

/* Find an entry in the third-level page table.. */
#define pmd_offset(dir,addr) \
	((pmd_t *) pud_page_vaddr(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))

/*
 * Find an entry in the pte page table (the last level).  This looks more
 * complicated than it should be because some platforms place page tables
 * in high memory.
 */
#define pte_index(addr)			(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir,addr)	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir,addr)	pte_offset_kernel(dir, addr)
#define pte_offset_map_nested(dir,addr)	pte_offset_map(dir, addr)
#define pte_unmap(pte)			do { } while (0)
#define pte_unmap_nested(pte)		do { } while (0)

/* atomic versions of some of the PTE manipulations: */

static inline int
ptep_test_and_clear_young (struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
	if (!pte_young(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_A_BIT, ptep);
#else
	pte_t pte = *ptep;
	if (!pte_young(pte))
		return 0;
	set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return 1;
#endif
}

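/*
 * Explanatory note: on SMP the accessed bit is cleared with an atomic
 * test_and_clear_bit() on _PAGE_A_BIT so that a concurrent update of
 * the same PTE word (e.g. by the low-level fault handlers) cannot be
 * lost; on UP a plain read-modify-write is sufficient.
 */
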
static inline pte_t
ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
	return __pte(xchg((long *) ptep, 0));
#else
	pte_t pte = *ptep;
	pte_clear(mm, addr, ptep);
	return pte;
#endif
}

static inline void
ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
	unsigned long new, old;

	do {
		old = pte_val(*ptep);
		new = pte_val(pte_wrprotect(__pte (old)));
	} while (cmpxchg((unsigned long *) ptep, old, new) != old);
#else
	pte_t old_pte = *ptep;
	set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
#endif
}

static inline int
pte_same (pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

#define update_mmu_cache(vma, address, pte) do { } while (0)

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init (void);

/*
 * Note: The macros below rely on the fact that MAX_SWAPFILES_SHIFT <= number of
 *	 bits in the swap-type field of the swap pte.  It would be nice to
 *	 enforce that, but we can't easily include <linux/swap.h> here.
 *	 (Of course, better still would be to define MAX_SWAPFILES_SHIFT here...).
 *
 * Format of swap pte:
 *	bit   0   : present bit (must be zero)
 *	bit   1   : _PAGE_FILE (must be zero)
 *	bits  2- 8: swap-type
 *	bits  9-62: swap offset
 *	bit  63   : _PAGE_PROTNONE bit
 *
 * Format of file pte:
 *	bit   0   : present bit (must be zero)
 *	bit   1   : _PAGE_FILE (must be one)
 *	bits  2-62: file_offset/PAGE_SIZE
 *	bit  63   : _PAGE_PROTNONE bit
 */
#define __swp_type(entry)		(((entry).val >> 2) & 0x7f)
#define __swp_offset(entry)		(((entry).val << 1) >> 10)
#define __swp_entry(type,offset)	((swp_entry_t) { ((type) << 2) | ((long) (offset) << 9) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })

#define PTE_FILE_MAX_BITS		61
#define pte_to_pgoff(pte)		((pte_val(pte) << 1) >> 3)
#define pgoff_to_pte(off)		((pte_t) { ((off) << 2) | _PAGE_FILE })

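/*
 * Worked example (illustrative): __swp_entry(3, 0x1234) yields
 * val = (3 << 2) | (0x1234 << 9); __swp_type() recovers 3 from
 * bits 2-8, and __swp_offset() recovers 0x1234 by first shifting
 * bit 63 (_PAGE_PROTNONE) out of the top, then shifting the offset
 * down from bit 9.
 */
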
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
		remap_pfn_range(vma, vaddr, pfn, size, prot)

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
extern struct page *zero_page_memmap_ptr;
#define ZERO_PAGE(vaddr) (zero_page_memmap_ptr)

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

#ifdef CONFIG_HUGETLB_PAGE
#define HUGETLB_PGDIR_SHIFT	(HPAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define HUGETLB_PGDIR_SIZE	(__IA64_UL(1) << HUGETLB_PGDIR_SHIFT)
#define HUGETLB_PGDIR_MASK	(~(HUGETLB_PGDIR_SIZE-1))
#endif


#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Update PTEP with ENTRY, which is guaranteed to be a less
 * restrictive PTE.  That is, ENTRY may have the ACCESSED, DIRTY, and
 * WRITABLE bits turned on, when the value at PTEP did not.  The
 * WRITABLE bit may only be turned on if SAFELY_WRITABLE is TRUE.
 *
 * SAFELY_WRITABLE is TRUE if we can update the value at PTEP without
 * having to worry about races.  On SMP machines, there are only two
 * cases where this is true:
 *
 *	(1) *PTEP has the PRESENT bit turned OFF
 *	(2) ENTRY has the DIRTY bit turned ON
 *
 * On ia64, we could implement this routine with a cmpxchg()-loop
 * which ORs in the _PAGE_A/_PAGE_D bit if they're set in ENTRY.
 * However, like on x86, we can get a more streamlined version by
 * observing that it is OK to drop ACCESSED bit updates when
 * SAFELY_WRITABLE is FALSE.  Besides being rare, all that would do is
 * result in an extra Access-bit fault, which would then turn on the
 * ACCESSED bit in the low-level fault handler (iaccess_bit or
 * daccess_bit in ivt.S).
 */
#ifdef CONFIG_SMP
# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
({									\
	int __changed = !pte_same(*(__ptep), __entry);			\
	if (__changed && __safely_writable) {				\
		set_pte(__ptep, __entry);				\
		flush_tlb_page(__vma, __addr);				\
	}								\
	__changed;							\
})
#else
# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
({									\
	int __changed = !pte_same(*(__ptep), __entry);			\
	if (__changed) {						\
		set_pte_at((__vma)->vm_mm, (__addr), __ptep, __entry);	\
		flush_tlb_page(__vma, __addr);				\
	}								\
	__changed;							\
})
#endif
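
/*
 * Usage sketch (illustrative): a caller such as the generic mm fault
 * path might invoke ptep_set_access_flags(vma, addr, ptep,
 * pte_mkdirty(entry), 1) when it can prove the update is race-free;
 * the macro returns nonzero iff the PTE actually changed, so the
 * caller knows whether a TLB flush was issued.
 */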

# ifdef CONFIG_VIRTUAL_MEM_MAP
  /* arch mem_map init routine is needed due to holes in a virtual mem_map */
# define __HAVE_ARCH_MEMMAP_INIT
  extern void memmap_init (unsigned long size, int nid, unsigned long zone,
			   unsigned long start_pfn);
# endif /* CONFIG_VIRTUAL_MEM_MAP */
# endif /* !__ASSEMBLY__ */

/*
 * Identity-mapped regions use a large page size.  We'll call such large pages
 * "granules".  If you can think of a better name that's unambiguous, let me
 * know...
 */
#if defined(CONFIG_IA64_GRANULE_64MB)
# define IA64_GRANULE_SHIFT	_PAGE_SIZE_64M
#elif defined(CONFIG_IA64_GRANULE_16MB)
# define IA64_GRANULE_SHIFT	_PAGE_SIZE_16M
#endif
#define IA64_GRANULE_SIZE	(1 << IA64_GRANULE_SHIFT)
/*
 * log2() of the page size we use to map the kernel image (IA64_TR_KERNEL):
 */
#define KERNEL_TR_PAGE_SHIFT	_PAGE_SIZE_64M
#define KERNEL_TR_PAGE_SIZE	(1 << KERNEL_TR_PAGE_SHIFT)

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

/* These tell get_user_pages() that the first gate page is accessible from user-level. */
#define FIXADDR_USER_START	GATE_ADDR
#ifdef HAVE_BUGGY_SEGREL
# define FIXADDR_USER_END	(GATE_ADDR + 2*PAGE_SIZE)
#else
# define FIXADDR_USER_END	(GATE_ADDR + 2*PERCPU_PAGE_SIZE)
#endif

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
#define __HAVE_ARCH_PGD_OFFSET_GATE


#ifndef CONFIG_PGTABLE_4
#include <asm-generic/pgtable-nopud.h>
#endif
#include <asm-generic/pgtable.h>

#endif /* _ASM_IA64_PGTABLE_H */