#ifndef _ASM_IA64_PGTABLE_H
#define _ASM_IA64_PGTABLE_H

/*
 * This file contains the functions and defines necessary to modify and use
 * the IA-64 page table tree.
 *
 * This hopefully works with any (fixed) IA-64 page-size, as defined
 * in <asm/page.h>.
 *
 * Copyright (C) 1998-2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <linux/config.h>

#include <asm/mman.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/types.h>

#define IA64_MAX_PHYS_BITS	50	/* max. number of physical address bits (architected) */

/*
 * First, define the various bits in a PTE.  Note that the PTE format
 * matches the VHPT short format, the first doubleword of the VHPT long
 * format, and the first doubleword of the TLB insertion format.
 */
#define _PAGE_P_BIT		0
#define _PAGE_A_BIT		5
#define _PAGE_D_BIT		6

#define _PAGE_P			(1 << _PAGE_P_BIT)	/* page present bit */
#define _PAGE_MA_WB		(0x0 <<  2)	/* write back memory attribute */
#define _PAGE_MA_UC		(0x4 <<  2)	/* uncacheable memory attribute */
#define _PAGE_MA_UCE		(0x5 <<  2)	/* UC exported attribute */
#define _PAGE_MA_WC		(0x6 <<  2)	/* write coalescing memory attribute */
#define _PAGE_MA_NAT		(0x7 <<  2)	/* not-a-thing attribute */
#define _PAGE_MA_MASK		(0x7 <<  2)
#define _PAGE_PL_0		(0 <<  7)	/* privilege level 0 (kernel) */
#define _PAGE_PL_1		(1 <<  7)	/* privilege level 1 (unused) */
#define _PAGE_PL_2		(2 <<  7)	/* privilege level 2 (unused) */
#define _PAGE_PL_3		(3 <<  7)	/* privilege level 3 (user) */
#define _PAGE_PL_MASK		(3 <<  7)
#define _PAGE_AR_R		(0 <<  9)	/* read only */
#define _PAGE_AR_RX		(1 <<  9)	/* read & execute */
#define _PAGE_AR_RW		(2 <<  9)	/* read & write */
#define _PAGE_AR_RWX		(3 <<  9)	/* read, write & execute */
#define _PAGE_AR_R_RW		(4 <<  9)	/* read / read & write */
#define _PAGE_AR_RX_RWX		(5 <<  9)	/* read & exec / read, write & exec */
#define _PAGE_AR_RWX_RW		(6 <<  9)	/* read, write & exec / read & write */
#define _PAGE_AR_X_RX		(7 <<  9)	/* exec & promote / read & exec */
#define _PAGE_AR_MASK		(7 <<  9)
#define _PAGE_AR_SHIFT		9
#define _PAGE_A			(1 << _PAGE_A_BIT)	/* page accessed bit */
#define _PAGE_D			(1 << _PAGE_D_BIT)	/* page dirty bit */
#define _PAGE_PPN_MASK		(((__IA64_UL(1) << IA64_MAX_PHYS_BITS) - 1) & ~0xfffUL)
#define _PAGE_ED		(__IA64_UL(1) << 52)	/* exception deferral */
#define _PAGE_PROTNONE		(__IA64_UL(1) << 63)

/* Valid only for a PTE with the present bit cleared: */
#define _PAGE_FILE		(1 << 1)	/* see swap & file pte remarks below */

#define _PFN_MASK		_PAGE_PPN_MASK
/* Mask of bits which may be changed by pte_modify(); the odd bits are there for _PAGE_PROTNONE */
#define _PAGE_CHG_MASK	(_PAGE_P | _PAGE_PROTNONE | _PAGE_PL_MASK | _PAGE_AR_MASK | _PAGE_ED)

#define _PAGE_SIZE_4K	12
#define _PAGE_SIZE_8K	13
#define _PAGE_SIZE_16K	14
#define _PAGE_SIZE_64K	16
#define _PAGE_SIZE_256K	18
#define _PAGE_SIZE_1M	20
#define _PAGE_SIZE_4M	22
#define _PAGE_SIZE_16M	24
#define _PAGE_SIZE_64M	26
#define _PAGE_SIZE_256M	28
#define _PAGE_SIZE_1G	30
#define _PAGE_SIZE_4G	32

#define __ACCESS_BITS		(_PAGE_ED | _PAGE_A | _PAGE_P | _PAGE_MA_WB)
#define __DIRTY_BITS_NO_ED	(_PAGE_A | _PAGE_P | _PAGE_D | _PAGE_MA_WB)
#define __DIRTY_BITS		(_PAGE_ED | __DIRTY_BITS_NO_ED)

/*
 * Definitions for first level:
 *
 * PGDIR_SHIFT determines what a first-level page table entry can map.
 */
#define PGDIR_SHIFT		(PAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define PGDIR_SIZE		(__IA64_UL(1) << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE-1))
#define PTRS_PER_PGD		(1UL << (PAGE_SHIFT-3))
#define USER_PTRS_PER_PGD	(5*PTRS_PER_PGD/8)	/* regions 0-4 are user regions */
#define FIRST_USER_ADDRESS	0
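
/*
 * Worked example (illustrative, assuming PAGE_SHIFT == 14, i.e. 16KB pages):
 * PGDIR_SHIFT = 14 + 2*(14-3) = 36, so one first-level entry maps 2^36
 * bytes (64GB); PTRS_PER_PGD = 1 << 11 = 2048 entries, of which
 * USER_PTRS_PER_PGD = 5*2048/8 = 1280 cover the user regions 0-4.
 */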

/*
 * Definitions for second level:
 *
 * PMD_SHIFT determines the size of the area a second-level page table
 * can map.
 */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PTRS_PER_PMD	(1UL << (PAGE_SHIFT-3))

/*
 * Definitions for third level:
 */
#define PTRS_PER_PTE	(__IA64_UL(1) << (PAGE_SHIFT-3))
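
/*
 * Illustrative sizing (again assuming 16KB pages): each of the three levels
 * then holds 1 << (14-3) = 2048 entries, so one first-level entry maps
 * 2048 * 2048 * 16KB = 2^36 bytes, matching PGDIR_SHIFT == 36 above.
 */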

/*
 * All the normal masks have the "page accessed" bits on, as any time
 * they are used, the page is accessed.  They are cleared only by the
 * page-out routines.
 */
#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_A)
#define PAGE_SHARED	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
#define PAGE_READONLY	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
#define PAGE_COPY	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
#define PAGE_COPY_EXEC	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
#define PAGE_GATE	__pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
#define PAGE_KERNEL	__pgprot(__DIRTY_BITS  | _PAGE_PL_0 | _PAGE_AR_RWX)
#define PAGE_KERNELRX	__pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)

# ifndef __ASSEMBLY__

#include <linux/sched.h>	/* for mm_struct */
#include <asm/bitops.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>

/*
 * Next come the mappings that determine how mmap() protection bits
 * (PROT_EXEC, PROT_READ, PROT_WRITE, PROT_NONE) get implemented.  The
 * _P version gets used for a private (copy-on-write) mapping, the _S
 * version for a shared mapping with MAP_SHARED on.  In a private
 * mapping, we do a copy-on-write if a task attempts to write to the
 * page (see the worked example after the tables below).
 */
	/* xwr */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_READONLY	/* write to priv pg -> copy & make writable */
#define __P011	PAGE_READONLY	/* ditto */
#define __P100	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
#define __P101	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED	/* we don't have (and don't need) write-only */
#define __S011	PAGE_SHARED
#define __S100	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
#define __S101	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
#define __S110	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
#define __S111	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
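
/*
 * Example (illustrative): mmap() with PROT_READ|PROT_WRITE and MAP_PRIVATE
 * selects __P011 == PAGE_READONLY; the first write then faults and the
 * copy-on-write path installs a private writable copy.  The same protection
 * bits with MAP_SHARED select __S011 == PAGE_SHARED, which is directly
 * writable.
 */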

#define pgd_ERROR(e)	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
#define pmd_ERROR(e)	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pte_ERROR(e)	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))


/*
 * Some definitions to translate between mem_map, PTEs, and page addresses:
 */


/* Quick test to see if ADDR is a (potentially) valid physical address. */
static inline long
ia64_phys_addr_valid (unsigned long addr)
{
	return (addr & (local_cpu_data->unimpl_pa_mask)) == 0;
}

/*
 * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
 * memory.  For the return value to be meaningful, ADDR must be >=
 * PAGE_OFFSET.  This operation can be relatively expensive (e.g.,
 * require a hash-, or multi-level tree-lookup or something of that
 * sort) but it guarantees to return TRUE only if accessing the page
 * at that address does not cause an error.  Note that there may be
 * addresses for which kern_addr_valid() returns FALSE even though an
 * access would not cause an error (e.g., this is typically true for
 * memory mapped I/O regions).
 *
 * XXX Need to implement this for IA-64.
 */
#define kern_addr_valid(addr)	(1)


/*
 * Now come the defines and routines to manage and access the three-level
 * page table.
 */

/*
 * On some architectures, special things need to be done when setting
 * the PTE in a page table.  Nothing special needs to be done on IA-64.
 */
#define set_pte(ptep, pteval)	(*(ptep) = (pteval))
#define set_pte_at(mm,addr,ptep,pteval)	set_pte(ptep,pteval)

#define VMALLOC_START		(RGN_BASE(RGN_GATE) + 0x200000000UL)
#ifdef CONFIG_VIRTUAL_MEM_MAP
# define VMALLOC_END_INIT	(RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
# define VMALLOC_END		vmalloc_end
  extern unsigned long vmalloc_end;
#else
# define VMALLOC_END		(RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
#endif
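
/*
 * Sizing note (illustrative, assuming PAGE_SHIFT == 14): VMALLOC_END lies
 * 1UL << (4*14 - 9) = 2^47 bytes (128TB) above the gate region's base.
 */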

/* fs/proc/kcore.c */
#define	kc_vaddr_to_offset(v)	((v) - RGN_BASE(RGN_GATE))
#define	kc_offset_to_vaddr(o)	((o) + RGN_BASE(RGN_GATE))

/*
 * Conversion functions: convert page frame number (pfn) and a protection value to a page
 * table entry (pte).
 */
#define pfn_pte(pfn, pgprot) \
({ pte_t __pte; pte_val(__pte) = ((pfn) << PAGE_SHIFT) | pgprot_val(pgprot); __pte; })

/* Extract pfn from pte. */
#define pte_pfn(_pte)		((pte_val(_pte) & _PFN_MASK) >> PAGE_SHIFT)

#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

/* This takes a physical page address that is used by the remapping functions */
#define mk_pte_phys(physpage, pgprot) \
({ pte_t __pte; pte_val(__pte) = physpage + pgprot_val(pgprot); __pte; })

#define pte_modify(_pte, newprot) \
	(__pte((pte_val(_pte) & ~_PAGE_CHG_MASK) | (pgprot_val(newprot) & _PAGE_CHG_MASK)))
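
/*
 * Illustrative note: pte_modify() takes the bits in _PAGE_CHG_MASK (present,
 * protnone, privilege level, access rights, exception deferral) from the new
 * pgprot while preserving the pfn, the memory attribute, and the
 * accessed/dirty bits of the old PTE.
 */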

#define pte_none(pte)			(!pte_val(pte))
#define pte_present(pte)		(pte_val(pte) & (_PAGE_P | _PAGE_PROTNONE))
#define pte_clear(mm,addr,pte)		(pte_val(*(pte)) = 0UL)
/* pte_page() returns the "struct page *" corresponding to the PTE: */
#define pte_page(pte)			virt_to_page(((pte_val(pte) & _PFN_MASK) + PAGE_OFFSET))

#define pmd_none(pmd)			(!pmd_val(pmd))
#define pmd_bad(pmd)			(!ia64_phys_addr_valid(pmd_val(pmd)))
#define pmd_present(pmd)		(pmd_val(pmd) != 0UL)
#define pmd_clear(pmdp)			(pmd_val(*(pmdp)) = 0UL)
#define pmd_page_kernel(pmd)		((unsigned long) __va(pmd_val(pmd) & _PFN_MASK))
#define pmd_page(pmd)			virt_to_page((pmd_val(pmd) + PAGE_OFFSET))

#define pud_none(pud)			(!pud_val(pud))
#define pud_bad(pud)			(!ia64_phys_addr_valid(pud_val(pud)))
#define pud_present(pud)		(pud_val(pud) != 0UL)
#define pud_clear(pudp)			(pud_val(*(pudp)) = 0UL)

#define pud_page(pud)			((unsigned long) __va(pud_val(pud) & _PFN_MASK))

/*
 * The following have defined behavior only if pte_present() is true.
 */
#define pte_user(pte)		((pte_val(pte) & _PAGE_PL_MASK) == _PAGE_PL_3)
#define pte_read(pte)		(((pte_val(pte) & _PAGE_AR_MASK) >> _PAGE_AR_SHIFT) < 6)
#define pte_write(pte)	((unsigned) (((pte_val(pte) & _PAGE_AR_MASK) >> _PAGE_AR_SHIFT) - 2) <= 4)
#define pte_exec(pte)		((pte_val(pte) & _PAGE_AR_RX) != 0)
#define pte_dirty(pte)		((pte_val(pte) & _PAGE_D) != 0)
#define pte_young(pte)		((pte_val(pte) & _PAGE_A) != 0)
#define pte_file(pte)		((pte_val(pte) & _PAGE_FILE) != 0)
/*
 * Note: we convert AR_RWX to AR_RX and AR_RW to AR_R by clearing the 2nd bit in the
 * access rights:
 */
#define pte_wrprotect(pte)	(__pte(pte_val(pte) & ~_PAGE_AR_RW))
#define pte_mkwrite(pte)	(__pte(pte_val(pte) | _PAGE_AR_RW))
#define pte_mkexec(pte)		(__pte(pte_val(pte) | _PAGE_AR_RX))
#define pte_mkold(pte)		(__pte(pte_val(pte) & ~_PAGE_A))
#define pte_mkyoung(pte)	(__pte(pte_val(pte) | _PAGE_A))
#define pte_mkclean(pte)	(__pte(pte_val(pte) & ~_PAGE_D))
#define pte_mkdirty(pte)	(__pte(pte_val(pte) | _PAGE_D))
#define pte_mkhuge(pte)		(__pte(pte_val(pte) | _PAGE_P))

/*
 * Macro to mark a page protection value as "uncacheable".  Note that "protection" is really
 * a misnomer here as the protection value contains the memory attribute bits, dirty bits,
 * and various other bits as well.
 */
#define pgprot_noncached(prot)		__pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_UC)

/*
 * Macro to mark a page protection value as "write-combining".
 * Note that "protection" is really a misnomer here as the protection
 * value contains the memory attribute bits, dirty bits, and various
 * other bits as well.  Accesses through a write-combining translation
 * bypass the caches, but do allow consecutive writes to be combined
 * into single (but larger) write transactions.
 */
#define pgprot_writecombine(prot)	__pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WC)
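
/*
 * Usage sketch (illustrative, not prescribed by this header): a driver
 * mapping device registers uncached might do
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	io_remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot);
 *
 * where "pfn" and "size" are hypothetical values describing the region
 * (io_remap_pfn_range() is defined further below in this file).
 */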

static inline unsigned long
pgd_index (unsigned long address)
{
	unsigned long region = address >> 61;
	unsigned long l1index = (address >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1);

	return (region << (PAGE_SHIFT - 6)) | l1index;
}
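
/*
 * Worked example (illustrative, assuming PAGE_SHIFT == 14): for an address
 * in region 5 such as 0xa000000000001000, region == 5 and l1index occupies
 * the low (PAGE_SHIFT-6) == 8 bits, so each of the eight regions gets a
 * 256-entry slice of the 2048-entry pgd.
 */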

/* The offset in the 1-level directory is given by the 3 region bits
   (61..63) and the level-1 bits.  */
static inline pgd_t*
pgd_offset (struct mm_struct *mm, unsigned long address)
{
	return mm->pgd + pgd_index(address);
}

/* In the kernel's mapped region we completely ignore the region number
   (since we know it's in region number 5).  */
#define pgd_offset_k(addr) \
	(init_mm.pgd + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)))

/* Look up a pgd entry in the gate area.  On IA-64, the gate-area
   resides in the kernel-mapped segment, hence we use pgd_offset_k()
   here.  */
#define pgd_offset_gate(mm, addr)	pgd_offset_k(addr)

/* Find an entry in the second-level page table.. */
#define pmd_offset(dir,addr) \
	((pmd_t *) pud_page(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))

/*
 * Find an entry in the third-level page table.  This looks more complicated than it
 * should be because some platforms place page tables in high memory.
 */
#define pte_index(addr)			(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir,addr)	((pte_t *) pmd_page_kernel(*(dir)) + pte_index(addr))
#define pte_offset_map(dir,addr)	pte_offset_kernel(dir, addr)
#define pte_offset_map_nested(dir,addr)	pte_offset_map(dir, addr)
#define pte_unmap(pte)			do { } while (0)
#define pte_unmap_nested(pte)		do { } while (0)
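
/*
 * Illustrative sketch (not compiled): how the lookup helpers above compose
 * when walking the tree by hand for a user address.  Locking and huge-page
 * handling are omitted; "example_lookup_pte" is hypothetical, and
 * pud_offset() comes from <asm-generic/pgtable-nopud.h>, which is included
 * at the end of this file.
 */
#if 0
static inline pte_t *
example_lookup_pte (struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);	/* first level */
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, addr);		/* folded level (nopud) */
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);		/* second level */
	if (pmd_none(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);	/* third level */
}
#endif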

/* atomic versions of some of the PTE manipulations: */

static inline int
ptep_test_and_clear_young (struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
	if (!pte_young(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_A_BIT, ptep);
#else
	pte_t pte = *ptep;
	if (!pte_young(pte))
		return 0;
	set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return 1;
#endif
}

static inline int
ptep_test_and_clear_dirty (struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
	if (!pte_dirty(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_D_BIT, ptep);
#else
	pte_t pte = *ptep;
	if (!pte_dirty(pte))
		return 0;
	set_pte_at(vma->vm_mm, addr, ptep, pte_mkclean(pte));
	return 1;
#endif
}

static inline pte_t
ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
	return __pte(xchg((long *) ptep, 0));
#else
	pte_t pte = *ptep;
	pte_clear(mm, addr, ptep);
	return pte;
#endif
}

static inline void
ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
	unsigned long new, old;

	do {
		old = pte_val(*ptep);
		new = pte_val(pte_wrprotect(__pte (old)));
	} while (cmpxchg((unsigned long *) ptep, old, new) != old);
#else
	pte_t old_pte = *ptep;
	set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
#endif
}

static inline int
pte_same (pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

#define update_mmu_cache(vma, address, pte) do { } while (0)

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init (void);

/*
 * Note: The macros below rely on the fact that MAX_SWAPFILES_SHIFT <= number of
 *	 bits in the swap-type field of the swap pte.  It would be nice to
 *	 enforce that, but we can't easily include <linux/swap.h> here.
 *	 (Of course, better still would be to define MAX_SWAPFILES_SHIFT here...).
 *
 * Format of swap pte:
 *	bit   0   : present bit (must be zero)
 *	bit   1   : _PAGE_FILE (must be zero)
 *	bits  2- 8: swap-type
 *	bits  9-62: swap offset
 *	bit  63   : _PAGE_PROTNONE bit
 *
 * Format of file pte:
 *	bit   0   : present bit (must be zero)
 *	bit   1   : _PAGE_FILE (must be one)
 *	bits  2-62: file_offset/PAGE_SIZE
 *	bit  63   : _PAGE_PROTNONE bit
 */
#define __swp_type(entry)		(((entry).val >> 2) & 0x7f)
#define __swp_offset(entry)		(((entry).val << 1) >> 10)
#define __swp_entry(type,offset)	((swp_entry_t) { ((type) << 2) | ((long) (offset) << 9) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })

#define PTE_FILE_MAX_BITS		61
#define pte_to_pgoff(pte)		((pte_val(pte) << 1) >> 3)
#define pgoff_to_pte(off)		((pte_t) { ((off) << 2) | _PAGE_FILE })
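
/*
 * Worked example (illustrative): __swp_entry(3, 0x10) yields
 * (3 << 2) | (0x10 << 9) == 0x200c, from which __swp_type() recovers 3
 * and __swp_offset() recovers 0x10; bits 0 and 1 stay zero, so the PTE
 * is neither present nor a file PTE.
 */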

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
		remap_pfn_range(vma, vaddr, pfn, size, prot)

#define MK_IOSPACE_PFN(space, pfn)	(pfn)
#define GET_IOSPACE(pfn)		0
#define GET_PFN(pfn)			(pfn)

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
extern struct page *zero_page_memmap_ptr;
#define ZERO_PAGE(vaddr) (zero_page_memmap_ptr)

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

#ifdef CONFIG_HUGETLB_PAGE
#define HUGETLB_PGDIR_SHIFT	(HPAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define HUGETLB_PGDIR_SIZE	(__IA64_UL(1) << HUGETLB_PGDIR_SHIFT)
#define HUGETLB_PGDIR_MASK	(~(HUGETLB_PGDIR_SIZE-1))
struct mmu_gather;
void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
			    unsigned long end, unsigned long floor, unsigned long ceiling);
#endif

/*
 * IA-64 doesn't have any external MMU info: the page tables contain all the necessary
 * information.  However, we use this routine to take care of any (delayed) i-cache
 * flushing that may be necessary.
 */
extern void lazy_mmu_prot_update (pte_t pte);

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Update PTEP with ENTRY, which is guaranteed to be a less
 * restrictive PTE.  That is, ENTRY may have the ACCESSED, DIRTY, and
 * WRITABLE bits turned on, when the value at PTEP did not.  The
 * WRITABLE bit may only be turned on if SAFELY_WRITABLE is TRUE.
 *
 * SAFELY_WRITABLE is TRUE if we can update the value at PTEP without
 * having to worry about races.  On SMP machines, there are only two
 * cases where this is true:
 *
 *	(1) *PTEP has the PRESENT bit turned OFF
 *	(2) ENTRY has the DIRTY bit turned ON
 *
 * On ia64, we could implement this routine with a cmpxchg()-loop
 * which ORs in the _PAGE_A/_PAGE_D bit if they're set in ENTRY.
 * However, like on x86, we can get a more streamlined version by
 * observing that it is OK to drop ACCESSED bit updates when
 * SAFELY_WRITABLE is FALSE.  Besides being rare, all that would do is
 * result in an extra Access-bit fault, which would then turn on the
 * ACCESSED bit in the low-level fault handler (iaccess_bit or
 * daccess_bit in ivt.S).
 */
#ifdef CONFIG_SMP
# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable)	\
do {											\
	if (__safely_writable) {							\
		set_pte(__ptep, __entry);						\
		flush_tlb_page(__vma, __addr);						\
	}										\
} while (0)
#else
# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable)	\
	ptep_establish(__vma, __addr, __ptep, __entry)
#endif

# ifdef CONFIG_VIRTUAL_MEM_MAP
  /* arch mem_map init routine is needed due to holes in a virtual mem_map */
# define __HAVE_ARCH_MEMMAP_INIT
  extern void memmap_init (unsigned long size, int nid, unsigned long zone,
			   unsigned long start_pfn);
# endif /* CONFIG_VIRTUAL_MEM_MAP */
# endif /* !__ASSEMBLY__ */

/*
 * Identity-mapped regions use a large page size.  We'll call such large pages
 * "granules".  If you can think of a better name that's unambiguous, let me
 * know...
 */
#if defined(CONFIG_IA64_GRANULE_64MB)
# define IA64_GRANULE_SHIFT	_PAGE_SIZE_64M
#elif defined(CONFIG_IA64_GRANULE_16MB)
# define IA64_GRANULE_SHIFT	_PAGE_SIZE_16M
#endif
#define IA64_GRANULE_SIZE	(1 << IA64_GRANULE_SHIFT)
/*
 * log2() of the page size we use to map the kernel image (IA64_TR_KERNEL):
 */
#define KERNEL_TR_PAGE_SHIFT	_PAGE_SIZE_64M
#define KERNEL_TR_PAGE_SIZE	(1 << KERNEL_TR_PAGE_SHIFT)

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

/* These tell get_user_pages() that the first gate page is accessible from user-level. */
#define FIXADDR_USER_START	GATE_ADDR
#ifdef HAVE_BUGGY_SEGREL
# define FIXADDR_USER_END	(GATE_ADDR + 2*PAGE_SIZE)
#else
# define FIXADDR_USER_END	(GATE_ADDR + 2*PERCPU_PAGE_SIZE)
#endif

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
#define __HAVE_ARCH_PGD_OFFSET_GATE
#define __HAVE_ARCH_LAZY_MMU_PROT_UPDATE

#include <asm-generic/pgtable-nopud.h>
#include <asm-generic/pgtable.h>

#endif /* _ASM_IA64_PGTABLE_H */