/*
 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
 * Copyright 2003 PathScale, Inc.
 * Derived from include/asm-i386/pgtable.h
 * Licensed under the GPL
 */
| 7 | |
| 8 | #ifndef __UM_PGTABLE_2LEVEL_H |
| 9 | #define __UM_PGTABLE_2LEVEL_H |
| 10 | |
| 11 | #include <asm-generic/pgtable-nopmd.h> |
| 12 | |
| 13 | /* PGDIR_SHIFT determines what a third-level page table entry can map */ |
| 14 | |
| 15 | #define PGDIR_SHIFT 22 |
| 16 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) |
| 17 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) |
| 18 | |
| 19 | /* |
| 20 | * entries per page directory level: the i386 is two-level, so |
| 21 | * we don't really have any PMD directory physically. |
| 22 | */ |
| 23 | #define PTRS_PER_PTE 1024 |
| 24 | #define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE) |
| 25 | #define PTRS_PER_PGD 1024 |
Hugh Dickins | d455a36 | 2005-04-19 13:29:23 -0700 | [diff] [blame] | 26 | #define FIRST_USER_ADDRESS 0 |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 27 | |
/*
 * Report a corrupted page table entry: source location, the entry's
 * address, and its raw value.
 */
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p(%08lx).\n", __FILE__, __LINE__, &(e), \
	       pte_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p(%08lx).\n", __FILE__, __LINE__, &(e), \
	       pgd_val(e))

| 35 | static inline int pgd_newpage(pgd_t pgd) { return 0; } |
| 36 | static inline void pgd_mkuptodate(pgd_t pgd) { } |
| 37 | |
| 38 | #define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE)) |
| 39 | |
| 40 | static inline pte_t pte_mknewprot(pte_t pte) |
| 41 | { |
| 42 | pte_val(pte) |= _PAGE_NEWPROT; |
| 43 | return(pte); |
| 44 | } |
| 45 | |
| 46 | static inline pte_t pte_mknewpage(pte_t pte) |
| 47 | { |
| 48 | pte_val(pte) |= _PAGE_NEWPAGE; |
| 49 | return(pte); |
| 50 | } |
| 51 | |
| 52 | static inline void set_pte(pte_t *pteptr, pte_t pteval) |
| 53 | { |
| 54 | /* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so |
| 55 | * fix_range knows to unmap it. _PAGE_NEWPROT is specific to |
| 56 | * mapped pages. |
| 57 | */ |
| 58 | *pteptr = pte_mknewpage(pteval); |
| 59 | if(pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr); |
| 60 | } |
| 61 | #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) |
| 62 | |
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))

#define pte_page(x) pfn_to_page(pte_pfn(x))
/*
 * An entry is "none" when no bits other than the software-only
 * _PAGE_NEWPAGE flag are set.  The expansion is wrapped in outer
 * parentheses so the macro behaves as a single expression regardless
 * of the operators surrounding the call site (CERT PRE02-C); the
 * original expansion left the leading `!` exposed.
 */
#define pte_none(x) (!(pte_val(x) & ~_PAGE_NEWPAGE))
#define pte_pfn(x) phys_to_pfn(pte_val(x))
#define pfn_pte(pfn, prot) __pte(pfn_to_phys(pfn) | pgprot_val(prot))
#define pfn_pmd(pfn, prot) __pmd(pfn_to_phys(pfn) | pgprot_val(prot))

/* Virtual address (via __va) of the page the pmd entry points at. */
#define pmd_page_kernel(pmd) \
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

| 74 | /* |
| 75 | * Bits 0 through 3 are taken |
| 76 | */ |
| 77 | #define PTE_FILE_MAX_BITS 28 |
| 78 | |
| 79 | #define pte_to_pgoff(pte) (pte_val(pte) >> 4) |
| 80 | |
| 81 | #define pgoff_to_pte(off) ((pte_t) { ((off) << 4) + _PAGE_FILE }) |
| 82 | |
| 83 | #endif |