/*
 * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
 * Copyright 2003 PathScale, Inc.
 * Licensed under the GPL
 */

#ifndef __UM_PAGE_H
#define __UM_PAGE_H

#include <linux/const.h>

/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12
/* _AC() (from <linux/const.h>) appends the UL suffix only when compiling C,
 * so PAGE_SIZE is usable from assembly as well. */
#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))

#ifndef __ASSEMBLY__
| 18 | |
struct page;

#include <linux/types.h>
#include <asm/vm-flags.h>

/*
 * These are used to make use of C type-checking..
 */

/* Zero / copy one full page through its kernel virtual address. */
#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)

/* The user-page variants ignore the user virtual address and the struct
 * page argument and fall back to the plain page operations -- presumably
 * there is no cache aliasing to handle here (TODO: confirm for UML). */
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
#if defined(CONFIG_3_LEVEL_PGTABLES) && !defined(CONFIG_64BIT)

/*
 * 32-bit host with 3-level page tables: physical addresses may need more
 * than 32 bits, so a pte is split into two 32-bit halves which pte_val()
 * reassembles into an unsigned long long.
 */
typedef struct { unsigned long pte_low, pte_high; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pgd; } pgd_t;
#define pte_val(x) ((x).pte_low | ((unsigned long long) (x).pte_high << 32))

/* The flag bits all live in the low word. */
#define pte_get_bits(pte, bits) ((pte).pte_low & (bits))
#define pte_set_bits(pte, bits) ((pte).pte_low |= (bits))
#define pte_clear_bits(pte, bits) ((pte).pte_low &= ~(bits))
/*
 * Store the high word before the low word, with a write barrier between
 * the two stores -- presumably so a concurrent reader can never pair a
 * freshly written low word with a stale high word (NOTE(review): confirm
 * against the page-table walkers).
 */
#define pte_copy(to, from) ({ (to).pte_high = (from).pte_high; \
			      smp_wmb(); \
			      (to).pte_low = (from).pte_low; })
/* A pte carrying only _PAGE_NEWPAGE in its low word still counts as zero. */
#define pte_is_zero(pte) (!((pte).pte_low & ~_PAGE_NEWPAGE) && !(pte).pte_high)
#define pte_set_val(pte, phys, prot) \
	({ (pte).pte_high = (phys) >> 32; \
	   (pte).pte_low = (phys) | pgprot_val(prot); })

#define pmd_val(x) ((x).pmd)
#define __pmd(x) ((pmd_t) { (x) } )

/* Physical addresses and page frame numbers need 64 bits here. */
typedef unsigned long long pfn_t;
typedef unsigned long long phys_t;

#else

/* Everything fits in one machine word per table entry. */
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pgd; } pgd_t;

#ifdef CONFIG_3_LEVEL_PGTABLES
typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x) ((x).pmd)
#define __pmd(x) ((pmd_t) { (x) } )
#endif

#define pte_val(x) ((x).pte)


#define pte_get_bits(p, bits) ((p).pte & (bits))
#define pte_set_bits(p, bits) ((p).pte |= (bits))
#define pte_clear_bits(p, bits) ((p).pte &= ~(bits))
#define pte_copy(to, from) ((to).pte = (from).pte)
/* A pte carrying only _PAGE_NEWPAGE still counts as zero. */
#define pte_is_zero(p) (!((p).pte & ~_PAGE_NEWPAGE))
/*
 * Parenthesize the 'phys' argument and the whole expansion so that an
 * expression passed as 'phys', or a use of this macro inside a larger
 * expression, cannot misparse; this also matches the 3-level variant
 * above, which already parenthesizes (phys).
 */
#define pte_set_val(p, phys, prot) ((p).pte = ((phys) | pgprot_val(prot)))

typedef unsigned long pfn_t;
typedef unsigned long phys_t;

#endif
| 83 | |
typedef struct { unsigned long pgprot; } pgprot_t;

/* A page-table page is referred to by its struct page. */
typedef struct page *pgtable_t;

#define pgd_val(x) ((x).pgd)
#define pgprot_val(x) ((x).pgprot)

/* Wrap a bare value in the corresponding type-checked struct. */
#define __pte(x) ((pte_t) { (x) } )
#define __pgd(x) ((pgd_t) { (x) } )
#define __pgprot(x) ((pgprot_t) { (x) } )

/* Base of the kernel mapping; a run-time variable here, not a
 * compile-time constant as on most architectures. */
extern unsigned long uml_physmem;

#define PAGE_OFFSET (uml_physmem)
#define KERNELBASE PAGE_OFFSET

#define __va_space (8*1024*1024)

#include "mem.h"

/* Cast to unsigned long before casting to void * to avoid a warning from
 * mmap_kmem about cutting a long long down to a void *. Not sure that
 * casting is the right thing, but 32-bit UML can't have 64-bit virtual
 * addresses
 */
#define __pa(virt) to_phys((void *) (unsigned long) (virt))
#define __va(phys) to_virt((unsigned long) (phys))

/* Convert between physical addresses and page frame numbers. */
#define phys_to_pfn(p) ((pfn_t) ((p) >> PAGE_SHIFT))
#define pfn_to_phys(pfn) ((phys_t) ((pfn) << PAGE_SHIFT))

#define pfn_valid(pfn) ((pfn) < max_mapnr)
#define virt_addr_valid(v) pfn_valid(phys_to_pfn(__pa(v)))

#include <asm-generic/memory_model.h>
#include <asm-generic/page.h>

#endif /* __ASSEMBLY__ */
#endif /* __UM_PAGE_H */