/*
 * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
 * Copyright 2003 PathScale, Inc.
 * Licensed under the GPL
 */

#ifndef __UM_PAGE_H
#define __UM_PAGE_H

/* Forward declaration only - struct page itself is defined by the generic MM. */
struct page;

#include <linux/config.h>
#include <asm/vm-flags.h>

/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE-1))

/*
 * These are used to make use of C type-checking..
 */

#define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
#define copy_page(to,from)	memcpy((void *)(to), (void *)(from), PAGE_SIZE)

#define clear_user_page(page, vaddr, pg)	clear_page(page)
#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)

#if defined(CONFIG_3_LEVEL_PGTABLES) && !defined(CONFIG_64BIT)

/*
 * 32-bit build with 3-level page tables: a PTE is 64 bits wide but is
 * kept as two 32-bit halves, since a 32-bit host has no single 64-bit
 * store to update it with.
 */
typedef struct { unsigned long pte_low, pte_high; } pte_t;
typedef struct { unsigned long long pmd; } pmd_t;
typedef struct { unsigned long pgd; } pgd_t;
#define pte_val(x) ((x).pte_low | ((unsigned long long) (x).pte_high << 32))

/* The flag bits manipulated here all live in the low word. */
#define pte_get_bits(pte, bits) ((pte).pte_low & (bits))
#define pte_set_bits(pte, bits) ((pte).pte_low |= (bits))
#define pte_clear_bits(pte, bits) ((pte).pte_low &= ~(bits))
/*
 * High word is written first, then smp_wmb(), then the low word -
 * presumably so that an observer which sees the new low half is
 * guaranteed to also see the new high half (cf. the i386 PAE set_pte
 * ordering).  NOTE(review): confirm against the pgtable readers.
 */
#define pte_copy(to, from) ({ (to).pte_high = (from).pte_high; \
			      smp_wmb(); \
			      (to).pte_low = (from).pte_low; })
/*
 * _PAGE_NEWPAGE (declared in the UML pgtable headers, not visible here)
 * is masked off: a PTE carrying only that bookkeeping bit still counts
 * as empty.
 */
#define pte_is_zero(pte) (!((pte).pte_low & ~_PAGE_NEWPAGE) && !(pte).pte_high)
#define pte_set_val(pte, phys, prot) \
	({ (pte).pte_high = (phys) >> 32; \
	   (pte).pte_low = (phys) | pgprot_val(prot); })

#define pmd_val(x)	((x).pmd)
#define __pmd(x) ((pmd_t) { (x) } )

/* Physical addresses and frame numbers can exceed 32 bits in this config. */
typedef unsigned long long pfn_t;
typedef unsigned long long phys_t;

#else

/* 64-bit build, or 2-level page tables: one word per PTE is enough. */
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pgd; } pgd_t;

#ifdef CONFIG_3_LEVEL_PGTABLES
typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x)	((x).pmd)
#define __pmd(x) ((pmd_t) { (x) } )
#endif

#define pte_val(x)	((x).pte)


/* Single-word variants of the accessors above; no ordering games needed. */
#define pte_get_bits(p, bits) ((p).pte & (bits))
#define pte_set_bits(p, bits) ((p).pte |= (bits))
#define pte_clear_bits(p, bits) ((p).pte &= ~(bits))
#define pte_copy(to, from) ((to).pte = (from).pte)
#define pte_is_zero(p) (!((p).pte & ~_PAGE_NEWPAGE))
#define pte_set_val(p, phys, prot) (p).pte = (phys | pgprot_val(prot))

typedef unsigned long pfn_t;
typedef unsigned long phys_t;

#endif

typedef struct { unsigned long pgprot; } pgprot_t;

#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define __pte(x) ((pte_t) { (x) } )
#define __pgd(x) ((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE-1)&PAGE_MASK)

/* Start of the kernel's physical memory mapping; defined elsewhere in UML. */
extern unsigned long uml_physmem;

/* UML's kernel virtual base is wherever physmem was mapped, not a constant. */
#define PAGE_OFFSET	(uml_physmem)
#define KERNELBASE PAGE_OFFSET

#define __va_space (8*1024*1024)

#include "mem.h"

/* Cast to unsigned long before casting to void * to avoid a warning from
 * mmap_kmem about cutting a long long down to a void *.  Not sure that
 * casting is the right thing, but 32-bit UML can't have 64-bit virtual
 * addresses
 */
#define __pa(virt) to_phys((void *) (unsigned long) (virt))
#define __va(phys) to_virt((unsigned long) (phys))

/* Physical address <-> page frame number conversions. */
#define phys_to_pfn(p) ((p) >> PAGE_SHIFT)
#define pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT)

#define pfn_valid(pfn) ((pfn) < max_mapnr)
#define virt_addr_valid(v) pfn_valid(phys_to_pfn(__pa(v)))

/* UML provides its own page-allocator validate/free hooks. */
extern struct page *arch_validate(struct page *page, gfp_t mask, int order);
#define HAVE_ARCH_VALIDATE

extern void arch_free_page(struct page *page, int order);
#define HAVE_ARCH_FREE_PAGE

#include <asm-generic/memory_model.h>
#include <asm-generic/page.h>

#endif