/*
 * include/asm-v850/page.h -- VM ops
 *
 * Copyright (C) 2001,02,03,05  NEC Electronics Corporation
 * Copyright (C) 2001,02,03,05  Miles Bader <miles@gnu.org>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file COPYING in the main directory of this
 * archive for more details.
 *
 * Written by Miles Bader <miles@gnu.org>
 */

#ifndef __V850_PAGE_H__
#define __V850_PAGE_H__

#ifdef __KERNEL__

#include <asm/machdep.h>


#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE-1))
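
/*
 * With PAGE_SHIFT == 12 these are the usual 4 KiB pages: PAGE_SIZE is
 * 0x1000 and, with 32-bit unsigned long, PAGE_MASK works out to
 * 0xfffff000, i.e. it clears the low 12 offset bits of an address.
 */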


/*
 * PAGE_OFFSET -- the first address of the first page of memory.  For archs
 * with no MMU this corresponds to the first free page in physical memory
 * (aligned on a page boundary).
 */
#ifndef PAGE_OFFSET
#define PAGE_OFFSET	0x0000000
#endif
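
/*
 * Note: <asm/machdep.h>, included above, may already have defined
 * PAGE_OFFSET for a particular platform; the #ifndef above only supplies
 * a fallback default of physical address 0.
 */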


#ifndef __ASSEMBLY__

#define STRICT_MM_TYPECHECKS

#define clear_page(page)	memset ((void *)(page), 0, PAGE_SIZE)
#define copy_page(to, from)	memcpy ((void *)(to), (void *)(from), PAGE_SIZE)

#define clear_user_page(addr, vaddr, page)	\
	do { clear_page(addr);			\
	     flush_dcache_page(page);		\
	} while (0)
#define copy_user_page(to, from, vaddr, page)	\
	do { copy_page(to, from);		\
	     flush_dcache_page(page);		\
	} while (0)
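
/*
 * The *_user_page variants are just the plain page operations followed by
 * a D-cache flush of the target struct page; the user virtual address
 * argument (vaddr) is unused here.
 */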

#ifdef STRICT_MM_TYPECHECKS
/*
 * These are used to make use of C type-checking..
 */

typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;

#define pte_val(x)	((x).pte)
#define pmd_val(x)	((x).pmd)
#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define __pte(x)	((pte_t) { (x) } )
#define __pmd(x)	((pmd_t) { (x) } )
#define __pgd(x)	((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )

#else /* !STRICT_MM_TYPECHECKS */
/*
 * .. while these make it easier on the compiler
 */

typedef unsigned long pte_t;
typedef unsigned long pmd_t;
typedef unsigned long pgd_t;
typedef unsigned long pgprot_t;

#define pte_val(x)	(x)
#define pmd_val(x)	(x)
#define pgd_val(x)	(x)
#define pgprot_val(x)	(x)

#define __pte(x)	(x)
#define __pmd(x)	(x)
#define __pgd(x)	(x)
#define __pgprot(x)	(x)

#endif /* STRICT_MM_TYPECHECKS */
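
/*
 * Illustrative example (not part of the API): with STRICT_MM_TYPECHECKS
 * the struct wrappers make accidental mixing of page-table values and
 * plain integers a compile-time error:
 *
 *	pte_t pte = __pte(0x1000);		wraps a raw value
 *	unsigned long raw = pte_val(pte);	explicit unwrap, OK
 *	unsigned long bad = pte;		error: struct vs. integer
 *
 * With the plain-typedef variant all three lines compile silently, but
 * the compiler has less work to do.
 */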

#endif /* !__ASSEMBLY__ */


/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr)	(((addr) + PAGE_SIZE - 1) & PAGE_MASK)
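
/*
 * For example, with 4 KiB pages PAGE_ALIGN(0x12345) is 0x13000, and an
 * already-aligned address such as 0x13000 is returned unchanged.
 */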


/* No current v850 processor has virtual memory.  */
#define __virt_to_phys(addr)	(addr)
#define __phys_to_virt(addr)	(addr)

#define virt_to_pfn(kaddr)	(__virt_to_phys (kaddr) >> PAGE_SHIFT)
#define pfn_to_virt(pfn)	__phys_to_virt ((pfn) << PAGE_SHIFT)

#define MAP_NR(kaddr) \
	(((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT)
#define virt_to_page(kaddr)	(mem_map + MAP_NR (kaddr))
#define page_to_virt(page) \
	((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)
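
/*
 * Since the virt/phys conversions above are identity mappings, these
 * reduce to simple shifts plus mem_map index arithmetic; virt_to_page()
 * and page_to_virt() are inverses of each other for valid addresses.
 */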

#define ARCH_PFN_OFFSET		(PAGE_OFFSET >> PAGE_SHIFT)
#define pfn_valid(pfn)		((pfn) < max_mapnr)

#define virt_addr_valid(kaddr)						\
	(((void *)(kaddr) >= (void *)PAGE_OFFSET) && MAP_NR (kaddr) < max_mapnr)


#define __pa(x)		__virt_to_phys ((unsigned long)(x))
#define __va(x)		((void *)__phys_to_virt ((unsigned long)(x)))


#include <asm-generic/memory_model.h>
#include <asm-generic/page.h>

#endif /* __KERNEL__ */

#endif /* __V850_PAGE_H__ */