#ifndef __ASM_SH_PAGE_H
#define __ASM_SH_PAGE_H

/*
 * Copyright (C) 1999  Niibe Yutaka
 */

/*
   [ P0/U0 (virtual) ]		0x00000000     <------ User space
   [ P1 (fixed)   cached ]	0x80000000     <------ Kernel space
   [ P2 (fixed)   non-cacheable]	0xA0000000     <------ Physical access
   [ P3 (virtual) cached ]	0xC0000000     <------ vmalloced area
   [ P4 control   ]		0xE0000000
 */
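/*
 * Illustrative example (assuming the traditional 29-bit SH-3/SH-4
 * physical mapping): physical address 0x0c000000 is reached cached
 * through P1 at 0x8c000000 and uncached through P2 at 0xac000000;
 * the fixed segments are just the physical address plus a constant
 * segment base.
 */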

#ifdef __KERNEL__

/* PAGE_SHIFT determines the page size */
#if defined(CONFIG_PAGE_SIZE_4KB)
# define PAGE_SHIFT	12
#elif defined(CONFIG_PAGE_SIZE_8KB)
# define PAGE_SHIFT	13
#elif defined(CONFIG_PAGE_SIZE_64KB)
# define PAGE_SHIFT	16
#else
# error "Bogus kernel page size?"
#endif

#ifdef __ASSEMBLY__
#define PAGE_SIZE	(1 << PAGE_SHIFT)
#else
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#endif

#define PAGE_MASK	(~(PAGE_SIZE-1))
#define PTE_MASK	PAGE_MASK
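/*
 * Worked example: with CONFIG_PAGE_SIZE_4KB, PAGE_SHIFT is 12, so
 * PAGE_SIZE is (1UL << 12) == 4096 and PAGE_MASK is ~(4096 - 1) ==
 * 0xfffff000, which clears the low 12 offset bits of an address.
 */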

#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#define HPAGE_SHIFT	16
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
#define HPAGE_SHIFT	18
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
#define HPAGE_SHIFT	20
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
#define HPAGE_SHIFT	22
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB)
#define HPAGE_SHIFT	26
#endif

#ifdef CONFIG_HUGETLB_PAGE
#define HPAGE_SIZE		(1UL << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE-1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT-PAGE_SHIFT)
#endif
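/*
 * E.g. CONFIG_HUGETLB_PAGE_SIZE_64K on a 4K base page kernel gives
 * HUGETLB_PAGE_ORDER = 16 - 12 = 4, i.e. each huge page spans
 * 2^4 == 16 base pages.
 */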

#ifndef __ASSEMBLY__

extern void (*clear_page)(void *to);
extern void (*copy_page)(void *to, void *from);

extern unsigned long shm_align_mask;
extern unsigned long max_low_pfn, min_low_pfn;
extern unsigned long memory_start, memory_end;

#ifdef CONFIG_MMU
extern void clear_page_slow(void *to);
extern void copy_page_slow(void *to, void *from);
#else
extern void clear_page_nommu(void *to);
extern void copy_page_nommu(void *to, void *from);
#endif

#if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU) && \
	(defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB))
struct page;
struct vm_area_struct;
extern void clear_user_page(void *to, unsigned long address, struct page *page);
#ifdef CONFIG_CPU_SH4
extern void copy_user_highpage(struct page *to, struct page *from,
			       unsigned long vaddr, struct vm_area_struct *vma);
#define __HAVE_ARCH_COPY_USER_HIGHPAGE
#endif
#else
#define clear_user_page(page, vaddr, pg)	clear_page(page)
#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
#endif
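/*
 * The dedicated clear_user_page()/copy_user_highpage() paths above are
 * presumably needed because SH-4 (and the SH7705 32K cache mode) have
 * virtually indexed caches that can alias: the same physical page may
 * live at several cache indices, so user-page operations must take the
 * user-space virtual address into account. On other configurations the
 * plain clear_page()/copy_page() function pointers suffice.
 */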

/*
 * These are used to make use of C type-checking.
 */
#ifdef CONFIG_X2TLB
typedef struct { unsigned long pte_low, pte_high; } pte_t;
typedef struct { unsigned long long pgprot; } pgprot_t;
typedef struct { unsigned long long pgd; } pgd_t;
#define pte_val(x) \
	((x).pte_low | ((unsigned long long)(x).pte_high << 32))
#define __pte(x) \
	({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
#else
typedef struct { unsigned long pte_low; } pte_t;
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct { unsigned long pgd; } pgd_t;
#define pte_val(x)	((x).pte_low)
#define __pte(x)	((pte_t) { (x) } )
#endif
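/*
 * Round-trip sketch for the X2TLB case: __pte(0x123456789ULL) stores
 * pte_low == 0x23456789 and pte_high == 0x1, and pte_val() reassembles
 * 0x123456789 by shifting pte_high back up by 32 bits.
 */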

#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define __pgd(x)	((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )

#endif /* !__ASSEMBLY__ */

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE-1)&PAGE_MASK)
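/*
 * E.g. with 4K pages, PAGE_ALIGN(0x1234) == 0x2000, while an already
 * aligned address is left untouched: PAGE_ALIGN(0x2000) == 0x2000.
 */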

/*
 * IF YOU CHANGE THIS, PLEASE ALSO CHANGE
 *
 *	arch/sh/kernel/vmlinux.lds.S
 *
 * which has the same constant encoded.
 */

#define __MEMORY_START		CONFIG_MEMORY_START
#define __MEMORY_SIZE		CONFIG_MEMORY_SIZE

#define PAGE_OFFSET		CONFIG_PAGE_OFFSET
#define __pa(x)			((unsigned long)(x)-PAGE_OFFSET)
#define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
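/*
 * Example (assuming the common CONFIG_PAGE_OFFSET of 0x80000000):
 * __pa(0x8c000000) == 0x0c000000 and __va(0x0c000000) == (void *)0x8c000000;
 * the macros translate between the P1 kernel segment and physical
 * addresses by a constant offset.
 */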

#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)

/* PFN start number, because of __MEMORY_START */
#define PFN_START		(__MEMORY_START >> PAGE_SHIFT)
#define ARCH_PFN_OFFSET		(PFN_START)
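/*
 * E.g. with __MEMORY_START at 0x0c000000 (a common board setting) and
 * 4K pages, PFN_START == 0x0c000, so the first struct page corresponds
 * to physical address 0x0c000000 rather than to pfn 0.
 */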
#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#ifdef CONFIG_FLATMEM
#define pfn_valid(pfn)		((pfn) >= min_low_pfn && (pfn) < max_low_pfn)
#endif
#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)

#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#include <asm-generic/memory_model.h>
#include <asm-generic/page.h>

/* vDSO support */
#ifdef CONFIG_VSYSCALL
#define __HAVE_ARCH_GATE_AREA
#endif

/*
 * SLUB defaults to 8-byte alignment, but we're only interested in 4.
 * SLAB defaults to BYTES_PER_WORD, which ends up being the same anyway.
 */
#ifdef CONFIG_SUPERH32
#define ARCH_KMALLOC_MINALIGN	4
#define ARCH_SLAB_MINALIGN	4
#else
/*
 * If gcc inlines memset(), it will use st.q instructions, so kmalloc()
 * allocations need to be 8-byte aligned. Without this, the alignment
 * would default to BYTES_PER_WORD, i.e. only 4, since sizeof(long) ==
 * sizeof(void *) == 4 on sh64 at the moment.
 */
#define ARCH_KMALLOC_MINALIGN	8

/*
 * We want 8-byte alignment for the slab caches as well, otherwise we have
 * the same BYTES_PER_WORD (sizeof(void *)) minimum alignment in
 * kmem_cache_create().
 */
#define ARCH_SLAB_MINALIGN	8
#endif

#endif /* __KERNEL__ */
#endif /* __ASM_SH_PAGE_H */