/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
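
/*
 * Walk the kernel page tables and return the pte mapping @address, or
 * NULL if the address is not mapped. For an address covered by a large
 * (2MB) page this returns the pmd entry cast to a pte_t *, so callers
 * must be prepared to see either level.
 */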
pte_t *lookup_address(unsigned long address)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        if (pgd_none(*pgd))
                return NULL;
        pud = pud_offset(pgd, address);
        if (!pud_present(*pud))
                return NULL;
        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
                return NULL;
        if (pmd_large(*pmd))
                return (pte_t *)pmd;
        pte = pte_offset_kernel(pmd, address);
        if (pte && !pte_present(*pte))
                pte = NULL;
        return pte;
}
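
/*
 * Illustrative sketch, not part of the original file: how a debugging
 * helper might use lookup_address() to report how a kernel address is
 * mapped. The function name is hypothetical; it is fenced off with
 * #if 0 because nothing in this file calls it.
 */
#if 0
static void example_dump_mapping(unsigned long address)
{
        pte_t *pte = lookup_address(address);

        if (!pte) {
                printk(KERN_DEBUG "%lx: not mapped\n", address);
                return;
        }
        printk(KERN_DEBUG "%lx: pte %lx\n", address, pte_val(*pte));
}
#endif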
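
/*
 * Replace one large kernel mapping with a freshly allocated page-table
 * page of PTRS_PER_PTE (512) 4k entries: the entry covering @address
 * gets @prot, all other entries get @ref_prot. Returns the new
 * page-table page, or NULL if the allocation failed.
 */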
static struct page *split_large_page(unsigned long address, pgprot_t prot,
                                     pgprot_t ref_prot)
{
        int i;
        unsigned long addr;
        struct page *base = alloc_pages(GFP_KERNEL, 0);
        pte_t *pbase;
        if (!base)
                return NULL;
        /*
         * page_private is used to track the number of entries in
         * the page table page that have non-standard attributes.
         */
        SetPagePrivate(base);
        page_private(base) = 0;

        address = __pa(address);
        addr = address & LARGE_PAGE_MASK;
        pbase = (pte_t *)page_address(base);
        for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
                pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
                                   addr == address ? prot : ref_prot);
        }
        return base;
}

static void cache_flush_page(void *adr)
{
        int i;
        for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
                asm volatile("clflush (%0)" :: "r" (adr + i));
}
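
/*
 * Flush the caches and TLBs for a list of split page-table pages. This
 * runs on every CPU via flush_map()/on_each_cpu(): clflush each page
 * when the CPU supports it, otherwise fall back to a full WBINVD, and
 * finish with a global TLB flush.
 */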
static void flush_kernel_map(void *arg)
{
        struct list_head *l = (struct list_head *)arg;
        struct page *pg;

        /*
         * When clflush is available, always use it because it is
         * much cheaper than WBINVD.
         */
        if (!cpu_has_clflush) {
                asm volatile("wbinvd" ::: "memory");
        } else {
                list_for_each_entry(pg, l, lru) {
                        void *adr = page_address(pg);
                        cache_flush_page(adr);
                }
        }
        __flush_tlb_all();
}

static inline void flush_map(struct list_head *l)
{
        on_each_cpu(flush_kernel_map, l, 1, 1);
}

static LIST_HEAD(deferred_pages); /* protected by init_mm.mmap_sem */
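
/*
 * Split page-table pages are queued on deferred_pages until the next
 * global_flush_tlb(); PG_arch_1 guards against queueing the same page
 * twice. Pages whose page_private() count has dropped back to zero are
 * freed there after the flush.
 */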
static inline void save_page(struct page *fpage)
{
        if (!test_and_set_bit(PG_arch_1, &fpage->flags))
                list_add(&fpage->lru, &deferred_pages);
}

/*
 * No more special protections in this 2MB area - revert to a
 * large page again.
 */
static void revert_page(unsigned long address, pgprot_t ref_prot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t large_pte;
        unsigned long pfn;

        pgd = pgd_offset_k(address);
        BUG_ON(pgd_none(*pgd));
        pud = pud_offset(pgd, address);
        BUG_ON(pud_none(*pud));
        pmd = pmd_offset(pud, address);
        BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
        pfn = (__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT;
        large_pte = pfn_pte(pfn, ref_prot);
        large_pte = pte_mkhuge(large_pte);
        set_pte((pte_t *)pmd, large_pte);
}
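
/*
 * Apply @prot to the single page at @address/@pfn, splitting a large
 * mapping first if necessary; @ref_prot is used for the surrounding
 * entries. page_private() of the page-table page counts how many of
 * its entries deviate from @ref_prot; when that count returns to zero
 * the large mapping is restored and the page-table page is queued for
 * freeing.
 */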
static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
                   pgprot_t ref_prot)
{
        pte_t *kpte;
        struct page *kpte_page;
        pgprot_t ref_prot2;

        kpte = lookup_address(address);
        if (!kpte)
                return 0;
        kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
        BUG_ON(PageLRU(kpte_page));
        BUG_ON(PageCompound(kpte_page));
        if (pgprot_val(prot) != pgprot_val(ref_prot)) {
                if (!pte_huge(*kpte)) {
                        set_pte(kpte, pfn_pte(pfn, prot));
                } else {
                        /*
                         * split_large_page will take the reference for this
                         * change_page_attr on the split page.
                         */
                        struct page *split;
                        ref_prot2 = pte_pgprot(pte_clrhuge(*kpte));
                        split = split_large_page(address, prot, ref_prot2);
                        if (!split)
                                return -ENOMEM;
                        set_pte(kpte, mk_pte(split, ref_prot2));
                        kpte_page = split;
                }
                page_private(kpte_page)++;
        } else if (!pte_huge(*kpte)) {
                set_pte(kpte, pfn_pte(pfn, ref_prot));
                BUG_ON(page_private(kpte_page) == 0);
                page_private(kpte_page)--;
        } else
                BUG();

        /* On x86-64 the direct mapping set up at boot does not use 4k pages. */
        BUG_ON(PageReserved(kpte_page));

        save_page(kpte_page);
        if (page_private(kpte_page) == 0)
                revert_page(address, ref_prot);
        return 0;
}

/*
 * Change the page attributes of a page in the linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
 */
int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
{
        int err = 0, kernel_map = 0;
        int i;

        if (address >= __START_KERNEL_map
            && address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {
                address = (unsigned long)__va(__pa(address));
                kernel_map = 1;
        }

        down_write(&init_mm.mmap_sem);
        for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
                unsigned long pfn = __pa(address) >> PAGE_SHIFT;

                if (!kernel_map || pte_present(pfn_pte(0, prot))) {
                        err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
                        if (err)
                                break;
                }
                /*
                 * Handle the kernel mapping too, which aliases part of
                 * lowmem.
                 */
                if (__pa(address) < KERNEL_TEXT_SIZE) {
                        unsigned long addr2;
                        pgprot_t prot2;
                        addr2 = __START_KERNEL_map + __pa(address);
                        /* Make sure the kernel mappings stay executable. */
                        prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
                        err = __change_page_attr(addr2, pfn, prot2,
                                                 PAGE_KERNEL_EXEC);
                }
        }
        up_write(&init_mm.mmap_sem);
        return err;
}

/* Don't call this for MMIO areas that may not have a mem_map entry. */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
        unsigned long addr = (unsigned long)page_address(page);
        return change_page_attr_addr(addr, numpages, prot);
}

void global_flush_tlb(void)
{
        struct page *pg, *next;
        struct list_head l;

        down_read(&init_mm.mmap_sem);
        list_replace_init(&deferred_pages, &l);
        up_read(&init_mm.mmap_sem);

        flush_map(&l);

        list_for_each_entry_safe(pg, next, &l, lru) {
                list_del(&pg->lru);
                clear_bit(PG_arch_1, &pg->flags);
                if (page_private(pg) != 0)
                        continue;
                ClearPagePrivate(pg);
                __free_page(pg);
        }
}

EXPORT_SYMBOL(change_page_attr);
EXPORT_SYMBOL(global_flush_tlb);
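
/*
 * Illustrative usage sketch, not part of the original file: a driver
 * that temporarily remaps one kernel page uncached and later restores
 * the default write-back protection. The function names are
 * hypothetical; the block is fenced off with #if 0 because nothing in
 * this file calls it.
 */
#if 0
static int example_set_uncached(struct page *pg)
{
        int err = change_page_attr(pg, 1, PAGE_KERNEL_NOCACHE);
        if (err)
                return err;
        /* Required after change_page_attr(): flush stale mappings on all CPUs. */
        global_flush_tlb();
        return 0;
}

static void example_restore_cached(struct page *pg)
{
        change_page_attr(pg, 1, PAGE_KERNEL);
        global_flush_tlb();
}
#endif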