/*
 *  linux/arch/arm/mm/fault-armv.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/gfp.h>

#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "mm.h"

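/*
 * Memory type used when a PTE has to be downgraded to avoid cache
 * aliasing between shared mappings.  Defaults to "bufferable"
 * (uncached, write buffer left on); check_writebuffer_bugs() switches
 * it to fully uncached if the write buffer itself aliases on physical
 * address.
 */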
static pteval_t shared_pte_mask = L_PTE_MT_BUFFERABLE;

#if __LINUX_ARM_ARCH__ < 6
/*
 * We take the easy way out of this problem - we make the
 * PTE uncacheable.  However, we leave the write buffer on.
 *
 * Note that the pte lock held when calling update_mmu_cache must also
 * guard the pte (somewhere else in the same mm) that we modify here.
 * Therefore those configurations which might call adjust_pte (those
 * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
 */
static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
        unsigned long pfn, pte_t *ptep)
{
        pte_t entry = *ptep;
        int ret;

        /*
         * If this page is present, it's actually being shared.
         */
        ret = pte_present(entry);

        /*
         * If this page isn't present, or is already set up to
         * fault (i.e., is old), we can safely ignore any issues.
         */
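        /*
         * Writeback/invalidate this page from the CPU and outer caches,
         * then switch the PTE to the shared_pte_mask memory type so all
         * aliases see the same data; the TLB entry is flushed so the new
         * type takes effect immediately.
         */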
        if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
                flush_cache_page(vma, address, pfn);
                outer_flush_range((pfn << PAGE_SHIFT),
                                  (pfn << PAGE_SHIFT) + PAGE_SIZE);
                pte_val(entry) &= ~L_PTE_MT_MASK;
                pte_val(entry) |= shared_pte_mask;
                set_pte_at(vma->vm_mm, address, ptep, entry);
                flush_tlb_page(vma, address);
        }

        return ret;
}

#if USE_SPLIT_PTLOCKS
/*
 * If we are using split PTE locks, then we need to take the page
 * lock here.  Otherwise we are using the shared mm->page_table_lock,
 * which is already held, so we must not take it again.
 */
static inline void do_pte_lock(spinlock_t *ptl)
{
        /*
         * Use the nested version here to indicate that we are already
         * holding one similar spinlock.
         */
        spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
}

static inline void do_pte_unlock(spinlock_t *ptl)
{
        spin_unlock(ptl);
}
#else /* !USE_SPLIT_PTLOCKS */
static inline void do_pte_lock(spinlock_t *ptl) {}
static inline void do_pte_unlock(spinlock_t *ptl) {}
#endif /* USE_SPLIT_PTLOCKS */

static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
        unsigned long pfn)
{
        spinlock_t *ptl;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int ret;

        pgd = pgd_offset(vma->vm_mm, address);
        if (pgd_none_or_clear_bad(pgd))
                return 0;

        pud = pud_offset(pgd, address);
        if (pud_none_or_clear_bad(pud))
                return 0;

        pmd = pmd_offset(pud, address);
        if (pmd_none_or_clear_bad(pmd))
                return 0;

        /*
         * This is called while another page table is mapped, so we
         * must use the nested version.  This also means we need to
         * open-code the spin-locking.
         */
        ptl = pte_lockptr(vma->vm_mm, pmd);
        pte = pte_offset_map(pmd, address);
        do_pte_lock(ptl);

        ret = do_adjust_pte(vma, address, pfn, pte);

        do_pte_unlock(ptl);
        pte_unmap(pte);

        return ret;
}

static void
make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
        unsigned long addr, pte_t *ptep, unsigned long pfn)
{
        struct mm_struct *mm = vma->vm_mm;
        struct vm_area_struct *mpnt;
        struct prio_tree_iter iter;
        unsigned long offset;
        pgoff_t pgoff;
        int aliases = 0;

        pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);

        /*
         * If we have any shared mappings that are in the same mm
         * space, then we need to handle them specially to maintain
         * cache coherency.
         */
        flush_dcache_mmap_lock(mapping);
        vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
                /*
                 * If this VMA is not in our MM, we can ignore it.
                 * Note that we intentionally mask out the VMA
                 * that we are fixing up.
                 */
                if (mpnt->vm_mm != mm || mpnt == vma)
                        continue;
                if (!(mpnt->vm_flags & VM_MAYSHARE))
                        continue;
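                /*
                 * Fix up the PTE in the other VMA at the user address
                 * that maps the same page of the object.
                 */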
                offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
                aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn);
        }
        flush_dcache_mmap_unlock(mapping);
        if (aliases)
                do_adjust_pte(vma, addr, pfn, ptep);
}

/*
 * Take care of architecture-specific things when placing a new PTE into
 * a page table, or changing an existing PTE.  Basically, there are two
 * things that we need to take care of:
 *
 *  1. If PG_dcache_clean is not set for the page, we need to ensure
 *     that any cache entries for the kernel's virtual memory
 *     range are written back to the page.
 *  2. If we have multiple shared mappings of the same space in
 *     an object, we need to deal with the cache aliasing issues.
 *
 * Note that the pte lock will be held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
        pte_t *ptep)
{
        unsigned long pfn = pte_pfn(*ptep);
        struct address_space *mapping;
        struct page *page;

        if (!pfn_valid(pfn))
                return;

        /*
         * The zero page is never written to, so never has any dirty
         * cache lines, and therefore never needs to be flushed.
         */
        page = pfn_to_page(pfn);
        if (page == ZERO_PAGE(0))
                return;

        mapping = page_mapping(page);
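        /*
         * If PG_dcache_clean was clear, the kernel mapping may still hold
         * dirty cache lines for this page; write them back before any
         * user mapping relies on the data.
         */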
        if (!test_and_set_bit(PG_dcache_clean, &page->flags))
                __flush_dcache_page(mapping, page);
        if (mapping) {
                if (cache_is_vivt())
                        make_coherent(mapping, vma, addr, ptep, pfn);
                else if (vma->vm_flags & VM_EXEC)
                        __flush_icache_all();
        }
}
#endif  /* __LINUX_ARM_ARCH__ < 6 */

/*
 * Check whether the write buffer has physical address aliasing
 * issues.  If it has, we need to avoid them for the case where
 * we have several shared mappings of the same object in user
 * space.
 */
static int __init check_writebuffer(unsigned long *p1, unsigned long *p2)
{
        register unsigned long zero = 0, one = 1, val;

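        /*
         * p1 and p2 are two virtual mappings of the same physical page.
         * Write 1 through the first and 0 through the second, then read
         * the first back: if the write buffer aliases on physical
         * address, the second write is not seen and we read back the
         * stale 1, so we return non-zero.
         */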
        local_irq_disable();
        mb();
        *p1 = one;
        mb();
        *p2 = zero;
        mb();
        val = *p1;
        mb();
        local_irq_enable();
        return val != zero;
}

void __init check_writebuffer_bugs(void)
{
        struct page *page;
        const char *reason;
        unsigned long v = 1;

        printk(KERN_INFO "CPU: Testing write buffer coherency: ");

        page = alloc_page(GFP_KERNEL);
        if (page) {
                unsigned long *p1, *p2;
                pgprot_t prot = __pgprot_modify(PAGE_KERNEL,
                                        L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE);

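                /*
                 * Create two distinct virtual aliases of the page, both
                 * with the bufferable (uncached) memory type, so the data
                 * cache does not get in the way of the write buffer test.
                 */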
                p1 = vmap(&page, 1, VM_IOREMAP, prot);
                p2 = vmap(&page, 1, VM_IOREMAP, prot);

                if (p1 && p2) {
                        v = check_writebuffer(p1, p2);
                        reason = "enabling work-around";
                } else {
                        reason = "unable to map memory";
                }

                vunmap(p1);
                vunmap(p2);
                put_page(page);
        } else {
                reason = "unable to grab page";
        }

        if (v) {
                printk("failed, %s\n", reason);
                shared_pte_mask = L_PTE_MT_UNCACHED;
        } else {
                printk("ok\n");
        }
}