/*
 *  linux/arch/arm/mm/fault-armv.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/pagemap.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

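/*
 * Bits cleared from a PTE by adjust_pte() when a page has additional
 * shared user mappings.  check_writebuffer_bugs() may add
 * L_PTE_BUFFERABLE at boot if the write buffer cannot cope with
 * physical address aliases.
 */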
static unsigned long shared_pte_mask = L_PTE_CACHEABLE;

/*
 * We take the easy way out of this problem - we make the
 * PTE uncacheable.  However, we leave the write buffer on.
 *
 * Note that the pte lock held when calling update_mmu_cache must also
 * guard the pte (somewhere else in the same mm) that we modify here.
 * Therefore those configurations which might call adjust_pte (those
 * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
 */
static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte, entry;
	int ret = 0;

	pgd = pgd_offset(vma->vm_mm, address);
	if (pgd_none(*pgd))
		goto no_pgd;
	if (pgd_bad(*pgd))
		goto bad_pgd;

	pmd = pmd_offset(pgd, address);
	if (pmd_none(*pmd))
		goto no_pmd;
	if (pmd_bad(*pmd))
		goto bad_pmd;

	pte = pte_offset_map(pmd, address);
	entry = *pte;

	/*
	 * If this page isn't present, or is already set up to
	 * fault (i.e. is old), we can safely ignore any issues.
	 */
	if (pte_present(entry) && pte_val(entry) & shared_pte_mask) {
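		/*
		 * Write back any cached data for the old cacheable
		 * mapping, mark the PTE uncacheable, and zap the stale
		 * TLB entry so the new attributes take effect.
		 */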
		flush_cache_page(vma, address, pte_pfn(entry));
		pte_val(entry) &= ~shared_pte_mask;
		set_pte(pte, entry);
		flush_tlb_page(vma, address);
		ret = 1;
	}
	pte_unmap(pte);
	return ret;

bad_pgd:
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
no_pgd:
	return 0;

bad_pmd:
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
no_pmd:
	return 0;
}

static void
make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *mpnt;
	struct prio_tree_iter iter;
	unsigned long offset;
	pgoff_t pgoff;
	int aliases = 0;

	pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);

	/*
	 * If we have any shared mappings that are in the same mm
	 * space, then we need to handle them specially to maintain
	 * cache coherency.
	 */
	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * If this VMA is not in our MM, we can ignore it.
		 * Note that we intentionally mask out the VMA
		 * that we are fixing up.
		 */
		if (mpnt->vm_mm != mm || mpnt == vma)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		aliases += adjust_pte(mpnt, mpnt->vm_start + offset);
	}
	flush_dcache_mmap_unlock(mapping);
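	/*
	 * If other user-space mappings of this page were found, make the
	 * faulting mapping uncacheable as well; otherwise just make sure
	 * the cache is clean for this page.
	 */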
	if (aliases)
		adjust_pte(vma, addr);
	else
		flush_cache_page(vma, addr, pfn);
}

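/* Implemented in arch/arm/mm/flush.c. */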
void __flush_dcache_page(struct address_space *mapping, struct page *page);

/*
 * Take care of architecture specific things when placing a new PTE into
 * a page table, or changing an existing PTE.  Basically, there are two
 * things that we need to take care of:
 *
 *  1. If PG_dcache_dirty is set for the page, we need to ensure
 *     that any cache entries for the kernel's virtual memory
 *     range are written back to the page.
 *  2. If we have multiple shared mappings of the same space in
 *     an object, we need to deal with the cache aliasing issues.
 *
 * Note that the pte lock will be held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct address_space *mapping;
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	mapping = page_mapping(page);
	if (mapping) {
		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);

		if (dirty)
			__flush_dcache_page(mapping, page);

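		/*
		 * On a VIVT data cache the same physical page can sit in
		 * the cache under several different virtual addresses, so
		 * any other shared user mappings must be made coherent.
		 */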
		if (cache_is_vivt())
			make_coherent(mapping, vma, addr, pfn);
	}
}

/*
 * Check whether the write buffer has physical address aliasing
 * issues.  If it has, we need to avoid them for the case where
 * we have several shared mappings of the same object in user
 * space.
 */
static int __init check_writebuffer(unsigned long *p1, unsigned long *p2)
{
	register unsigned long zero = 0, one = 1, val;

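	/*
	 * p1 and p2 are two virtual mappings of the same physical word.
	 * Write through one, overwrite through the other, then read the
	 * first back: if the read still returns the first value, the
	 * write buffer does not cope with physical address aliases.
	 */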
	local_irq_disable();
	mb();
	*p1 = one;
	mb();
	*p2 = zero;
	mb();
	val = *p1;
	mb();
	local_irq_enable();
	return val != zero;
}

void __init check_writebuffer_bugs(void)
{
	struct page *page;
	const char *reason;
	unsigned long v = 1;

	printk(KERN_INFO "CPU: Testing write buffer coherency: ");

	page = alloc_page(GFP_KERNEL);
	if (page) {
		unsigned long *p1, *p2;
		pgprot_t prot = __pgprot(L_PTE_PRESENT|L_PTE_YOUNG|
					 L_PTE_DIRTY|L_PTE_WRITE|
					 L_PTE_BUFFERABLE);

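		/*
		 * Map the same physical page at two different virtual
		 * addresses, bufferable but uncacheable, so the write
		 * buffer behaviour can be observed directly.
		 */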
		p1 = vmap(&page, 1, VM_IOREMAP, prot);
		p2 = vmap(&page, 1, VM_IOREMAP, prot);

		if (p1 && p2) {
			v = check_writebuffer(p1, p2);
			reason = "enabling work-around";
		} else {
			reason = "unable to map memory";
		}

		vunmap(p1);
		vunmap(p2);
		put_page(page);
	} else {
| 210 | reason = "unable to grab page\n"; |
| 211 | } |

	if (v) {
		printk("failed, %s\n", reason);
		shared_pte_mask |= L_PTE_BUFFERABLE;
	} else {
		printk("ok\n");
	}
}