/*
 *  linux/arch/arm/mm/fault-armv.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/pagemap.h>

#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

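/*
 * PTE bits to clear from a user-space mapping to make it coherent
 * with its aliases.  check_writebuffer_bugs() below may also add
 * L_PTE_BUFFERABLE at boot if the write buffer aliases physically.
 */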
static unsigned long shared_pte_mask = L_PTE_CACHEABLE;

/*
 * We take the easy way out of this problem - we make the
 * PTE uncacheable.  However, we leave the write buffer on.
 *
 * Note that the pte lock held when calling update_mmu_cache must also
 * guard the pte (somewhere else in the same mm) that we modify here.
 * Therefore those configurations which might call adjust_pte (those
 * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
 */
static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte, entry;
	int ret;

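	/* Walk the page tables to find the PTE for this address. */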
	pgd = pgd_offset(vma->vm_mm, address);
	if (pgd_none(*pgd))
		goto no_pgd;
	if (pgd_bad(*pgd))
		goto bad_pgd;

	pmd = pmd_offset(pgd, address);
	if (pmd_none(*pmd))
		goto no_pmd;
	if (pmd_bad(*pmd))
		goto bad_pmd;

	pte = pte_offset_map(pmd, address);
	entry = *pte;

	/*
	 * If this page is present, it's actually being shared.
	 */
	ret = pte_present(entry);

	/*
	 * If this page isn't present, or is already set up to
	 * fault (i.e., is old), we can safely ignore any issues.
	 */
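	/*
	 * Fix-up order matters: write back any cached data for the
	 * page, clear the cacheable bits in the PTE, install the new
	 * PTE, then invalidate the now-stale TLB entry.
	 */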
	if (ret && pte_val(entry) & shared_pte_mask) {
		flush_cache_page(vma, address, pte_pfn(entry));
		pte_val(entry) &= ~shared_pte_mask;
		set_pte_at(vma->vm_mm, address, pte, entry);
		flush_tlb_page(vma, address);
	}
	pte_unmap(pte);
	return ret;

bad_pgd:
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
no_pgd:
	return 0;

bad_pmd:
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
no_pmd:
	return 0;
}

static void
make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *mpnt;
	struct prio_tree_iter iter;
	unsigned long offset;
	pgoff_t pgoff;
	int aliases = 0;

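	/*
	 * Page index of the faulting address within the backing object;
	 * other VMAs mapping the same page are found by searching the
	 * mapping's prio tree for this index.
	 */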
	pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);

	/*
	 * If we have any shared mappings that are in the same mm
	 * space, then we need to handle them specially to maintain
	 * cache coherency.
	 */
	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * If this VMA is not in our MM, we can ignore it.
		 * Note that we intentionally mask out the VMA
		 * that we are fixing up.
		 */
		if (mpnt->vm_mm != mm || mpnt == vma)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		aliases += adjust_pte(mpnt, mpnt->vm_start + offset);
	}
	flush_dcache_mmap_unlock(mapping);
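	/*
	 * If other aliases were found (and made uncacheable above), the
	 * faulting mapping must be made uncacheable as well; otherwise
	 * flushing the page from the cache is sufficient.
	 */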
	if (aliases)
		adjust_pte(vma, addr);
	else
		flush_cache_page(vma, addr, pfn);
}

/*
 * Take care of architecture specific things when placing a new PTE into
 * a page table, or changing an existing PTE.  Basically, there are two
 * things that we need to take care of:
 *
 *  1. If PG_dcache_dirty is set for the page, we need to ensure
 *     that any cache entries for the kernel's virtual memory
 *     range are written back to the page.
 *  2. If we have multiple shared mappings of the same space in
 *     an object, we need to deal with the cache aliasing issues.
 *
 * Note that the pte lock will be held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct address_space *mapping;
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	mapping = page_mapping(page);
	if (mapping) {
#ifndef CONFIG_SMP
		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);

		if (dirty)
			__flush_dcache_page(mapping, page);
#endif

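		/*
		 * A VIVT cache indexes by virtual address, so user-space
		 * aliases of this page may each hold a stale copy and
		 * must be fixed up; otherwise only executable mappings
		 * need their I-cache invalidated.
		 */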
		if (cache_is_vivt())
			make_coherent(mapping, vma, addr, pfn);
		else if (vma->vm_flags & VM_EXEC)
			__flush_icache_all();
	}
}

/*
 * Check whether the write buffer has physical address aliasing
 * issues.  If it has, we need to avoid them for the case where
 * we have several shared mappings of the same object in user
 * space.
 */
static int __init check_writebuffer(unsigned long *p1, unsigned long *p2)
{
	register unsigned long zero = 0, one = 1, val;

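	/*
	 * p1 and p2 are two virtual aliases of the same physical page.
	 * Write through both, then read back through the first: on a
	 * coherent system the read observes the second write (zero), so
	 * a non-zero result indicates a write buffer aliasing bug.
	 */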
	local_irq_disable();
	mb();
	*p1 = one;
	mb();
	*p2 = zero;
	mb();
	val = *p1;
	mb();
	local_irq_enable();
	return val != zero;
}

void __init check_writebuffer_bugs(void)
{
	struct page *page;
	const char *reason;
	unsigned long v = 1;

	printk(KERN_INFO "CPU: Testing write buffer coherency: ");

	page = alloc_page(GFP_KERNEL);
	if (page) {
		unsigned long *p1, *p2;
		pgprot_t prot = __pgprot(L_PTE_PRESENT|L_PTE_YOUNG|
					 L_PTE_DIRTY|L_PTE_WRITE|
					 L_PTE_BUFFERABLE);

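		/*
		 * Map the same page at two virtual addresses with
		 * bufferable but uncacheable attributes, creating the
		 * aliases that exercise only the write buffer.
		 */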
		p1 = vmap(&page, 1, VM_IOREMAP, prot);
		p2 = vmap(&page, 1, VM_IOREMAP, prot);

		if (p1 && p2) {
			v = check_writebuffer(p1, p2);
			reason = "enabling work-around";
		} else {
			reason = "unable to map memory\n";
		}

		vunmap(p1);
		vunmap(p2);
		put_page(page);
	} else {
		reason = "unable to grab page\n";
	}

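	/*
	 * v is non-zero if the test failed or could not be run at all;
	 * either way, play safe and clear the bufferable bit in shared
	 * user mappings too.
	 */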
	if (v) {
		printk("failed, %s\n", reason);
		shared_pte_mask |= L_PTE_BUFFERABLE;
	} else {
		printk("ok\n");
	}
}