/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/highmem.h>
#include <asm/smp_plat.h>
#include <asm/tlbflush.h>
#include <linux/hugetlb.h>

#include "mm.h"

#ifdef CONFIG_ARM_HEAVY_MB
void (*soc_mb)(void);

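/*
 * Heavy memory barrier: synchronise the outer cache (when outer cache
 * sync is configured) and then call any SoC-specific barrier hook
 * registered in soc_mb.
 */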
void arm_heavy_mb(void)
{
#ifdef CONFIG_OUTER_CACHE_SYNC
	if (outer_cache.sync)
		outer_cache.sync();
#endif
	if (soc_mb)
		soc_mb();
}
EXPORT_SYMBOL(arm_heavy_mb);
#endif

#ifdef CONFIG_CPU_CACHE_VIPT

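/*
 * Map the page at a kernel alias with the same cache colour as the
 * user virtual address, then clean and invalidate that alias by MVA
 * range and drain the write buffer.
 */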
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
	unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	const int zero = 0;

	set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));

	asm(	"mcrr	p15, 0, %1, %0, c14\n"
	"	mcr	p15, 0, %2, c7, c10, 4"
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - 1), "r" (zero)
	    : "cc");
}

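/*
 * Map the page at a colour-matched kernel alias and flush the
 * instruction cache for the affected range via flush_icache_range().
 */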
static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
{
	unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	unsigned long offset = vaddr & (PAGE_SIZE - 1);
	unsigned long to;

	set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
	to = va + offset;
	flush_icache_range(to, to + len);
}

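/*
 * Flush all user cache state for an mm.  VIVT caches delegate to the
 * VIVT implementation; aliasing VIPT caches clean and invalidate the
 * whole D-cache and drain the write buffer instead.
 */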
void flush_cache_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_mm(mm);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}
}

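/*
 * Flush the caches for a range of user addresses.  As with
 * flush_cache_mm(), aliasing VIPT caches flush the whole D-cache;
 * the I-cache is then invalidated if the VMA is executable.
 */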
void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_range(vma, start, end);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}

	if (vma->vm_flags & VM_EXEC)
		__flush_icache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_page(vma, user_addr, pfn);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(pfn, user_addr);
		__flush_icache_all();
	}

	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
		__flush_icache_all();
}

#else
#define flush_pfn_alias(pfn,vaddr)		do { } while (0)
#define flush_icache_alias(pfn,vaddr,len)	do { } while (0)
#endif

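/*
 * Flags for __flush_ptrace_access(): FLAG_PA_IS_EXEC means the mapping
 * is executable; FLAG_PA_CORE_IN_MM means the calling CPU belongs to
 * the mm that owns the mapping.
 */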
#define FLAG_PA_IS_EXEC			1
#define FLAG_PA_CORE_IN_MM		2

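/* IPI handler: invalidate the I-cache on other CPUs via smp_call_function(). */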
static void flush_ptrace_access_other(void *args)
{
	__flush_icache_all();
}

static inline
void __flush_ptrace_access(struct page *page, unsigned long uaddr, void *kaddr,
			   unsigned long len, unsigned int flags)
{
	if (cache_is_vivt()) {
		if (flags & FLAG_PA_CORE_IN_MM) {
			unsigned long addr = (unsigned long)kaddr;
			__cpuc_coherent_kern_range(addr, addr + len);
		}
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(page_to_pfn(page), uaddr);
		__flush_icache_all();
		return;
	}

	/* VIPT non-aliasing D-cache */
	if (flags & FLAG_PA_IS_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		if (icache_is_vipt_aliasing())
			flush_icache_alias(page_to_pfn(page), uaddr, len);
		else
			__cpuc_coherent_kern_range(addr, addr + len);
		if (cache_ops_need_broadcast())
			smp_call_function(flush_ptrace_access_other,
					  NULL, 1);
	}
}

static
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr, unsigned long len)
{
	unsigned int flags = 0;
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
		flags |= FLAG_PA_CORE_IN_MM;
	if (vma->vm_flags & VM_EXEC)
		flags |= FLAG_PA_IS_EXEC;
	__flush_ptrace_access(page, uaddr, kaddr, len, flags);
}

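/*
 * Flush the uprobes execute-out-of-line (XOL) slot.  The slot is treated
 * as executable and as written by a CPU using the target mm, so both
 * flags are passed unconditionally.
 */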
void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
			     void *kaddr, unsigned long len)
{
	unsigned int flags = FLAG_PA_CORE_IN_MM|FLAG_PA_IS_EXEC;

	__flush_ptrace_access(page, uaddr, kaddr, len, flags);
}

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 *
 * Note that this code needs to run on the current CPU.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
#ifdef CONFIG_SMP
	preempt_disable();
#endif
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
	preempt_enable();
#endif
}

void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	/*
	 * Write back any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	if (!PageHighMem(page)) {
		size_t page_size = PAGE_SIZE << compound_order(page);
		__cpuc_flush_dcache_area(page_address(page), page_size);
	} else {
		unsigned long i;
		if (cache_is_vipt_nonaliasing()) {
			for (i = 0; i < (1 << compound_order(page)); i++) {
				void *addr = kmap_atomic(page + i);
				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
				kunmap_atomic(addr);
			}
		} else {
			for (i = 0; i < (1 << compound_order(page)); i++) {
				void *addr = kmap_high_get(page + i);
				if (addr) {
					__cpuc_flush_dcache_area(addr, PAGE_SIZE);
					kunmap_high(page + i);
				}
			}
		}
	}

	/*
	 * If this is a page cache page, and we have an aliasing VIPT cache,
	 * we only need to do one flush - which would be at the relevant
	 * userspace colour, which is congruent with page->index.
	 */
	if (mapping && cache_is_vipt_aliasing())
		flush_pfn_alias(page_to_pfn(page),
				page->index << PAGE_SHIFT);
}

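/*
 * Flush every shared userspace mapping of this page in the current mm,
 * so that VIVT aliases of the data are made coherent.
 */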
static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	pgoff_t pgoff;

	/*
	 * There are possible user space mappings of this page:
	 * - VIVT cache: we need to also write back and invalidate all user
	 *   data in the current VM view associated with this page.
	 * - aliasing VIPT: we only need to find one mapping of this page.
	 */
	pgoff = page->index;

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		/*
		 * If this VMA is not in our MM, we can ignore it.
		 */
		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}

#if __LINUX_ARM_ARCH__ >= 6
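/*
 * Ensure I/D cache coherency for the page referenced by a PTE that is
 * being installed: flush the D-cache if the page is not yet marked
 * clean, and invalidate the I-cache for executable mappings.
 */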
void __sync_icache_dcache(pte_t pteval)
{
	unsigned long pfn;
	struct page *page;
	struct address_space *mapping;

	if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
		/* only flush non-aliasing VIPT caches for exec mappings */
		return;
	pfn = pte_pfn(pteval);
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (cache_is_vipt_aliasing())
		mapping = page_mapping(page);
	else
		mapping = NULL;

	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(mapping, page);

	if (pte_exec(pteval))
		__flush_icache_all();
}
#endif

/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 * - VIPT non-aliasing cache: fully coherent so nothing required.
 * - VIVT: fully aliasing, so we need to handle every alias in our
 *   current VM view.
 * - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 *
 * Note that we disable the lazy flush for SMP configurations where
 * the cache maintenance operations are not automatically broadcast.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);

	if (!cache_ops_need_broadcast() &&
	    mapping && !page_mapcount(page))
		clear_bit(PG_dcache_clean, &page->flags);
	else {
		__flush_dcache_page(mapping, page);
		if (mapping && cache_is_vivt())
			__flush_dcache_aliases(mapping, page);
		else if (mapping)
			__flush_icache_all();
		set_bit(PG_dcache_clean, &page->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * Ensure cache coherency for the kernel mapping of this page. We can
 * assume that the page is pinned via kmap.
 *
 * If the page only exists in the page cache and there are no user
 * space mappings, this is a no-op since the page was already marked
 * dirty at creation.  Otherwise, we need to flush the dirty kernel
 * cache lines directly.
 */
void flush_kernel_dcache_page(struct page *page)
{
	if (cache_is_vivt() || cache_is_vipt_aliasing()) {
		struct address_space *mapping;

		mapping = page_mapping(page);

		if (!mapping || mapping_mapped(mapping)) {
			void *addr;

			addr = page_address(page);
			/*
			 * kmap_atomic() doesn't set the page virtual
			 * address for highmem pages, and
			 * kunmap_atomic() takes care of cache
			 * flushing already.
			 */
			if (!IS_ENABLED(CONFIG_HIGHMEM) || addr)
				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
		}
	}
}
EXPORT_SYMBOL(flush_kernel_dcache_page);

/*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data.  The expected sequence is:
 *
 *  get_user_pages()
 *    -> flush_anon_page
 *  memcpy() to/from page
 *  if written to page, flush_dcache_page()
 */
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	unsigned long pfn;

	/* VIPT non-aliasing caches need do nothing */
	if (cache_is_vipt_nonaliasing())
		return;

	/*
	 * Write back and invalidate userspace mapping.
	 */
	pfn = page_to_pfn(page);
	if (cache_is_vivt()) {
		flush_cache_page(vma, vmaddr, pfn);
	} else {
		/*
		 * For aliasing VIPT, we can flush an alias of the
		 * userspace address only.
		 */
		flush_pfn_alias(pfn, vmaddr);
		__flush_icache_all();
	}

	/*
	 * Invalidate kernel mapping.  No data should be contained
	 * in this mapping of the page.  FIXME: this is overkill
	 * since we actually ask for a write-back and invalidate.
	 */
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}