/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_POWERPC_CACHEFLUSH_H
#define _ASM_POWERPC_CACHEFLUSH_H

#ifdef __KERNEL__

#include <linux/mm.h>
#include <asm/cputable.h>

/*
 * No cache flushing is required when address mappings are changed,
 * because the caches on PowerPCs are physically addressed.
 */
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_dup_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
#define flush_icache_page(vma, page)		do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Book3s has no ptesync after setting a pte, so without this ptesync it's
 * possible for a kernel virtual mapping access to return a spurious fault
 * if it's accessed right after the pte is set. The page fault handler does
 * not expect this type of fault. flush_cache_vmap is not exactly the right
 * place to put this, but it seems to work well enough.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	asm volatile("ptesync" ::: "memory");
}
#else
static inline void flush_cache_vmap(unsigned long start, unsigned long end) { }
#endif
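
/*
 * Usage sketch (hypothetical, guarded out): why the ptesync above matters
 * on Book3s.  vmap() installs kernel PTEs and then calls flush_cache_vmap();
 * without the ptesync, a load through the new mapping issued immediately
 * afterwards could take a spurious fault that the page fault handler does
 * not expect.  Assumes <linux/vmalloc.h>; the function name is made up for
 * illustration.
 */
#if 0	/* illustration only, not compiled */
static int example_vmap_then_touch(struct page **pages, unsigned int npages)
{
	void *va = vmap(pages, npages, VM_MAP, PAGE_KERNEL);

	if (!va)
		return -ENOMEM;
	/* Safe to touch right away: flush_cache_vmap() ran the ptesync. */
	((volatile char *)va)[0];
	vunmap(va);
	return 0;
}
#endif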

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *page);
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

extern void flush_icache_range(unsigned long, unsigned long);
extern void flush_icache_user_range(struct vm_area_struct *vma,
				    struct page *page, unsigned long addr,
				    int len);
extern void __flush_dcache_icache(void *page_va);
extern void flush_dcache_icache_page(struct page *page);
#if defined(CONFIG_PPC32) && !defined(CONFIG_BOOKE)
extern void __flush_dcache_icache_phys(unsigned long physaddr);
#else
static inline void __flush_dcache_icache_phys(unsigned long physaddr)
{
	BUG();
}
#endif
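
/*
 * Usage sketch (hypothetical, guarded out): flush_icache_range() is the
 * call to use after writing instructions to memory, e.g. when patching
 * code.  It writes the modified D-cache blocks back and invalidates the
 * corresponding I-cache blocks so the CPU fetches the new instruction.
 */
#if 0	/* illustration only, not compiled */
static void example_patch_insn(u32 *addr, u32 insn)
{
	*addr = insn;	/* new instruction is only in the D-cache so far */
	flush_icache_range((unsigned long)addr,
			   (unsigned long)addr + sizeof(insn));
	/* Now it is safe to execute at addr. */
}
#endif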

#ifdef CONFIG_PPC32
/*
 * Write any modified data cache blocks out to memory and invalidate them.
 * Does not invalidate the corresponding instruction cache blocks.
 */
static inline void flush_dcache_range(unsigned long start, unsigned long stop)
{
	void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1));
	unsigned long size = stop - (unsigned long)addr + (L1_CACHE_BYTES - 1);
	unsigned long i;

	for (i = 0; i < size >> L1_CACHE_SHIFT; i++, addr += L1_CACHE_BYTES)
		dcbf(addr);
	mb();	/* sync */
}
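
/*
 * Usage sketch (hypothetical, guarded out): a typical caller is a driver
 * handing a buffer to a device that does not snoop the cache.  The flush
 * pushes the CPU's dirty lines out to memory and discards them, so the
 * device reads exactly what the CPU just wrote.
 */
#if 0	/* illustration only, not compiled */
static void example_send_to_device(void *buf, size_t len)
{
	flush_dcache_range((unsigned long)buf, (unsigned long)buf + len);
	/* ... start the device's read/DMA from buf ... */
}
#endif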

/*
 * Write any modified data cache blocks out to memory.
 * Does not invalidate the corresponding cache lines (especially for
 * any corresponding instruction cache).
 */
static inline void clean_dcache_range(unsigned long start, unsigned long stop)
{
	void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1));
	unsigned long size = stop - (unsigned long)addr + (L1_CACHE_BYTES - 1);
	unsigned long i;

	for (i = 0; i < size >> L1_CACHE_SHIFT; i++, addr += L1_CACHE_BYTES)
		dcbst(addr);
	mb();	/* sync */
}
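
/*
 * Usage sketch (hypothetical, guarded out): clean_dcache_range() suits the
 * case where a device only reads the memory and the CPU keeps using it;
 * the lines stay valid in the cache, so later CPU reads still hit.
 */
#if 0	/* illustration only, not compiled */
static void example_publish_buffer(const void *buf, size_t len)
{
	clean_dcache_range((unsigned long)buf, (unsigned long)buf + len);
	/* ... device may now read buf; CPU reads continue to hit cache ... */
}
#endif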

/*
 * Like above, but invalidate the D-cache.  This is used by the 8xx
 * to invalidate the cache so the PPC core doesn't get stale data
 * from the CPM (no cache snooping here :-).
 */
static inline void invalidate_dcache_range(unsigned long start,
					   unsigned long stop)
{
	void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1));
	unsigned long size = stop - (unsigned long)addr + (L1_CACHE_BYTES - 1);
	unsigned long i;

	for (i = 0; i < size >> L1_CACHE_SHIFT; i++, addr += L1_CACHE_BYTES)
		dcbi(addr);
	mb();	/* sync */
}
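
/*
 * Usage sketch (hypothetical, guarded out): before reading data a device
 * (such as the 8xx CPM) has written to memory, drop any stale cached copy.
 * Note that dcbi discards dirty lines too, so the range must not contain
 * data the CPU still needs to write back.
 */
#if 0	/* illustration only, not compiled */
static void example_receive_from_device(void *buf, size_t len)
{
	invalidate_dcache_range((unsigned long)buf, (unsigned long)buf + len);
	/* ... CPU reads of buf now fetch the device's data from memory ... */
}
#endif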

#endif /* CONFIG_PPC32 */
#ifdef CONFIG_PPC64
extern void flush_dcache_range(unsigned long start, unsigned long stop);
extern void flush_inval_dcache_range(unsigned long start, unsigned long stop);
#endif

#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
	do {							\
		memcpy(dst, src, len);				\
		flush_icache_user_range(vma, page, vaddr, len);	\
	} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len)	\
	memcpy(dst, src, len)
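
/*
 * Usage sketch (hypothetical, guarded out): copy_to_user_page() is what the
 * core VM uses in access_process_vm() when, e.g., a debugger pokes a
 * breakpoint into another task's text page.  The memcpy alone would leave a
 * stale I-cache, so the flush_icache_user_range() in the macro is essential.
 * Assumes <linux/highmem.h> for kmap()/kunmap().
 */
#if 0	/* illustration only, not compiled */
static void example_poke_text(struct vm_area_struct *vma, struct page *page,
			      unsigned long vaddr, const void *src, int len)
{
	void *dst = kmap(page) + (vaddr & ~PAGE_MASK);

	copy_to_user_page(vma, page, vaddr, dst, src, len);
	kunmap(page);
}
#endif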

#endif /* __KERNEL__ */

#endif /* _ASM_POWERPC_CACHEFLUSH_H */