/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_POWERPC_CACHEFLUSH_H
#define _ASM_POWERPC_CACHEFLUSH_H

#ifdef __KERNEL__

#include <linux/mm.h>
#include <asm/cputable.h>

/*
 * No cache flushing is required when address mappings are changed,
 * because the caches on PowerPCs are physically addressed.
 */
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_dup_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
#define flush_icache_page(vma, page)		do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Book3s has no ptesync after setting a pte, so without this ptesync it's
 * possible for a kernel virtual mapping access to return a spurious fault
 * if it's accessed right after the pte is set. The page fault handler does
 * not expect this type of fault. flush_cache_vmap is not exactly the right
 * place to put this, but it seems to work well enough.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	asm volatile("ptesync" ::: "memory");
}
#else
static inline void flush_cache_vmap(unsigned long start, unsigned long end) { }
#endif
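
/*
 * Illustration (a sketch, not from the original header) of the window the
 * ptesync above closes:
 *
 *	addr = vmalloc(size);	// PTEs written, but no ptesync yet
 *	val = *(int *)addr;	// could take a spurious kernel fault
 *
 * flush_cache_vmap() runs once vmap()/vmalloc() have set up the page
 * tables, so the ptesync is ordered before any access through the new
 * mapping.
 */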

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *page);
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

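/*
 * Keep the instruction cache coherent with the data cache after new
 * instructions have been stored, e.g. when code is modified at run time.
 * flush_icache_user_range() is the user-page variant used by
 * copy_to_user_page() below.
 */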
extern void flush_icache_range(unsigned long, unsigned long);
extern void flush_icache_user_range(struct vm_area_struct *vma,
				    struct page *page, unsigned long addr,
				    int len);
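/*
 * Flush a page's worth of data cache and invalidate the matching
 * instruction cache lines, given a kernel virtual address, a struct page,
 * or (32-bit non-BookE only, below) a physical address.
 */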
extern void __flush_dcache_icache(void *page_va);
extern void flush_dcache_icache_page(struct page *page);
#if defined(CONFIG_PPC32) && !defined(CONFIG_BOOKE)
extern void __flush_dcache_icache_phys(unsigned long physaddr);
#else
static inline void __flush_dcache_icache_phys(unsigned long physaddr)
{
	BUG();
}
#endif

#ifdef CONFIG_PPC32
/*
 * Write any modified data cache blocks out to memory and invalidate them.
 * Does not invalidate the corresponding instruction cache blocks.
 */
static inline void flush_dcache_range(unsigned long start, unsigned long stop)
{
	void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1));
	unsigned long size = stop - (unsigned long)addr + (L1_CACHE_BYTES - 1);
	unsigned long i;

	for (i = 0; i < size >> L1_CACHE_SHIFT; i++, addr += L1_CACHE_BYTES)
		dcbf(addr);
	mb();	/* sync */
}
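
/*
 * Hypothetical usage sketch (not part of the original header): a driver
 * for a non-snooping device could push a freshly written buffer out to
 * memory before starting DMA *to* the device:
 *
 *	memcpy(buf, data, len);
 *	flush_dcache_range((unsigned long)buf, (unsigned long)buf + len);
 *	// ... kick off the DMA transfer ...
 *
 * In practice drivers should use the DMA mapping API, which performs the
 * equivalent maintenance on non-coherent platforms.
 */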

/*
 * Write any modified data cache blocks out to memory.
 * Does not invalidate the corresponding cache lines (especially for
 * any corresponding instruction cache).
 */
static inline void clean_dcache_range(unsigned long start, unsigned long stop)
{
	void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1));
	unsigned long size = stop - (unsigned long)addr + (L1_CACHE_BYTES - 1);
	unsigned long i;

	for (i = 0; i < size >> L1_CACHE_SHIFT; i++, addr += L1_CACHE_BYTES)
		dcbst(addr);
	mb();	/* sync */
}

/*
 * Like above, but invalidate the D-cache.  This is used by the 8xx
 * to invalidate the cache so the PPC core doesn't get stale data
 * from the CPM (no cache snooping here :-).
 */
static inline void invalidate_dcache_range(unsigned long start,
					   unsigned long stop)
{
	void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1));
	unsigned long size = stop - (unsigned long)addr + (L1_CACHE_BYTES - 1);
	unsigned long i;

	for (i = 0; i < size >> L1_CACHE_SHIFT; i++, addr += L1_CACHE_BYTES)
		dcbi(addr);
	mb();	/* sync */
}
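
/*
 * The converse sketch (hypothetical): after a non-snooping device has
 * DMA'd *into* a buffer, discard any stale cached lines before the CPU
 * reads the new data:
 *
 *	// ... wait for the device-to-memory DMA to complete ...
 *	invalidate_dcache_range((unsigned long)buf, (unsigned long)buf + len);
 *	first = buf[0];	// now sees the DMA'd data
 *
 * Again, the DMA mapping API is the normal way to get this right.
 */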

#endif /* CONFIG_PPC32 */
#ifdef CONFIG_PPC64
extern void flush_dcache_range(unsigned long start, unsigned long stop);
extern void flush_inval_dcache_range(unsigned long start, unsigned long stop);
#endif

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
	do { \
		memcpy(dst, src, len); \
		flush_icache_user_range(vma, page, vaddr, len); \
	} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)
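
/*
 * copy_to_user_page() is used when the kernel modifies another process's
 * page through its own mapping (e.g. ptrace poking a breakpoint); the
 * icache flush keeps the page coherent in case it contains code.
 */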

#endif /* __KERNEL__ */

#endif /* _ASM_POWERPC_CACHEFLUSH_H */