/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * vineetg: May 2011: for Non-aliasing VIPT D-cache following can be NOPs
 *  -flush_cache_dup_mm (fork)
 *  -likewise for flush_cache_mm (exit/execve)
 *  -likewise for flush_cache_{range,page} (munmap, exit, COW-break)
 *
 * vineetg: April 2008
 *  -Added a critical CacheLine flush to copy_to_user_page() which
 *   was causing gdbserver to not setup breakpoints consistently
 */

#ifndef _ASM_CACHEFLUSH_H
#define _ASM_CACHEFLUSH_H

#include <linux/mm.h>

/*
 * Semantically we need this because icache doesn't snoop dcache/dma.
 * However ARC Cache flush requires paddr as well as vaddr, latter not available
 * in the flush_icache_page() API. So we no-op it but do the equivalent work
 * in update_mmu_cache()
 */
#define flush_icache_page(vma, page)
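
/*
 * A minimal sketch of that deferral, assuming the arch's update_mmu_cache()
 * in the MM/TLB code (not part of this header); the paddr derived from the
 * PTE is exactly what the flush_icache_page() prototype lacks:
 *
 *	void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr,
 *			      pte_t *ptep)
 *	{
 *		unsigned long paddr = pte_val(*ptep) & PAGE_MASK;
 *
 *		if (vma->vm_flags & VM_EXEC)
 *			__inv_icache_page(paddr, vaddr);
 *	}
 */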

void flush_cache_all(void);

void flush_icache_range(unsigned long start, unsigned long end);
void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len);
void __inv_icache_page(unsigned long paddr, unsigned long vaddr);
void __flush_dcache_page(unsigned long paddr);

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1

void flush_dcache_page(struct page *page);

void dma_cache_wback_inv(unsigned long start, unsigned long sz);
void dma_cache_inv(unsigned long start, unsigned long sz);
void dma_cache_wback(unsigned long start, unsigned long sz);
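
/*
 * Illustrative note, not an API defined here: the arch dma-mapping code is
 * the expected consumer of the routines above, typically picking one per
 * DMA direction roughly as follows (a sketch under that assumption):
 *
 *	DMA_TO_DEVICE		->  dma_cache_wback(start, sz)
 *	DMA_FROM_DEVICE		->  dma_cache_inv(start, sz)
 *	DMA_BIDIRECTIONAL	->  dma_cache_wback_inv(start, sz)
 */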

#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

/* TBD: optimize this */
#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vunmap(start, end)		flush_cache_all()

/*
 * VM callbacks when an entire user address space, or a range of its V-P
 * mappings, is torn down or invalidated
 *
 * Currently we don't support D$ aliasing configs for our VIPT caches,
 * so these can be NOPs (valid only for non-aliasing D$ configurations)
 */
#define flush_cache_dup_mm(mm)			/* called on fork */
#define flush_cache_mm(mm)			/* called on munmap/exit */
#define flush_cache_range(mm, u_vstart, u_vend)
#define flush_cache_page(vma, u_vaddr, pfn)	/* PF handling/COW-break */

#define copy_to_user_page(vma, page, vaddr, dst, src, len)		\
do {									\
	memcpy(dst, src, len);						\
	if (vma->vm_flags & VM_EXEC)					\
		__sync_icache_dcache((unsigned long)(dst), vaddr, len);	\
} while (0)
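
/*
 * A minimal usage sketch, assuming the generic ptrace/remote-access path in
 * mm/memory.c as the caller (e.g. gdbserver planting a breakpoint in another
 * task's text page); 'insn'/'insn_len' are placeholder names:
 *
 *	void *kaddr = kmap(page);
 *	copy_to_user_page(vma, page, addr,
 *			  kaddr + offset_in_page(addr), insn, insn_len);
 *	kunmap(page);
 *
 * Since such a VMA is executable, the macro also syncs I$ with D$ for the
 * written range - the "critical CacheLine flush" noted in the changelog above.
 */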

#define copy_from_user_page(vma, page, vaddr, dst, src, len)	\
	memcpy(dst, src, len);					\

#endif