/*
 * linux/arch/arm/mm/cache-v3.S
 *
 * Copyright (C) 1997-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/hardware.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 * flush_user_cache_all()
 *
 * Invalidate all cache entries in a particular address
 * space.
 *
 * - mm - mm_struct describing address space
 */
ENTRY(v3_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(v3_flush_kern_cache_all)
	/* FALLTHROUGH */

/*
 * flush_user_cache_range(start, end, flags)
 *
 * Invalidate a range of cache entries in the specified
 * address space.
 *
 * - start - start address (may not be aligned)
 * - end - end address (exclusive, may not be aligned)
 * - flags - vm_area_struct flags describing address space
 */
ENTRY(v3_flush_user_cache_range)
	mov	ip, #0
	mcreq	p15, 0, ip, c7, c0, 0		@ flush ID cache
	mov	pc, lr
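
/*
 * Both entry points above fall straight through to
 * v3_flush_user_cache_range, which is where the flush and the return
 * actually happen.  ARMv3 parts (e.g. ARM610/ARM710) have a single
 * unified write-through cache whose only maintenance operation is a
 * whole-cache invalidate through CP15 c7, so the start/end/flags
 * arguments cannot be honoured at a finer granularity.
 */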

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start. If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end - virtual end address
 */
ENTRY(v3_coherent_kern_range)
	/* FALLTHROUGH */

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start. If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end - virtual end address
 */
ENTRY(v3_coherent_user_range)
	mov	pc, lr
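
/*
 * The cache on these parts is unified, so there is no separate Icache
 * to bring into sync with the Dcache; both coherent_*_range entry
 * points can therefore return immediately.
 */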

/*
 * flush_kern_dcache_page(void *page)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache
 *
 * - addr - page aligned address
 */
ENTRY(v3_flush_kern_dcache_page)
	/* FALLTHROUGH */

/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries. If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start - virtual start address
 * - end - virtual end address
 */
ENTRY(v3_dma_inv_range)
	/* FALLTHROUGH */

/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start - virtual start address
 * - end - virtual end address
 */
ENTRY(v3_dma_flush_range)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c0, 0		@ flush ID cache
	/* FALLTHROUGH */

/*
 * dma_clean_range(start, end)
 *
 * Clean (write back) the specified virtual address range.
 *
 * - start - virtual start address
 * - end - virtual end address
 */
ENTRY(v3_dma_clean_range)
	mov	pc, lr
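
/*
 * v3_flush_kern_dcache_page and v3_dma_inv_range fall through to
 * v3_dma_flush_range, which performs the whole-cache flush.  Since the
 * cache is write-through, memory is always up to date and a "clean"
 * never has anything to write back, so v3_dma_clean_range is a plain
 * return.
 */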

	__INITDATA

	.type	v3_cache_fns, #object
ENTRY(v3_cache_fns)
	.long	v3_flush_kern_cache_all
	.long	v3_flush_user_cache_all
	.long	v3_flush_user_cache_range
	.long	v3_coherent_kern_range
	.long	v3_coherent_user_range
	.long	v3_flush_kern_dcache_page
	.long	v3_dma_inv_range
	.long	v3_dma_clean_range
	.long	v3_dma_flush_range
	.size	v3_cache_fns, . - v3_cache_fns
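
/*
 * The entry order above is expected to match struct cpu_cache_fns (see
 * asm/cacheflush.h).  When the kernel is built with support for more
 * than one cache type, this table is copied into the resident cache
 * function structure during processor setup, which is why it can live
 * in __INITDATA.
 */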