/*
 *  linux/arch/arm/mm/cache-v3.S
 *
 *  Copyright (C) 1997-2002 Russell king
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/page.h>
#include "proc-macros.S"
14
/*
 *	flush_user_cache_all()
 *
 *	Invalidate all cache entries in a particular address
 *	space.
 *
 *	- mm	- mm_struct describing address space
 */
ENTRY(v3_flush_user_cache_all)
	/* FALLTHROUGH into v3_flush_kern_cache_all below */
/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(v3_flush_kern_cache_all)
	/* FALLTHROUGH into v3_flush_user_cache_range below */

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start	- start address (may not be aligned)
 *	- end	- end address (exclusive, may not be aligned)
 *	- flags	- vma_area_struct flags describing address space
 *
 *	NOTE(review): the mcreq below executes only when the Z flag is
 *	set, but nothing in this routine (or the two entry points that
 *	fall through to it) establishes the flags — presumably the
 *	condition is inherited from a caller-side test. Confirm against
 *	the callers before relying on this behaviour.
 */
ENTRY(v3_flush_user_cache_range)
	mov	ip, #0				@ ip = 0, value written to cp15 below
	mcreq	p15, 0, ip, c7, c0, 0		@ flush ID cache
	mov	pc, lr				@ return (pre-ARMv4: no bx)
47
/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(v3_coherent_kern_range)
	/* FALLTHROUGH into v3_coherent_user_range below */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(v3_coherent_user_range)
	mov	pc, lr				@ no-op: no maintenance performed here,
						@ presumably because this CPU's cache
						@ needs none — confirm against the TRM
73
/*
 *	flush_kern_dcache_page(void *page)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- page aligned address
 */
ENTRY(v3_flush_kern_dcache_page)
	/* FALLTHROUGH into v3_dma_inv_range below */

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(v3_dma_inv_range)
	/* FALLTHROUGH into v3_dma_flush_range below:
	 * the whole cache is flushed rather than just the range */

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(v3_dma_flush_range)
	mov	r0, #0				@ r0 = 0, value written to cp15 below
	mcr	p15, 0, r0, c7, c0, 0		@ flush ID cache
	/* FALLTHROUGH into v3_dma_clean_range below for the return */

/*
 *	dma_clean_range(start, end)
 *
 *	Clean (write back) the specified virtual address range.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(v3_dma_clean_range)
	mov	pc, lr				@ no-op: no clean performed, presumably
						@ a write-through cache — confirm
122
__INITDATA

	/*
	 * Method table consumed by the generic ARM cache glue.
	 * NOTE(review): the entry order must match the field order of
	 * struct cpu_cache_fns (declared elsewhere, not visible in this
	 * file) — do not reorder these .long entries.
	 */
	.type	v3_cache_fns, #object
ENTRY(v3_cache_fns)
	.long	v3_flush_kern_cache_all
	.long	v3_flush_user_cache_all
	.long	v3_flush_user_cache_range
	.long	v3_coherent_kern_range
	.long	v3_coherent_user_range
	.long	v3_flush_kern_dcache_page
	.long	v3_dma_inv_range
	.long	v3_dma_clean_range
	.long	v3_dma_flush_range
	.size	v3_cache_fns, . - v3_cache_fns