/*
 * arch/score/mm/cache.c
 *
 * Score Processor version.
 *
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *  Chen Liqin <liqin.chen@sunplusct.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/fs.h>

#include <asm/mmu_context.h>

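/*
 * Cache-op encodings used throughout this file, as inferred from the
 * surrounding comments and usage (assumptions, not taken from an S+core
 * reference manual):
 *
 *	0x02	invalidate one Icache line
 *	0x0d	write back and invalidate one Dcache line (variant used by
 *		flush_cache_sigtramp(); exact difference from 0x0e assumed)
 *	0x0e	write back and invalidate one Dcache line
 *	0x10	invalidate the entire Icache
 *	0x1a	drain the write buffer
 *	0x1f	write back and invalidate the entire Dcache
 */
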
/*
 * Just flush the entire Dcache for the page.  The caller must ensure
 * that the page does not contain instructions, because this function
 * does not flush the Icache.  addr must be cache-line aligned.
 */
static void flush_data_cache_page(unsigned long addr)
{
	unsigned int i;

	for (i = 0; i < PAGE_SIZE; i += L1_CACHE_BYTES) {
		__asm__ __volatile__(
			"cache 0x0e, [%0, 0]\n"
			"cache 0x1a, [%0, 0]\n"
			"nop\n"
			: : "r" (addr));
		addr += L1_CACHE_BYTES;
	}
}

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long addr;

	if (PageHighMem(page))
		return;
	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_dcache_dirty, &page->flags);
		return;
	}

	/*
	 * We could delay the flush for the !page_mapping case too.  But that
	 * case is for exec env/arg pages and those are 99% certain to get
	 * faulted into the TLB (and thus flushed) anyway.
	 */
	addr = (unsigned long) page_address(page);
	flush_data_cache_page(addr);
}
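
/*
 * Note: when the page has a mapping but no user mappings yet,
 * flush_dcache_page() above defers the flush by setting PG_dcache_dirty;
 * __update_cache() below performs the deferred flush once the page is
 * faulted into an executable mapping, and clears the bit.
 */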

/* Called by update_mmu_cache(). */
void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;
	int exec = (vma->vm_flags & VM_EXEC);

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn)))
		return;
	page = pfn_to_page(pfn);
	if (page_mapping(page) && test_bit(PG_dcache_dirty, &page->flags)) {
		addr = (unsigned long) page_address(page);
		if (exec)
			flush_data_cache_page(addr);
		clear_bit(PG_dcache_dirty, &page->flags);
	}
}

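/*
 * protection_map[] is indexed by the low vm_flags bits:
 * bit 0 = VM_READ, bit 1 = VM_WRITE, bit 2 = VM_EXEC, bit 3 = VM_SHARED.
 * Entries 0-7 are private mappings, where writable means copy-on-write
 * (PAGE_COPY); entries 8-15 are the shared equivalents, where writable
 * maps to PAGE_SHARED.
 */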
static inline void setup_protection_map(void)
{
	protection_map[0] = PAGE_NONE;
	protection_map[1] = PAGE_READONLY;
	protection_map[2] = PAGE_COPY;
	protection_map[3] = PAGE_COPY;
	protection_map[4] = PAGE_READONLY;
	protection_map[5] = PAGE_READONLY;
	protection_map[6] = PAGE_COPY;
	protection_map[7] = PAGE_COPY;
	protection_map[8] = PAGE_NONE;
	protection_map[9] = PAGE_READONLY;
	protection_map[10] = PAGE_SHARED;
	protection_map[11] = PAGE_SHARED;
	protection_map[12] = PAGE_READONLY;
	protection_map[13] = PAGE_READONLY;
	protection_map[14] = PAGE_SHARED;
	protection_map[15] = PAGE_SHARED;
}

void cpu_cache_init(void)
{
	setup_protection_map();
}

void flush_icache_all(void)
{
	__asm__ __volatile__(
		"la r8, flush_icache_all\n"
		"cache 0x10, [r8, 0]\n"
		"nop\nnop\nnop\nnop\nnop\nnop\n"
		: : : "r8");
}

void flush_dcache_all(void)
{
	__asm__ __volatile__(
		"la r8, flush_dcache_all\n"
		"cache 0x1f, [r8, 0]\n"
		"nop\nnop\nnop\nnop\nnop\nnop\n"
		"cache 0x1a, [r8, 0]\n"
		"nop\nnop\nnop\nnop\nnop\nnop\n"
		: : : "r8");
}

void flush_cache_all(void)
{
	__asm__ __volatile__(
		"la r8, flush_cache_all\n"
		"cache 0x10, [r8, 0]\n"
		"nop\nnop\nnop\nnop\nnop\nnop\n"
		"cache 0x1f, [r8, 0]\n"
		"nop\nnop\nnop\nnop\nnop\nnop\n"
		"cache 0x1a, [r8, 0]\n"
		"nop\nnop\nnop\nnop\nnop\nnop\n"
		: : : "r8");
}
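
/*
 * In the three whole-cache flushes above, "la r8, <function>" just loads
 * a known-valid kernel address for the cache op to use; since the ops act
 * on the entire cache, the address itself is presumably irrelevant (an
 * assumption based on usage, not on a manual).
 */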

void flush_cache_mm(struct mm_struct *mm)
{
	if (!(mm->context))
		return;
	flush_cache_all();
}
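
/*
 * mm->context == 0 is taken to mean this mm has never been scheduled on
 * the CPU (no ASID assigned yet), so nothing of it can be in the caches.
 * This reading follows the usual MIPS-style convention and is an
 * assumption; it is not documented in this file.
 */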

/*
 * Flushing a range precisely can take a very long time, so instead we
 * walk the range page by page and only flush pages that are present.
 * Note that a range may span two pages of which one is present and the
 * other is not.
 *
 * This interface is provided in the hope that the port can find a
 * suitably efficient method for removing multiple page-sized regions
 * from the cache.
 */
void flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int exec = vma->vm_flags & VM_EXEC;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	if (!(mm->context))
		return;

	while (start <= end) {
		unsigned long tmpend;

		pgdp = pgd_offset(mm, start);
		pudp = pud_offset(pgdp, start);
		pmdp = pmd_offset(pudp, start);
		ptep = pte_offset(pmdp, start);

		if (!(pte_val(*ptep) & _PAGE_PRESENT)) {
			start = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
			continue;
		}
		tmpend = (start | (PAGE_SIZE - 1)) > end ?
			 end : (start | (PAGE_SIZE - 1));

		flush_dcache_range(start, tmpend);
		if (exec)
			flush_icache_range(start, tmpend);
		start = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
	}
}

void flush_cache_page(struct vm_area_struct *vma,
	unsigned long addr, unsigned long pfn)
{
	int exec = vma->vm_flags & VM_EXEC;
	unsigned long kaddr = 0xa0000000 | (pfn << PAGE_SHIFT);

	flush_dcache_range(kaddr, kaddr + PAGE_SIZE);

	if (exec)
		flush_icache_range(kaddr, kaddr + PAGE_SIZE);
}
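
/*
 * kaddr above (0xa0000000 | physical address) appears to be the kernel's
 * direct-mapped alias of the page frame, a kseg-style window analogous to
 * the MIPS KSEG segments, letting the whole page be flushed by kernel
 * virtual address without a temporary mapping.  The exact S+core segment
 * layout is an assumption.
 */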

void flush_cache_sigtramp(unsigned long addr)
{
	__asm__ __volatile__(
		"cache 0x02, [%0, 0]\n"
		"nop\nnop\nnop\nnop\nnop\n"
		"cache 0x02, [%0, 0x4]\n"
		"nop\nnop\nnop\nnop\nnop\n"

		"cache 0x0d, [%0, 0]\n"
		"nop\nnop\nnop\nnop\nnop\n"
		"cache 0x0d, [%0, 0x4]\n"
		"nop\nnop\nnop\nnop\nnop\n"

		"cache 0x1a, [%0, 0]\n"
		"nop\nnop\nnop\nnop\nnop\n"
		: : "r" (addr));
}
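
/*
 * flush_cache_sigtramp() touches offsets 0 and 0x4, i.e. presumably the
 * two 4-byte instructions of the signal trampoline: each word's Icache
 * line is invalidated (0x02) and its Dcache line written back and
 * invalidated (0x0d), then the write buffer is drained (0x1a).
 */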

/*
 * Write back and invalidate the Dcache lines covering the range, then
 * drain the write buffer.  The range must be smaller than PAGE_SIZE.
 */
void flush_dcache_range(unsigned long start, unsigned long end)
{
	int size, i;

	start = start & ~(L1_CACHE_BYTES - 1);
	/* Round end up so a partial final cache line is covered too. */
	end = (end + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
	size = end - start;
	/* Flush the Dcache to RAM and invalidate the Dcache lines. */
	for (i = 0; i < size; i += L1_CACHE_BYTES) {
		__asm__ __volatile__(
			"cache 0x0e, [%0, 0]\n"
			"nop\nnop\nnop\nnop\nnop\n"
			"cache 0x1a, [%0, 0]\n"
			"nop\nnop\nnop\nnop\nnop\n"
			: : "r" (start));
		start += L1_CACHE_BYTES;
	}
}

void flush_icache_range(unsigned long start, unsigned long end)
{
	int size, i;

	start = start & ~(L1_CACHE_BYTES - 1);
	/* Round end up so a partial final cache line is covered too. */
	end = (end + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);

	size = end - start;
	/* Invalidate the Icache lines. */
	for (i = 0; i < size; i += L1_CACHE_BYTES) {
		__asm__ __volatile__(
			"cache 0x02, [%0, 0]\n"
			"nop\nnop\nnop\nnop\nnop\n"
			: : "r" (start));
		start += L1_CACHE_BYTES;
	}
}
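
/*
 * A typical (hypothetical) use of the two routines above when writing
 * instructions through the Dcache, e.g. when building a trampoline:
 *
 *	memcpy(dst, insns, len);
 *	flush_dcache_range((unsigned long)dst, (unsigned long)dst + len);
 *	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
 *
 * flush_icache_range() only invalidates the Icache, so the data must
 * first be pushed to memory via flush_dcache_range(), which also drains
 * the write buffer.
 */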
279}