/*
 * arch/sh/mm/pg-sh7705.c
 *
 * Copyright (C) 1999, 2000  Niibe Yutaka
 * Copyright (C) 2004  Alex Song
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/threads.h>
#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

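/*
 * Background for the alias handling below (summary, not from the
 * original file): in the SH7705's larger cache mode the data cache is
 * virtually indexed but physically tagged, and the way size exceeds
 * PAGE_SIZE, so one physical page can sit at more than one cache index
 * (a virtual alias).  CACHE_ALIAS masks the index bits above the page
 * offset; two mappings of a page share cache lines only when those
 * bits agree.  Throughout this file 'to' is a P1 (cached,
 * identity-mapped) kernel address and 'address' is the U0 user
 * address the page is being mapped at.
 */
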
static inline void __flush_purge_virtual_region(void *p1, void *virt, int size)
{
        unsigned long v;
        unsigned long begin, end;
        unsigned long p1_begin;

        begin = L1_CACHE_ALIGN((unsigned long)virt);
        end = L1_CACHE_ALIGN((unsigned long)virt + size);

        p1_begin = (unsigned long)p1 & ~(L1_CACHE_BYTES - 1);

        /* do this the slow way as we may not have TLB entries
         * for virt yet. */
        for (v = begin; v < end; v += L1_CACHE_BYTES) {
                unsigned long p;
                unsigned long ways, addr;

                /* physical address backing this cache line */
                p = __pa(p1_begin);

                ways = current_cpu_data.dcache.ways;
                addr = CACHE_OC_ADDRESS_ARRAY;

                /* walk every way of the set that 'v' indexes and knock
                 * down any line whose tag matches our physical page */
                do {
                        unsigned long data;

                        addr |= (v & current_cpu_data.dcache.entry_mask);

                        data = ctrl_inl(addr);
                        if ((data & CACHE_PHYSADDR_MASK) ==
                            (p & CACHE_PHYSADDR_MASK)) {
                                data &= ~(SH_CACHE_UPDATED|SH_CACHE_VALID);
                                ctrl_outl(data, addr);
                        }

                        addr += current_cpu_data.dcache.way_incr;
                } while (--ways);

                p1_begin += L1_CACHE_BYTES;
        }
}
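
/*
 * Illustrative numbers (assumed for the example, not SH7705-specific):
 * with 4KB pages, a 16KB way and 16-byte lines, the set index comes
 * from virtual address bits [13:4], so bits [13:12] lie above the page
 * offset.  A page mapped at U0 0x00401000 and written through its P1
 * alias can then occupy two different sets, which is why the purge
 * above hunts for matching physical tags across every way of the sets
 * the user address indexes.
 */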

/*
 * clear_user_page
 * @to: P1 address
 * @address: U0 address to be mapped
 */
void clear_user_page(void *to, unsigned long address, struct page *pg)
{
        struct page *page = virt_to_page(to);

        /* let update_mmu_cache() know this page has been handled;
         * without this, 'page' is unused */
        __set_bit(PG_mapped, &page->flags);

        if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) {
                /* same cache colour: no alias to worry about */
                clear_page(to);
                __flush_wback_region(to, PAGE_SIZE);
        } else {
                /* purge stale lines at the user colour first */
                __flush_purge_virtual_region(to,
                                             (void *)(address & 0xfffff000),
                                             PAGE_SIZE);
                clear_page(to);
                __flush_wback_region(to, PAGE_SIZE);
        }
}
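
/*
 * For reference, generic mm code of this era reaches clear_user_page()
 * via clear_user_highpage() in include/linux/highmem.h, roughly:
 *
 *      void *addr = kmap_atomic(page, KM_USER0);
 *      clear_user_page(addr, vaddr, page);
 *      kunmap_atomic(addr, KM_USER0);
 *
 * i.e. 'to' is the kernel-side mapping of the page and 'address' is
 * the user virtual address being faulted in.
 */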

/*
 * copy_user_page
 * @to: P1 address
 * @from: P1 address
 * @address: U0 address to be mapped
 */
void copy_user_page(void *to, void *from, unsigned long address,
                    struct page *pg)
{
        struct page *page = virt_to_page(to);

        /* as in clear_user_page(): flag the page for the cache
         * sync code */
        __set_bit(PG_mapped, &page->flags);

        if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) {
                copy_page(to, from);
                __flush_wback_region(to, PAGE_SIZE);
        } else {
                __flush_purge_virtual_region(to,
                                             (void *)(address & 0xfffff000),
                                             PAGE_SIZE);
                copy_page(to, from);
                __flush_wback_region(to, PAGE_SIZE);
        }
}
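
/*
 * Similarly, copy_user_page() is reached from copy_user_highpage()
 * (include/linux/highmem.h) on the COW fault path, roughly:
 *
 *      vfrom = kmap_atomic(from, KM_USER0);
 *      vto = kmap_atomic(to, KM_USER1);
 *      copy_user_page(vto, vfrom, vaddr, to);
 *      kunmap_atomic(vfrom, KM_USER0);
 *      kunmap_atomic(vto, KM_USER1);
 */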