/*
 * arch/sh/mm/pg-mmu.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2009  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

#define kmap_get_fixmap_pte(vaddr)                                     \
	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))

static pte_t *kmap_coherent_pte;

void __init kmap_coherent_init(void)
{
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
	unsigned long vaddr;

	/* cache the first coherent kmap pte */
	vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
	kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
#endif
}

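/*
 * For reference, kmap_get_fixmap_pte() above is shorthand for the full
 * four-level walk below (a sketch; it assumes the kernel page tables
 * covering the fixmap range are already populated at init time):
 *
 *	pgd_t *pgd = pgd_offset_k(vaddr);
 *	pud_t *pud = pud_offset(pgd, vaddr);
 *	pmd_t *pmd = pmd_offset(pud, vaddr);
 *	pte_t *pte = pte_offset_kernel(pmd, vaddr);
 */
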
static inline void *kmap_coherent(struct page *page, unsigned long addr)
{
	enum fixed_addresses idx;
	unsigned long vaddr, flags;
	pte_t pte;

	inc_preempt_count();

	/*
	 * Pick the fixmap slot whose cache colour matches the user
	 * address, so this kernel-side mapping aliases cleanly with
	 * the user mapping of the page.
	 */
	idx = (addr & current_cpu_data.dcache.alias_mask) >> PAGE_SHIFT;
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, PAGE_KERNEL);

	local_irq_save(flags);
	flush_tlb_one(get_asid(), vaddr);
	local_irq_restore(flags);

	update_mmu_cache(NULL, vaddr, pte);

	set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);

	return (void *)vaddr;
}

static inline void kunmap_coherent(struct page *page)
{
	/* The argument is unused; callers hand back the mapping address. */
	dec_preempt_count();
	preempt_check_resched();
}

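/*
 * Typical pairing (an illustrative sketch, not a caller in this file):
 *
 *	void *vto = kmap_coherent(page, vaddr);
 *	memcpy(vto, src, len);
 *	kunmap_coherent(vto);
 *
 * The mapping is only valid between the two calls; kmap_coherent()
 * bumps the preempt count, so the section must not sleep.
 */
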
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	/*
	 * If the page is live in the cache under a user mapping, copy
	 * through a colour-matched kernel mapping; otherwise copy via
	 * the plain kernel address and defer the writeback by marking
	 * the page PG_dcache_dirty.
	 */
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    !test_bit(PG_dcache_dirty, &page->flags)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent(vto);
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			set_bit(PG_dcache_dirty, &page->flags);
	}

	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

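/*
 * copy_to_user_page() is the arch hook behind remote-VM writes such as
 * the ptrace poke path. A rough sketch of the generic caller's shape
 * (abridged from mm/memory.c, not code in this file):
 *
 *	maddr = kmap(page);
 *	copy_to_user_page(vma, page, addr, maddr + offset, buf, bytes);
 *	set_page_dirty_lock(page);
 *	kunmap(page);
 */
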
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    !test_bit(PG_dcache_dirty, &page->flags)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent(vfrom);
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			set_bit(PG_dcache_dirty, &page->flags);
	}
}

void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to, KM_USER1);

	if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
	    !test_bit(PG_dcache_dirty, &from->flags)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent(vfrom);
	} else {
		vfrom = kmap_atomic(from, KM_USER0);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom, KM_USER0);
	}

	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		__flush_wback_region(vto, PAGE_SIZE);

	kunmap_atomic(vto, KM_USER1);
	/* Make sure this page is cleared on other CPU's too before using it */
	smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);

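/*
 * copy_user_highpage() backs the copy-on-write fault path: when a write
 * hits a shared page, the generic fault code allocates a fresh page and
 * copies the old contents into it, roughly (a sketch of the caller, not
 * code in this file):
 *
 *	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
 *	copy_user_highpage(new_page, old_page, address, vma);
 */
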
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	clear_page(kaddr);

	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
		__flush_wback_region(kaddr, PAGE_SIZE);

	kunmap_atomic(kaddr, KM_USER0);
}
EXPORT_SYMBOL(clear_user_highpage);

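/*
 * Worked example of the aliasing test above, assuming a hypothetical
 * cache with two colour bits above a 4 KiB page (alias mask 0x3000):
 *
 *	kaddr = 0xc0102000  ->  colour bits 0x2000
 *	vaddr = 0x00403000  ->  colour bits 0x3000
 *
 * (kaddr ^ vaddr) & 0x3000 != 0, so the kernel and user mappings land
 * in different cache sets, and the kernel-side lines must be written
 * back before userspace reads the page through its own mapping.
 */
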
/*
 * On a PTE update, complete any cache writeback that was deferred for
 * this page via PG_dcache_dirty.
 */
void __update_cache(struct vm_area_struct *vma,
		    unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn = pte_pfn(pte);

	if (!boot_cpu_data.dcache.n_aliases)
		return;

	page = pfn_to_page(pfn);
	if (pfn_valid(pfn) && page_mapping(page)) {
		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			unsigned long addr = (unsigned long)page_address(page);

			if (pages_do_alias(addr, address & PAGE_MASK))
				__flush_wback_region((void *)addr, PAGE_SIZE);
		}
	}
}
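
/*
 * Putting the pieces together, the deferred-flush protocol looks like
 * this (an illustrative timeline, not code in this file):
 *
 *	copy_to_user_page()       page not coherently cached ->
 *	                          memcpy() + set_bit(PG_dcache_dirty)
 *	...
 *	fault installs the PTE -> __update_cache() sees the bit,
 *	                          clears it, and writes back the kernel
 *	                          alias if the cache colours differ
 */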