/*
 * arch/sh/mm/pg-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2007  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

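/*
 * Resolve the kernel PTE backing a fixmap virtual address by walking
 * pgd -> pud -> pmd -> pte. Used once at init time to cache the PTE
 * for the first coherent kmap slot.
 */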
#define kmap_get_fixmap_pte(vaddr) \
	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))

static pte_t *kmap_coherent_pte;

void __init kmap_coherent_init(void)
{
	unsigned long vaddr;

	/* cache the first coherent kmap pte */
	vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
	kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
}

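/*
 * Map a page into one of the FIX_CMAP_* fixmap slots so that the
 * kernel-side mapping has the same cache colour as the user address
 * it is paired with. The SH-4 dcache is virtually indexed, so two
 * virtual mappings of the same page only share cache lines when they
 * agree in the index bits covered by dcache.alias_mask; selecting the
 * slot by colour lets us touch the page without creating an alias.
 */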
static inline void *kmap_coherent(struct page *page, unsigned long addr)
{
	enum fixed_addresses idx;
	unsigned long vaddr, flags;
	pte_t pte;

	/* Fixmap slots are per-CPU state; block preemption while mapped */
	inc_preempt_count();

	/* Pick the fixmap slot matching the user address's cache colour */
	idx = (addr & current_cpu_data.dcache.alias_mask) >> PAGE_SHIFT;
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, PAGE_KERNEL);

	/* Drop any stale translation for this slot before reusing it */
	local_irq_save(flags);
	flush_tlb_one(get_asid(), vaddr);
	local_irq_restore(flags);

	update_mmu_cache(NULL, vaddr, pte);

	set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);

	return (void *)vaddr;
}

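/*
 * No explicit teardown is needed: the PTE stays in place and is
 * invalidated by the flush_tlb_one() in the next kmap_coherent()
 * that reuses the slot. Only the preempt count is dropped here.
 */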
static inline void kunmap_coherent(struct page *page)
{
	dec_preempt_count();
	preempt_check_resched();
}

/*
 * clear_user_page
 * @to: P1 address
 * @address: U0 address to be mapped
 * @page: page (virt_to_page(to))
 */
void clear_user_page(void *to, unsigned long address, struct page *page)
{
	clear_page(to);

	if (pages_do_alias((unsigned long)to, address & PAGE_MASK))
		__flush_wback_region(to, PAGE_SIZE);
}

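/*
 * Write into a user page on behalf of callers such as
 * access_process_vm(). If the page is mapped and its dcache is clean,
 * copy through a colour-matched kernel mapping so the user mapping
 * sees the new data without an explicit flush; otherwise copy through
 * the plain kernel mapping and mark the dcache dirty so the flush can
 * be done lazily later.
 */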
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	if (page_mapped(page) && !test_bit(PG_dcache_dirty, &page->flags)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent(vto);
	} else {
		memcpy(dst, src, len);
		set_bit(PG_dcache_dirty, &page->flags);
	}

	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

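/*
 * Read from a user page; same colour-matching and lazy-flush logic
 * as copy_to_user_page() above, with the copy direction reversed.
 */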
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	if (page_mapped(page) && !test_bit(PG_dcache_dirty, &page->flags)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent(vfrom);
	} else {
		memcpy(dst, src, len);
		set_bit(PG_dcache_dirty, &page->flags);
	}
}

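/*
 * Copy a user page (e.g. for COW). The source is read through a
 * colour-matched mapping when it is live in the user's dcache so we
 * see current data; the destination is written back whenever the
 * kernel mapping would alias with the user address it is destined for.
 */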
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to, KM_USER1);

	if (page_mapped(from) && !test_bit(PG_dcache_dirty, &from->flags)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent(vfrom);
	} else {
		vfrom = kmap_atomic(from, KM_USER0);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom, KM_USER0);
	}

	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		__flush_wback_region(vto, PAGE_SIZE);

	kunmap_atomic(vto, KM_USER1);
	/* Make sure the copy is visible to other CPUs before this page is used */
	smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);