/*
 * arch/sh/mm/pg-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2007  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

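/*
 * alias_mask covers the D-cache index bits above PAGE_SHIFT: two
 * virtual addresses that differ in these bits index different cache
 * lines ("colours") even when they map the same physical page.
 */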
#define CACHE_ALIAS (current_cpu_data.dcache.alias_mask)

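/* Walk the kernel page tables (pgd -> pud -> pmd -> pte) down to the
   pte that backs a fixmap address. */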
#define kmap_get_fixmap_pte(vaddr) \
	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))

static pte_t *kmap_coherent_pte;

void __init kmap_coherent_init(void)
{
	unsigned long vaddr;

	/* cache the first coherent kmap pte */
	vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
	kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
}

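/*
 * Map @page at a fixmap slot whose cache colour matches the user
 * address @addr, so that accesses through the kernel mapping hit the
 * same D-cache lines as accesses through the user mapping.
 */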
static inline void *kmap_coherent(struct page *page, unsigned long addr)
{
	enum fixed_addresses idx;
	unsigned long vaddr, flags;
	pte_t pte;

	inc_preempt_count();

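	/* Pick the FIX_CMAP slot with the same colour as the user address. */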
	idx = (addr & current_cpu_data.dcache.alias_mask) >> PAGE_SHIFT;
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, PAGE_KERNEL);

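	/* Evict any stale translation for this slot before reusing it. */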
	local_irq_save(flags);
	flush_tlb_one(get_asid(), vaddr);
	local_irq_restore(flags);

	update_mmu_cache(NULL, vaddr, pte);

	set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);

	return (void *)vaddr;
}

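/*
 * Tear down a coherent kmap. The page argument is unused: all that is
 * needed is to drop the preempt count taken in kmap_coherent(), since
 * the fixmap slot is simply overwritten by its next user.
 */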
static inline void kunmap_coherent(struct page *page)
{
	dec_preempt_count();
	preempt_check_resched();
}

/*
 * clear_user_page
 * @to: P1 (cached, identity-mapped kernel) address of the page
 * @address: U0 (user) address this page will be mapped at
 * @page: page (virt_to_page(to))
 */
void clear_user_page(void *to, unsigned long address, struct page *page)
{
	__set_bit(PG_mapped, &page->flags);

	clear_page(to);
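	/*
	 * If the kernel address and the user address fall in different
	 * cache colours, write the cleared lines back to memory so the
	 * user mapping does not see stale data.
	 */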
	if ((((address & PAGE_MASK) ^ (unsigned long)to) & CACHE_ALIAS))
		__flush_wback_region(to, PAGE_SIZE);
}

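/*
 * Write into a user page on its owner's behalf (ptrace, for example),
 * going through a colour-matched kernel mapping so the user's D-cache
 * view stays coherent.
 */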
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	void *vto;

	__set_bit(PG_mapped, &page->flags);

	vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
	memcpy(vto, src, len);
	kunmap_coherent(vto);

	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

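/* Read from a user page through a colour-matched kernel mapping. */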
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	void *vfrom;

	__set_bit(PG_mapped, &page->flags);

	vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
	memcpy(dst, vfrom, len);
	kunmap_coherent(vfrom);
}

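/*
 * Page copy (the COW path, among others): read the source through a
 * coherent, colour-matched mapping and write the destination through
 * an atomic kmap, then write the destination back if its kernel
 * mapping aliases vaddr.
 */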
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	__set_bit(PG_mapped, &to->flags);

	vto = kmap_atomic(to, KM_USER1);
	vfrom = kmap_coherent(from, vaddr);
	copy_page(vto, vfrom);
	kunmap_coherent(vfrom);

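	/* Write the copy back if the kmap differs in colour from vaddr. */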
	if (((vaddr ^ (unsigned long)vto) & CACHE_ALIAS))
		__flush_wback_region(vto, PAGE_SIZE);

	kunmap_atomic(vto, KM_USER1);
	/* Make sure the copy is visible to other CPUs before the page is used */
	smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);

/*
 * For SH-4, we have our own implementation of ptep_get_and_clear():
 * on top of clearing the pte, it drops PG_mapped once the page is no
 * longer writably shared, so the cache alias handling above is redone
 * the next time the page is mapped.
 */
pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte_clear(mm, addr, ptep);
	if (!pte_not_present(pte)) {
		unsigned long pfn = pte_pfn(pte);
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);
			struct address_space *mapping = page_mapping(page);
			if (!mapping || !mapping_writably_mapped(mapping))
				__clear_bit(PG_mapped, &page->flags);
		}
	}
	return pte;
}