/*
 * arch/sh/mm/pg-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
 * Copyright (C) 2002 - 2007 Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

#define CACHE_ALIAS (current_cpu_data.dcache.alias_mask)

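/*
 * Map @page at a fixmap virtual address that shares the D-cache colour
 * of the user address @addr, so that accesses through the returned
 * pointer hit the same cache lines as the user mapping and no alias is
 * introduced. Preemption stays disabled until kunmap_coherent().
 */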
static inline void *kmap_coherent(struct page *page, unsigned long addr)
{
	enum fixed_addresses idx;
	unsigned long vaddr, flags;
	pte_t pte;

	inc_preempt_count();

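	/*
	 * Pick the fixmap slot whose colour matches the user mapping and
	 * make sure no stale TLB entry remains for it before the new PTE
	 * is loaded via update_mmu_cache().
	 */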
	idx = (addr & current_cpu_data.dcache.alias_mask) >> PAGE_SHIFT;
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, PAGE_KERNEL);

	local_irq_save(flags);
	flush_tlb_one(get_asid(), vaddr);
	local_irq_restore(flags);

	update_mmu_cache(NULL, vaddr, pte);

	return (void *)vaddr;
}

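/*
 * Tear-down for kmap_coherent(): re-enable preemption. The fixmap entry
 * itself is left in place; the next kmap_coherent() that reuses the slot
 * flushes the old TLB entry before installing its own.
 */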
static inline void kunmap_coherent(struct page *page)
{
	dec_preempt_count();
	preempt_check_resched();
}

/*
 * clear_user_page
 * @to: P1 address
 * @address: U0 address to be mapped
 * @page: page (virt_to_page(to))
 */
void clear_user_page(void *to, unsigned long address, struct page *page)
{
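	/*
	 * Note that the kernel has written this page through its own
	 * mapping; ptep_get_and_clear() below drops the bit again once
	 * the PTE is torn down and the page is not writably mapped.
	 */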
	__set_bit(PG_mapped, &page->flags);
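	/*
	 * If the kernel (P1) address and the user address fall on the same
	 * D-cache colour there is no alias and a plain clear_page() will
	 * do; otherwise clear through a colour-matched coherent mapping.
	 */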
	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
		clear_page(to);
	else {
		void *vto = kmap_coherent(page, address);
		__clear_user_page(vto, to);
		kunmap_coherent(vto);
	}
}

/*
 * copy_user_page
 * @to: P1 address
 * @from: P1 address
 * @address: U0 address to be mapped
 * @page: page (virt_to_page(to))
 */
void copy_user_page(void *to, void *from, unsigned long address,
		    struct page *page)
{
	__set_bit(PG_mapped, &page->flags);
	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
		copy_page(to, from);
	else {
		void *vfrom = kmap_coherent(page, address);
		__copy_user_page(vfrom, from, to);
		kunmap_coherent(vfrom);
	}
}

/*
 * For SH-4, we have our own implementation for ptep_get_and_clear
 */
inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte_clear(mm, addr, ptep);
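	/*
	 * Only a present PTE backed by valid RAM is of interest here: if
	 * the page is no longer writably mapped through any address_space,
	 * drop the PG_mapped bit set in clear_user_page()/copy_user_page()
	 * above.
	 */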
	if (!pte_not_present(pte)) {
		unsigned long pfn = pte_pfn(pte);
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);
			struct address_space *mapping = page_mapping(page);
			if (!mapping || !mapping_writably_mapped(mapping))
				__clear_bit(PG_mapped, &page->flags);
		}
	}
	return pte;
}