/*
 * arch/sh/mm/pg-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2007  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/mutex.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

#define CACHE_ALIAS (current_cpu_data.dcache.alias_mask)

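/*
 * kmap_coherent() maps @page at a fixmap slot chosen by the D-cache colour
 * (the CACHE_ALIAS bits) of the user address @addr, so kernel-side accesses
 * through the returned pointer hit the same cache lines as the user mapping
 * and no aliasing flush is needed.  Preemption stays disabled while the
 * mapping is in use; any stale TLB entry for the slot is flushed and the
 * TLB is then primed for the new pte via update_mmu_cache().
 * kunmap_coherent() simply re-enables preemption.
 */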
static inline void *kmap_coherent(struct page *page, unsigned long addr)
{
	enum fixed_addresses idx;
	unsigned long vaddr, flags;
	pte_t pte;

	inc_preempt_count();

	idx = (addr & current_cpu_data.dcache.alias_mask) >> PAGE_SHIFT;
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, PAGE_KERNEL);

	local_irq_save(flags);
	flush_tlb_one(get_asid(), vaddr);
	local_irq_restore(flags);

	update_mmu_cache(NULL, vaddr, pte);

	return (void *)vaddr;
}

static inline void kunmap_coherent(struct page *page)
{
	dec_preempt_count();
	preempt_check_resched();
}

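/*
 * clear_user_page() and copy_user_page() only need the coherent mapping
 * above when the kernel (P1) destination and the user (U0) address fall on
 * different D-cache colours, i.e. when they differ in their CACHE_ALIAS
 * bits.  When the colours match, a plain clear_page()/copy_page() already
 * touches the right cache lines.
 */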
/*
 * clear_user_page
 * @to: P1 address
 * @address: U0 address to be mapped
 * @page: page (virt_to_page(to))
 */
void clear_user_page(void *to, unsigned long address, struct page *page)
{
	__set_bit(PG_mapped, &page->flags);
	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
		clear_page(to);
	else {
		void *vto = kmap_coherent(page, address);
		__clear_user_page(vto, to);
		kunmap_coherent(vto);
	}
}

/*
 * copy_user_page
 * @to: P1 address
 * @from: P1 address
 * @address: U0 address to be mapped
 * @page: page (virt_to_page(to))
 */
void copy_user_page(void *to, void *from, unsigned long address,
		    struct page *page)
{
	__set_bit(PG_mapped, &page->flags);
	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
		copy_page(to, from);
	else {
		void *vfrom = kmap_coherent(page, address);
		__copy_user_page(vfrom, from, to);
		kunmap_coherent(vfrom);
	}
}

/*
 * For SH-4, we have our own implementation of ptep_get_and_clear(): besides
 * clearing the PTE it also drops PG_mapped again for pages that are no
 * longer writably mapped through any address_space.
 */
inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte_clear(mm, addr, ptep);
	if (!pte_not_present(pte)) {
		unsigned long pfn = pte_pfn(pte);
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);
			struct address_space *mapping = page_mapping(page);
			if (!mapping || !mapping_writably_mapped(mapping))
				__clear_bit(PG_mapped, &page->flags);
		}
	}
	return pte;
}