/*
 * arch/sh/mm/pg-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2005  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/mutex.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

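/*
 * Note: the SH-4 D-cache is virtually indexed, so a user mapping and the
 * kernel's P1 mapping of the same physical page can occupy different cache
 * lines unless the two addresses share the same alias colour (the
 * CACHE_ALIAS bits below). The helpers here therefore touch the page
 * through a temporary P3 mapping given the same colour as the user address,
 * and p3map_mutex[] serializes users of each per-colour P3 slot. This
 * summary is inferred from the code below.
 */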
extern struct mutex p3map_mutex[];

#define CACHE_ALIAS (current_cpu_data.dcache.alias_mask)

/*
 * clear_user_page
 * @to: P1 address
 * @address: U0 address to be mapped
 * @page: page (virt_to_page(to))
 */
void clear_user_page(void *to, unsigned long address, struct page *page)
{
	__set_bit(PG_mapped, &page->flags);
	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
		clear_page(to);
	else {
		unsigned long phys_addr = PHYSADDR(to);
		unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
		pgd_t *pgd = pgd_offset_k(p3_addr);
		pud_t *pud = pud_offset(pgd, p3_addr);
		pmd_t *pmd = pmd_offset(pud, p3_addr);
		pte_t *pte = pte_offset_kernel(pmd, p3_addr);
		pte_t entry;
		unsigned long flags;

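		/*
		 * Install a kernel PTE for the page at the colour-matched
		 * P3 address, flush any stale TLB entry for that address
		 * with interrupts off, clear the page through the alias,
		 * then tear the temporary mapping down again.
		 */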
		entry = pfn_pte(phys_addr >> PAGE_SHIFT, PAGE_KERNEL);
		mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
		set_pte(pte, entry);
		local_irq_save(flags);
		flush_tlb_one(get_asid(), p3_addr);
		local_irq_restore(flags);
		update_mmu_cache(NULL, p3_addr, entry);
		__clear_user_page((void *)p3_addr, to);
		pte_clear(&init_mm, p3_addr, pte);
		mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
	}
}

/*
 * copy_user_page
 * @to: P1 address
 * @from: P1 address
 * @address: U0 address to be mapped
 * @page: page (virt_to_page(to))
 */
void copy_user_page(void *to, void *from, unsigned long address,
		    struct page *page)
{
	__set_bit(PG_mapped, &page->flags);
	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
		copy_page(to, from);
	else {
		unsigned long phys_addr = PHYSADDR(to);
		unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
		pgd_t *pgd = pgd_offset_k(p3_addr);
		pud_t *pud = pud_offset(pgd, p3_addr);
		pmd_t *pmd = pmd_offset(pud, p3_addr);
		pte_t *pte = pte_offset_kernel(pmd, p3_addr);
		pte_t entry;
		unsigned long flags;

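		/*
		 * Same temporary P3 mapping sequence as clear_user_page()
		 * above, with the copy done through the aliased address.
		 */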
		entry = pfn_pte(phys_addr >> PAGE_SHIFT, PAGE_KERNEL);
		mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
		set_pte(pte, entry);
		local_irq_save(flags);
		flush_tlb_one(get_asid(), p3_addr);
		local_irq_restore(flags);
		update_mmu_cache(NULL, p3_addr, entry);
		__copy_user_page((void *)p3_addr, from, to);
		pte_clear(&init_mm, p3_addr, pte);
		mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
	}
}

/*
 * For SH-4, we have our own implementation for ptep_get_and_clear
 */
inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte_clear(mm, addr, ptep);
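	/*
	 * If the page is no longer part of a writably shared mapping, drop
	 * PG_mapped so the next mapping of the page goes through the cache
	 * handling path again. (Intent inferred from how PG_mapped is set
	 * in the functions above.)
	 */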
	if (!pte_not_present(pte)) {
		unsigned long pfn = pte_pfn(pte);
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);
			struct address_space *mapping = page_mapping(page);
			if (!mapping || !mapping_writably_mapped(mapping))
				__clear_bit(PG_mapped, &page->flags);
		}
	}
	return pte;
}