/*
 * arch/sh/mm/pg-mmu.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2009  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

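/*
 * Write data into a user page, keeping the D-cache coherent on parts
 * with cache aliases. If the page is mapped and its cache lines are
 * clean, write through an alias-safe coherent mapping; otherwise write
 * through the kernel mapping and set PG_dcache_dirty so the writeback
 * is deferred until __update_cache().
 */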
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    !test_bit(PG_dcache_dirty, &page->flags)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			set_bit(PG_dcache_dirty, &page->flags);
	}

	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

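/*
 * Read data from a user page. The same aliasing considerations as
 * copy_to_user_page() apply: read through a coherent mapping when the
 * page is mapped and clean, otherwise read the kernel mapping directly
 * and leave the page flagged for a deferred flush.
 */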
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    !test_bit(PG_dcache_dirty, &page->flags)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			set_bit(PG_dcache_dirty, &page->flags);
	}
}

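/*
 * Copy a user page (e.g. for copy-on-write). The source is read
 * through a coherent mapping when it is mapped and clean, so that any
 * lines cached at the user-space colour are observed; the destination
 * is written back if its kernel mapping aliases the target address.
 */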
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to, KM_USER1);

	if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
	    !test_bit(PG_dcache_dirty, &from->flags)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent();
	} else {
		vfrom = kmap_atomic(from, KM_USER0);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom, KM_USER0);
	}

	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		__flush_wback_region(vto, PAGE_SIZE);

	kunmap_atomic(vto, KM_USER1);
	/* Make sure the copy is visible to other CPUs before the page is used */
	smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);

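/*
 * Zero a user page through its kernel mapping, writing the result back
 * to memory when the kernel and user addresses land in aliasing cache
 * slots.
 */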
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	clear_page(kaddr);

	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
		__flush_wback_region(kaddr, PAGE_SIZE);

	kunmap_atomic(kaddr, KM_USER0);
}
EXPORT_SYMBOL(clear_user_highpage);

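/*
 * Called when a PTE is established for a page: perform any deferred
 * D-cache writeback for a page flagged PG_dcache_dirty, but only when
 * its kernel and user addresses actually alias in the cache.
 */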
void __update_cache(struct vm_area_struct *vma,
		    unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn = pte_pfn(pte);

	if (!boot_cpu_data.dcache.n_aliases)
		return;

	page = pfn_to_page(pfn);
	if (pfn_valid(pfn) && page_mapping(page)) {
		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			unsigned long addr = (unsigned long)page_address(page);

			if (pages_do_alias(addr, address & PAGE_MASK))
				__flush_wback_region((void *)addr, PAGE_SIZE);
		}
	}
}

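/*
 * Write back an anonymous page before the kernel accesses it directly,
 * flushing through a coherent mapping at the user-space colour when the
 * page is mapped and clean, and through the kernel mapping otherwise.
 */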
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
		    !test_bit(PG_dcache_dirty, &page->flags)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			__flush_wback_region((void *)kaddr, PAGE_SIZE);
			kunmap_coherent();
		} else
			__flush_wback_region((void *)addr, PAGE_SIZE);
	}
}

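/*
 * Boot-time cache setup: hand off to the SH-4 specific initialization
 * on the cache families that need it. sh4_cache_init() is declared
 * __weak so that configurations which do not build it still link.
 */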
void __init cpu_cache_init(void)
{
	if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
		extern void __weak sh4_cache_init(void);

		sh4_cache_init();
	}
}