/* Flush dcache and invalidate icache when the dcache is in writeback mode
 *
 * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
#include "cache-smp.h"

/**
 * flush_icache_page - Flush a page from the dcache and invalidate the icache
 * @vma: The VMA the page is part of.
 * @page: The page to be flushed.
 *
 * Write a page back from the dcache and invalidate the icache so that we can
 * run code from it that we've just written into it.
 */
void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	unsigned long start = page_to_phys(page);
	unsigned long flags;

	flags = smp_lock_cache();

	mn10300_local_dcache_flush_page(start);
	mn10300_local_icache_inv_page(start);

	smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start, start + PAGE_SIZE);
	smp_unlock_cache(flags);
}
EXPORT_SYMBOL(flush_icache_page);
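
/* Illustrative note (a sketch, not part of the original file): the core VM
 * calls this hook when installing a page that may be executed, along the
 * lines of the usual fault-path pattern (the names here are illustrative
 * locals, not defined in this file):
 *
 *	flush_icache_page(vma, page);
 *	set_pte_at(mm, addr, ptep, mk_pte(page, vma->vm_page_prot));
 *
 * so that instructions just copied into the page are fetchable before the
 * mapping goes live.
 */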

/**
 * flush_icache_page_range - Flush dcache and invalidate icache for part of a
 *			     single page
 * @start: The starting virtual address of the page part.
 * @end: The ending virtual address of the page part.
 *
 * Flush the dcache and invalidate the icache for part of a single page, as
 * determined by the virtual addresses given. The page must be in the paged
 * area.
 */
static void flush_icache_page_range(unsigned long start, unsigned long end)
{
	unsigned long addr, size, off;
	struct page *page;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ppte, pte;

	/* work out how much of the page to flush */
	off = start & ~PAGE_MASK;
	size = end - start;
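	/* for example (hypothetical values), start = 0x20001234 and
	 * end = 0x20001300 give off = 0x234 and size = 0xcc */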

	/* get the physical address the page is mapped to from the page
	 * tables */
	pgd = pgd_offset(current->mm, start);
	if (!pgd || !pgd_val(*pgd))
		return;

	pud = pud_offset(pgd, start);
	if (!pud || !pud_val(*pud))
		return;

	pmd = pmd_offset(pud, start);
	if (!pmd || !pmd_val(*pmd))
		return;

	ppte = pte_offset_map(pmd, start);
	if (!ppte)
		return;
	pte = *ppte;
	pte_unmap(ppte);

	if (pte_none(pte))
		return;

	page = pte_page(pte);
	if (!page)
		return;

	addr = page_to_phys(page);

	/* flush the dcache and invalidate the icache coverage on that
	 * region */
	mn10300_local_dcache_flush_range2(addr + off, size);
	mn10300_local_icache_inv_range2(addr + off, size);
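	/* the local operations above handle this CPU; tell the other CPUs
	 * to do the same over the affected range */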
	smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start, end);
}

/**
 * flush_icache_range - Globally flush dcache and invalidate icache for region
 * @start: The starting virtual address of the region.
 * @end: The ending virtual address of the region.
 *
 * This is used by the kernel to globally flush some code it has just written
 * from the dcache back to RAM and then to globally invalidate the icache over
 * that region so that the code can be run on all CPUs in the system.
 */
void flush_icache_range(unsigned long start, unsigned long end)
{
	unsigned long start_page, end_page;
	unsigned long flags;

	flags = smp_lock_cache();

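	/* summary of the kernel address map assumed by the checks below:
	 * addresses under 0x80000000 are translated through the page tables,
	 * 0x80000000-0x9fffffff is directly mapped and cached, and
	 * 0xa0000000 upwards bypasses the cache entirely */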
	if (end > 0x80000000UL) {
		/* addresses above 0xa0000000 do not go through the cache */
		if (end > 0xa0000000UL) {
			end = 0xa0000000UL;
			if (start >= end)
				goto done;
		}

		/* kernel addresses between 0x80000000 and 0x9fffffff do not
		 * require page tables, so we just map such addresses
		 * directly */
		start_page = (start >= 0x80000000UL) ? start : 0x80000000UL;
		mn10300_local_dcache_flush_range(start_page, end);
		mn10300_local_icache_inv_range(start_page, end);
		smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start_page, end);
		if (start_page == start)
			goto done;
		end = start_page;
	}

	start_page = start & PAGE_MASK;
	end_page = (end - 1) & PAGE_MASK;

	if (start_page == end_page) {
		/* the first and last bytes are on the same page */
		flush_icache_page_range(start, end);
	} else if (start_page + PAGE_SIZE == end_page) {
		/* split over two virtually contiguous pages */
		flush_icache_page_range(start, end_page);
		flush_icache_page_range(end_page, end);
	} else {
		/* more than 2 pages; just flush the entire cache */
		mn10300_dcache_flush();
		mn10300_icache_inv();
		smp_cache_call(SMP_IDCACHE_INV_FLUSH, 0, 0);
	}

done:
	smp_unlock_cache(flags);
}
EXPORT_SYMBOL(flush_icache_range);
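
/* Illustrative usage (a sketch, not part of the original file): a caller
 * that has just written instructions into kernel memory, e.g. a module
 * loader or code patcher, would follow the store with:
 *
 *	memcpy(addr, new_insns, len);	// 'addr' and 'new_insns' are
 *					// hypothetical
 *	flush_icache_range((unsigned long)addr, (unsigned long)addr + len);
 *
 * The dcache write-back pushes the new bytes to RAM and the icache
 * invalidation stops any CPU from executing stale instructions.
 */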