/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_TLBFLUSH_H
#define _ASM_TILE_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/cacheflush.h>
#include <asm/page.h>
#include <hv/hypervisor.h>

/*
 * Rather than associating each mm with its own ASID, we just use
 * ASIDs to allow us to lazily flush the TLB when we switch mms.
 * This way we only have to do an actual TLB flush on mm switch
 * every time we wrap ASIDs, not every single time we switch.
 *
 * FIXME: We might improve performance by keeping ASIDs around
 * properly, though since the hypervisor direct-maps VAs to TSB
 * entries, we're likely to have lost at least the executable page
 * mappings by the time we switch back to the original mm.
 */
DECLARE_PER_CPU(int, current_asid);

/* The hypervisor tells us what ASIDs are available to us. */
extern int min_asid, max_asid;
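
/*
 * A minimal sketch (illustrative only) of how the lazy scheme above
 * plays out at context-switch time; pick_asid() is a hypothetical
 * helper name here, and the real logic lives in the context-switch
 * path, not in this header:
 *
 *	static inline int pick_asid(void)
 *	{
 *		int asid = __get_cpu_var(current_asid) + 1;
 *		if (asid > max_asid) {
 *			asid = min_asid;
 *			local_flush_tlb();	-- only on wrap-around
 *		}
 *		__get_cpu_var(current_asid) = asid;
 *		return asid;
 *	}
 */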

/*
 * Pass this as the vma pointer to flush a non-executable mapping
 * when no vma is available.
 */
#define FLUSH_NONEXEC ((struct vm_area_struct *)-1UL)

/* Flush a single user page on this cpu. */
static inline void local_flush_tlb_page(struct vm_area_struct *vma,
					unsigned long addr,
					unsigned long page_size)
{
	int rc = hv_flush_page(addr, page_size);
	if (rc < 0)
		panic("hv_flush_page(%#lx,%#lx) failed: %d",
		      addr, page_size, rc);
	/* With no vma, conservatively assume the page may be executable. */
	if (!vma || (vma != FLUSH_NONEXEC && (vma->vm_flags & VM_EXEC)))
		__flush_icache();
}
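
/*
 * For example, a caller that has no vma but knows the mapping is not
 * executable ("addr" here stands for whatever user address is being
 * flushed) can skip the relatively expensive icache flush:
 *
 *	local_flush_tlb_page(FLUSH_NONEXEC, addr, PAGE_SIZE);
 */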

/* Flush a range of user pages on this cpu. */
static inline void local_flush_tlb_pages(struct vm_area_struct *vma,
					 unsigned long addr,
					 unsigned long page_size,
					 unsigned long len)
{
	int rc = hv_flush_pages(addr, page_size, len);
	if (rc < 0)
		panic("hv_flush_pages(%#lx,%#lx,%#lx) failed: %d",
		      addr, page_size, len, rc);
	if (!vma || (vma != FLUSH_NONEXEC && (vma->vm_flags & VM_EXEC)))
		__flush_icache();
}

/* Flush all user pages on this cpu. */
static inline void local_flush_tlb(void)
{
	int rc = hv_flush_all(1); /* preserve global mappings */
	if (rc < 0)
		panic("hv_flush_all(1) failed: %d", rc);
	__flush_icache();
}

/*
 * Global pages have to be flushed a bit differently: we walk each VA
 * range the hypervisor reports and flush it at both page sizes.  This
 * is not a real performance problem because it does not happen often.
 */
static inline void local_flush_tlb_all(void)
{
	int i;
	for (i = 0; ; ++i) {
		HV_VirtAddrRange r = hv_inquire_virtual(i);
		if (r.size == 0)
			break;
		/* Flush both small-page and huge-page TLB entries. */
		local_flush_tlb_pages(NULL, r.start, PAGE_SIZE, r.size);
		local_flush_tlb_pages(NULL, r.start, HPAGE_SIZE, r.size);
	}
}

/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct's TLB entries
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, mm, va) flushes TLBs on other cpus
 *
 * Here (as in vm_area_struct), "end" means the first byte after
 * our end address.
 */

extern void flush_tlb_all(void);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
extern void flush_tlb_current_task(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void flush_tlb_page_mm(struct vm_area_struct *,
			      struct mm_struct *, unsigned long);
extern void flush_tlb_range(struct vm_area_struct *,
			    unsigned long start, unsigned long end);

#define flush_tlb() flush_tlb_current_task()
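
/*
 * Typical usage sketch (illustrative; "vma", "start", "end" and "addr"
 * stand for whatever the caller has in hand): after changing the
 * protections on a range of user pages, call
 *
 *	flush_tlb_range(vma, start, end);
 *
 * after updating a single user PTE, call
 *
 *	flush_tlb_page(vma, addr);
 *
 * and for kernel mappings use flush_tlb_kernel_range(start, end).
 */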

#endif /* _ASM_TILE_TLBFLUSH_H */