/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_MMU_CONTEXT_H
#define _ASM_TILE_MMU_CONTEXT_H

#include <linux/smp.h>
#include <linux/mm_types.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>
#include <asm-generic/mm_hooks.h>

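/*
 * No per-mm hardware state needs to be set up here: each cpu hands
 * out ASIDs lazily in switch_mm() below, so creating a new mm always
 * succeeds.
 */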
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	return 0;
}

/*
 * Note that arch/tile/kernel/head_NN.S and arch/tile/mm/migrate_NN.S
 * also call hv_install_context().
 */
static inline void __install_page_table(pgd_t *pgdir, int asid, pgprot_t prot)
{
	/* FIXME: DIRECTIO should not always be set. */
	int rc = hv_install_context(__pa(pgdir), prot, asid,
				    HV_CTX_DIRECTIO | CTX_PAGE_FLAG);
	if (rc < 0)
		panic("hv_install_context failed: %d", rc);
}

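/*
 * Install a page table on this cpu, passing along the kernel PTE
 * that maps the pgdir page itself, so the hypervisor accesses the
 * page table with the same caching and homing attributes that the
 * kernel uses.
 */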
static inline void install_page_table(pgd_t *pgdir, int asid)
{
	pte_t *ptep = virt_to_kpte((unsigned long)pgdir);
	__install_page_table(pgdir, asid, *ptep);
}

/*
 * "Lazy" TLB mode is entered when we are switching to a kernel task,
 * which borrows the mm of the previous task.  The goal of this
 * optimization is to avoid having to install a new page table.  On
 * early x86 machines (where the concept originated) you couldn't do
 * anything short of a full page table install for invalidation, so
 * handling a remote TLB invalidate required doing a page table
 * re-install.  Someone clearly decided that it was silly to keep
 * doing this while in "lazy" TLB mode, so the optimization involves
 * installing the swapper page table instead, the first time a
 * shootdown occurs, and clearing the cpu out of cpu_vm_mask, so the
 * cpu running the kernel task doesn't need to take any more
 * interrupts.  It's then necessary to explicitly reinstall the old
 * page table when context switching back to the original mm.
 *
 * On Tile, we have to do a page-table install whenever DMA is enabled,
 * so in that case lazy mode doesn't help anyway.  And more generally,
 * we have efficient per-page TLB shootdown, and don't expect to spend
 * that much time in kernel tasks in general, so just leaving the
 * kernel task borrowing the old page table, but handling TLB
 * shootdowns, is a reasonable thing to do.  And importantly, this
 * lets us use the hypervisor's internal APIs for TLB shootdown, which
 * means we don't have to worry about having TLB shootdowns blocked
 * when Linux is disabling interrupts; see the page migration code for
 * an example of where it's important for TLB shootdowns to complete
 * even when interrupts are disabled at the Linux level.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *t)
{
#if CHIP_HAS_TILE_DMA()
	/*
	 * We have to do an "identity" page table switch in order to
	 * clear any pending DMA interrupts.
	 */
	if (current->thread.tile_dma_state.enabled)
		install_page_table(mm->pgd, __this_cpu_read(current_asid));
#endif
}

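/*
 * Switch this cpu to a new mm: pick a fresh ASID (flushing the local
 * TLB if the ASID space wraps), update the cpumasks of both mms,
 * install the new page table, and flush the icache in case a newly
 * mapped page has been repurposed and holds new code.
 */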
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	if (likely(prev != next)) {

		int cpu = smp_processor_id();

		/* Pick new ASID. */
		int asid = __this_cpu_read(current_asid) + 1;
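		/*
		 * On wraparound, flush the whole local TLB: a
		 * recycled ASID may still have stale translations
		 * tagged against it from its previous owner.
		 */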
		if (asid > max_asid) {
			asid = min_asid;
			local_flush_tlb();
		}
		__this_cpu_write(current_asid, asid);

		/* Clear cpu from the old mm, and set it in the new one. */
		cpumask_clear_cpu(cpu, mm_cpumask(prev));
		cpumask_set_cpu(cpu, mm_cpumask(next));

		/* Re-load page tables */
		install_page_table(next->pgd, asid);

		/* See how we should set the red/black cache info */
		check_mm_caching(prev, next);

		/*
		 * Since we're changing to a new mm, we have to flush
		 * the icache in case some physical page now being mapped
		 * has subsequently been repurposed and has new code.
		 */
		__flush_icache();

	}
}

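/*
 * Called at exec time to adopt the new mm; switch_mm() never touches
 * its tsk argument, so passing NULL here is safe.
 */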
static inline void activate_mm(struct mm_struct *prev_mm,
			       struct mm_struct *next_mm)
{
	switch_mm(prev_mm, next_mm, NULL);
}

#define destroy_context(mm) do { } while (0)
#define deactivate_mm(tsk, mm) do { } while (0)

#endif /* _ASM_TILE_MMU_CONTEXT_H */