/*
 * arch/sh/mm/tlb-sh4.c
 *
 * SH-4 specific TLB operations
 *
 * Copyright (C) 1999  Niibe Yutaka
 * Copyright (C) 2002 - 2007 Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
Paul Mundt | 39e688a | 2007-03-05 19:46:47 +0900 | [diff] [blame] | 11 | #include <linux/kernel.h> |
Paul Mundt | 39e688a | 2007-03-05 19:46:47 +0900 | [diff] [blame] | 12 | #include <linux/mm.h> |
Paul Mundt | d04a0f7 | 2007-09-21 11:55:03 +0900 | [diff] [blame] | 13 | #include <linux/io.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14 | #include <asm/mmu_context.h> |
Paul Mundt | 39e688a | 2007-03-05 19:46:47 +0900 | [diff] [blame] | 15 | #include <asm/cacheflush.h> |
| 16 | |
/*
 * Load the hardware TLB entry for @address in @vma with the translation
 * described by @pte.  Programs the SH-4 MMU registers (PTEH, then PTEA,
 * then PTEL) and issues an "ldtlb" to latch them into the TLB.  Runs with
 * local interrupts disabled so the three register writes and the load are
 * not torn by a competing TLB refill.
 */
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long flags, pteval, vpn;

	/*
	 * Handle debugger faulting in for debugee.
	 */
	if (vma && current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	/* Set PTEH register */
	vpn = (address & MMU_VPN_MASK) | get_asid();
	__raw_writel(vpn, MMU_PTEH);

	pteval = pte.pte_low;

	/* Set PTEA register */
#ifdef CONFIG_X2TLB
	/*
	 * For the extended mode TLB this is trivial, only the ESZ and
	 * EPR bits need to be written out to PTEA, with the remainder of
	 * the protection bits (with the exception of the compat-mode SZ
	 * and PR bits, which are cleared) being written out in PTEL.
	 */
	__raw_writel(pte.pte_high, MMU_PTEA);
#else
	if (cpu_data->flags & CPU_HAS_PTEA) {
		/* The last 3 bits and the first one of pteval contains
		 * the PTEA timing control and space attribute bits
		 */
		__raw_writel(copy_ptea_attributes(pteval), MMU_PTEA);
	}
#endif

	/* Set PTEL register */
	pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
#ifdef CONFIG_CACHE_WRITETHROUGH
	/* Force write-through caching on every mapping in this config. */
	pteval |= _PAGE_WT;
#endif
	/* conveniently, we want all the software flags to be 0 anyway */
	__raw_writel(pteval, MMU_PTEL);

	/* Load the TLB: latches PTEH/PTEL(/PTEA) into the entry selected
	 * by the MMU.  "memory" clobber keeps the compiler from reordering
	 * the register writes above past the load. */
	asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
	local_irq_restore(flags);
}
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 65 | |
Paul Mundt | 2dc2f8e | 2010-01-21 16:05:25 +0900 | [diff] [blame] | 66 | void local_flush_tlb_one(unsigned long asid, unsigned long page) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 67 | { |
| 68 | unsigned long addr, data; |
| 69 | |
| 70 | /* |
| 71 | * NOTE: PTEH.ASID should be set to this MM |
| 72 | * _AND_ we need to write ASID to the array. |
| 73 | * |
| 74 | * It would be simple if we didn't need to set PTEH.ASID... |
| 75 | */ |
| 76 | addr = MMU_UTLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT; |
| 77 | data = page | asid; /* VALID bit is off */ |
Stuart Menefy | cbaa118 | 2007-11-30 17:06:36 +0900 | [diff] [blame] | 78 | jump_to_uncached(); |
Paul Mundt | 9d56dd3 | 2010-01-26 12:58:40 +0900 | [diff] [blame] | 79 | __raw_writel(data, addr); |
Stuart Menefy | cbaa118 | 2007-11-30 17:06:36 +0900 | [diff] [blame] | 80 | back_to_cached(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 81 | } |
Paul Mundt | be97d75 | 2010-04-02 16:13:27 +0900 | [diff] [blame] | 82 | |
| 83 | void local_flush_tlb_all(void) |
| 84 | { |
| 85 | unsigned long flags, status; |
| 86 | int i; |
| 87 | |
| 88 | /* |
| 89 | * Flush all the TLB. |
| 90 | */ |
| 91 | local_irq_save(flags); |
| 92 | jump_to_uncached(); |
| 93 | |
| 94 | status = __raw_readl(MMUCR); |
| 95 | status = ((status & MMUCR_URB) >> MMUCR_URB_SHIFT); |
| 96 | |
| 97 | if (status == 0) |
| 98 | status = MMUCR_URB_NENTRIES; |
| 99 | |
| 100 | for (i = 0; i < status; i++) |
| 101 | __raw_writel(0x0, MMU_UTLB_ADDRESS_ARRAY | (i << 8)); |
| 102 | |
| 103 | for (i = 0; i < 4; i++) |
| 104 | __raw_writel(0x0, MMU_ITLB_ADDRESS_ARRAY | (i << 8)); |
| 105 | |
| 106 | back_to_cached(); |
| 107 | ctrl_barrier(); |
| 108 | local_irq_restore(flags); |
| 109 | } |