blob: a2c6bfd85fb7a38ab6d002373c24afbf8648399a [file] [log] [blame]
Stephen Rothwell19702822005-11-04 16:58:59 +11001#ifndef _ASM_POWERPC_TLBFLUSH_H
2#define _ASM_POWERPC_TLBFLUSH_H
Benjamin Herrenschmidte701d262007-10-30 09:46:06 +11003
Stephen Rothwell19702822005-11-04 16:58:59 +11004/*
5 * TLB flushing:
6 *
7 * - flush_tlb_mm(mm) flushes the specified mm context TLB's
8 * - flush_tlb_page(vma, vmaddr) flushes one page
9 * - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
10 * - flush_tlb_range(vma, start, end) flushes a range of pages
11 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
Stephen Rothwell19702822005-11-04 16:58:59 +110012 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; either version
16 * 2 of the License, or (at your option) any later version.
17 */
18#ifdef __KERNEL__
19
David Gibson62102302007-04-24 13:09:12 +100020#if defined(CONFIG_4xx) || defined(CONFIG_8xx) || defined(CONFIG_FSL_BOOKE)
21/*
22 * TLB flushing for software loaded TLB chips
23 *
24 * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
25 * flush_tlb_kernel_range are best implemented as tlbia vs
26 * specific tlbie's
27 */
28
Benjamin Herrenschmidte701d262007-10-30 09:46:06 +110029#include <linux/mm.h>
30
31extern void _tlbie(unsigned long address, unsigned int pid);
Kumar Gala0ba34182008-07-15 16:12:25 -050032extern void _tlbil_all(void);
33extern void _tlbil_pid(unsigned int pid);
34extern void _tlbil_va(unsigned long address, unsigned int pid);
David Gibson62102302007-04-24 13:09:12 +100035
36#if defined(CONFIG_40x) || defined(CONFIG_8xx)
37#define _tlbia() asm volatile ("tlbia; sync" : : : "memory")
38#else /* CONFIG_44x || CONFIG_FSL_BOOKE */
39extern void _tlbia(void);
40#endif
41
42static inline void flush_tlb_mm(struct mm_struct *mm)
43{
Kumar Gala0ba34182008-07-15 16:12:25 -050044 _tlbil_pid(mm->context.id);
David Gibson62102302007-04-24 13:09:12 +100045}
46
47static inline void flush_tlb_page(struct vm_area_struct *vma,
48 unsigned long vmaddr)
49{
Kumar Gala0ba34182008-07-15 16:12:25 -050050 _tlbil_va(vmaddr, vma ? vma->vm_mm->context.id : 0);
David Gibson62102302007-04-24 13:09:12 +100051}
52
/*
 * On software-loaded TLB parts every flush is a "nohash" flush, so
 * this is simply an alias for flush_tlb_page().
 */
static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
					 unsigned long vmaddr)
{
	flush_tlb_page(vma, vmaddr);
}
58
59static inline void flush_tlb_range(struct vm_area_struct *vma,
60 unsigned long start, unsigned long end)
61{
Kumar Gala0ba34182008-07-15 16:12:25 -050062 _tlbil_pid(vma->vm_mm->context.id);
David Gibson62102302007-04-24 13:09:12 +100063}
64
/*
 * Flush a range of kernel pages.  Kernel translations live under
 * PID 0, so the whole of that context is invalidated.
 */
static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	_tlbil_pid(0);
}
70
71#elif defined(CONFIG_PPC32)
72/*
 * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx
74 */
75extern void _tlbie(unsigned long address);
76extern void _tlbia(void);
77
78extern void flush_tlb_mm(struct mm_struct *mm);
79extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
80extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
81extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
82 unsigned long end);
83extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
84
85#else
86/*
 * TLB flushing for 64-bit hash-MMU CPUs
88 */
Stephen Rothwell19702822005-11-04 16:58:59 +110089
90#include <linux/percpu.h>
91#include <asm/page.h>
92
93#define PPC64_TLB_BATCH_NR 192
94
/*
 * Per-cpu batch of pending hash-table invalidations, drained by
 * __flush_tlb_pending() (see below).
 */
struct ppc64_tlb_batch {
	int active;			/* batching enabled (lazy MMU mode) */
	unsigned long index;		/* number of entries queued below */
	struct mm_struct *mm;		/* address space the entries belong to */
	real_pte_t pte[PPC64_TLB_BATCH_NR];	/* queued PTE values */
	unsigned long vaddr[PPC64_TLB_BATCH_NR]; /* matching virtual addresses */
	unsigned int psize;		/* page size of the queued entries */
	int ssize;			/* segment size — presumably 256M vs 1T; confirm against hash MMU code */
};
104DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
105
106extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
107
Benjamin Herrenschmidta741e672007-04-10 17:09:37 +1000108extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
109 pte_t *ptep, unsigned long pte, int huge);
110
111#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
112
113static inline void arch_enter_lazy_mmu_mode(void)
Stephen Rothwell19702822005-11-04 16:58:59 +1100114{
Benjamin Herrenschmidta741e672007-04-10 17:09:37 +1000115 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
116
117 batch->active = 1;
118}
119
120static inline void arch_leave_lazy_mmu_mode(void)
121{
122 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
Stephen Rothwell19702822005-11-04 16:58:59 +1100123
124 if (batch->index)
125 __flush_tlb_pending(batch);
Benjamin Herrenschmidta741e672007-04-10 17:09:37 +1000126 batch->active = 0;
Stephen Rothwell19702822005-11-04 16:58:59 +1100127}
128
Benjamin Herrenschmidta741e672007-04-10 17:09:37 +1000129#define arch_flush_lazy_mmu_mode() do {} while (0)
130
131
Benjamin Herrenschmidt3c726f82005-11-07 11:06:55 +1100132extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize,
Paul Mackerras1189be62007-10-11 20:37:10 +1000133 int ssize, int local);
Benjamin Herrenschmidt3c726f82005-11-07 11:06:55 +1100134extern void flush_hash_range(unsigned long number, int local);
Stephen Rothwell19702822005-11-04 16:58:59 +1100135
Stephen Rothwell19702822005-11-04 16:58:59 +1100136
/*
 * No-op on the 64-bit hash MMU: invalidation is handled through the
 * batching machinery declared above (hpte_need_flush() /
 * __flush_tlb_pending()) when hash-table entries are updated.
 */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
}
Stephen Rothwell19702822005-11-04 16:58:59 +1100140
/* No-op: hash MMU invalidates are driven via hpte_need_flush() above. */
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
}
Stephen Rothwell19702822005-11-04 16:58:59 +1100145
/* No-op: there are no software-loaded TLB entries on the hash MMU. */
static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
					 unsigned long vmaddr)
{
}
Stephen Rothwell19702822005-11-04 16:58:59 +1100150
/* No-op: hash MMU range invalidates happen through the flush batch. */
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
}
155
/*
 * No-op: kernel hash-table mappings are flushed explicitly, e.g. via
 * __flush_hash_table_range() below.
 */
static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
}
160
Benjamin Herrenschmidt3d5134e2007-06-04 15:15:36 +1000161/* Private function for use by PCI IO mapping code */
162extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
163 unsigned long end);
164
165
Stephen Rothwell19702822005-11-04 16:58:59 +1100166#endif
167
Stephen Rothwell19702822005-11-04 16:58:59 +1100168#endif /*__KERNEL__ */
169#endif /* _ASM_POWERPC_TLBFLUSH_H */