#ifndef _ASM_POWERPC_TLBFLUSH_H
#define _ASM_POWERPC_TLBFLUSH_H

/*
 * TLB flushing:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_page_nohash(vma, vmaddr) flushes one page on CPUs with a
 *    software-loaded TLB
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *
 *  (An illustrative usage sketch follows below.)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#ifdef __KERNEL__

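/*
 * Illustrative usage sketch only (not part of this interface): the
 * helper names and the "clear the PTE" steps below are placeholders
 * for whatever the generic mm code actually does before asking for a
 * flush.
 *
 *	static void example_unmap_one(struct vm_area_struct *vma,
 *				      unsigned long addr)
 *	{
 *		... clear the PTE for addr in vma->vm_mm ...
 *		flush_tlb_page(vma, addr);
 *	}
 *
 *	static void example_unmap_span(struct vm_area_struct *vma,
 *				       unsigned long start, unsigned long end)
 *	{
 *		... clear the PTEs covering [start, end) ...
 *		flush_tlb_range(vma, start, end);
 *	}
 *
 * flush_tlb_mm() is the whole-address-space variant, and
 * flush_tlb_kernel_range() the equivalent for kernel mappings.
 */
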
#if defined(CONFIG_4xx) || defined(CONFIG_8xx) || defined(CONFIG_FSL_BOOKE)
/*
 * TLB flushing for software loaded TLB chips
 *
 * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
 * flush_tlb_kernel_range are best implemented as tlbia vs
 * specific tlbie's
 */

#include <linux/mm.h>

/*
 * Low-level primitives: _tlbie() invalidates the entry for one
 * (address, pid) pair, _tlbia() invalidates the entire TLB.
 */
extern void _tlbie(unsigned long address, unsigned int pid);

#if defined(CONFIG_40x) || defined(CONFIG_8xx)
#define _tlbia()	asm volatile ("tlbia; sync" : : : "memory")
#else /* CONFIG_44x || CONFIG_FSL_BOOKE */
extern void _tlbia(void);
#endif

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	_tlbia();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
	_tlbie(vmaddr, vma->vm_mm->context.id);
}

static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
					 unsigned long vmaddr)
{
	_tlbie(vmaddr, vma->vm_mm->context.id);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	_tlbia();
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	_tlbia();
}

#elif defined(CONFIG_PPC32)
/*
 * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx
 */
extern void _tlbie(unsigned long address);
extern void _tlbia(void);

extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

#else
/*
 * TLB flushing for 64-bit hash-MMU CPUs
 */

#include <linux/percpu.h>
#include <asm/page.h>

#define PPC64_TLB_BATCH_NR	192

struct ppc64_tlb_batch {
	int			active;
	unsigned long		index;
	struct mm_struct	*mm;
	real_pte_t		pte[PPC64_TLB_BATCH_NR];
	unsigned long		vaddr[PPC64_TLB_BATCH_NR];
	unsigned int		psize;		/* base page size */
	int			ssize;		/* segment size */
};
DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);

extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, unsigned long pte, int huge);

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE

static inline void arch_enter_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

	batch->active = 1;
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

	if (batch->index)
		__flush_tlb_pending(batch);
	batch->active = 0;
}

#define arch_flush_lazy_mmu_mode()	do {} while (0)
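
/*
 * Illustrative sketch of how the batching fits together (the explicit
 * caller shown here is hypothetical; in the kernel the generic mm code
 * and the PTE update paths perform these steps):
 *
 *	arch_enter_lazy_mmu_mode();
 *		- marks the per-cpu ppc64_tlb_batch active
 *	... PTE updates call hpte_need_flush(mm, addr, ptep, pte, 0),
 *	    which queues up to PPC64_TLB_BATCH_NR stale hash entries
 *	    instead of flushing each one immediately ...
 *	arch_leave_lazy_mmu_mode();
 *		- calls __flush_tlb_pending() to invalidate anything
 *		  still queued, then deactivates the batch
 *
 * The point of the batch is to let the queued hash PTEs be invalidated
 * together rather than one at a time.
 */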


extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize,
			    int ssize, int local);
extern void flush_hash_range(unsigned long number, int local);


/*
 * On hash-MMU 64-bit CPUs the hash-table invalidations are driven from
 * the PTE update paths (hpte_need_flush() and the batching above), so
 * the generic flush_tlb_* entry points have nothing left to do.
 */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
}

static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
					 unsigned long vmaddr)
{
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
}

/* Private function for use by PCI IO mapping code */
extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
				     unsigned long end);


#endif

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
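
/*
 * Illustrative call pattern (the caller is hypothetical; in practice
 * the generic page fault path invokes this hook):
 *
 *	set_pte_at(mm, address, ptep, entry);
 *	update_mmu_cache(vma, address, entry);
 *
 * i.e. it runs once the new PTE is visible in the page tables, which
 * is what lets a hash-MMU implementation pre-load the matching HPTE.
 */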
175
Stephen Rothwell19702822005-11-04 16:58:59 +1100176#endif /*__KERNEL__ */
177#endif /* _ASM_POWERPC_TLBFLUSH_H */