#ifndef _X8664_TLBFLUSH_H
#define _X8664_TLBFLUSH_H

#include <linux/mm.h>
#include <asm/processor.h>

static inline unsigned long get_cr3(void)
{
        unsigned long cr3;
        asm volatile("mov %%cr3,%0" : "=r" (cr3));
        return cr3;
}

static inline void set_cr3(unsigned long cr3)
{
        asm volatile("mov %0,%%cr3" :: "r" (cr3) : "memory");
}

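/*
 * Reloading CR3 with its current value invalidates all non-global TLB
 * entries for the current address space; global (PGE) entries survive.
 */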
static inline void __flush_tlb(void)
{
        set_cr3(get_cr3());
}

static inline unsigned long get_cr4(void)
{
        unsigned long cr4;
        asm volatile("mov %%cr4,%0" : "=r" (cr4));
        return cr4;
}

static inline void set_cr4(unsigned long cr4)
{
        asm volatile("mov %0,%%cr4" :: "r" (cr4) : "memory");
}

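/*
 * Toggling CR4.PGE off and back on forces the CPU to drop global TLB
 * entries as well, which a plain CR3 reload would leave in place.
 */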
static inline void __flush_tlb_all(void)
{
        unsigned long cr4 = get_cr4();
        set_cr4(cr4 & ~X86_CR4_PGE);    /* clear PGE */
        set_cr4(cr4);                   /* write old PGE again and flush TLBs */
}

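/* INVLPG invalidates the TLB entry for a single linear address. */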
#define __flush_tlb_one(addr) \
        __asm__ __volatile__("invlpg (%0)" :: "r" (addr) : "memory")


/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct TLBs
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 *
 * x86-64 can only flush individual pages or a full address space (VM).
 * For a range flush we always do the full VM.  It might be worth checking
 * whether a few INVLPGs in a row are a win for small ranges.
 */

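/*
 * A minimal usage sketch of the interface above (hypothetical caller;
 * vma, ptep, pte, addr, start and end are assumed to exist in the
 * caller's context):
 *
 *      set_pte(ptep, pte);                     install the new mapping
 *      flush_tlb_page(vma, addr);              drop the single stale entry
 *
 *      flush_tlb_range(vma, start, end);       batched update; becomes a
 *                                              full VM flush on x86-64
 */
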
#ifndef CONFIG_SMP

#define flush_tlb() __flush_tlb()
#define flush_tlb_all() __flush_tlb_all()
#define local_flush_tlb() __flush_tlb()

static inline void flush_tlb_mm(struct mm_struct *mm)
{
        if (mm == current->active_mm)
                __flush_tlb();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
        unsigned long addr)
{
        if (vma->vm_mm == current->active_mm)
                __flush_tlb_one(addr);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
        unsigned long start, unsigned long end)
{
        if (vma->vm_mm == current->active_mm)
                __flush_tlb();
}

#else

#include <asm/smp.h>

#define local_flush_tlb() \
        __flush_tlb()

extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);

#define flush_tlb() flush_tlb_current_task()

static inline void flush_tlb_range(struct vm_area_struct *vma,
        unsigned long start, unsigned long end)
{
        flush_tlb_mm(vma->vm_mm);
}

#define TLBSTATE_OK     1
#define TLBSTATE_LAZY   2

/* Roughly an IPI every 20MB with 4k pages for freeing page table
   ranges.  Cost is about 42k of memory for each CPU. */
#define ARCH_FREE_PTE_NR 5350
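/*
 * Illustrative arithmetic behind the value above (assuming 4 KiB pages
 * and 8-byte pointers): 5350 * 4 KiB ~= 20.9 MB of freed mappings per
 * IPI, and 5350 * 8 bytes ~= 42 KiB of batch storage per CPU.
 */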

#endif

#define flush_tlb_kernel_range(start, end) flush_tlb_all()

static inline void flush_tlb_pgtables(struct mm_struct *mm,
        unsigned long start, unsigned long end)
{
        /* x86_64 does not keep any page table caches in a software TLB.
           The CPUs do cache page table entries in their hardware TLBs,
           but those are handled by the normal TLB flushing algorithms. */
}

#endif /* _X8664_TLBFLUSH_H */