#ifndef _PARISC_TLBFLUSH_H
#define _PARISC_TLBFLUSH_H

/* TLB flushing routines.... */

#include <linux/config.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>

extern void flush_tlb_all(void);

/*
 * flush_tlb_mm()
 *
 * XXX This code is NOT valid for HP-UX compatibility processes
 * (although it will probably work 99% of the time). HP-UX
 * processes are free to play with the space IDs and save them
 * over long periods of time, etc., so we have to preserve the
 * space and just flush the entire TLB. We need to check the
 * personality in order to do that, but the personality is not
 * currently being set correctly.
 *
 * Of course, Linux processes could do the same thing, but
 * we don't support that (and the compilers, dynamic linker,
 * etc. do not do that).
 */

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	BUG_ON(mm == &init_mm); /* Should never happen */

#ifdef CONFIG_SMP
	flush_tlb_all();
#else
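	/*
	 * UP: instead of purging individual entries, drop the old space
	 * id and give the mm a fresh one; stale TLB entries tagged with
	 * the old sid can no longer be matched.
	 */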
	if (mm) {
		if (mm->context != 0)
			free_sid(mm->context);
		mm->context = alloc_sid();
		if (mm == current->active_mm)
			load_context(mm->context);
	}
#endif
}

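/*
 * Nothing appears to be needed here on parisc when page tables are
 * torn down, so this is intentionally a no-op.
 */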
static inline void flush_tlb_pgtables(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long addr)
{
	/* For one page, it's not worth testing the split_tlb variable */

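	/*
	 * mb() makes earlier page-table updates visible before the purge;
	 * mtsp() selects this mm's space so the purges below hit the
	 * right address space.  Both the data (pdtlb) and instruction
	 * (pitlb) entries for the page are purged.
	 */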
	mb();
	mtsp(vma->vm_mm->context, 1);
	purge_tlb_start();
	pdtlb(addr);
	pitlb(addr);
	purge_tlb_end();
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	unsigned long npages;

	npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	if (npages >= 512)	/* XXX arbitrary, should be tuned */
		flush_tlb_all();
	else {
		mtsp(vma->vm_mm->context, 1);
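		/*
		 * A split TLB needs both a data and an instruction purge
		 * per page; a unified TLB only needs pdtlb.
		 */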
		if (split_tlb) {
			purge_tlb_start();
			while (npages--) {
				pdtlb(start);
				pitlb(start);
				start += PAGE_SIZE;
			}
			purge_tlb_end();
		} else {
			purge_tlb_start();
			while (npages--) {
				pdtlb(start);
				start += PAGE_SIZE;
			}
			purge_tlb_end();
		}
	}
}

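/* For kernel ranges, simply take the big hammer and flush everything. */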
#define flush_tlb_kernel_range(start, end) flush_tlb_all()

#endif