#ifndef _PARISC_TLBFLUSH_H
#define _PARISC_TLBFLUSH_H

/* TLB flushing routines.... */

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/mmu_context.h>

/* This is for the serialisation of PxTLB broadcasts. At least on the
 * N class systems, only one PxTLB inter-processor broadcast can be
 * active at any one time on the Merced bus. This TLB purge
 * synchronisation is fairly lightweight and harmless, so we activate
 * it on all systems, not just the N class.
 *
 * It is also used to ensure PTE updates are atomic and consistent
 * with the TLB.
 */
extern spinlock_t pa_tlb_lock;

#define purge_tlb_start(flags)	spin_lock_irqsave(&pa_tlb_lock, flags)
#define purge_tlb_end(flags)	spin_unlock_irqrestore(&pa_tlb_lock, flags)
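
/* Illustrative sketch, not part of this header's API: callers bracket
 * a TLB purge with the macros above, exactly as flush_tlb_page()
 * does below:
 *
 *	unsigned long flags;
 *
 *	purge_tlb_start(flags);		take pa_tlb_lock, mask irqs
 *	mtsp(sid, 1);			point %sr1 at the space id
 *	pdtlb(addr);			purge the data TLB entry
 *	purge_tlb_end(flags);		unlock, restore irqs
 */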

extern void flush_tlb_all(void);
extern void flush_tlb_all_local(void *);
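
/* Presumably flush_tlb_all_local() takes a void * so it can be handed
 * straight to on_each_cpu() as an IPI callback; the argument is unused.
 */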

#define smp_flush_tlb_all()	flush_tlb_all()

int __flush_tlb_range(unsigned long sid,
	unsigned long start, unsigned long end);

#define flush_tlb_range(vma, start, end) \
	__flush_tlb_range((vma)->vm_mm->context, start, end)

#define flush_tlb_kernel_range(start, end) \
	__flush_tlb_range(0, start, end)
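
/* Usage sketch (hypothetical caller, for illustration): after updating
 * kernel page tables for [addr, addr + size), a caller would issue
 *
 *	flush_tlb_kernel_range(addr, addr + size);
 *
 * Space id 0 selects the kernel's space, so no mm or vma is needed.
 */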

/*
 * flush_tlb_mm()
 *
 * The code to switch to a new context is NOT valid for processes
 * which play with the space ids. Thus, we have to preserve the
 * space id and just flush the entire TLB. However, the compilers,
 * dynamic linker, etc., do not manipulate space ids, so there
 * could be a significant performance benefit in switching contexts
 * rather than flushing the whole TLB.
 */

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	BUG_ON(mm == &init_mm); /* Should never happen */

#if 1 || defined(CONFIG_SMP)
	/* Except for very small threads, flushing the whole TLB is
	 * faster than using __flush_tlb_range. The pdtlb and pitlb
	 * instructions are very slow because of the TLB broadcast.
	 * It might be faster to do local range flushes on all CPUs
	 * on PA 2.0 systems.
	 */
	flush_tlb_all();
#else
	/* FIXME: currently broken, causing space ids and protection ids
	 * to go out of sync, resulting in faults on userspace accesses.
	 * This approach needs further investigation since running many
	 * small applications (e.g., the GCC testsuite) is faster on HP-UX.
	 */
	if (mm) {
		if (mm->context != 0)
			free_sid(mm->context);
		mm->context = alloc_sid();
		if (mm == current->active_mm)
			load_context(mm->context);
	}
#endif
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
	unsigned long addr)
{
	unsigned long flags, sid;

	sid = vma->vm_mm->context;
	purge_tlb_start(flags);
	mtsp(sid, 1);
	pdtlb(addr);
	if (unlikely(split_tlb))
		pitlb(addr);
	purge_tlb_end(flags);
}

#endif /* _PARISC_TLBFLUSH_H */