/*
 * Copyright (C) 2014 Davidlohr Bueso.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmacache.h>

/*
 * Flush vma caches for threads that share a given mm.
 *
 * The operation is safe because the caller holds the mmap_sem
 * exclusively and other threads accessing the vma cache will
 * have mmap_sem held at least for read, so no extra locking
 * is required to maintain the vma cache.
 */
void vmacache_flush_all(struct mm_struct *mm)
{
	struct task_struct *g, *p;

	count_vm_vmacache_event(VMACACHE_FULL_FLUSHES);

	/*
	 * Single-threaded tasks need not iterate the entire
	 * list of processes. The flush can be skipped as well:
	 * the mm's seqnum was already increased, so there are
	 * no other threads' seqnums to worry about. Current's
	 * flush will occur upon the next lookup.
	 */
	if (atomic_read(&mm->mm_users) == 1)
		return;

	rcu_read_lock();
	for_each_process_thread(g, p) {
		/*
		 * Only flush the vmacache pointers as the
		 * mm seqnum is already set and curr's will
		 * be set upon invalidation when the next
		 * lookup is done.
		 */
		if (mm == p->mm)
			vmacache_flush(p);
	}
	rcu_read_unlock();
}

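/*
 * Note: a full flush is only expected when the mm-wide seqnum wraps.
 * vmacache_invalidate() (include/linux/vmacache.h) normally just bumps
 * mm->vmacache_seqnum and falls back to this function on overflow,
 * roughly:
 *
 *	mm->vmacache_seqnum++;
 *	if (unlikely(mm->vmacache_seqnum == 0))
 *		vmacache_flush_all(mm);
 */
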
/*
 * This task may be accessing a foreign mm via (for example)
 * get_user_pages()->find_vma(). The vmacache is task-local and this
 * task's vmacache pertains to a different mm (i.e., its own). There is
 * nothing we can do here.
 *
 * Also handle the case where a kernel thread has adopted this mm via use_mm().
 * That kernel thread's vmacache is not applicable to this mm.
 */
static inline bool vmacache_valid_mm(struct mm_struct *mm)
{
	return current->mm == mm && !(current->flags & PF_KTHREAD);
}

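/*
 * Cache @newvma in the slot selected by hashing @addr, but only if
 * current's cache may be used for this mm at all: kernel threads and
 * tasks touching a foreign mm must not pollute their own cache.
 */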
void vmacache_update(unsigned long addr, struct vm_area_struct *newvma)
{
	if (vmacache_valid_mm(newvma->vm_mm))
		current->vmacache[VMACACHE_HASH(addr)] = newvma;
}

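/*
 * Check whether current's cache entries can be trusted for @mm. On a
 * seqnum mismatch the entries are stale: flush them and sync current's
 * seqnum up so that the *next* lookup can hit.
 */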
static bool vmacache_valid(struct mm_struct *mm)
{
	struct task_struct *curr;

	if (!vmacache_valid_mm(mm))
		return false;

	curr = current;
	if (mm->vmacache_seqnum != curr->vmacache_seqnum) {
		/*
		 * The first attempt will always be invalid;
		 * initialize the new cache for this task here.
		 */
		curr->vmacache_seqnum = mm->vmacache_seqnum;
		vmacache_flush(curr);
		return false;
	}
	return true;
}

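/*
 * Return the cached vma that contains @addr, or NULL on a miss (or when
 * the cache cannot be used), in which case the caller is expected to
 * fall back to a full search of the mm's vmas.
 */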
struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr)
{
	int i;

	if (!vmacache_valid(mm))
		return NULL;

	count_vm_vmacache_event(VMACACHE_FIND_CALLS);

	for (i = 0; i < VMACACHE_SIZE; i++) {
		struct vm_area_struct *vma = current->vmacache[i];

		if (!vma)
			continue;
		if (WARN_ON_ONCE(vma->vm_mm != mm))
			break;
		if (vma->vm_start <= addr && vma->vm_end > addr) {
			count_vm_vmacache_event(VMACACHE_FIND_HITS);
			return vma;
		}
	}

	return NULL;
}

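/*
 * The typical caller pattern, as in find_vma() (mm/mmap.c), is to try
 * the cache first and repopulate it on a miss; a rough sketch:
 *
 *	vma = vmacache_find(mm, addr);
 *	if (likely(vma))
 *		return vma;
 *	vma = <walk mm->mm_rb for addr>;
 *	if (vma)
 *		vmacache_update(addr, vma);
 */
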
#ifndef CONFIG_MMU
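/*
 * On nommu kernels, regions are looked up by their exact [start, end)
 * bounds rather than by a contained address, so the comparison must
 * match the boundaries exactly.
 */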
struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	int i;

	if (!vmacache_valid(mm))
		return NULL;

	count_vm_vmacache_event(VMACACHE_FIND_CALLS);

	for (i = 0; i < VMACACHE_SIZE; i++) {
		struct vm_area_struct *vma = current->vmacache[i];

		if (vma && vma->vm_start == start && vma->vm_end == end) {
			count_vm_vmacache_event(VMACACHE_FIND_HITS);
			return vma;
		}
	}

	return NULL;
}
#endif