// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014 Davidlohr Bueso.
 */
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <asm/pgtable.h>

/*
 * Hash based on the pmd of addr if configured with MMU, which provides a good
 * hit rate for workloads with spatial locality.  Otherwise, use pages.
 */
#ifdef CONFIG_MMU
#define VMACACHE_SHIFT	PMD_SHIFT
#else
#define VMACACHE_SHIFT	PAGE_SHIFT
#endif
#define VMACACHE_HASH(addr) ((addr >> VMACACHE_SHIFT) & VMACACHE_MASK)
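
/*
 * Worked example of the hash above (an illustrative sketch; the
 * concrete numbers assume x86-64 defaults of PMD_SHIFT == 21, i.e.
 * 2MB regions, and VMACACHE_SIZE == 4, so VMACACHE_MASK == 3):
 *
 *	VMACACHE_HASH(0x00400000) == (0x00400000 >> 21) & 3 == 2
 *	VMACACHE_HASH(0x005fffff) == 2	(same 2MB region, same slot)
 *	VMACACHE_HASH(0x00600000) == 3	(next region, next slot)
 *	VMACACHE_HASH(0x00800000) == 0	(wraps around the mask)
 *
 * Addresses with spatial locality thus land in the same or adjacent
 * slots instead of all colliding in slot 0.
 */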

/*
 * Flush vma caches for threads that share a given mm.
 *
 * The operation is safe because the caller holds the mmap_sem
 * exclusively and other threads accessing the vma cache will
 * have mmap_sem held at least for read, so no extra locking
 * is required to maintain the vma cache.
 */
void vmacache_flush_all(struct mm_struct *mm)
{
	struct task_struct *g, *p;

	count_vm_vmacache_event(VMACACHE_FULL_FLUSHES);

	/*
	 * Single-threaded tasks need not iterate the entire
	 * list of processes.  The flush can be skipped as well,
	 * since the mm's seqnum was already bumped and we need
	 * not worry about other threads' seqnum.  Current's
	 * flush will occur upon the next lookup.
	 */
	if (atomic_read(&mm->mm_users) == 1)
		return;

	rcu_read_lock();
	for_each_process_thread(g, p) {
		/*
		 * Only flush the vmacache pointers as the
		 * mm seqnum is already set and curr's will
		 * be set upon invalidation when the next
		 * lookup is done.
		 */
		if (mm == p->mm)
			vmacache_flush(p);
	}
	rcu_read_unlock();
}
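
/*
 * Caller-side note (a sketch of the contract, defined in
 * include/linux/vmacache.h rather than here): vmacache_invalidate()
 * bumps mm->vmacache_seqnum on events such as munmap, and falls back
 * to vmacache_flush_all() when the seqnum wraps to zero, so a stale
 * entry can never alias a reused sequence number.
 */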

/*
 * This task may be accessing a foreign mm via (for example)
 * get_user_pages()->find_vma().  The vmacache is task-local and this
 * task's vmacache pertains to a different mm (i.e., its own).  There is
 * nothing we can do here.
 *
 * Also handle the case where a kernel thread has adopted this mm via use_mm().
 * That kernel thread's vmacache is not applicable to this mm.
 */
static inline bool vmacache_valid_mm(struct mm_struct *mm)
{
	return current->mm == mm && !(current->flags & PF_KTHREAD);
}

void vmacache_update(unsigned long addr, struct vm_area_struct *newvma)
{
	if (vmacache_valid_mm(newvma->vm_mm))
		current->vmacache.vmas[VMACACHE_HASH(addr)] = newvma;
}
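
/*
 * Typical refill pattern (an illustrative sketch mirroring how
 * find_vma() in mm/mmap.c uses this API; not code from this file):
 *
 *	vma = vmacache_find(mm, addr);		// fast path: per-task cache
 *	if (!vma) {
 *		vma = <look up addr in mm's vma tree>;	// slow path
 *		if (vma)
 *			vmacache_update(addr, vma);	// cache for next time
 *	}
 */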

static bool vmacache_valid(struct mm_struct *mm)
{
	struct task_struct *curr;

	if (!vmacache_valid_mm(mm))
		return false;

	curr = current;
	if (mm->vmacache_seqnum != curr->vmacache.seqnum) {
		/*
		 * The first attempt will always be invalid, so
		 * initialize the new cache for this task here.
		 */
		curr->vmacache.seqnum = mm->vmacache_seqnum;
		vmacache_flush(curr);
		return false;
	}
	return true;
}
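
/*
 * Example of the seqnum handshake above (a sketch; T is any thread
 * sharing the mm):
 *
 *	mm->vmacache_seqnum == 5, T->vmacache.seqnum == 5: cache usable
 *	an unmap bumps mm->vmacache_seqnum to 6
 *	T's next lookup sees 6 != 5, flushes T's slots, adopts 6 and
 *	misses; later lookups hit again once vmacache_update() has
 *	refilled the slots.
 */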

struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr)
{
	int idx = VMACACHE_HASH(addr);
	int i;

	count_vm_vmacache_event(VMACACHE_FIND_CALLS);

	if (!vmacache_valid(mm))
		return NULL;

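	/*
	 * Start at the slot addr hashes to, which is the most likely
	 * hit; on a miss, sweep the remaining slots in ring order so
	 * every cached entry still gets checked.
	 */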
	for (i = 0; i < VMACACHE_SIZE; i++) {
		struct vm_area_struct *vma = current->vmacache.vmas[idx];

		if (vma) {
#ifdef CONFIG_DEBUG_VM_VMACACHE
			if (WARN_ON_ONCE(vma->vm_mm != mm))
				break;
#endif
			if (vma->vm_start <= addr && vma->vm_end > addr) {
				count_vm_vmacache_event(VMACACHE_FIND_HITS);
				return vma;
			}
		}
		if (++idx == VMACACHE_SIZE)
			idx = 0;
	}

	return NULL;
}

#ifndef CONFIG_MMU
struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	int idx = VMACACHE_HASH(start);
	int i;

	count_vm_vmacache_event(VMACACHE_FIND_CALLS);

	if (!vmacache_valid(mm))
		return NULL;

	for (i = 0; i < VMACACHE_SIZE; i++) {
		struct vm_area_struct *vma = current->vmacache.vmas[idx];

		if (vma && vma->vm_start == start && vma->vm_end == end) {
			count_vm_vmacache_event(VMACACHE_FIND_HITS);
			return vma;
		}
		if (++idx == VMACACHE_SIZE)
			idx = 0;
	}

	return NULL;
}
#endif