/*
 * Copyright (C) 2014 Davidlohr Bueso.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmacache.h>

/*
 * This task may be accessing a foreign mm via (for example)
 * get_user_pages()->find_vma(). The vmacache is task-local and this
 * task's vmacache pertains to a different mm (ie, its own). There is
 * nothing we can do here.
 *
 * Also handle the case where a kernel thread has adopted this mm via use_mm().
 * That kernel thread's vmacache is not applicable to this mm.
 */
static inline bool vmacache_valid_mm(struct mm_struct *mm)
{
	return current->mm == mm && !(current->flags & PF_KTHREAD);
}

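/*
 * Cache the new vma in the slot selected by hashing @addr, but only
 * if current's cache actually pertains to this mm.
 */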
void vmacache_update(unsigned long addr, struct vm_area_struct *newvma)
{
	if (vmacache_valid_mm(newvma->vm_mm))
		current->vmacache[VMACACHE_HASH(addr)] = newvma;
}

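/*
 * The cache is only usable when it is in sync with the mm: writers bump
 * mm->vmacache_seqnum when they invalidate, so a sequence number
 * mismatch means the cached vmas may be stale and must be flushed.
 */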
static bool vmacache_valid(struct mm_struct *mm)
{
	struct task_struct *curr;

	if (!vmacache_valid_mm(mm))
		return false;

	curr = current;
	if (mm->vmacache_seqnum != curr->vmacache_seqnum) {
		/*
		 * First attempt will always be invalid, initialize
		 * the new cache for this task here.
		 */
		curr->vmacache_seqnum = mm->vmacache_seqnum;
		vmacache_flush(curr);
		return false;
	}
	return true;
}

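/*
 * Look up the vma covering @addr in the per-task cache. This is the
 * fast path used by find_vma() before falling back to walking the
 * mm's vma tree; a miss simply returns NULL.
 */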
struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr)
{
	int i;

	count_vm_vmacache_event(VMACACHE_FIND_CALLS);

	if (!vmacache_valid(mm))
		return NULL;

	for (i = 0; i < VMACACHE_SIZE; i++) {
		struct vm_area_struct *vma = current->vmacache[i];

		if (!vma)
			continue;
		if (WARN_ON_ONCE(vma->vm_mm != mm))
			break;
		if (vma->vm_start <= addr && vma->vm_end > addr) {
			count_vm_vmacache_event(VMACACHE_FIND_HITS);
			return vma;
		}
	}

	return NULL;
}

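/*
 * The nommu case: lookups here are by exact [start, end) bounds rather
 * than by a contained address.
 */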
#ifndef CONFIG_MMU
struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	int i;

	count_vm_vmacache_event(VMACACHE_FIND_CALLS);

	if (!vmacache_valid(mm))
		return NULL;

	for (i = 0; i < VMACACHE_SIZE; i++) {
		struct vm_area_struct *vma = current->vmacache[i];

		if (vma && vma->vm_start == start && vma->vm_end == end) {
			count_vm_vmacache_event(VMACACHE_FIND_HITS);
			return vma;
		}
	}

	return NULL;
}
#endif