blob: c9ca3dd46b971e2911dba9b5d569cdeb9f509ec2 [file] [log] [blame]
/*
 * Copyright (C) 2014 Davidlohr Bueso.
 */
4#include <linux/sched.h>
5#include <linux/mm.h>
6#include <linux/vmacache.h>
7
8/*
Davidlohr Bueso615d6e82014-04-07 15:37:25 -07009 * This task may be accessing a foreign mm via (for example)
10 * get_user_pages()->find_vma(). The vmacache is task-local and this
11 * task's vmacache pertains to a different mm (ie, its own). There is
12 * nothing we can do here.
13 *
14 * Also handle the case where a kernel thread has adopted this mm via use_mm().
15 * That kernel thread's vmacache is not applicable to this mm.
16 */
Davidlohr Buesoa2c1aad2015-11-05 18:48:52 -080017static inline bool vmacache_valid_mm(struct mm_struct *mm)
Davidlohr Bueso615d6e82014-04-07 15:37:25 -070018{
19 return current->mm == mm && !(current->flags & PF_KTHREAD);
20}
21
22void vmacache_update(unsigned long addr, struct vm_area_struct *newvma)
23{
24 if (vmacache_valid_mm(newvma->vm_mm))
25 current->vmacache[VMACACHE_HASH(addr)] = newvma;
26}
27
28static bool vmacache_valid(struct mm_struct *mm)
29{
30 struct task_struct *curr;
31
32 if (!vmacache_valid_mm(mm))
33 return false;
34
35 curr = current;
36 if (mm->vmacache_seqnum != curr->vmacache_seqnum) {
37 /*
38 * First attempt will always be invalid, initialize
39 * the new cache for this task here.
40 */
41 curr->vmacache_seqnum = mm->vmacache_seqnum;
42 vmacache_flush(curr);
43 return false;
44 }
45 return true;
46}
47
48struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr)
49{
50 int i;
51
Alexey Dobriyan131ddc52016-10-07 16:58:39 -070052 count_vm_vmacache_event(VMACACHE_FIND_CALLS);
53
Davidlohr Bueso615d6e82014-04-07 15:37:25 -070054 if (!vmacache_valid(mm))
55 return NULL;
56
57 for (i = 0; i < VMACACHE_SIZE; i++) {
58 struct vm_area_struct *vma = current->vmacache[i];
59
Linus Torvalds50f5aa82014-04-28 14:24:09 -070060 if (!vma)
61 continue;
62 if (WARN_ON_ONCE(vma->vm_mm != mm))
63 break;
Davidlohr Bueso4f115142014-06-04 16:06:46 -070064 if (vma->vm_start <= addr && vma->vm_end > addr) {
65 count_vm_vmacache_event(VMACACHE_FIND_HITS);
Davidlohr Bueso615d6e82014-04-07 15:37:25 -070066 return vma;
Davidlohr Bueso4f115142014-06-04 16:06:46 -070067 }
Davidlohr Bueso615d6e82014-04-07 15:37:25 -070068 }
69
70 return NULL;
71}
72
73#ifndef CONFIG_MMU
74struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
75 unsigned long start,
76 unsigned long end)
77{
78 int i;
79
Alexey Dobriyan131ddc52016-10-07 16:58:39 -070080 count_vm_vmacache_event(VMACACHE_FIND_CALLS);
81
Davidlohr Bueso615d6e82014-04-07 15:37:25 -070082 if (!vmacache_valid(mm))
83 return NULL;
84
85 for (i = 0; i < VMACACHE_SIZE; i++) {
86 struct vm_area_struct *vma = current->vmacache[i];
87
Davidlohr Bueso4f115142014-06-04 16:06:46 -070088 if (vma && vma->vm_start == start && vma->vm_end == end) {
89 count_vm_vmacache_event(VMACACHE_FIND_HITS);
Davidlohr Bueso615d6e82014-04-07 15:37:25 -070090 return vma;
Davidlohr Bueso4f115142014-06-04 16:06:46 -070091 }
Davidlohr Bueso615d6e82014-04-07 15:37:25 -070092 }
93
94 return NULL;
95}
96#endif