// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014 Davidlohr Bueso.
 */
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/mm.h>
#include <linux/vmacache.h>

/*
 * Flush vma caches for threads that share a given mm.
 *
 * The operation is safe because the caller holds the mmap_sem
 * exclusively and other threads accessing the vma cache will
 * have mmap_sem held at least for read, so no extra locking
 * is required to maintain the vma cache.
 */
void vmacache_flush_all(struct mm_struct *mm)
{
	struct task_struct *g, *p;

	count_vm_vmacache_event(VMACACHE_FULL_FLUSHES);

	/*
	 * Single threaded tasks need not iterate the entire
	 * list of processes. We can also avoid flushing, since
	 * the mm's seqnum was already increased and we don't
	 * have to worry about other threads' seqnum. Current's
	 * flush will occur upon the next lookup.
	 */
	if (atomic_read(&mm->mm_users) == 1)
		return;

	rcu_read_lock();
	for_each_process_thread(g, p) {
		/*
		 * Only flush the vmacache pointers as the
		 * mm seqnum is already set and curr's will
		 * be set upon invalidation when the next
		 * lookup is done.
		 */
		if (mm == p->mm)
			vmacache_flush(p);
	}
	rcu_read_unlock();
}

/*
 * This task may be accessing a foreign mm via (for example)
 * get_user_pages()->find_vma(). The vmacache is task-local and this
 * task's vmacache pertains to a different mm (ie, its own). There is
 * nothing we can do here.
 *
 * Also handle the case where a kernel thread has adopted this mm via use_mm().
 * That kernel thread's vmacache is not applicable to this mm.
 */
static inline bool vmacache_valid_mm(struct mm_struct *mm)
{
	return current->mm == mm && !(current->flags & PF_KTHREAD);
}

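/*
 * Cache the newly looked-up vma in the slot selected by hashing the
 * address, but only when this task's cache actually pertains to the
 * vma's mm (see vmacache_valid_mm() above).
 */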
void vmacache_update(unsigned long addr, struct vm_area_struct *newvma)
{
	if (vmacache_valid_mm(newvma->vm_mm))
		current->vmacache.vmas[VMACACHE_HASH(addr)] = newvma;
}

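/*
 * Check whether this task's cached vma pointers can be trusted for
 * @mm. On a sequence number mismatch the stale entries are flushed,
 * the task's seqnum is resynchronized and false is returned so the
 * caller falls back to a regular lookup.
 */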
static bool vmacache_valid(struct mm_struct *mm)
{
	struct task_struct *curr;

	if (!vmacache_valid_mm(mm))
		return false;

	curr = current;
	if (mm->vmacache_seqnum != curr->vmacache.seqnum) {
		/*
		 * First attempt will always be invalid, initialize
		 * the new cache for this task here.
		 */
		curr->vmacache.seqnum = mm->vmacache_seqnum;
		vmacache_flush(curr);
		return false;
	}
	return true;
}

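/*
 * Find the cached vma that contains @addr, scanning all VMACACHE_SIZE
 * slots. Returns NULL on a miss or when the cache is not valid for
 * @mm; calls and hits are accounted via the VMACACHE_FIND_* events.
 */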
struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr)
{
	int i;

	count_vm_vmacache_event(VMACACHE_FIND_CALLS);

	if (!vmacache_valid(mm))
		return NULL;

	for (i = 0; i < VMACACHE_SIZE; i++) {
		struct vm_area_struct *vma = current->vmacache.vmas[i];

		if (!vma)
			continue;
		if (WARN_ON_ONCE(vma->vm_mm != mm))
			break;
		if (vma->vm_start <= addr && vma->vm_end > addr) {
			count_vm_vmacache_event(VMACACHE_FIND_HITS);
			return vma;
		}
	}

	return NULL;
}

#ifndef CONFIG_MMU
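/*
 * Exact-match variant used on nommu kernels: a cached vma is only
 * returned when its start and end match the requested range exactly.
 */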
struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	int i;

	count_vm_vmacache_event(VMACACHE_FIND_CALLS);

	if (!vmacache_valid(mm))
		return NULL;

	for (i = 0; i < VMACACHE_SIZE; i++) {
		struct vm_area_struct *vma = current->vmacache.vmas[i];

		if (vma && vma->vm_start == start && vma->vm_end == end) {
			count_vm_vmacache_event(VMACACHE_FIND_HITS);
			return vma;
		}
	}

	return NULL;
}
#endif