// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014 Davidlohr Bueso.
 */
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <asm/pgtable.h>

/*
 * Hash based on the pmd of addr if configured with an MMU, which
 * provides a good hit rate for workloads with spatial locality.
 * Otherwise, use pages.
 */
#ifdef CONFIG_MMU
#define VMACACHE_SHIFT	PMD_SHIFT
#else
#define VMACACHE_SHIFT	PAGE_SHIFT
#endif
#define VMACACHE_HASH(addr) ((addr >> VMACACHE_SHIFT) & VMACACHE_MASK)
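
/*
 * Worked example (a sketch assuming common x86-64 values, where
 * PMD_SHIFT is 21 and VMACACHE_SIZE is 4, so VMACACHE_MASK is 3):
 * addresses 0x7f0000200000 and 0x7f00003fffff lie in the same 2MB
 * pmd region, so both hash to ((addr >> 21) & 3) == 1 and share a
 * cache slot, while 0x7f0000400000 hashes to the next slot.
 */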

/*
 * Flush vma caches for threads that share a given mm.
 *
 * The operation is safe because the caller holds the mmap_sem
 * exclusively and other threads accessing the vma cache will
 * have mmap_sem held at least for read, so no extra locking
 * is required to maintain the vma cache.
 */
void vmacache_flush_all(struct mm_struct *mm)
{
	struct task_struct *g, *p;

	count_vm_vmacache_event(VMACACHE_FULL_FLUSHES);

	/*
	 * Single threaded tasks need not iterate the entire
	 * list of processes. We can avoid the flushing as well
	 * since the mm's seqnum was already increased and there
	 * are no other threads' seqnums to worry about; current's
	 * flush will occur upon the next lookup.
	 */
	if (atomic_read(&mm->mm_users) == 1)
		return;

	rcu_read_lock();
	for_each_process_thread(g, p) {
		/*
		 * Only flush the vmacache pointers as the
		 * mm seqnum is already set and curr's will
		 * be set upon invalidation when the next
		 * lookup is done.
		 */
		if (mm == p->mm)
			vmacache_flush(p);
	}
	rcu_read_unlock();
}
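
/*
 * For reference, a minimal sketch of the invalidation side (the real
 * helper lives in include/linux/vmacache.h; exact details may differ
 * by kernel version): callers such as munmap() bump the mm's sequence
 * number under mmap_sem held for write, roughly
 *
 *	static inline void vmacache_invalidate(struct mm_struct *mm)
 *	{
 *		mm->vmacache_seqnum++;
 *
 *		if (unlikely(mm->vmacache_seqnum == 0))
 *			vmacache_flush_all(mm);
 *	}
 *
 * so the full flush above runs only on the rare seqnum wraparound.
 */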

/*
 * This task may be accessing a foreign mm via (for example)
 * get_user_pages()->find_vma(). The vmacache is task-local and this
 * task's vmacache pertains to a different mm (i.e., its own). There is
 * nothing we can do here.
 *
 * Also handle the case where a kernel thread has adopted this mm via use_mm().
 * That kernel thread's vmacache is not applicable to this mm.
 */
static inline bool vmacache_valid_mm(struct mm_struct *mm)
{
	return current->mm == mm && !(current->flags & PF_KTHREAD);
}

void vmacache_update(unsigned long addr, struct vm_area_struct *newvma)
{
	if (vmacache_valid_mm(newvma->vm_mm))
		current->vmacache.vmas[VMACACHE_HASH(addr)] = newvma;
}

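/*
 * Usage note: the lookup side (e.g. find_vma() in mm/mmap.c) calls
 * vmacache_update() after resolving a miss via the mm's rbtree, so a
 * slot always holds the most recently found vma for its hash bucket.
 */
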
static bool vmacache_valid(struct mm_struct *mm)
{
	struct task_struct *curr;

	if (!vmacache_valid_mm(mm))
		return false;

	curr = current;
	if (mm->vmacache_seqnum != curr->vmacache.seqnum) {
		/*
		 * First attempt will always be invalid, initialize
		 * the new cache for this task here.
		 */
		curr->vmacache.seqnum = mm->vmacache_seqnum;
		vmacache_flush(curr);
		return false;
	}
	return true;
}

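/*
 * Timeline sketch (hypothetical seqnum values): both mm->vmacache_seqnum
 * and curr->vmacache.seqnum are 7, so lookups trust the cache.  A
 * munmap() bumps mm->vmacache_seqnum to 8; the next lookup sees the
 * mismatch, resyncs the task's seqnum to 8, wipes its slots via
 * vmacache_flush() and treats the cache as invalid for that lookup.
 */
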
struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr)
{
	int idx = VMACACHE_HASH(addr);
	int i;

	count_vm_vmacache_event(VMACACHE_FIND_CALLS);

	if (!vmacache_valid(mm))
		return NULL;

	for (i = 0; i < VMACACHE_SIZE; i++) {
		struct vm_area_struct *vma = current->vmacache.vmas[idx];

		if (vma) {
#ifdef CONFIG_DEBUG_VM_VMACACHE
			if (WARN_ON_ONCE(vma->vm_mm != mm))
				break;
#endif
			if (vma->vm_start <= addr && vma->vm_end > addr) {
				count_vm_vmacache_event(VMACACHE_FIND_HITS);
				return vma;
			}
		}
		if (++idx == VMACACHE_SIZE)
			idx = 0;
	}

	return NULL;
}

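/*
 * Example walk (assuming VMACACHE_SIZE == 4): for an addr that hashes
 * to idx 2, the loop above probes slots 2, 3, 0, 1 in that order, so
 * the scan starts at the slot most likely to hit while still checking
 * every entry before declaring a miss.
 */
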
#ifndef CONFIG_MMU
struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	int idx = VMACACHE_HASH(start);
	int i;

	count_vm_vmacache_event(VMACACHE_FIND_CALLS);

	if (!vmacache_valid(mm))
		return NULL;

	for (i = 0; i < VMACACHE_SIZE; i++) {
		struct vm_area_struct *vma = current->vmacache.vmas[idx];

		if (vma && vma->vm_start == start && vma->vm_end == end) {
			count_vm_vmacache_event(VMACACHE_FIND_HITS);
			return vma;
		}
		if (++idx == VMACACHE_SIZE)
			idx = 0;
	}

	return NULL;
}
#endif
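
/*
 * Usage note: this exact-bounds variant is only needed on nommu
 * kernels (e.g. by find_vma_exact() in mm/nommu.c, named here as an
 * assumption about the caller), where regions are shrunk and freed
 * by their precise [start, end) ranges rather than by a containing
 * address.
 */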