blob: 1081db987391d24d7385dc7550fa137163284dd2 [file] [log] [blame]
Davidlohr Bueso615d6e82014-04-07 15:37:25 -07001#ifndef __LINUX_VMACACHE_H
2#define __LINUX_VMACACHE_H
3
4#include <linux/sched.h>
5#include <linux/mm.h>
6
7/*
8 * Hash based on the page number. Provides a good hit rate for
9 * workloads with good locality and those with random accesses as well.
10 */
11#define VMACACHE_HASH(addr) ((addr >> PAGE_SHIFT) & VMACACHE_MASK)
12
13static inline void vmacache_flush(struct task_struct *tsk)
14{
Ingo Molnar314ff782017-02-03 11:03:31 +010015 memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas));
Davidlohr Bueso615d6e82014-04-07 15:37:25 -070016}
17
/* Flush the vma caches of every task sharing @mm; called by
 * vmacache_invalidate() below when mm->vmacache_seqnum wraps to 0. */
extern void vmacache_flush_all(struct mm_struct *mm);
/* Record @newvma as the cached mapping for @addr -- presumably in the
 * current task's cache; confirm against the implementation. */
extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma);
/* Look up a cached VMA containing @addr in @mm -- presumably returns
 * NULL on a miss; confirm against the implementation. */
extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
					    unsigned long addr);

#ifndef CONFIG_MMU
/* nommu-only variant -- presumably matches a VMA whose bounds equal
 * [start, end) exactly; confirm against the implementation. */
extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end);
#endif
28
29static inline void vmacache_invalidate(struct mm_struct *mm)
30{
31 mm->vmacache_seqnum++;
32
33 /* deal with overflows */
34 if (unlikely(mm->vmacache_seqnum == 0))
35 vmacache_flush_all(mm);
36}
37
38#endif /* __LINUX_VMACACHE_H */