blob: a5b3aa8d281f869ae21a5edd9137e0ee25ef4262 [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001/* SPDX-License-Identifier: GPL-2.0 */
Davidlohr Bueso615d6e82014-04-07 15:37:25 -07002#ifndef __LINUX_VMACACHE_H
3#define __LINUX_VMACACHE_H
4
5#include <linux/sched.h>
6#include <linux/mm.h>
7
/*
 * Hash based on the page number. Provides a good hit rate for
 * workloads with good locality and those with random accesses as well.
 *
 * The argument is fully parenthesized so callers may pass a compound
 * expression (e.g. VMACACHE_HASH(base + off)) without the '>>'
 * operator binding to only part of it.
 */
#define VMACACHE_HASH(addr) (((addr) >> PAGE_SHIFT) & VMACACHE_MASK)
13
14static inline void vmacache_flush(struct task_struct *tsk)
15{
Ingo Molnar314ff782017-02-03 11:03:31 +010016 memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas));
Davidlohr Bueso615d6e82014-04-07 15:37:25 -070017}
18
19extern void vmacache_flush_all(struct mm_struct *mm);
20extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma);
21extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
22 unsigned long addr);
23
24#ifndef CONFIG_MMU
25extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
26 unsigned long start,
27 unsigned long end);
28#endif
29
30static inline void vmacache_invalidate(struct mm_struct *mm)
31{
32 mm->vmacache_seqnum++;
33
34 /* deal with overflows */
35 if (unlikely(mm->vmacache_seqnum == 0))
36 vmacache_flush_all(mm);
37}
38
39#endif /* __LINUX_VMACACHE_H */