Greg Kroah-Hartman | b244131 | 2017-11-01 15:07:57 +0100 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
Davidlohr Bueso | 615d6e8 | 2014-04-07 15:37:25 -0700 | [diff] [blame] | 2 | #ifndef __LINUX_VMACACHE_H |
| 3 | #define __LINUX_VMACACHE_H |
| 4 | |
| 5 | #include <linux/sched.h> |
| 6 | #include <linux/mm.h> |
| 7 | |
| 8 | /* |
| 9 | * Hash based on the page number. Provides a good hit rate for |
| 10 | * workloads with good locality and those with random accesses as well. |
| 11 | */ |
/*
 * Hash an address down to a cache slot index. The parameter is fully
 * parenthesized so the macro expands correctly even when invoked with a
 * compound expression of lower precedence than '>>' (e.g. bitwise-or).
 */
#define VMACACHE_HASH(addr) (((addr) >> PAGE_SHIFT) & VMACACHE_MASK)
| 13 | |
| 14 | static inline void vmacache_flush(struct task_struct *tsk) |
| 15 | { |
Ingo Molnar | 314ff78 | 2017-02-03 11:03:31 +0100 | [diff] [blame] | 16 | memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas)); |
Davidlohr Bueso | 615d6e8 | 2014-04-07 15:37:25 -0700 | [diff] [blame] | 17 | } |
| 18 | |
/*
 * The following are implemented out of line (in mm/vmacache.c, not visible
 * in this header); comments describe the apparent contract — confirm
 * against the definitions.
 */

/* Presumably clears the cache of every task sharing @mm — TODO confirm. */
extern void vmacache_flush_all(struct mm_struct *mm);

/* Record @newvma as the cached VMA for @addr (current task's cache). */
extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma);

/* Look up a cached VMA for @addr in @mm; presumably NULL on a miss. */
extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
                                            unsigned long addr);

#ifndef CONFIG_MMU
/* nommu variant: lookup requiring an exact [start, end) range match. */
extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
                                                  unsigned long start,
                                                  unsigned long end);
#endif
| 29 | |
| 30 | static inline void vmacache_invalidate(struct mm_struct *mm) |
| 31 | { |
| 32 | mm->vmacache_seqnum++; |
| 33 | |
| 34 | /* deal with overflows */ |
| 35 | if (unlikely(mm->vmacache_seqnum == 0)) |
| 36 | vmacache_flush_all(mm); |
| 37 | } |
| 38 | |
| 39 | #endif /* __LINUX_VMACACHE_H */ |