#ifndef __LINUX_VMACACHE_H
#define __LINUX_VMACACHE_H

#include <linux/sched.h>
#include <linux/mm.h>

/*
 * Hash based on the page number. Provides a good hit rate both for
 * workloads with good locality and for those with random accesses.
 */
#define VMACACHE_HASH(addr) (((addr) >> PAGE_SHIFT) & VMACACHE_MASK)
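
/*
 * Worked example (assuming VMACACHE_BITS == 2 in <linux/sched.h>, so
 * VMACACHE_SIZE == 4 and VMACACHE_MASK == 0x3, with 4K pages):
 *
 *	VMACACHE_HASH(0x7f0000001000) == (0x7f0000001 & 0x3) == 1
 *
 * so every address inside the same page selects the same cache slot,
 * and consecutive pages rotate through the four slots.
 */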

/* Forget all of @tsk's cached VMAs by zeroing its per-thread slots. */
static inline void vmacache_flush(struct task_struct *tsk)
{
	memset(tsk->vmacache, 0, sizeof(tsk->vmacache));
}

/* Flush the vmacache of every thread sharing @mm; used on seqnum wraparound. */
extern void vmacache_flush_all(struct mm_struct *mm);
/* Cache @newvma in the current task's slot selected by VMACACHE_HASH(addr). */
extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma);
/* Return the cached VMA containing @addr, or NULL on a cache miss. */
extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
					    unsigned long addr);

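/*
 * Sketch of the intended find/update pairing, modelled on find_vma()
 * (the tree-walk placeholder below is illustrative, not a real API):
 *
 *	vma = vmacache_find(mm, addr);
 *	if (!vma) {
 *		vma = ...walk mm's VMA tree...;
 *		if (vma)
 *			vmacache_update(addr, vma);
 *	}
 */
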
#ifndef CONFIG_MMU
/* nommu: look up a cached VMA whose bounds match [start, end) exactly. */
extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end);
#endif
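
/*
 * Sketch (nommu only; the fallback below is illustrative): regions are
 * matched by exact bounds rather than by containment:
 *
 *	vma = vmacache_find_exact(mm, start, end);
 *	if (!vma)
 *		vma = ...scan mm's region list for an exact match...;
 */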

static inline void vmacache_invalidate(struct mm_struct *mm)
{
	mm->vmacache_seqnum++;

	/*
	 * Bumping the mm's seqnum lazily invalidates every thread's
	 * cache: a task whose private seqnum no longer matches will
	 * flush on its next lookup. On wraparound to zero, flush all
	 * threads explicitly, since a stale per-task seqnum of zero
	 * would otherwise compare as valid.
	 */
	if (unlikely(mm->vmacache_seqnum == 0))
		vmacache_flush_all(mm);
}
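
/*
 * Hedged usage sketch: any path that changes the mm's VMA layout
 * (munmap, mremap, etc.) should invalidate the cache while holding
 * the write side of mmap_sem, e.g.:
 *
 *	down_write(&mm->mmap_sem);
 *	... unlink the VMA(s) from the tree ...
 *	vmacache_invalidate(mm);
 *	up_write(&mm->mmap_sem);
 */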

#endif /* __LINUX_VMACACHE_H */