#ifndef _ALPHA_CACHEFLUSH_H
#define _ALPHA_CACHEFLUSH_H

#include <linux/config.h>
#include <linux/mm.h>

/* Caches aren't brain-dead on the Alpha. */
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
#define flush_dcache_page(page)			do { } while (0)
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
#define flush_cache_vmap(start, end)		do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)

/* Note that the following two definitions are _highly_ dependent
   on the contexts in which they are used in the kernel.  I personally
   think it is criminal how loosely defined these macros are.  */

/* We need to flush the kernel's icache after loading modules.  The
   only other use of this macro is in load_aout_interp which is not
   used on Alpha.

   Note that this definition should *not* be used for userspace
   icache flushing.  While functional, it is _way_ overkill.  The
   icache is tagged with ASNs and it suffices to allocate a new ASN
   for the process.  */
#ifndef CONFIG_SMP
#define flush_icache_range(start, end)		imb()
#else
#define flush_icache_range(start, end)		smp_imb()
extern void smp_imb(void);
#endif
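
/* flush_icache_range() above is what a caller uses after copying new
   instructions into kernel memory, e.g. once the module loader has
   finished relocating a module's text.  A minimal sketch of such a
   caller follows; the helper name and its arguments are hypothetical,
   shown only to make the expected calling convention concrete.  */
#if 0
static void example_install_insns(void *dst, const void *insns, size_t len)
{
	memcpy(dst, insns, len);
	/* Make the newly written instructions visible to instruction
	   fetch before anything jumps to them.  */
	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
}
#endif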

/* We need to flush the userspace icache after setting breakpoints in
   ptrace.

   Instead of indiscriminately using imb, take advantage of the fact
   that icache entries are tagged with the ASN and load a new mm context.  */
/* ??? Ought to use this in arch/alpha/kernel/signal.c too.  */

#ifndef CONFIG_SMP
extern void __load_new_mm_context(struct mm_struct *);
static inline void
flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			unsigned long addr, int len)
{
	if (vma->vm_flags & VM_EXEC) {
		struct mm_struct *mm = vma->vm_mm;
		if (current->active_mm == mm)
			/* The mm is live on this CPU: loading a new mm
			   context allocates a fresh ASN, so stale icache
			   entries tagged with the old ASN become
			   unreachable.  */
			__load_new_mm_context(mm);
		else
			/* Not active here: clearing the saved context forces
			   a new ASN to be allocated the next time this mm
			   runs on this CPU.  */
			mm->context[smp_processor_id()] = 0;
	}
}
#else
extern void flush_icache_user_range(struct vm_area_struct *vma,
				    struct page *page,
				    unsigned long addr, int len);
#endif

/* This is used only in do_no_page and do_swap_page.  */
#define flush_icache_page(vma, page) \
	flush_icache_user_range((vma), (page), 0, 0)

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { memcpy(dst, src, len); \
     flush_icache_user_range(vma, page, vaddr, len); \
} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)
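
/* copy_to_user_page() is the hook used by generic code that writes into
   another process's address space (for instance the ptrace write path
   mentioned above), so that the target mm's icache is flushed via the
   ASN trick rather than a full imb.  A minimal sketch of such a caller
   follows; the local names (maddr, buf, bytes, offset) are
   hypothetical.  */
#if 0
	void *maddr = kmap(page);
	/* Write the new bytes (e.g. a breakpoint instruction) and flush
	   the user icache for the traced mm in one step.  */
	copy_to_user_page(vma, page, addr, maddr + offset, buf, bytes);
	kunmap(page);
#endif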

#endif /* _ALPHA_CACHEFLUSH_H */