#ifndef _ASM_METAG_BARRIER_H
#define _ASM_METAG_BARRIER_H

#include <asm/metag_mem.h>

#define nop() asm volatile ("NOP")
#define mb() wmb()
#define rmb() barrier()

#ifdef CONFIG_METAG_META21

/* HTP and above have a system event to fence writes */
static inline void wr_fence(void)
{
	volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_FENCE;
	barrier();
	*flushptr = 0;
	barrier();
}

#else /* CONFIG_METAG_META21 */

/*
 * ATP doesn't have a system event to fence writes, so it is necessary to
 * flush the processor write queues as well as possibly the write combiner
 * (depending on the page being written).
 * To ensure the write queues are flushed, we do 4 writes to a system event
 * register (in this case the write combiner flush register), which will
 * also flush the write combiner.
 */
static inline void wr_fence(void)
{
	volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_COMBINE_FLUSH;
	barrier();
	*flushptr = 0;
	*flushptr = 0;
	*flushptr = 0;
	*flushptr = 0;
	barrier();
}

#endif /* !CONFIG_METAG_META21 */

static inline void wmb(void)
{
	/* flush writes through the write combiner */
	wr_fence();
}
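
/*
 * Illustrative sketch only (desc, buf_dma and doorbell are hypothetical,
 * not from this file): wmb() typically sits between a data write and the
 * later write that tells another agent the data is ready, so the data is
 * flushed through the write combiner first:
 *
 *	desc->addr = buf_dma;
 *	wmb();
 *	writel(1, doorbell);
 */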

#define read_barrier_depends() do { } while (0)

#ifndef CONFIG_SMP
#define fence() do { } while (0)
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#else

#ifdef CONFIG_METAG_SMP_WRITE_REORDERING
/*
 * Write to the atomic memory unlock system event register (command 0). This is
 * needed before a write to shared memory in a critical section, to prevent
 * external reordering of writes before the fence on other threads with writes
 * after the fence on this thread (and to prevent the ensuing cache-memory
 * incoherence). It is therefore ineffective if used after and on the same
 * thread as a write.
 */
static inline void fence(void)
{
	volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_ATOMIC_UNLOCK;
	barrier();
	*flushptr = 0;
	barrier();
}
#define smp_mb() fence()
#define smp_rmb() fence()
#define smp_wmb() barrier()
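
/*
 * Illustrative sketch only (shared_data is hypothetical, not part of this
 * interface): per the comment above, the fence must come before the shared
 * write it is meant to order, e.g.
 *
 *	fence();
 *	shared_data = val;
 *
 * and is ineffective if issued after that write on the same thread.
 */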
#else
#define fence() do { } while (0)
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#endif /* CONFIG_METAG_SMP_WRITE_REORDERING */
#endif /* CONFIG_SMP */
#define smp_read_barrier_depends() do { } while (0)
#define set_mb(var, value) do { var = value; smp_mb(); } while (0)

#define smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
	smp_mb();							\
	ACCESS_ONCE(*p) = (v);						\
} while (0)

#define smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	smp_mb();							\
	___p1;								\
})
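
/*
 * Illustrative pairing sketch (msg and msg_ready are hypothetical): a
 * reader that observes the released store also observes everything the
 * writer did before it:
 *
 *	writer:
 *		msg = 42;
 *		smp_store_release(&msg_ready, 1);
 *
 *	reader:
 *		while (!smp_load_acquire(&msg_ready))
 *			cpu_relax();
 *		BUG_ON(msg != 42);	guaranteed to see the writer's msg
 */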

#define smp_mb__before_atomic()	barrier()
#define smp_mb__after_atomic()	barrier()

#endif /* _ASM_METAG_BARRIER_H */