/*
 * include/asm-sh/rwsem.h: R/W semaphores for SH using the stuff
 * in lib/rwsem.c.
 */

#ifndef _ASM_SH_RWSEM_H
#define _ASM_SH_RWSEM_H

#ifdef __KERNEL__
#include <linux/list.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
#include <asm/system.h>

/*
 * the semaphore definition
 */
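/*
 * The ->count word encodes both the number of active lockers and the
 * presence of waiters: the low 16 bits (RWSEM_ACTIVE_MASK) count the
 * tasks currently holding the lock, while each task queued in
 * lib/rwsem.c contributes RWSEM_WAITING_BIAS to the upper half.  A
 * single uncontended reader thus leaves count == 0x00000001, and an
 * uncontended writer leaves count == RWSEM_ACTIVE_WRITE_BIAS
 * (-0x0000ffff).
 */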
struct rw_semaphore {
	long		count;
#define RWSEM_UNLOCKED_VALUE		0x00000000
#define RWSEM_ACTIVE_BIAS		0x00000001
#define RWSEM_ACTIVE_MASK		0x0000ffff
#define RWSEM_WAITING_BIAS		(-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
	spinlock_t		wait_lock;
	struct list_head	wait_list;
#if RWSEM_DEBUG
	int			debug;
#endif
};

/*
 * initialisation
 */
#if RWSEM_DEBUG
#define __RWSEM_DEBUG_INIT	, 0
#else
#define __RWSEM_DEBUG_INIT	/* */
#endif

#define __RWSEM_INITIALIZER(name) \
	{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
	  LIST_HEAD_INIT((name).wait_list) \
	  __RWSEM_DEBUG_INIT }

#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
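
/*
 * Example (sketch, hypothetical names): a statically initialised
 * semaphore.  DECLARE_RWSEM() expands to __RWSEM_INITIALIZER(), so no
 * run-time init_rwsem() call is needed.  Callers normally go through
 * the generic wrappers in <linux/rwsem.h>, which end up in the fast
 * paths defined further down in this file:
 *
 *	static DECLARE_RWSEM(example_sem);
 *
 *	down_read(&example_sem);	// shared access
 *	...
 *	up_read(&example_sem);
 *
 *	down_write(&example_sem);	// exclusive access
 *	...
 *	up_write(&example_sem);
 */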

extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
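
/*
 * The helpers declared above are the slow paths, implemented in
 * lib/rwsem.c; they are only entered when a fast path below detects
 * contention on ->count.
 */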

static inline void init_rwsem(struct rw_semaphore *sem)
{
	sem->count = RWSEM_UNLOCKED_VALUE;
	spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
#if RWSEM_DEBUG
	sem->debug = 0;
#endif
}
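
/*
 * Example (sketch, hypothetical structure name): init_rwsem() is the
 * run-time counterpart of DECLARE_RWSEM() for semaphores embedded in
 * dynamically allocated objects:
 *
 *	struct example_object {
 *		struct rw_semaphore	sem;
 *		...
 *	};
 *
 *	init_rwsem(&obj->sem);
 */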

/*
 * lock for reading
 */
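/*
 * Fast path: atomically increment ->count.  A positive result means no
 * writer is active or waiting, so the caller now holds the lock for
 * reading; otherwise fall back to rwsem_down_read_failed() to queue up.
 */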
static inline void __down_read(struct rw_semaphore *sem)
{
	if (atomic_inc_return((atomic_t *)(&sem->count)) > 0)
		smp_wmb();
	else
		rwsem_down_read_failed(sem);
}

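/*
 * Try to take the lock for reading without sleeping: loop on cmpxchg(),
 * adding RWSEM_ACTIVE_READ_BIAS as long as no writer is active or
 * waiting (count >= 0).  Returns 1 on success, 0 if the semaphore is
 * write-held or contended.
 */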
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	int tmp;

	while ((tmp = sem->count) >= 0) {
		if (tmp == cmpxchg(&sem->count, tmp,
				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
			smp_wmb();
			return 1;
		}
	}
	return 0;
}

/*
 * lock for writing
 */
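/*
 * Fast path: atomically add RWSEM_ACTIVE_WRITE_BIAS.  If the result is
 * exactly RWSEM_ACTIVE_WRITE_BIAS the semaphore was previously unlocked
 * and the caller now owns it exclusively; any other value means readers,
 * a writer or waiters were present, so take the
 * rwsem_down_write_failed() slow path.
 */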
static inline void __down_write(struct rw_semaphore *sem)
{
	int tmp;

	tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
				(atomic_t *)(&sem->count));
	if (tmp == RWSEM_ACTIVE_WRITE_BIAS)
		smp_wmb();
	else
		rwsem_down_write_failed(sem);
}

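/*
 * Try to take the lock for writing without sleeping: a single cmpxchg()
 * from RWSEM_UNLOCKED_VALUE to RWSEM_ACTIVE_WRITE_BIAS.  Returns 1 if
 * the semaphore was unlocked and is now write-held, 0 otherwise.
 */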
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	int tmp;

	tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
		      RWSEM_ACTIVE_WRITE_BIAS);
	smp_wmb();
	return tmp == RWSEM_UNLOCKED_VALUE;
}

/*
 * unlock after reading
 */
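/*
 * Drop a read lock: decrement ->count.  If the result is negative with
 * no active lockers left (the RWSEM_ACTIVE_MASK bits are clear), a
 * writer is queued and must be woken via rwsem_wake().
 */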
static inline void __up_read(struct rw_semaphore *sem)
{
	int tmp;

	smp_wmb();
	tmp = atomic_dec_return((atomic_t *)(&sem->count));
	if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)
		rwsem_wake(sem);
}

/*
 * unlock after writing
 */
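/*
 * Drop the write lock: subtract RWSEM_ACTIVE_WRITE_BIAS.  A negative
 * result means tasks are still queued on the semaphore, so hand over
 * to rwsem_wake().
 */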
static inline void __up_write(struct rw_semaphore *sem)
{
	smp_wmb();
	if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
			      (atomic_t *)(&sem->count)) < 0)
		rwsem_wake(sem);
}

/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
{
	atomic_add(delta, (atomic_t *)(&sem->count));
}

/*
 * downgrade write lock to read lock
 */
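/*
 * Convert a held write lock into a read lock: adding -RWSEM_WAITING_BIAS
 * (i.e. +0x00010000) turns RWSEM_ACTIVE_WRITE_BIAS into a single
 * RWSEM_ACTIVE_READ_BIAS.  A result that is still negative means other
 * tasks are queued, and any waiting readers can now be woken through
 * rwsem_downgrade_wake().
 */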
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	int tmp;

	smp_wmb();
	tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
	if (tmp < 0)
		rwsem_downgrade_wake(sem);
}

/*
 * implement exchange and add functionality
 */
static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
{
	smp_mb();
	return atomic_add_return(delta, (atomic_t *)(&sem->count));
}

#endif /* __KERNEL__ */
#endif /* _ASM_SH_RWSEM_H */