/*
 * rwsem.h: R/W semaphores implemented using CAS
 *
 * Written by David S. Miller (davem@redhat.com), 2001.
 * Derived from asm-i386/rwsem.h
 */
#ifndef _SPARC64_RWSEM_H
#define _SPARC64_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif

#ifdef __KERNEL__

struct rw_semaphore {
	signed long			count;
#define RWSEM_UNLOCKED_VALUE		0x00000000L
#define RWSEM_ACTIVE_BIAS		0x00000001L
#define RWSEM_ACTIVE_MASK		0xffffffffL
#define RWSEM_WAITING_BIAS		(-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
	spinlock_t			wait_lock;
	struct list_head		wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map		dep_map;
#endif
};
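
/*
 * How the count is encoded (all values follow from the macros above):
 * the low 32 bits (RWSEM_ACTIVE_MASK) count active lockers, and the
 * upper bits go negative whenever a writer or sleeping waiters are
 * involved.  A few worked values:
 *
 *	unlocked:	0x0000000000000000
 *	one reader:	0x0000000000000001	(+RWSEM_ACTIVE_READ_BIAS)
 *	two readers:	0x0000000000000002
 *	writer held:	0xffffffff00000001	(RWSEM_WAITING_BIAS + 1)
 *
 * The fast paths below exploit this: "count is negative" is the cheap
 * test for "a writer and/or waiters exist".
 */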

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif

#define __RWSEM_INITIALIZER(name) \
{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
  LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }

#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)

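/*
 * Minimal usage sketch (hypothetical example_sem): callers never use
 * this header directly; they go through the generic entry points of
 * linux/rwsem.h, which land in the inline fast paths below.
 *
 *	DECLARE_RWSEM(example_sem);
 *
 *	down_read(&example_sem);	-- shared; many readers may hold
 *	...
 *	up_read(&example_sem);
 *
 *	down_write(&example_sem);	-- exclusive; a single writer
 *	...
 *	up_write(&example_sem);
 */
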
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);

extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
			 struct lock_class_key *key);

#define init_rwsem(sem)					\
do {							\
	static struct lock_class_key __key;		\
							\
	__init_rwsem((sem), #sem, &__key);		\
} while (0)

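/*
 * Semaphores embedded in dynamically allocated objects must go through
 * init_rwsem() before first use; the static lock_class_key gives each
 * init site its own lockdep class.  A sketch with a hypothetical
 * struct foo:
 *
 *	struct foo {
 *		struct rw_semaphore sem;
 *	};
 *
 *	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
 *	if (f)
 *		init_rwsem(&f->sem);
 */
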
/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	if (unlikely(atomic64_inc_return((atomic64_t *)(&sem->count)) <= 0L))
		rwsem_down_read_failed(sem);
}
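
/*
 * Why "<= 0" above: a reader adds RWSEM_ACTIVE_READ_BIAS (+1).  While
 * only readers hold the lock the count stays positive (e.g. 2 -> 3 for
 * a third reader).  If a writer holds or waits, the count carries
 * RWSEM_WAITING_BIAS, the increment still yields a non-positive value,
 * and the reader must queue via rwsem_down_read_failed().
 */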

static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	long tmp;

	while ((tmp = sem->count) >= 0L) {
		if (tmp == cmpxchg(&sem->count, tmp,
				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
			return 1;
		}
	}
	return 0;
}

/*
 * lock for writing
 */
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
	long tmp;

	tmp = atomic64_add_return(RWSEM_ACTIVE_WRITE_BIAS,
				  (atomic64_t *)(&sem->count));
	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
		rwsem_down_write_failed(sem);
}
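
/*
 * The write fast path succeeds only when add_return yields exactly
 * RWSEM_ACTIVE_WRITE_BIAS, i.e. the count was RWSEM_UNLOCKED_VALUE (0)
 * beforehand: no readers, no writer, no waiters.  Any other result
 * means contention, and rwsem_down_write_failed() queues us.
 */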

static inline void __down_write(struct rw_semaphore *sem)
{
	__down_write_nested(sem, 0);
}

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	long tmp;

	tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
		      RWSEM_ACTIVE_WRITE_BIAS);
	return tmp == RWSEM_UNLOCKED_VALUE;
}

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic64_dec_return((atomic64_t *)(&sem->count));
	if (unlikely(tmp < -1L && (tmp & RWSEM_ACTIVE_MASK) == 0L))
		rwsem_wake(sem);
}
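
/*
 * The wake test above: after our decrement, the active part of the
 * count (tmp & RWSEM_ACTIVE_MASK) is zero while the value as a whole
 * is still negative.  That means we were the last active locker and
 * sleepers remain queued, so we are the one who must wake them.
 */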

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	if (unlikely(atomic64_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
					 (atomic64_t *)(&sem->count)) < 0L))
		rwsem_wake(sem);
}

/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
{
	atomic64_add(delta, (atomic64_t *)(&sem->count));
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic64_add_return(-RWSEM_WAITING_BIAS,
				  (atomic64_t *)(&sem->count));
	if (tmp < 0L)
		rwsem_downgrade_wake(sem);
}
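
/*
 * Downgrade arithmetic: a write hold is RWSEM_ACTIVE_WRITE_BIAS ==
 * RWSEM_WAITING_BIAS + 1, so adding -RWSEM_WAITING_BIAS leaves exactly
 * the +1 of a single reader.  A still-negative result means waiters
 * are queued behind us, and rwsem_downgrade_wake() can admit the
 * waiting readers, since the lock is now only read-held.
 */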

/*
 * implement exchange and add functionality
 */
static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
{
	return atomic64_add_return(delta, (atomic64_t *)(&sem->count));
}

static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
	return (sem->count != 0);
}

#endif /* __KERNEL__ */

#endif /* _SPARC64_RWSEM_H */