#ifndef _ASM_GENERIC_RWSEM_H
#define _ASM_GENERIC_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
#endif

#ifdef __KERNEL__

/*
 * R/W semaphores originally for PPC using the stuff in lib/rwsem.c.
 * Adapted largely from include/asm-i386/rwsem.h
 * by Paul Mackerras <paulus@samba.org>.
 */

/*
 * the semaphore definition
 */
#ifdef CONFIG_64BIT
# define RWSEM_ACTIVE_MASK		0xffffffffL
#else
# define RWSEM_ACTIVE_MASK		0x0000ffffL
#endif

#define RWSEM_UNLOCKED_VALUE		0x00000000L
#define RWSEM_ACTIVE_BIAS		0x00000001L
#define RWSEM_WAITING_BIAS		(-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

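/*
 * A worked sketch of the count encoding (illustrative values, shown
 * for the 64-bit layout above):
 *
 *	0x0000000000000000	unlocked	(RWSEM_UNLOCKED_VALUE)
 *	0x000000000000000N	N readers active, nothing queued
 *	0xffffffff00000001	one writer active, or one reader active
 *				with waiters queued
 *	0xffffffff00000000	no holders left, waiters queued
 *				(RWSEM_WAITING_BIAS)
 *
 * The low RWSEM_ACTIVE_MASK bits count the active holders; the count
 * goes negative as soon as a writer holds the lock or tasks queue up,
 * which is what the fast paths below test for.
 */
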
/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0))
		rwsem_down_read_failed(sem);
}

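/*
 * Try to take the lock for reading: attempt the cmpxchg only while the
 * count is non-negative, i.e. no writer is active and nothing is
 * queued; a negative count means contention, so give up immediately.
 */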
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	long tmp;

	while ((tmp = atomic_long_read(&sem->count)) >= 0) {
		if (tmp == atomic_long_cmpxchg_acquire(&sem->count, tmp,
				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
			return 1;
		}
	}
	return 0;
}

/*
 * lock for writing
 */
static inline void __down_write(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
					     &sem->count);
	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
		rwsem_down_write_failed(sem);
}

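/*
 * Lock for writing, but allow a fatal signal to interrupt the sleep:
 * the slow path returns an ERR_PTR when the task was killed, which is
 * mapped to -EINTR here; 0 means the write lock was taken.
 */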
static inline int __down_write_killable(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
					     &sem->count);
	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
		if (IS_ERR(rwsem_down_write_failed_killable(sem)))
			return -EINTR;
	return 0;
}

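/*
 * Try to take the lock for writing with a single cmpxchg: unlike the
 * read trylock there is no retry loop, since any value other than
 * RWSEM_UNLOCKED_VALUE means readers, a writer or waiters are present
 * and the attempt must fail.
 */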
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic_long_cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
		      RWSEM_ACTIVE_WRITE_BIAS);
	return tmp == RWSEM_UNLOCKED_VALUE;
}

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic_long_dec_return_release(&sem->count);
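	/*
	 * Wake waiters only when the active count hits zero while the
	 * count stays negative, e.g. (64-bit) the last reader leaving
	 * with waiters queued: 0xffffffff00000001 decrements to
	 * 0xffffffff00000000 == RWSEM_WAITING_BIAS, which is < -1 with
	 * no RWSEM_ACTIVE_MASK bits set.
	 */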
	if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
		rwsem_wake(sem);
}

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS,
						    &sem->count) < 0))
		rwsem_wake(sem);
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	long tmp;

	/*
	 * When downgrading from exclusive to shared ownership,
	 * anything inside the write-locked region cannot leak
	 * into the read side. In contrast, anything in the
	 * read-locked region is ok to be re-ordered into the
	 * write side. As such, rely on RELEASE semantics.
	 */
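	/*
	 * Worked values (64-bit, illustrative): an uncontended writer
	 * holds count == RWSEM_ACTIVE_WRITE_BIAS (0xffffffff00000001);
	 * adding -RWSEM_WAITING_BIAS (+0x0000000100000000) leaves
	 * RWSEM_ACTIVE_BIAS, i.e. one active reader. With waiters
	 * queued the result stays negative and the slow path is asked
	 * to wake the queued readers.
	 */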
	tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS, &sem->count);
	if (tmp < 0)
		rwsem_downgrade_wake(sem);
}

#endif /* __KERNEL__ */
#endif /* _ASM_GENERIC_RWSEM_H */