/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_RWSEM_H
#define _ASM_GENERIC_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
#endif

#ifdef __KERNEL__

/*
 * R/W semaphores originally for PPC using the stuff in lib/rwsem.c.
 * Adapted largely from include/asm-i386/rwsem.h
 * by Paul Mackerras <paulus@samba.org>.
 */

/*
 * the semaphore definition
 */
#ifdef CONFIG_64BIT
# define RWSEM_ACTIVE_MASK		0xffffffffL
#else
# define RWSEM_ACTIVE_MASK		0x0000ffffL
#endif

#define RWSEM_UNLOCKED_VALUE		0x00000000L
#define RWSEM_ACTIVE_BIAS		0x00000001L
#define RWSEM_WAITING_BIAS		(-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
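
/*
 * The count word encodes the whole lock state: the low bits
 * (RWSEM_ACTIVE_MASK) count active lockers, and the word goes
 * negative once a writer or queued waiters are involved. Worked
 * examples on a 64-bit kernel, where RWSEM_WAITING_BIAS is
 * -0x100000000:
 *
 *	0x0000000000000000	unlocked
 *	0x0000000000000003	three readers active
 *	0xffffffff00000001	one writer active, nobody queued
 *	0xfffffffe00000001	one writer active, waiters queued
 *				(the waiting bias is applied once,
 *				when the wait list becomes non-empty)
 *
 * So a non-positive count tells the read fast path that a writer is
 * involved, and anything but exactly RWSEM_ACTIVE_WRITE_BIAS tells
 * the write fast path that it lost the race.
 */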

/*
 * lock for reading
 */
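/*
 * Fast path: unconditionally bump the active count. A positive
 * result means only readers hold the lock and we are in; a result
 * <= 0 means the waiting bias is set, so queue up in the slow path.
 * The _acquire ordering pairs with the _release in the unlockers.
 */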
static inline void __down_read(struct rw_semaphore *sem)
{
	if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0))
		rwsem_down_read_failed(sem);
}

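/*
 * Same fast path as __down_read(), but the slow-path sleep is
 * killable: a fatal signal aborts the wait and the caller gets
 * -EINTR without holding the lock.
 */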
static inline int __down_read_killable(struct rw_semaphore *sem)
{
	if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0)) {
		if (IS_ERR(rwsem_down_read_failed_killable(sem)))
			return -EINTR;
	}

	return 0;
}

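/*
 * Opportunistic read lock: keep retrying the cmpxchg while the
 * count stays non-negative (no writer involved). Returns 1 on
 * success, 0 as soon as a writer is observed; never sleeps.
 */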
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	long tmp;

	while ((tmp = atomic_long_read(&sem->count)) >= 0) {
		if (tmp == atomic_long_cmpxchg_acquire(&sem->count, tmp,
				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
			return 1;
		}
	}
	return 0;
}

/*
 * lock for writing
 */
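/*
 * A writer adds RWSEM_ACTIVE_WRITE_BIAS in one step. The result
 * equals that bias only if the count was previously zero, i.e. the
 * lock was free with nobody queued; any other value means contention
 * and the slow path has to sort it out.
 */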
static inline void __down_write(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
					     &sem->count);
	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
		rwsem_down_write_failed(sem);
}

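/*
 * As __down_write(), except that a fatal signal received while
 * sleeping in the slow path aborts the acquisition and returns
 * -EINTR.
 */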
static inline int __down_write_killable(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
					     &sem->count);
	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
		if (IS_ERR(rwsem_down_write_failed_killable(sem)))
			return -EINTR;
	return 0;
}

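/*
 * A write trylock is a single cmpxchg from RWSEM_UNLOCKED_VALUE to
 * the write bias: it can only succeed while the lock is completely
 * idle, with no active lockers and nobody queued.
 */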
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic_long_cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
		      RWSEM_ACTIVE_WRITE_BIAS);
	return tmp == RWSEM_UNLOCKED_VALUE;
}

/*
 * unlock after reading
 */
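/*
 * Drop one read bias. If the result is negative (waiters queued)
 * and the active part of the count has drained to zero, we were
 * the last active locker and must wake the front of the queue.
 */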
static inline void __up_read(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic_long_dec_return_release(&sem->count);
	if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
		rwsem_wake(sem);
}

/*
 * unlock after writing
 */
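/*
 * Subtracting the write bias drops both the active and the waiting
 * part the writer added. A negative result means tasks queued up
 * behind us while we held the lock, so wake them.
 */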
static inline void __up_write(struct rw_semaphore *sem)
{
	if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS,
						    &sem->count) < 0))
		rwsem_wake(sem);
}

/*
 * downgrade write lock to read lock
 */
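/*
 * Turn the write bias into a single read bias in one atomic step:
 * adding -RWSEM_WAITING_BIAS leaves just RWSEM_ACTIVE_BIAS behind.
 * A negative result means tasks are queued behind us; waiting
 * readers can now be woken to share the lock.
 */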
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	long tmp;

	/*
	 * When downgrading from exclusive to shared ownership,
	 * anything inside the write-locked region cannot leak
	 * into the read side. In contrast, anything in the
	 * read-locked region is ok to be re-ordered into the
	 * write side. As such, rely on RELEASE semantics.
	 */
	tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS, &sem->count);
	if (tmp < 0)
		rwsem_downgrade_wake(sem);
}

#endif /* __KERNEL__ */
#endif /* _ASM_GENERIC_RWSEM_H */