/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_RWSEM_H
#define _ASM_GENERIC_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
#endif

#ifdef __KERNEL__

/*
 * R/W semaphores originally for PPC using the stuff in lib/rwsem.c.
 * Adapted largely from include/asm-i386/rwsem.h
 * by Paul Mackerras <paulus@samba.org>.
 */

/*
 * the semaphore definition
 */
#ifdef CONFIG_64BIT
# define RWSEM_ACTIVE_MASK		0xffffffffL
#else
# define RWSEM_ACTIVE_MASK		0x0000ffffL
#endif

#define RWSEM_UNLOCKED_VALUE		0x00000000L
#define RWSEM_ACTIVE_BIAS		0x00000001L
#define RWSEM_WAITING_BIAS		(-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

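/*
 * Illustrative note, derived from the macros above (not in the original
 * header): on 64-bit these biases encode the lock state in sem->count
 * roughly as follows:
 *
 *	0x0000000000000000	unlocked (RWSEM_UNLOCKED_VALUE)
 *	0x000000000000000N	N active readers, no waiters
 *	0xffffffff00000001	one active writer, no waiters
 *				(RWSEM_ACTIVE_WRITE_BIAS = WAITING + ACTIVE)
 *	0xffffffff0000000N	N active readers, waiters queued
 *
 * In short: the low RWSEM_ACTIVE_MASK bits count active holders, and a
 * negative count signals writer activity and/or queued waiters.
 */
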
/*
 * lock for reading
 */
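/*
 * Illustrative note, derived from the encoding above: the read fast path
 * simply increments the count; a positive result means only readers hold
 * the lock and we are done, while a result <= 0 means a writer is active
 * or waiters are queued, so rwsem_down_read_failed() must be called to
 * block (or, in the killable variant, possibly return -EINTR).
 */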
static inline void __down_read(struct rw_semaphore *sem)
{
	if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0))
		rwsem_down_read_failed(sem);
}

static inline int __down_read_killable(struct rw_semaphore *sem)
{
	if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0)) {
		if (IS_ERR(rwsem_down_read_failed_killable(sem)))
			return -EINTR;
	}

	return 0;
}

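/*
 * Illustrative note (added): the trylock below keeps retrying its
 * cmpxchg as long as the count stays non-negative, because a concurrent
 * reader may legitimately change the count between the read and the
 * cmpxchg; once the count goes negative (writer active or waiters
 * queued) it gives up immediately rather than block.
 */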
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	long tmp;

	while ((tmp = atomic_long_read(&sem->count)) >= 0) {
		if (tmp == atomic_long_cmpxchg_acquire(&sem->count, tmp,
				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
			return 1;
		}
	}
	return 0;
}

/*
 * lock for writing
 */
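/*
 * Illustrative note, derived from the encoding above: a writer adds
 * RWSEM_ACTIVE_WRITE_BIAS; if the resulting count is exactly that bias,
 * the semaphore was unlocked and is now owned exclusively. Any other
 * result means readers, another writer, or waiters were already present,
 * so the slow path is taken.
 */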
static inline void __down_write(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
					     &sem->count);
	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
		rwsem_down_write_failed(sem);
}

static inline int __down_write_killable(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
					     &sem->count);
	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
		if (IS_ERR(rwsem_down_write_failed_killable(sem)))
			return -EINTR;
	return 0;
}

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic_long_cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
			RWSEM_ACTIVE_WRITE_BIAS);
	return tmp == RWSEM_UNLOCKED_VALUE;
}

/*
 * unlock after reading
 */
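/*
 * Illustrative note (added): after the decrement below, tmp < -1 means
 * the waiting bias is in the count (tasks are queued), and a zero active
 * mask means we were the last active holder; e.g. the last reader leaving
 * with waiters queued sees tmp == RWSEM_WAITING_BIAS, and it then falls
 * to us to call rwsem_wake().
 */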
static inline void __up_read(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic_long_dec_return_release(&sem->count);
	if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
		rwsem_wake(sem);
}

/*
 * unlock after writing
 */
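/*
 * Illustrative note, derived from the encoding above: subtracting
 * RWSEM_ACTIVE_WRITE_BIAS removes both the active and waiting parts the
 * writer contributed; a negative result means a waiting bias from queued
 * tasks remains in the count, so they need to be woken.
 */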
static inline void __up_write(struct rw_semaphore *sem)
{
	if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS,
						    &sem->count) < 0))
		rwsem_wake(sem);
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	long tmp;

	/*
	 * When downgrading from exclusive to shared ownership,
	 * anything inside the write-locked region cannot leak
	 * into the read side. In contrast, anything in the
	 * read-locked region is ok to be re-ordered into the
	 * write side. As such, rely on RELEASE semantics.
	 */
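	/*
	 * Illustrative note (added): adding -RWSEM_WAITING_BIAS turns the
	 * writer's RWSEM_ACTIVE_WRITE_BIAS (= WAITING + ACTIVE) into a
	 * plain RWSEM_ACTIVE_READ_BIAS; a negative result means waiters
	 * are still queued, so the waiting readers are woken.
	 */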
	tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS, &sem->count);
	if (tmp < 0)
		rwsem_downgrade_wake(sem);
}

#endif	/* __KERNEL__ */
#endif	/* _ASM_GENERIC_RWSEM_H */