/* rwsem.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 *
 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
 * and Michel Lespinasse <walken@google.com>
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/export.h>

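/*
 * The count field encodes the whole lock state.  For illustration, the
 * usual 32-bit values (from the per-arch rwsem.h; 64-bit builds use
 * wider fields with the same layout) are assumed in the comments below:
 *
 *	RWSEM_ACTIVE_MASK	0x0000ffff  - mask of active (granted) lockers
 *	RWSEM_ACTIVE_READ_BIAS	0x00000001  - one active reader
 *	RWSEM_WAITING_BIAS	0xffff0000  - set while anyone is queued
 *	RWSEM_ACTIVE_WRITE_BIAS	0xffff0001  - waiting bias plus one active locker
 */
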
/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	sem->count = RWSEM_UNLOCKED_VALUE;
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
}

EXPORT_SYMBOL(__init_rwsem);

enum rwsem_waiter_type {
	RWSEM_WAITING_FOR_WRITE,
	RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	enum rwsem_waiter_type type;
};

enum rwsem_wake_type {
	RWSEM_WAKE_ANY,		/* Wake whatever's at head of wait list */
	RWSEM_WAKE_READERS,	/* Wake readers only */
	RWSEM_WAKE_READ_OWNED	/* Waker thread holds the read lock */
};

/*
 * handle the lock release when there are processes blocked on it that can
 * now run
 * - if we come here from up_xxxx(), then:
 * - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 * - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 * - there must be someone on the queue
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list once their task pointer
 *   has been zeroed
 * - writers are only woken if downgrading is false
 */
static struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, enum rwsem_wake_type wake_type)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;
	struct list_head *next;
	long oldcount, woken, loop, adjustment;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
		if (wake_type == RWSEM_WAKE_ANY)
			/* Wake writer at the front of the queue, but do not
			 * grant it the lock yet as we want other writers
			 * to be able to steal it.  Readers, on the other hand,
			 * will block as they will notice the queued writer.
			 */
			wake_up_process(waiter->task);
		goto out;
	}

	/* Writers might steal the lock before we grant it to the next reader.
	 * We prefer to do the first reader grant before counting readers
	 * so we can bail out early if a writer stole the lock.
	 */
	adjustment = 0;
	if (wake_type != RWSEM_WAKE_READ_OWNED) {
		adjustment = RWSEM_ACTIVE_READ_BIAS;
 try_reader_grant:
		oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
		if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
			/* A writer stole the lock.  Undo our reader grant. */
			if (rwsem_atomic_update(-adjustment, sem) &
						RWSEM_ACTIVE_MASK)
				goto out;
			/* Last active locker left.  Retry waking readers. */
			goto try_reader_grant;
		}
	}

	/* Grant an infinite number of read locks to the readers at the front
	 * of the queue.  Note we increment the 'active part' of the count by
	 * the number of readers before waking any processes up.
	 */
	woken = 0;
	do {
		woken++;

		if (waiter->list.next == &sem->wait_list)
			break;

		waiter = list_entry(waiter->list.next,
					struct rwsem_waiter, list);

	} while (waiter->type != RWSEM_WAITING_FOR_WRITE);

	adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
	if (waiter->type != RWSEM_WAITING_FOR_WRITE)
		/* hit end of list above */
		adjustment -= RWSEM_WAITING_BIAS;
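	/* Worked example: having woken two readers with one speculative
	 * RWSEM_ACTIVE_READ_BIAS grant already applied above, adjustment
	 * is now one further read bias; and if the loop hit the end of
	 * the list, the queue is draining completely, so the waiting
	 * bias is taken back out as well.
	 */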

	if (adjustment)
		rwsem_atomic_add(adjustment, sem);

	next = sem->wait_list.next;
	loop = woken;
	do {
		waiter = list_entry(next, struct rwsem_waiter, list);
		next = waiter->list.next;
		tsk = waiter->task;
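		/* The waiter struct sits on the woken task's stack and may
		 * be freed as soon as waiter->task is cleared, so read
		 * everything we need first; the barrier orders those reads
		 * before the NULL store below.
		 */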
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
	} while (--loop);

	sem->wait_list.next = next;
	next->prev = &sem->wait_list;

 out:
	return sem;
}

/*
 * wait for the read lock to be granted
 */
struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
{
	long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
	struct rwsem_waiter waiter;
	struct task_struct *tsk = current;

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.type = RWSEM_WAITING_FOR_READ;
	get_task_struct(tsk);

	raw_spin_lock_irq(&sem->wait_lock);
	if (list_empty(&sem->wait_list))
		adjustment += RWSEM_WAITING_BIAS;
	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	count = rwsem_atomic_update(adjustment, sem);
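	/* Worked example (32-bit values): a writer held the lock, so our
	 * failed fastpath increment left count at 0xffff0002.  If that
	 * writer released before we took wait_lock, count became 1, and
	 * queueing as the first waiter adjusts it by
	 * RWSEM_WAITING_BIAS - RWSEM_ACTIVE_READ_BIAS, leaving exactly
	 * RWSEM_WAITING_BIAS: the first test below fires and we wake
	 * ourselves.
	 */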

	/* If there are no active locks, wake the front queued process(es).
	 *
	 * If there are no writers and we are first in the queue,
	 * wake our own waiter to join the existing active readers!
	 */
	if (count == RWSEM_WAITING_BIAS ||
	    (count > RWSEM_WAITING_BIAS &&
	     adjustment != -RWSEM_ACTIVE_READ_BIAS))
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);

	raw_spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock */
	while (true) {
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		if (!waiter.task)
			break;
		schedule();
	}

	tsk->state = TASK_RUNNING;

	return sem;
}

/*
 * wait until we successfully acquire the write lock
 */
struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
{
	long count, adjustment = -RWSEM_ACTIVE_WRITE_BIAS;
	struct rwsem_waiter waiter;
	struct task_struct *tsk = current;

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.type = RWSEM_WAITING_FOR_WRITE;

	raw_spin_lock_irq(&sem->wait_lock);
	if (list_empty(&sem->wait_list))
		adjustment += RWSEM_WAITING_BIAS;
	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	count = rwsem_atomic_update(adjustment, sem);

	/* If there were already threads queued before us and there are no
	 * active writers, the lock must be read owned; so we try to wake
	 * any readers that were queued ahead of us. */
	if (count > RWSEM_WAITING_BIAS &&
	    adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_READERS);

	/* wait until we successfully acquire the lock */
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	while (true) {
		if (!(count & RWSEM_ACTIVE_MASK)) {
			/* Try acquiring the write lock. */
			count = RWSEM_ACTIVE_WRITE_BIAS;
			if (!list_is_singular(&sem->wait_list))
				count += RWSEM_WAITING_BIAS;

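			/* Peek at sem->count before the cmpxchg: if another
			 * thread has already changed it, the cmpxchg would
			 * fail anyway and merely bounce the cache line.
			 */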
			if (sem->count == RWSEM_WAITING_BIAS &&
			    cmpxchg(&sem->count, RWSEM_WAITING_BIAS, count) ==
							RWSEM_WAITING_BIAS)
				break;
		}

		raw_spin_unlock_irq(&sem->wait_lock);

		/* Block until there are no active lockers. */
		do {
			schedule();
			set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		} while ((count = sem->count) & RWSEM_ACTIVE_MASK);

		raw_spin_lock_irq(&sem->wait_lock);
	}

	list_del(&waiter.list);
	raw_spin_unlock_irq(&sem->wait_lock);
	tsk->state = TASK_RUNNING;

	return sem;
}

/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	/* do nothing if list empty */
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

	return sem;
}

/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	/* do nothing if list empty */
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

	return sem;
}

EXPORT_SYMBOL(rwsem_down_read_failed);
EXPORT_SYMBOL(rwsem_down_write_failed);
EXPORT_SYMBOL(rwsem_wake);
EXPORT_SYMBOL(rwsem_downgrade_wake);
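
/*
 * For reference, a minimal sketch of how these slow paths are reached
 * through the public API (wrapper names as in linux/rwsem.h; the slow
 * paths above only run when the fast paths fail):
 *
 *	static DECLARE_RWSEM(my_sem);
 *
 *	down_read(&my_sem);		-- may enter rwsem_down_read_failed()
 *	... read-side critical section ...
 *	up_read(&my_sem);		-- may enter rwsem_wake()
 *
 *	down_write(&my_sem);		-- may enter rwsem_down_write_failed()
 *	downgrade_write(&my_sem);	-- may enter rwsem_downgrade_wake()
 *	up_read(&my_sem);		-- release the downgraded lock
 */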