/*
 * The owner field of the rw_semaphore structure will be set to
 * RWSEM_READER_OWNED when a reader grabs the lock. A writer will clear
 * the owner field when it unlocks. A reader, on the other hand, will
 * not touch the owner field when it unlocks.
 *
 * In essence, the owner field now has the following 3 states:
 * 1) 0
 *    - lock is free or the owner hasn't set the field yet
 * 2) RWSEM_READER_OWNED
 *    - lock is currently or previously owned by readers (lock is free
 *      or not set by owner yet)
 * 3) Other non-zero value
 *    - a writer owns the lock
 */
#define RWSEM_READER_OWNED	((struct task_struct *)1UL)
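/*
 * A waiter is queued for either a write or a read lock; the wakeup
 * path uses this to decide whether to wake a single writer or a batch
 * of readers at the head of the queue.
 */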
enum rwsem_waiter_type {
	RWSEM_WAITING_FOR_WRITE,
	RWSEM_WAITING_FOR_READ
};

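/*
 * One entry on sem->wait_list per blocked task; the slowpath allocates
 * it on the waiter's own stack, so it lives only while the task sleeps.
 */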
struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	enum rwsem_waiter_type type;
};

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/*
 * All writes to owner are protected by WRITE_ONCE() to make sure that
 * store tearing can't happen as optimistic spinners may read and use
 * the owner value concurrently without holding the lock. Reads from
 * owner, however, may not need READ_ONCE() as long as the pointer
 * value is only used for comparison and isn't being dereferenced.
 */
static inline void rwsem_set_owner(struct rw_semaphore *sem)
{
	WRITE_ONCE(sem->owner, current);
}

static inline void rwsem_clear_owner(struct rw_semaphore *sem)
{
	WRITE_ONCE(sem->owner, NULL);
}

static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
{
	/*
	 * We check the owner value first to make sure that we will only
	 * do a write to the rwsem cacheline when it is really necessary
	 * to minimize cacheline contention.
	 */
	if (sem->owner != RWSEM_READER_OWNED)
		WRITE_ONCE(sem->owner, RWSEM_READER_OWNED);
}

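/*
 * A writer owns the lock iff owner holds a real task pointer, i.e. it
 * is non-NULL and not the RWSEM_READER_OWNED marker.
 */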
static inline bool rwsem_owner_is_writer(struct task_struct *owner)
{
	return owner && owner != RWSEM_READER_OWNED;
}

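/*
 * Readers are tracked collectively: any reader-owned sem carries the
 * RWSEM_READER_OWNED marker rather than a specific task pointer.
 */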
static inline bool rwsem_owner_is_reader(struct task_struct *owner)
{
	return owner == RWSEM_READER_OWNED;
}
#else
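/*
 * Without optimistic spinning the owner field isn't tracked, so these
 * helpers compile away to nothing.
 */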
static inline void rwsem_set_owner(struct rw_semaphore *sem)
{
}

static inline void rwsem_clear_owner(struct rw_semaphore *sem)
{
}

static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
{
}
#endif

#ifdef CONFIG_RWSEM_PRIO_AWARE

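/*
 * Max number of priority-based queue jumps allowed before reverting to
 * FIFO ordering, so that low-priority waiters cannot starve.
 */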
#define RWSEM_MAX_PREEMPT_ALLOWED	3000

/*
 * Return true if the current waiter is added at the front of the rwsem
 * wait list.
 */
static inline bool rwsem_list_add_per_prio(struct rwsem_waiter *waiter_in,
					   struct rw_semaphore *sem)
{
	struct list_head *pos;
	struct list_head *head;
	struct rwsem_waiter *waiter = NULL;

	pos = head = &sem->wait_list;
	/*
	 * Rules for task prio aware rwsem wait list queueing:
	 * 1: Only a waiter whose task priority is higher than
	 *    DEFAULT_PRIO (i.e. a numerically lower prio value) may
	 *    jump ahead of lower-priority waiters in the queue.
	 * 2: To avoid starvation, keep a count of how many high
	 *    priority waiters have jumped the queue. Once the count
	 *    exceeds RWSEM_MAX_PREEMPT_ALLOWED, fall back to simple
	 *    FIFO ordering until the wait list drains empty.
	 */
	/* First waiter resets the preempt count and takes the front slot. */
	if (list_empty(head)) {
		list_add_tail(&waiter_in->list, head);
		sem->m_count = 0;
		return true;
	}

	if (waiter_in->task->prio < DEFAULT_PRIO
			&& sem->m_count < RWSEM_MAX_PREEMPT_ALLOWED) {

		list_for_each(pos, head) {
			waiter = list_entry(pos, struct rwsem_waiter, list);
			if (waiter->task->prio > waiter_in->task->prio) {
				/* Queue ahead of the first lower-priority waiter. */
				list_add(&waiter_in->list, pos->prev);
				sem->m_count++;
				return &waiter_in->list == head->next;
			}
		}
	}

	list_add_tail(&waiter_in->list, head);

	return false;
}
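/*
 * Illustrative only: a slowpath caller would typically queue itself
 * roughly like this (details vary by kernel version):
 *
 *	struct rwsem_waiter waiter;
 *	bool first;
 *
 *	waiter.task = current;
 *	waiter.type = RWSEM_WAITING_FOR_WRITE;
 *	raw_spin_lock_irq(&sem->wait_lock);
 *	first = rwsem_list_add_per_prio(&waiter, sem);
 *	raw_spin_unlock_irq(&sem->wait_lock);
 *
 * The return value tells the caller whether it now sits at the head of
 * the queue and should be considered first for wakeup.
 */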
#else
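/* Priority-aware queueing disabled: plain FIFO ordering. */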
static inline bool rwsem_list_add_per_prio(struct rwsem_waiter *waiter_in,
					   struct rw_semaphore *sem)
{
	list_add_tail(&waiter_in->list, &sem->wait_list);
	return false;
}
#endif