/*
 * The owner field of the rw_semaphore structure will be set to
 * RWSEM_READER_OWNED when a reader grabs the lock. A writer will clear
 * the owner field when it unlocks. A reader, on the other hand, will
 * not touch the owner field when it unlocks.
 *
 * In essence, the owner field now has the following 3 states:
 *  1) 0
 *     - lock is free or the owner hasn't set the field yet
 *  2) RWSEM_READER_OWNED
 *     - lock is currently or previously owned by readers (the lock may
 *       be free or a new owner may not have set the field yet)
 *  3) Other non-zero value
 *     - a writer owns the lock
 */
#define RWSEM_READER_OWNED	((struct task_struct *)1UL)
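
/*
 * For example (illustrative): after a writer acquires the lock,
 * ->owner == current; after it releases, ->owner == NULL. After a
 * reader acquires the lock, ->owner == RWSEM_READER_OWNED, and it
 * stays that way after the reader releases it.
 */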
enum rwsem_waiter_type {
	RWSEM_WAITING_FOR_WRITE,
	RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	enum rwsem_waiter_type type;
};

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/*
 * All writes to owner are protected by WRITE_ONCE() to make sure that
 * store tearing can't happen as optimistic spinners may read and use
 * the owner value concurrently without lock. Reads from owner, however,
 * may not need READ_ONCE() as long as the pointer value is only used
 * for comparison and isn't being dereferenced.
 */
static inline void rwsem_set_owner(struct rw_semaphore *sem)
{
	WRITE_ONCE(sem->owner, current);
}

static inline void rwsem_clear_owner(struct rw_semaphore *sem)
{
	WRITE_ONCE(sem->owner, NULL);
}

static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
{
	/*
	 * We check the owner value first to make sure that we will only
	 * write to the rwsem cacheline when it is really necessary, to
	 * minimize cacheline contention.
	 */
	if (sem->owner != RWSEM_READER_OWNED)
		WRITE_ONCE(sem->owner, RWSEM_READER_OWNED);
}
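
/*
 * Note (illustrative): when several readers pile onto the lock, only
 * the first one through here actually stores to ->owner; the rest see
 * RWSEM_READER_OWNED already in place and skip the write entirely.
 */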
57
58static inline bool rwsem_owner_is_writer(struct task_struct *owner)
59{
60 return owner && owner != RWSEM_READER_OWNED;
61}
62
63static inline bool rwsem_owner_is_reader(struct task_struct *owner)
64{
65 return owner == RWSEM_READER_OWNED;
66}
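
/*
 * Illustrative sketch only (assumed usage, not the actual spinner
 * code): an optimistic spinner may sample ->owner locklessly and use
 * the predicates above to decide whether spinning is worthwhile:
 *
 *	struct task_struct *owner = READ_ONCE(sem->owner);
 *
 *	if (rwsem_owner_is_reader(owner))
 *		return false;	// never spin waiting on readers
 *	while (rwsem_owner_is_writer(READ_ONCE(sem->owner)))
 *		cpu_relax();	// writer is likely running on a CPU
 */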
#else
static inline void rwsem_set_owner(struct rw_semaphore *sem)
{
}

static inline void rwsem_clear_owner(struct rw_semaphore *sem)
{
}

static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
{
}
#endif

#ifdef CONFIG_RWSEM_PRIO_AWARE

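/*
 * Cap on how many higher-priority waiters may jump the queue before
 * rwsem_list_add_per_prio() falls back to plain FIFO queueing.
 */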
#define RWSEM_MAX_PREEMPT_ALLOWED	3000

/*
 * Return true if the current waiter is added at the front of the rwsem
 * wait list.
 */
static inline bool rwsem_list_add_per_prio(struct rwsem_waiter *waiter_in,
					   struct rw_semaphore *sem)
{
	struct list_head *pos;
	struct list_head *head;
	struct rwsem_waiter *waiter = NULL;

	pos = head = &sem->wait_list;
	/*
	 * Rules for task-prio-aware rwsem wait list queueing:
	 * 1: Only try to preempt waiters on behalf of tasks whose
	 *    priority is higher than DEFAULT_PRIO.
	 * 2: To avoid starvation, keep a count of how many high
	 *    priority waiters have preempted their way into the wait
	 *    list. If that count exceeds RWSEM_MAX_PREEMPT_ALLOWED,
	 *    fall back to simple FIFO queueing until the wait list is
	 *    empty.
	 */
	if (list_empty(head)) {
		list_add_tail(&waiter_in->list, head);
		sem->m_count = 0;
		return true;
	}

	if (waiter_in->task->prio < DEFAULT_PRIO &&
	    sem->m_count < RWSEM_MAX_PREEMPT_ALLOWED) {
		list_for_each(pos, head) {
			waiter = list_entry(pos, struct rwsem_waiter, list);
			if (waiter->task->prio > waiter_in->task->prio) {
				list_add(&waiter_in->list, pos->prev);
				sem->m_count++;
				return &waiter_in->list == head->next;
			}
		}
	}

	list_add_tail(&waiter_in->list, head);

	return false;
}
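
/*
 * Worked example (illustrative; a lower ->prio value means higher
 * priority, and DEFAULT_PRIO is 120): if the wait list holds two
 * waiters of prio 120 and a waiter of prio 100 arrives, it is inserted
 * ahead of the first prio-120 waiter, m_count is incremented, and the
 * function returns true because the new waiter now sits at the head of
 * the list. Once m_count reaches RWSEM_MAX_PREEMPT_ALLOWED, newcomers
 * queue FIFO at the tail until the list drains and m_count is reset.
 */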
#else
static inline bool rwsem_list_add_per_prio(struct rwsem_waiter *waiter_in,
					   struct rw_semaphore *sem)
{
	list_add_tail(&waiter_in->list, &sem->wait_list);
	return false;
}
#endif