#ifndef _LINUX_PERCPU_RWSEM_H
#define _LINUX_PERCPU_RWSEM_H

#include <linux/atomic.h>
#include <linux/rwsem.h>
#include <linux/percpu.h>
#include <linux/wait.h>
#include <linux/rcu_sync.h>
#include <linux/lockdep.h>

struct percpu_rw_semaphore {
	struct rcu_sync		rss;
	unsigned int __percpu	*read_count;
	struct rw_semaphore	rw_sem;
	wait_queue_head_t	writer;
	int			readers_block;
};

#define DEFINE_STATIC_PERCPU_RWSEM(name)				\
static DEFINE_PER_CPU(unsigned int, __percpu_rwsem_rc_##name);		\
static struct percpu_rw_semaphore name = {				\
	.rss = __RCU_SYNC_INITIALIZER(name.rss, RCU_SCHED_SYNC),	\
	.read_count = &__percpu_rwsem_rc_##name,			\
	.rw_sem = __RWSEM_INITIALIZER(name.rw_sem),			\
	.writer = __WAIT_QUEUE_HEAD_INITIALIZER(name.writer),		\
}
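
/*
 * Example (hypothetical names, not part of any kernel API): a file-scope
 * semaphore taken for reading around a fast path.
 *
 *	DEFINE_STATIC_PERCPU_RWSEM(foo_rwsem);
 *
 *	percpu_down_read(&foo_rwsem);
 *	... read-side critical section ...
 *	percpu_up_read(&foo_rwsem);
 */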

extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
extern void __percpu_up_read(struct percpu_rw_semaphore *);

static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *sem)
{
	might_sleep();

	rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 0, _RET_IP_);

	preempt_disable();
	/*
	 * We are in an RCU-sched read-side critical section, so the writer
	 * cannot both change sem->state from readers_fast and start checking
	 * counters while we are here. So if we see !sem->state, we know that
	 * the writer won't be checking until we're past the preempt_enable()
	 * and that once the synchronize_sched() is done, the writer will see
	 * anything we did within this RCU-sched read-side critical section.
	 */
	__this_cpu_inc(*sem->read_count);
	if (unlikely(!rcu_sync_is_idle(&sem->rss)))
		__percpu_down_read(sem, false); /* Unconditional memory barrier */
	barrier();
	/*
	 * The barrier() prevents the compiler from
	 * bleeding the critical section out.
	 */
}

static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
{
	percpu_down_read_preempt_disable(sem);
	preempt_enable();
}
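
/*
 * Pairing: percpu_down_read_preempt_disable() returns with preemption
 * disabled and must be matched by percpu_up_read_preempt_enable();
 * percpu_down_read() re-enables preemption and is matched by
 * percpu_up_read().
 */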

static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
{
	int ret = 1;

	preempt_disable();
	/*
	 * Same as in percpu_down_read().
	 */
	__this_cpu_inc(*sem->read_count);
	if (unlikely(!rcu_sync_is_idle(&sem->rss)))
		ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */
	preempt_enable();
	/*
	 * The barrier() from preempt_enable() prevents the compiler from
	 * bleeding the critical section out.
	 */

	if (ret)
		rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 1, _RET_IP_);

	return ret;
}
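
/*
 * Example (hypothetical): the trylock variant never blocks; it returns
 * non-zero iff the read lock was taken.
 *
 *	if (percpu_down_read_trylock(&foo_rwsem)) {
 *		... read-side critical section ...
 *		percpu_up_read(&foo_rwsem);
 *	}
 */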

static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem)
{
	/*
	 * The barrier() prevents the compiler from
	 * bleeding the critical section out.
	 */
	barrier();
	/*
	 * Same as in percpu_down_read().
	 */
	if (likely(rcu_sync_is_idle(&sem->rss)))
		__this_cpu_dec(*sem->read_count);
	else
		__percpu_up_read(sem); /* Unconditional memory barrier */
	preempt_enable();

	rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_);
}

static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
{
	preempt_disable();
	percpu_up_read_preempt_enable(sem);
}
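
/*
 * Example (hypothetical) use of the preempt-disabled pair, for a reader
 * that must not be preempted or migrated inside the critical section:
 *
 *	percpu_down_read_preempt_disable(&foo_rwsem);
 *	... non-preemptible read-side critical section ...
 *	percpu_up_read_preempt_enable(&foo_rwsem);
 */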
109
Oleg Nesterova1fd3e22012-12-17 16:01:32 -0800110extern void percpu_down_write(struct percpu_rw_semaphore *);
111extern void percpu_up_write(struct percpu_rw_semaphore *);
Mikulas Patocka62ac6652012-09-26 07:46:43 +0200112
Oleg Nesterov8ebe3472012-12-17 16:01:38 -0800113extern int __percpu_init_rwsem(struct percpu_rw_semaphore *,
114 const char *, struct lock_class_key *);
Peter Zijlstra80127a32016-07-14 20:08:46 +0200115
Oleg Nesterova1fd3e22012-12-17 16:01:32 -0800116extern void percpu_free_rwsem(struct percpu_rw_semaphore *);
Mikulas Patocka62ac6652012-09-26 07:46:43 +0200117
Peter Zijlstra80127a32016-07-14 20:08:46 +0200118#define percpu_init_rwsem(sem) \
Oleg Nesterov8ebe3472012-12-17 16:01:38 -0800119({ \
120 static struct lock_class_key rwsem_key; \
Peter Zijlstra80127a32016-07-14 20:08:46 +0200121 __percpu_init_rwsem(sem, #sem, &rwsem_key); \
Oleg Nesterov8ebe3472012-12-17 16:01:38 -0800122})
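
/*
 * Example (hypothetical) dynamic initialization; percpu_init_rwsem()
 * allocates the per-CPU read counters and can fail:
 *
 *	struct percpu_rw_semaphore bar_rwsem;
 *
 *	if (percpu_init_rwsem(&bar_rwsem))
 *		return -ENOMEM;
 *	...
 *	percpu_free_rwsem(&bar_rwsem);
 */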

#define percpu_rwsem_is_held(sem)	lockdep_is_held(&(sem)->rw_sem)

#define percpu_rwsem_assert_held(sem)				\
	lockdep_assert_held(&(sem)->rw_sem)
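
/*
 * percpu_rwsem_release() and percpu_rwsem_acquire() only update lockdep
 * and optimistic-spin state; they allow a semaphore acquired in one task
 * to be released from another (e.g. a writer handing the lock across a
 * context switch boundary) without triggering lockdep warnings.
 */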
128
Oleg Nesterov55cc156502015-07-21 20:26:44 +0200129static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem,
130 bool read, unsigned long ip)
131{
132 lock_release(&sem->rw_sem.dep_map, 1, ip);
133#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
134 if (!read)
135 sem->rw_sem.owner = NULL;
136#endif
137}
138
139static inline void percpu_rwsem_acquire(struct percpu_rw_semaphore *sem,
140 bool read, unsigned long ip)
141{
142 lock_acquire(&sem->rw_sem.dep_map, 0, 1, read, 1, NULL, ip);
143}

#endif