#ifndef _LINUX_PERCPU_RWSEM_H
#define _LINUX_PERCPU_RWSEM_H

#include <linux/atomic.h>
#include <linux/rwsem.h>
#include <linux/percpu.h>
#include <linux/wait.h>
#include <linux/rcu_sync.h>
#include <linux/lockdep.h>

struct percpu_rw_semaphore {
	struct rcu_sync		rss;
	unsigned int __percpu	*read_count;
	struct rw_semaphore	rw_sem;
	wait_queue_head_t	writer;
	int			readers_block;
};

#define DEFINE_STATIC_PERCPU_RWSEM(name)				\
static DEFINE_PER_CPU(unsigned int, __percpu_rwsem_rc_##name);		\
static struct percpu_rw_semaphore name = {				\
	.rss = __RCU_SYNC_INITIALIZER(name.rss, RCU_SCHED_SYNC),	\
	.read_count = &__percpu_rwsem_rc_##name,			\
	.rw_sem = __RWSEM_INITIALIZER(name.rw_sem),			\
	.writer = __WAIT_QUEUE_HEAD_INITIALIZER(name.writer),		\
}

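/*
 * Example (an illustrative sketch, not part of this header's API): a
 * statically defined per-CPU rwsem protecting mostly-read data. The
 * names "my_rwsem", "my_data" and "my_read_data" are hypothetical.
 *
 *	static DEFINE_STATIC_PERCPU_RWSEM(my_rwsem);
 *	static unsigned long my_data;
 *
 *	void my_read_data(void)
 *	{
 *		percpu_down_read(&my_rwsem);
 *		pr_info("data: %lu\n", my_data);
 *		percpu_up_read(&my_rwsem);
 *	}
 */
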
extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
extern void __percpu_up_read(struct percpu_rw_semaphore *);

static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
{
	might_sleep();

	rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 0, _RET_IP_);

	preempt_disable();
	/*
	 * We are in an RCU-sched read-side critical section, so the writer
	 * cannot both set sem->readers_block and start checking counters
	 * while we are here. So if we see !sem->readers_block, we know that
	 * the writer won't be checking until we're past the preempt_enable()
	 * and that once the synchronize_sched() is done, the writer will see
	 * anything we did within this RCU-sched read-side critical section.
	 */
	__this_cpu_inc(*sem->read_count);
	if (unlikely(!rcu_sync_is_idle(&sem->rss)))
		__percpu_down_read(sem, false); /* Unconditional memory barrier */
	preempt_enable();
	/*
	 * The barrier() from preempt_enable() prevents the compiler from
	 * bleeding the critical section out.
	 */
}

static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
{
	int ret = 1;

	preempt_disable();
	/*
	 * Same as in percpu_down_read().
	 */
	__this_cpu_inc(*sem->read_count);
	if (unlikely(!rcu_sync_is_idle(&sem->rss)))
		ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */
	preempt_enable();
	/*
	 * The barrier() from preempt_enable() prevents the compiler from
	 * bleeding the critical section out.
	 */

	if (ret)
		rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 1, _RET_IP_);

	return ret;
}

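/*
 * Example (an illustrative sketch): the trylock form for callers that
 * must not sleep waiting for a writer. "my_rwsem" and "my_try_read"
 * are hypothetical names, not part of this header.
 *
 *	bool my_try_read(void)
 *	{
 *		if (!percpu_down_read_trylock(&my_rwsem))
 *			return false;
 *		...
 *		percpu_up_read(&my_rwsem);
 *		return true;
 *	}
 */
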
static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
{
	/*
	 * The barrier() in preempt_disable() prevents the compiler from
	 * bleeding the critical section out.
	 */
	preempt_disable();
	/*
	 * Same as in percpu_down_read().
	 */
	if (likely(rcu_sync_is_idle(&sem->rss)))
		__this_cpu_dec(*sem->read_count);
	else
		__percpu_up_read(sem); /* Unconditional memory barrier */
	preempt_enable();

	rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_);
}

extern void percpu_down_write(struct percpu_rw_semaphore *);
extern void percpu_up_write(struct percpu_rw_semaphore *);

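/*
 * Example (an illustrative sketch): the write side is the slow,
 * heavyweight path that forces readers off the per-CPU fast path, so
 * it suits rare updates. "my_rwsem", "my_data" and "my_write_data"
 * are hypothetical names.
 *
 *	void my_write_data(unsigned long val)
 *	{
 *		percpu_down_write(&my_rwsem);
 *		my_data = val;
 *		percpu_up_write(&my_rwsem);
 *	}
 */
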
extern int __percpu_init_rwsem(struct percpu_rw_semaphore *,
				const char *, struct lock_class_key *);

extern void percpu_free_rwsem(struct percpu_rw_semaphore *);

#define percpu_init_rwsem(sem)					\
({								\
	static struct lock_class_key rwsem_key;		\
	__percpu_init_rwsem(sem, #sem, &rwsem_key);		\
})

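/*
 * Example (an illustrative sketch): dynamic initialization for an
 * embedded semaphore; percpu_init_rwsem() can fail because it
 * allocates the per-CPU read counter, so check its return value.
 * "my_obj" and the functions below are hypothetical.
 *
 *	struct my_obj {
 *		struct percpu_rw_semaphore sem;
 *	};
 *
 *	int my_obj_init(struct my_obj *obj)
 *	{
 *		return percpu_init_rwsem(&obj->sem);
 *	}
 *
 *	void my_obj_destroy(struct my_obj *obj)
 *	{
 *		percpu_free_rwsem(&obj->sem);
 *	}
 */
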
#define percpu_rwsem_is_held(sem)	lockdep_is_held(&(sem)->rw_sem)

#define percpu_rwsem_assert_held(sem)				\
	lockdep_assert_held(&(sem)->rw_sem)

static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem,
					bool read, unsigned long ip)
{
	lock_release(&sem->rw_sem.dep_map, 1, ip);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	if (!read)
		sem->rw_sem.owner = NULL;
#endif
}

static inline void percpu_rwsem_acquire(struct percpu_rw_semaphore *sem,
					bool read, unsigned long ip)
{
	lock_acquire(&sem->rw_sem.dep_map, 0, 1, read, 1, NULL, ip);
}

#endif	/* _LINUX_PERCPU_RWSEM_H */