blob: 250a4acddb2b8975a6c8278a3c5b1d803f36b585 [file] [log] [blame]
Mikulas Patocka62ac6652012-09-26 07:46:43 +02001#ifndef _LINUX_PERCPU_RWSEM_H
2#define _LINUX_PERCPU_RWSEM_H
3
4#include <linux/mutex.h>
5#include <linux/percpu.h>
6#include <linux/rcupdate.h>
7#include <linux/delay.h>
8
/*
 * A read-write semaphore optimized for frequent readers.
 *
 * Fast-path readers touch only a per-CPU counter, avoiding shared
 * cacheline contention.  A writer sets @locked (diverting new readers
 * to a mutex-based slow path), waits for the per-CPU counters to
 * drain, and holds @mtx for the whole write-side critical section.
 */
struct percpu_rw_semaphore {
	unsigned __percpu *counters;	/* per-CPU count of readers currently inside */
	bool locked;			/* true while a writer holds (or is taking) the lock */
	struct mutex mtx;		/* serializes writers and slow-path readers */
};
14
Mikulas Patocka5c1eabe2012-10-22 19:37:47 -040015#define light_mb() barrier()
16#define heavy_mb() synchronize_sched()
17
Mikulas Patocka62ac6652012-09-26 07:46:43 +020018static inline void percpu_down_read(struct percpu_rw_semaphore *p)
19{
Mikulas Patocka1bf11c52012-10-22 19:39:16 -040020 rcu_read_lock_sched();
Mikulas Patocka62ac6652012-09-26 07:46:43 +020021 if (unlikely(p->locked)) {
Mikulas Patocka1bf11c52012-10-22 19:39:16 -040022 rcu_read_unlock_sched();
Mikulas Patocka62ac6652012-09-26 07:46:43 +020023 mutex_lock(&p->mtx);
24 this_cpu_inc(*p->counters);
25 mutex_unlock(&p->mtx);
26 return;
27 }
28 this_cpu_inc(*p->counters);
Mikulas Patocka1bf11c52012-10-22 19:39:16 -040029 rcu_read_unlock_sched();
Mikulas Patocka5c1eabe2012-10-22 19:37:47 -040030 light_mb(); /* A, between read of p->locked and read of data, paired with D */
Mikulas Patocka62ac6652012-09-26 07:46:43 +020031}
32
/*
 * Release @p after reading: drop this CPU's reader count.
 *
 * The decrement may hit a different CPU's counter than the matching
 * increment did (if the task migrated meanwhile); only the sum over
 * all CPUs, as computed by __percpu_count(), matters to the writer.
 */
static inline void percpu_up_read(struct percpu_rw_semaphore *p)
{
	light_mb(); /* B, between read of the data and write to p->counter, paired with C */
	this_cpu_dec(*p->counters);
}
38
39static inline unsigned __percpu_count(unsigned __percpu *counters)
40{
41 unsigned total = 0;
42 int cpu;
43
44 for_each_possible_cpu(cpu)
45 total += ACCESS_ONCE(*per_cpu_ptr(counters, cpu));
46
47 return total;
48}
49
/*
 * Acquire @p for writing.
 *
 * Take the mutex (excluding other writers and blocking slow-path
 * readers), set @locked so new readers divert to the slow path, then
 * synchronize_sched() to wait out any reader still inside its
 * rcu_read_lock_sched() fast-path window.  After that, every reader
 * that saw locked == false has already incremented its counter, so
 * polling the counter sum down to zero waits for all of them to leave.
 */
static inline void percpu_down_write(struct percpu_rw_semaphore *p)
{
	mutex_lock(&p->mtx);
	p->locked = true;
	synchronize_sched(); /* make sure that all readers exit the rcu_read_lock_sched region */
	while (__percpu_count(p->counters))
		msleep(1);	/* polling: write acquisition is assumed to be rare */
	heavy_mb(); /* C, between read of p->counter and write to data, paired with B */
}
59
/*
 * Release the write side of @p: clear @locked so new readers use the
 * fast path again, then drop the mutex to unblock any slow-path
 * readers (and other writers) queued on it.
 */
static inline void percpu_up_write(struct percpu_rw_semaphore *p)
{
	heavy_mb(); /* D, between write to data and write to p->locked, paired with A */
	p->locked = false;
	mutex_unlock(&p->mtx);
}
66
67static inline int percpu_init_rwsem(struct percpu_rw_semaphore *p)
68{
69 p->counters = alloc_percpu(unsigned);
70 if (unlikely(!p->counters))
71 return -ENOMEM;
72 p->locked = false;
73 mutex_init(&p->mtx);
74 return 0;
75}
76
77static inline void percpu_free_rwsem(struct percpu_rw_semaphore *p)
78{
79 free_percpu(p->counters);
80 p->counters = NULL; /* catch use after free bugs */
81}
82
83#endif