#include <linux/atomic.h>
#include <linux/rwsem.h>
#include <linux/percpu.h>
#include <linux/wait.h>
#include <linux/percpu-rwsem.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/errno.h>

int percpu_init_rwsem(struct percpu_rw_semaphore *brw)
{
	brw->fast_read_ctr = alloc_percpu(int);
	if (unlikely(!brw->fast_read_ctr))
		return -ENOMEM;

	init_rwsem(&brw->rw_sem);
	atomic_set(&brw->write_ctr, 0);
	atomic_set(&brw->slow_read_ctr, 0);
	init_waitqueue_head(&brw->write_waitq);
	return 0;
}

void percpu_free_rwsem(struct percpu_rw_semaphore *brw)
{
	free_percpu(brw->fast_read_ctr);
	brw->fast_read_ctr = NULL; /* catch use after free bugs */
}

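/*
 * A minimal init/teardown sketch (hypothetical caller, not part of this
 * file; the foo_* names are illustrative).  percpu_init_rwsem() allocates
 * the per-cpu counter and can fail, so its return value must be checked:
 *
 *	static struct percpu_rw_semaphore foo_sem;
 *
 *	int foo_init(void)
 *	{
 *		return percpu_init_rwsem(&foo_sem);
 *	}
 *
 *	void foo_exit(void)
 *	{
 *		percpu_free_rwsem(&foo_sem);
 *	}
 */
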
/*
 * This is the fast path for down_read/up_read; it only needs to ensure
 * there is no pending writer (atomic_read(write_ctr) == 0) and inc/dec the
 * fast per-cpu counter. The writer uses synchronize_sched_expedited() to
 * serialize with the preempt-disabled section below.
 *
 * The nontrivial part is that we should guarantee acquire/release semantics
 * in the two cases where
 *
 *	R_W: down_write() comes after up_read(), so the writer should see all
 *	     changes done by the reader
 * or
 *	W_R: down_read() comes after up_write(), so the reader should see all
 *	     changes done by the writer
 *
 * If this helper fails, the callers rely on the normal rw_semaphore and
 * atomic_dec_and_test(), so in that case we have the necessary barriers.
 *
 * But if it succeeds we do not have any barriers; atomic_read(write_ctr) or
 * __this_cpu_add() below can be reordered with any LOAD/STORE done by the
 * reader inside the critical section. See the comments in down_write and
 * up_write below.
 */
static bool update_fast_ctr(struct percpu_rw_semaphore *brw, unsigned int val)
{
	bool success = false;

	preempt_disable();
	if (likely(!atomic_read(&brw->write_ctr))) {
		__this_cpu_add(*brw->fast_read_ctr, val);
		success = true;
	}
	preempt_enable();

	return success;
}

/*
 * Like the normal down_read(), this is not recursive: a writer can come
 * in after the first percpu_down_read() and cause a deadlock.
 */
void percpu_down_read(struct percpu_rw_semaphore *brw)
{
	if (likely(update_fast_ctr(brw, +1)))
		return;

	down_read(&brw->rw_sem);
	atomic_inc(&brw->slow_read_ctr);
	up_read(&brw->rw_sem);
}

void percpu_up_read(struct percpu_rw_semaphore *brw)
{
	if (likely(update_fast_ctr(brw, -1)))
		return;

	/* false-positive is possible but harmless */
	if (atomic_dec_and_test(&brw->slow_read_ctr))
		wake_up_all(&brw->write_waitq);
}

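/*
 * Note that a fast-path reader may be preempted and migrated between
 * down_read() and up_read(), so an individual CPU's fast_read_ctr can end
 * up negative; only the sum across all CPUs is meaningful.  A hypothetical
 * interleaving:
 *
 *	percpu_down_read(brw);		(CPU 0: fast_read_ctr becomes +1)
 *	...				(reader sleeps, migrates to CPU 1)
 *	percpu_up_read(brw);		(CPU 1: fast_read_ctr becomes -1)
 *
 * The sum is 0, i.e. no active readers, which is why clear_fast_ctr()
 * below sums over all possible CPUs.
 */
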
static unsigned int clear_fast_ctr(struct percpu_rw_semaphore *brw)
{
	unsigned int sum = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		sum += per_cpu(*brw->fast_read_ctr, cpu);
		per_cpu(*brw->fast_read_ctr, cpu) = 0;
	}

	return sum;
}

/*
 * A writer increments ->write_ctr to force the readers to switch to the
 * slow mode; note the atomic_read() check in update_fast_ctr().
 *
 * After that the readers can only inc/dec the slow ->slow_read_ctr counter
 * and ->fast_read_ctr is stable. Once the writer moves its sum into the
 * slow counter, it represents the number of active readers.
 *
 * Finally the writer takes ->rw_sem for writing to block new readers, then
 * waits until the slow counter becomes zero.
 */
void percpu_down_write(struct percpu_rw_semaphore *brw)
{
	/* tell update_fast_ctr() there is a pending writer */
	atomic_inc(&brw->write_ctr);
	/*
	 * 1. Ensures that write_ctr != 0 is visible to any down_read/up_read
	 *    so that update_fast_ctr() can't succeed.
	 *
	 * 2. Ensures we see the result of every previous this_cpu_add() in
	 *    update_fast_ctr().
	 *
	 * 3. Ensures that if any reader has exited its critical section via
	 *    the fast path, it executes a full memory barrier before we
	 *    return. See the R_W case in the comment above update_fast_ctr().
	 */
	synchronize_sched_expedited();

	/* exclude other writers, and block the new readers completely */
	down_write(&brw->rw_sem);

	/* nobody can use fast_read_ctr, move its sum into slow_read_ctr */
	atomic_add(clear_fast_ctr(brw), &brw->slow_read_ctr);

	/* wait for all readers to complete their percpu_up_read() */
	wait_event(brw->write_waitq, !atomic_read(&brw->slow_read_ctr));
}

void percpu_up_write(struct percpu_rw_semaphore *brw)
{
	/* release the lock, but the readers still can't use the fast path */
	up_write(&brw->rw_sem);
	/*
	 * Insert the barrier before the next fast path in down_read; see
	 * the W_R case in the comment above update_fast_ctr().
	 */
	synchronize_sched_expedited();
	/* the last writer unblocks update_fast_ctr() */
	atomic_dec(&brw->write_ctr);
}
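
/*
 * A minimal usage sketch (hypothetical caller, not part of this file; the
 * foo_* names are illustrative).  Readers normally pay only a
 * preempt_disable() plus a per-cpu increment and, unlike RCU readers, may
 * block inside the critical section.  Writers are expensive, paying for
 * two synchronize_sched_expedited() calls and waiting out every active
 * reader, so this lock only suits read-mostly data:
 *
 *	static struct percpu_rw_semaphore foo_sem;
 *	static int foo_value;			(protected, read-mostly data)
 *
 *	int foo_read(void)
 *	{
 *		int val;
 *
 *		percpu_down_read(&foo_sem);	(usually the fast path)
 *		val = foo_value;
 *		percpu_up_read(&foo_sem);
 *		return val;
 *	}
 *
 *	void foo_write(int val)
 *	{
 *		percpu_down_write(&foo_sem);	(waits for all readers)
 *		foo_value = val;
 *		percpu_up_write(&foo_sem);
 *	}
 */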