blob: 66fa43dc3f0f9ff8b5c67e5330120fd663bf4054 [file] [log] [blame]
Chris Mason925baed2008-06-25 16:01:30 -04001/*
2 * Copyright (C) 2008 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18#include <linux/sched.h>
Chris Mason925baed2008-06-25 16:01:30 -040019#include <linux/pagemap.h>
20#include <linux/spinlock.h>
21#include <linux/page-flags.h>
Chris Mason4881ee52008-07-24 09:51:08 -040022#include <asm/bug.h>
Chris Mason925baed2008-06-25 16:01:30 -040023#include "ctree.h"
24#include "extent_io.h"
25#include "locking.h"
26
/*
 * Take eb's spinlock.  Single funnel point for every spinlock
 * acquisition in this file.  NOTE(review): despite the name, no
 * spin_lock_nested() lockdep subclass is passed here — presumably
 * nesting order is guaranteed by the tree-walk callers; confirm.
 */
static inline void spin_nested(struct extent_buffer *eb)
{
	spin_lock(&eb->lock);
}
Chris Masond3977122009-01-05 21:25:51 -050031
/*
 * Setting a lock to blocking will drop the spinlock and set the
 * flag that forces other procs who want the lock to wait. After
 * this you can safely schedule with the lock held.
 */
void btrfs_set_lock_blocking(struct extent_buffer *eb)
{
	/*
	 * The unlocked test_bit is safe on the assumption that only the
	 * current lock owner transitions its own lock to blocking (the
	 * exit comment below implies the caller held eb->lock on entry).
	 * If the bit is already set we are already in blocking mode and
	 * the spinlock was dropped by an earlier call; do nothing.
	 */
	if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) {
		/* set the flag before releasing, so contenders see it */
		set_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags);
		spin_unlock(&eb->lock);
	}
	/* exit with the spin lock released and the bit set */
}
45
/*
 * clearing the blocking flag will take the spinlock again.
 * After this you can't safely schedule
 */
void btrfs_clear_lock_blocking(struct extent_buffer *eb)
{
	if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) {
		/* reacquire the spinlock BEFORE clearing the bit, so no
		 * window exists where the lock looks free to others */
		spin_nested(eb);
		clear_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags);
		/* make the cleared bit visible to other CPUs spinning
		 * on it in btrfs_spin_on_block() */
		smp_mb__after_clear_bit();
	}
	/* exit with the spin lock held */
}
59
60/*
61 * unfortunately, many of the places that currently set a lock to blocking
Wu Fengguangd4a78942009-04-02 16:46:06 -040062 * don't end up blocking for very long, and often they don't block
63 * at all. For a dbench 50 run, if we don't spin on the blocking bit
Chris Masonb4ce94d2009-02-04 09:25:08 -050064 * at all, the context switch rate can jump up to 400,000/sec or more.
65 *
66 * So, we're still stuck with this crummy spin on the blocking bit,
67 * at least until the most common causes of the short blocks
68 * can be dealt with.
69 */
70static int btrfs_spin_on_block(struct extent_buffer *eb)
Chris Mason925baed2008-06-25 16:01:30 -040071{
Chris Masonf9efa9c2008-06-25 16:14:04 -040072 int i;
Chris Mason66d7e852009-03-12 20:12:45 -040073
Chris Masonf9efa9c2008-06-25 16:14:04 -040074 for (i = 0; i < 512; i++) {
Chris Masonb4ce94d2009-02-04 09:25:08 -050075 if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
76 return 1;
77 if (need_resched())
78 break;
Chris Mason66d7e852009-03-12 20:12:45 -040079 cpu_relax();
Chris Masonf9efa9c2008-06-25 16:14:04 -040080 }
Chris Mason925baed2008-06-25 16:01:30 -040081 return 0;
82}
83
/*
 * This is somewhat different from trylock. It will take the
 * spinlock but if it finds the lock is set to blocking, it will
 * return without the lock held.
 *
 * returns 1 if it was able to take the lock and zero otherwise
 *
 * After this call, scheduling is not safe without first calling
 * btrfs_set_lock_blocking()
 */
int btrfs_try_spin_lock(struct extent_buffer *eb)
{
	int i;

	/*
	 * First attempt: briefly spin for the BLOCKING bit to clear,
	 * then take the spinlock.  The bit must be rechecked under the
	 * lock because it can be set again between the spin and the
	 * spin_nested() call.
	 */
	if (btrfs_spin_on_block(eb)) {
		spin_nested(eb);
		if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
			return 1;	/* success: spinlock held */
		/* blocking owner beat us; drop the lock and retry below */
		spin_unlock(&eb->lock);
	}
	/* spin for a bit on the BLOCKING flag */
	for (i = 0; i < 2; i++) {
		cpu_relax();
		/* spin timed out with the bit still set: give up */
		if (!btrfs_spin_on_block(eb))
			break;

		/* bit looked clear; take the lock and recheck as above */
		spin_nested(eb);
		if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
			return 1;
		spin_unlock(&eb->lock);
	}
	return 0;	/* caller falls back to btrfs_tree_lock() */
}
117
118/*
119 * the autoremove wake function will return 0 if it tried to wake up
120 * a process that was already awake, which means that process won't
121 * count as an exclusive wakeup. The waitq code will continue waking
122 * procs until it finds one that was actually sleeping.
123 *
124 * For btrfs, this isn't quite what we want. We want a single proc
125 * to be notified that the lock is ready for taking. If that proc
126 * already happen to be awake, great, it will loop around and try for
127 * the lock.
128 *
129 * So, btrfs_wake_function always returns 1, even when the proc that we
130 * tried to wake up was already awake.
131 */
132static int btrfs_wake_function(wait_queue_t *wait, unsigned mode,
133 int sync, void *key)
134{
135 autoremove_wake_function(wait, mode, sync, key);
136 return 1;
137}
138
/*
 * returns with the extent buffer spinlocked.
 *
 * This will spin and/or wait as required to take the lock, and then
 * return with the spinlock held.
 *
 * After this call, scheduling is not safe without first calling
 * btrfs_set_lock_blocking()
 */
int btrfs_tree_lock(struct extent_buffer *eb)
{
	DEFINE_WAIT(wait);
	/* custom wake function: always counts as an exclusive wakeup,
	 * see btrfs_wake_function() */
	wait.func = btrfs_wake_function;

	/* holder is blocking and didn't clear quickly: go sleep rather
	 * than spin-fighting for the spinlock */
	if (!btrfs_spin_on_block(eb))
		goto sleep;

	while(1) {
		spin_nested(eb);

		/* nobody is blocking, exit with the spinlock held */
		if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
			return 0;

		/*
		 * we have the spinlock, but the real owner is blocking.
		 * wait for them
		 */
		spin_unlock(&eb->lock);

		/*
		 * spin for a bit, and if the blocking flag goes away,
		 * loop around
		 */
		cpu_relax();
		if (btrfs_spin_on_block(eb))
			continue;
sleep:
		/* note: this label jumps into the loop body; after waking
		 * we fall through to the spin_nested() at the loop top */
		prepare_to_wait_exclusive(&eb->lock_wq, &wait,
					  TASK_UNINTERRUPTIBLE);

		/* recheck after queueing so a wakeup between the earlier
		 * test and prepare_to_wait_exclusive() isn't lost */
		if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
			schedule();

		finish_wait(&eb->lock_wq, &wait);
	}
	return 0;
}
187
int btrfs_tree_unlock(struct extent_buffer *eb)
{
	/*
	 * if we were a blocking owner, we don't have the spinlock held
	 * just clear the bit and look for waiters
	 */
	if (test_and_clear_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
		/* order the bit clear before the waitqueue_active() check
		 * so sleepers in btrfs_tree_lock() aren't missed */
		smp_mb__after_clear_bit();
	else
		/* non-blocking owner: we still hold the raw spinlock */
		spin_unlock(&eb->lock);

	/* nudge a waiter toward the now-free lock */
	if (waitqueue_active(&eb->lock_wq))
		wake_up(&eb->lock_wq);
	return 0;
}
203
Chris Masonb9447ef2009-03-09 11:45:38 -0400204void btrfs_assert_tree_locked(struct extent_buffer *eb)
Chris Mason925baed2008-06-25 16:01:30 -0400205{
Chris Masonb9447ef2009-03-09 11:45:38 -0400206 if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
207 assert_spin_locked(&eb->lock);
Chris Mason925baed2008-06-25 16:01:30 -0400208}