/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_dinode.h"
#include "xfs_error.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_fsops.h"

#include <linux/kthread.h>
#include <linux/freezer.h>

/*
 * The inode lookup is done in batches to keep the amount of lock traffic and
 * radix tree lookups to a minimum. The batch size is a trade-off between
 * lookup reduction and stack usage. This is in the reclaim path, so we can't
 * be too greedy.
 */
#define XFS_LOOKUP_BATCH	32

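/*
 * A quick sizing sketch (illustrative, assuming 64-bit pointers): each walk
 * keeps its batch as an on-stack array of inode pointers, so a batch costs
 *
 *	XFS_LOOKUP_BATCH * sizeof(struct xfs_inode *) = 32 * 8 = 256 bytes
 *
 * of stack per lookup loop, which is why the batch size is kept small in
 * this path.
 */
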
STATIC int
xfs_inode_ag_walk_grab(
	struct xfs_inode	*ip)
{
	struct inode		*inode = VFS_I(ip);

	/* nothing to sync during shutdown */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return EFSCORRUPTED;

	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
	if (xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
		return ENOENT;

	/* If we can't grab the inode, it must be on its way to reclaim. */
	if (!igrab(inode))
		return ENOENT;

	if (is_bad_inode(inode)) {
		IRELE(ip);
		return ENOENT;
	}

	/* inode is valid */
	return 0;
}

STATIC int
xfs_inode_ag_walk(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags),
	int			flags)
{
	uint32_t		first_index;
	int			last_error = 0;
	int			skipped;
	int			done;
	int			nr_found;

restart:
	done = 0;
	skipped = 0;
	first_index = 0;
	nr_found = 0;
	do {
		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		read_lock(&pag->pag_ici_lock);
		nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH);
		if (!nr_found) {
			read_unlock(&pag->pag_ici_lock);
			break;
		}

		/*
		 * Grab the inodes before we drop the lock. If we found
		 * nothing, nr_found == 0 and the loop will be skipped.
		 */
		for (i = 0; i < nr_found; i++) {
			struct xfs_inode *ip = batch[i];

			if (done || xfs_inode_ag_walk_grab(ip))
				batch[i] = NULL;

			/*
			 * Update the index for the next lookup. Catch
			 * overflows into the next AG range which can occur
			 * if we have inodes in the last block of the AG and
			 * we are currently pointing to the last inode.
			 */
			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
				done = 1;
		}

		/* unlock now we've grabbed the inodes. */
		read_unlock(&pag->pag_ici_lock);

		for (i = 0; i < nr_found; i++) {
			if (!batch[i])
				continue;
			error = execute(batch[i], pag, flags);
			IRELE(batch[i]);
			if (error == EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != EFSCORRUPTED)
				last_error = error;
		}

		/* bail out if the filesystem is corrupted. */
		if (error == EFSCORRUPTED)
			break;

	} while (nr_found && !done);

	if (skipped) {
		delay(1);
		goto restart;
	}
	return last_error;
}

int
xfs_inode_ag_iterator(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags),
	int			flags)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;

	ag = 0;
	while ((pag = xfs_perag_get(mp, ag))) {
		ag = pag->pag_agno + 1;
		error = xfs_inode_ag_walk(mp, pag, execute, flags);
		xfs_perag_put(pag);
		if (error) {
			last_error = error;
			if (error == EFSCORRUPTED)
				break;
		}
	}
	return XFS_ERROR(last_error);
}

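/*
 * Usage sketch: callers supply a per-inode callback with the
 * (ip, pag, flags) signature above and the iterator fans it out across
 * every AG in the mount. For example, xfs_sync_data() below does:
 *
 *	error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags);
 */
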
STATIC int
xfs_sync_inode_data(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags)
{
	struct inode		*inode = VFS_I(ip);
	struct address_space	*mapping = inode->i_mapping;
	int			error = 0;

	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		goto out_wait;

	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) {
		if (flags & SYNC_TRYLOCK)
			goto out_wait;
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}

	error = xfs_flush_pages(ip, 0, -1, (flags & SYNC_WAIT) ?
						0 : XBF_ASYNC, FI_NONE);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

 out_wait:
	if (flags & SYNC_WAIT)
		xfs_ioend_wait(ip);
	return error;
}

STATIC int
xfs_sync_inode_attr(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags)
{
	int			error = 0;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_inode_clean(ip))
		goto out_unlock;
	if (!xfs_iflock_nowait(ip)) {
		if (!(flags & SYNC_WAIT))
			goto out_unlock;
		xfs_iflock(ip);
	}

	if (xfs_inode_clean(ip)) {
		xfs_ifunlock(ip);
		goto out_unlock;
	}

	error = xfs_iflush(ip, flags);

 out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	return error;
}

/*
 * Write out pagecache data for the whole filesystem.
 */
STATIC int
xfs_sync_data(
	struct xfs_mount	*mp,
	int			flags)
{
	int			error;

	ASSERT((flags & ~(SYNC_TRYLOCK|SYNC_WAIT)) == 0);

	error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags);
	if (error)
		return XFS_ERROR(error);

	xfs_log_force(mp, (flags & SYNC_WAIT) ? XFS_LOG_SYNC : 0);
	return 0;
}

/*
 * Write out inode metadata (attributes) for the whole filesystem.
 */
STATIC int
xfs_sync_attr(
	struct xfs_mount	*mp,
	int			flags)
{
	ASSERT((flags & ~SYNC_WAIT) == 0);

	return xfs_inode_ag_iterator(mp, xfs_sync_inode_attr, flags);
}

STATIC int
xfs_sync_fsdata(
	struct xfs_mount	*mp)
{
	struct xfs_buf		*bp;

	/*
	 * If the buffer is pinned then push on the log so we won't get stuck
	 * waiting in the write for someone, maybe ourselves, to flush the log.
	 *
	 * Even though we just pushed the log above, we did not have the
	 * superblock buffer locked at that point so it can become pinned in
	 * between there and here.
	 */
	bp = xfs_getsb(mp, 0);
	if (XFS_BUF_ISPINNED(bp))
		xfs_log_force(mp, 0);

	return xfs_bwrite(mp, bp);
}

/*
 * When remounting a filesystem read-only or freezing the filesystem, we have
 * two phases to execute. The first phase syncs the data before we quiesce
 * the filesystem, and the second flushes all the inodes out after we've
 * waited for all the transactions created by the first phase to complete.
 * The second phase ensures that the inodes are written to their location on
 * disk rather than just existing in transactions in the log. This means
 * after a quiesce there is no log replay required to write the inodes to
 * disk (this is the main difference between a sync and a quiesce).
 */
/*
 * First stage of freeze - no writers will make progress now that we are
 * here, so we flush delwri and delalloc buffers here, then wait for all I/O
 * to complete.  Data is frozen at that point. Metadata is not frozen,
 * transactions can still occur here so don't bother flushing the buftarg
 * because it'll just get dirty again.
 */
int
xfs_quiesce_data(
	struct xfs_mount	*mp)
{
	int			error, error2 = 0;

	/* push non-blocking */
	xfs_sync_data(mp, 0);
	xfs_qm_sync(mp, SYNC_TRYLOCK);

	/* push and block till complete */
	xfs_sync_data(mp, SYNC_WAIT);
	xfs_qm_sync(mp, SYNC_WAIT);

	/* write superblock and hoover up shutdown errors */
	error = xfs_sync_fsdata(mp);

	/* make sure all delwri buffers are written out */
	xfs_flush_buftarg(mp->m_ddev_targp, 1);

	/* mark the log as covered if needed */
	if (xfs_log_need_covered(mp))
		error2 = xfs_fs_log_dummy(mp, SYNC_WAIT);

	/* flush data-only devices */
	if (mp->m_rtdev_targp)
		XFS_bflush(mp->m_rtdev_targp);

	return error ? error : error2;
}

STATIC void
xfs_quiesce_fs(
	struct xfs_mount	*mp)
{
	int	count = 0, pincount;

	xfs_reclaim_inodes(mp, 0);
	xfs_flush_buftarg(mp->m_ddev_targp, 0);

	/*
	 * This loop must run at least twice. The first instance of the loop
	 * will flush most meta data but that will generate more meta data
	 * (typically directory updates), which then must be flushed and
	 * logged before we can write the unmount record. We also do sync
	 * reclaim of inodes to catch any that the above delwri flush skipped.
	 */
	do {
		xfs_reclaim_inodes(mp, SYNC_WAIT);
		xfs_sync_attr(mp, SYNC_WAIT);
		pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1);
		if (!pincount) {
			delay(50);
			count++;
		}
	} while (count < 2);
}

/*
 * Second stage of a quiesce. The data is already synced, now we have to take
 * care of the metadata. New transactions are already blocked, so we need to
 * wait for any remaining transactions to drain out before proceeding.
 */
void
xfs_quiesce_attr(
	struct xfs_mount	*mp)
{
	int	error = 0;

	/* wait for all modifications to complete */
	while (atomic_read(&mp->m_active_trans) > 0)
		delay(100);

	/* flush inodes and push all remaining buffers out to disk */
	xfs_quiesce_fs(mp);

	/*
	 * Just warn here till VFS can correctly support
	 * read-only remount without racing.
	 */
	WARN_ON(atomic_read(&mp->m_active_trans) != 0);

	/* Push the superblock and write an unmount record */
	error = xfs_log_sbcount(mp, 1);
	if (error)
		xfs_fs_cmn_err(CE_WARN, mp,
				"xfs_attr_quiesce: failed to log sb changes. "
				"Frozen image may not be consistent.");
	xfs_log_unmount_write(mp);
	xfs_unmountfs_writesb(mp);
}

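/*
 * Putting the two stages together, a freeze or read-only remount runs
 * roughly the following sequence (illustrative sketch; the real callers
 * live in the VFS entry points in xfs_super.c):
 *
 *	xfs_quiesce_data(mp);	- sync data, quotas and the superblock
 *	xfs_quiesce_attr(mp);	- drain transactions, flush inodes and
 *				  write the unmount record
 *
 * After this, the on-disk image needs no log replay to be consistent.
 */
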
/*
 * Enqueue a work item to be picked up by the vfs xfssyncd thread.
 * Doing this has two advantages:
 * - It saves on stack space, which is tight in certain situations
 * - It can be used (with care) as a mechanism to avoid deadlocks.
 * Flushing while allocating in a full filesystem requires both.
 */
STATIC void
xfs_syncd_queue_work(
	struct xfs_mount *mp,
	void		*data,
	void		(*syncer)(struct xfs_mount *, void *),
	struct completion *completion)
{
	struct xfs_sync_work *work;

	work = kmem_alloc(sizeof(struct xfs_sync_work), KM_SLEEP);
	INIT_LIST_HEAD(&work->w_list);
	work->w_syncer = syncer;
	work->w_data = data;
	work->w_mount = mp;
	work->w_completion = completion;
	spin_lock(&mp->m_sync_lock);
	list_add_tail(&work->w_list, &mp->m_sync_list);
	spin_unlock(&mp->m_sync_lock);
	wake_up_process(mp->m_sync_task);
}

/*
 * Flush delayed allocate data, attempting to free up reserved space
 * from existing allocations. At this point a new allocation attempt
 * has failed with ENOSPC and we are in the process of scratching our
 * heads, looking about for more room...
 */
STATIC void
xfs_flush_inodes_work(
	struct xfs_mount *mp,
	void		*arg)
{
	struct inode	*inode = arg;
	xfs_sync_data(mp, SYNC_TRYLOCK);
	xfs_sync_data(mp, SYNC_TRYLOCK | SYNC_WAIT);
	iput(inode);
}

void
xfs_flush_inodes(
	xfs_inode_t	*ip)
{
	struct inode	*inode = VFS_I(ip);
	DECLARE_COMPLETION_ONSTACK(completion);

	igrab(inode);
	xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inodes_work, &completion);
	wait_for_completion(&completion);
	xfs_log_force(ip->i_mount, XFS_LOG_SYNC);
}

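/*
 * Illustrative ENOSPC retry pattern (a sketch, modeled on the
 * delayed-allocation retry in the iomap write-delay code; not a literal
 * copy of that caller):
 *
 *	error = allocate_delalloc_blocks(ip, ...);
 *	if (error == ENOSPC && !flushed) {
 *		xfs_flush_inodes(ip);	- push delalloc data to free space
 *		flushed = 1;
 *		goto retry;		- retry the allocation once
 *	}
 */
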
/*
 * Every sync period we need to unpin all items, reclaim inodes and sync
 * disk quotas.  We might need to cover the log to indicate that the
 * filesystem is idle and not frozen.
 */
STATIC void
xfs_sync_worker(
	struct xfs_mount *mp,
	void		*unused)
{
	int		error;

	if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
		xfs_log_force(mp, 0);
		xfs_reclaim_inodes(mp, 0);
		/* dgc: errors ignored here */
		error = xfs_qm_sync(mp, SYNC_TRYLOCK);
		if (mp->m_super->s_frozen == SB_UNFROZEN &&
		    xfs_log_need_covered(mp))
			error = xfs_fs_log_dummy(mp, 0);
	}
	mp->m_sync_seq++;
	wake_up(&mp->m_wait_single_sync_task);
}

STATIC int
xfssyncd(
	void			*arg)
{
	struct xfs_mount	*mp = arg;
	long			timeleft;
	xfs_sync_work_t		*work, *n;
	LIST_HEAD		(tmp);

	set_freezable();
	timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
	for (;;) {
		if (list_empty(&mp->m_sync_list))
			timeleft = schedule_timeout_interruptible(timeleft);
		/* swsusp */
		try_to_freeze();
		if (kthread_should_stop() && list_empty(&mp->m_sync_list))
			break;

		spin_lock(&mp->m_sync_lock);
		/*
		 * We can get woken by laptop mode, to do a sync -
		 * that's the (only!) case where the list would be
		 * empty with time remaining.
		 */
		if (!timeleft || list_empty(&mp->m_sync_list)) {
			if (!timeleft)
				timeleft = xfs_syncd_centisecs *
							msecs_to_jiffies(10);
			INIT_LIST_HEAD(&mp->m_sync_work.w_list);
			list_add_tail(&mp->m_sync_work.w_list,
					&mp->m_sync_list);
		}
		list_splice_init(&mp->m_sync_list, &tmp);
		spin_unlock(&mp->m_sync_lock);

		list_for_each_entry_safe(work, n, &tmp, w_list) {
			(*work->w_syncer)(mp, work->w_data);
			list_del(&work->w_list);
			if (work == &mp->m_sync_work)
				continue;
			if (work->w_completion)
				complete(work->w_completion);
			kmem_free(work);
		}
	}

	return 0;
}

int
xfs_syncd_init(
	struct xfs_mount	*mp)
{
	mp->m_sync_work.w_syncer = xfs_sync_worker;
	mp->m_sync_work.w_mount = mp;
	mp->m_sync_work.w_completion = NULL;
	mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd/%s", mp->m_fsname);
	if (IS_ERR(mp->m_sync_task))
		return -PTR_ERR(mp->m_sync_task);
	return 0;
}

void
xfs_syncd_stop(
	struct xfs_mount	*mp)
{
	kthread_stop(mp->m_sync_task);
}

void
__xfs_inode_set_reclaim_tag(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip)
{
	radix_tree_tag_set(&pag->pag_ici_root,
			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			   XFS_ICI_RECLAIM_TAG);

	if (!pag->pag_ici_reclaimable) {
		/* propagate the reclaim tag up into the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				XFS_ICI_RECLAIM_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);
		trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
							-1, _RET_IP_);
	}
	pag->pag_ici_reclaimable++;
}

/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
 */
void
xfs_inode_set_reclaim_tag(
	xfs_inode_t	*ip)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	write_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);
	__xfs_inode_set_reclaim_tag(pag, ip);
	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
	spin_unlock(&ip->i_flags_lock);
	write_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

STATIC void
__xfs_inode_clear_reclaim(
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	pag->pag_ici_reclaimable--;
	if (!pag->pag_ici_reclaimable) {
		/* clear the reclaim tag from the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				XFS_ICI_RECLAIM_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);
		trace_xfs_perag_clear_reclaim(ip->i_mount, pag->pag_agno,
							-1, _RET_IP_);
	}
}

void
__xfs_inode_clear_reclaim_tag(
	xfs_mount_t	*mp,
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	radix_tree_tag_clear(&pag->pag_ici_root,
			XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
	__xfs_inode_clear_reclaim(pag, ip);
}

/*
 * Grab the inode for reclaim exclusively.
 * Return 0 if we grabbed it, non-zero otherwise.
 */
STATIC int
xfs_reclaim_inode_grab(
	struct xfs_inode	*ip,
	int			flags)
{
	/*
	 * Do some unlocked checks first to avoid unnecessary lock traffic.
	 * The first is a flush lock check, the second is an already-in-reclaim
	 * check. Only do these checks if we are not going to block on locks.
	 */
	if ((flags & SYNC_TRYLOCK) &&
	    (!ip->i_flush.done || __xfs_iflags_test(ip, XFS_IRECLAIM))) {
		return 1;
	}

	/*
	 * The radix tree lock here protects a thread in xfs_iget from racing
	 * with us starting reclaim on the inode.  Once we have the
	 * XFS_IRECLAIM flag set it will not touch us.
	 */
	spin_lock(&ip->i_flags_lock);
	ASSERT_ALWAYS(__xfs_iflags_test(ip, XFS_IRECLAIMABLE));
	if (__xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* ignore as it is already under reclaim */
		spin_unlock(&ip->i_flags_lock);
		return 1;
	}
	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	return 0;
}

/*
 * Inodes in different states need to be treated differently, and the return
 * value of xfs_iflush is not sufficient to get this right. The following table
 * lists the inode states and the reclaim actions necessary for non-blocking
 * reclaim:
 *
 *	inode state		iflush ret	required action
 *	---------------		----------	---------------
 *	bad			-		reclaim
 *	shutdown		EIO		unpin and reclaim
 *	clean, unpinned		0		reclaim
 *	stale, unpinned		0		reclaim
 *	clean, pinned(*)	0		requeue
 *	stale, pinned		EAGAIN		requeue
 *	dirty, delwri ok	0		requeue
 *	dirty, delwri blocked	EAGAIN		requeue
 *	dirty, sync flush	0		reclaim
 *
 * (*) dgc: I don't think the clean, pinned state is possible but it gets
 * handled anyway given the order of checks implemented.
 *
 * As can be seen from the table, the return value of xfs_iflush() is not
 * sufficient to correctly decide the reclaim action here. The checks in
 * xfs_iflush() might look like duplicates, but they are not.
 *
 * Also, because we get the flush lock first, we know that any inode that has
 * been flushed delwri has had the flush completed by the time we check that
 * the inode is clean. The clean inode check needs to be done before flushing
 * the inode delwri otherwise we would loop forever requeuing clean inodes as
 * we cannot tell apart a successful delwri flush and a clean inode from the
 * return value of xfs_iflush().
 *
 * Note that because the inode is flushed delayed write by background
 * writeback, the flush lock may already be held here and waiting on it can
 * result in very long latencies. Hence for sync reclaims, where we wait on the
 * flush lock, the caller should push out delayed write inodes first before
 * trying to reclaim them to minimise the amount of time spent waiting. For
 * background reclaim, we just requeue the inode for the next pass.
 *
 * Hence the order of actions after gaining the locks should be:
 *	bad		=> reclaim
 *	shutdown	=> unpin and reclaim
 *	pinned, delwri	=> requeue
 *	pinned, sync	=> unpin
 *	stale		=> reclaim
 *	clean		=> reclaim
 *	dirty, delwri	=> flush and requeue
 *	dirty, sync	=> flush, wait and reclaim
 */
STATIC int
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			sync_mode)
{
	int	error = 0;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (!xfs_iflock_nowait(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out;
		xfs_iflock(ip);
	}

	if (is_bad_inode(VFS_I(ip)))
		goto reclaim;
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_iunpin_wait(ip);
		goto reclaim;
	}
	if (xfs_ipincount(ip)) {
		if (!(sync_mode & SYNC_WAIT)) {
			xfs_ifunlock(ip);
			goto out;
		}
		xfs_iunpin_wait(ip);
	}
	if (xfs_iflags_test(ip, XFS_ISTALE))
		goto reclaim;
	if (xfs_inode_clean(ip))
		goto reclaim;

	/* Now we have an inode that needs flushing */
	error = xfs_iflush(ip, sync_mode);
	if (sync_mode & SYNC_WAIT) {
		xfs_iflock(ip);
		goto reclaim;
	}

	/*
	 * When we have to flush an inode but don't have SYNC_WAIT set, we
	 * flush the inode out using a delwri buffer and wait for the next
	 * call into reclaim to find it in a clean state instead of waiting for
	 * it now. We also don't return errors here - if the error is transient
	 * then the next reclaim pass will flush the inode, and if the error
	 * is permanent then the next sync reclaim will reclaim the inode and
	 * pass on the error.
	 */
	if (error && error != EAGAIN && !XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_fs_cmn_err(CE_WARN, ip->i_mount,
			"inode 0x%llx background reclaim flush failed with %d",
			(long long)ip->i_ino, error);
	}
out:
	xfs_iflags_clear(ip, XFS_IRECLAIM);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	/*
	 * We could return EAGAIN here to make reclaim rescan the inode tree in
	 * a short while. However, this just burns CPU time scanning the tree
	 * waiting for IO to complete and xfssyncd never goes back to the idle
	 * state. Instead, return 0 to let the next scheduled background reclaim
	 * attempt to reclaim the inode again.
	 */
	return 0;

reclaim:
	xfs_ifunlock(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	XFS_STATS_INC(xs_ig_reclaims);
	/*
	 * Remove the inode from the per-AG radix tree.
	 *
	 * Because radix_tree_delete won't complain even if the item was never
	 * added to the tree, assert that it's been there before to catch
	 * problems with the inode lifetime early on.
	 */
	write_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root,
				XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino)))
		ASSERT(0);
	__xfs_inode_clear_reclaim(pag, ip);
	write_unlock(&pag->pag_ici_lock);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups. This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.  We get
	 * both the ilock and the iolock because the code may need to drop the
	 * ilock one but will still hold the iolock.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
	xfs_qm_dqdetach(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);

	xfs_inode_free(ip);
	return error;
}

/*
 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
 * corrupted, we still want to try to reclaim all the inodes. If we don't,
 * then a shutdown during a filesystem unmount reclaim walk will leak all the
 * unreclaimed inodes.
 */
int
xfs_reclaim_inodes_ag(
	struct xfs_mount	*mp,
	int			flags,
	int			*nr_to_scan)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;

	ag = 0;
	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		unsigned long	first_index = 0;
		int		done = 0;
		int		nr_found = 0;

		ag = pag->pag_agno + 1;

		do {
			struct xfs_inode *batch[XFS_LOOKUP_BATCH];
			int	i;

			write_lock(&pag->pag_ici_lock);
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH,
					XFS_ICI_RECLAIM_TAG);
			if (!nr_found) {
				write_unlock(&pag->pag_ici_lock);
				break;
			}

			/*
			 * Grab the inodes before we drop the lock. If we found
			 * nothing, nr_found == 0 and the loop will be skipped.
			 */
			for (i = 0; i < nr_found; i++) {
				struct xfs_inode *ip = batch[i];

				if (done || xfs_reclaim_inode_grab(ip, flags))
					batch[i] = NULL;

				/*
				 * Update the index for the next lookup. Catch
				 * overflows into the next AG range which can
				 * occur if we have inodes in the last block of
				 * the AG and we are currently pointing to the
				 * last inode.
				 */
				first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
				if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
					done = 1;
			}

			/* unlock now we've grabbed the inodes. */
			write_unlock(&pag->pag_ici_lock);

			for (i = 0; i < nr_found; i++) {
				if (!batch[i])
					continue;
				error = xfs_reclaim_inode(batch[i], pag, flags);
				if (error && last_error != EFSCORRUPTED)
					last_error = error;
			}

			*nr_to_scan -= XFS_LOOKUP_BATCH;

		} while (nr_found && !done && *nr_to_scan > 0);

		xfs_perag_put(pag);
	}
	return XFS_ERROR(last_error);
}

int
xfs_reclaim_inodes(
	xfs_mount_t	*mp,
	int		mode)
{
	int		nr_to_scan = INT_MAX;

	return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan);
}

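/*
 * Usage sketch (illustrative): the unmount path wants everything gone and
 * can afford to block, so it would call
 *
 *	xfs_reclaim_inodes(mp, SYNC_WAIT);
 *
 * while periodic background reclaim from xfs_sync_worker() above passes 0
 * and leaves anything it cannot reclaim immediately for a later pass.
 */
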
/*
 * Shrinker infrastructure.
 */
static int
xfs_reclaim_inode_shrink(
	struct shrinker	*shrink,
	int		nr_to_scan,
	gfp_t		gfp_mask)
{
	struct xfs_mount *mp;
	struct xfs_perag *pag;
	xfs_agnumber_t	ag;
	int		reclaimable;

	mp = container_of(shrink, struct xfs_mount, m_inode_shrink);
	if (nr_to_scan) {
		if (!(gfp_mask & __GFP_FS))
			return -1;

		xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK, &nr_to_scan);
		/* terminate if we don't exhaust the scan */
		if (nr_to_scan > 0)
			return -1;
	}

	reclaimable = 0;
	ag = 0;
	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		ag = pag->pag_agno + 1;
		reclaimable += pag->pag_ici_reclaimable;
		xfs_perag_put(pag);
	}
	return reclaimable;
}

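/*
 * How the VM drives this callback (a sketch of the shrinker protocol of
 * this era, not XFS code): shrink_slab() first calls with nr_to_scan == 0
 * to ask how many objects are reclaimable, then calls again in batches to
 * ask for that many to be scanned, roughly:
 *
 *	count = (*shrinker->shrink)(shrinker, 0, gfp_mask);
 *	(*shrinker->shrink)(shrinker, batch, gfp_mask);
 *
 * Returning -1 tells the VM to back off, which we do above for allocations
 * that cannot recurse into the filesystem (!__GFP_FS).
 */
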
void
xfs_inode_shrinker_register(
	struct xfs_mount	*mp)
{
	mp->m_inode_shrink.shrink = xfs_reclaim_inode_shrink;
	mp->m_inode_shrink.seeks = DEFAULT_SEEKS;
	register_shrinker(&mp->m_inode_shrink);
}

void
xfs_inode_shrinker_unregister(
	struct xfs_mount	*mp)
{
	unregister_shrinker(&mp->m_inode_shrink);
}