/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_inode.h"
#include "xfs_dinode.h"
#include "xfs_error.h"
#include "xfs_mru_cache.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_utils.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_rw.h"

#include <linux/kthread.h>
#include <linux/freezer.h>

/*
 * Sync all the inodes in the given AG according to the
 * direction given by the flags.
 */
STATIC int
xfs_sync_inodes_ag(
	xfs_mount_t	*mp,
	int		ag,
	int		flags)
{
	xfs_perag_t	*pag = &mp->m_perag[ag];
	int		nr_found;
	int		first_index = 0;
	int		error = 0;
	int		last_error = 0;
	int		fflag = XFS_B_ASYNC;
	int		lock_flags = XFS_ILOCK_SHARED;

	if (flags & SYNC_DELWRI)
		fflag = XFS_B_DELWRI;
	if (flags & SYNC_WAIT)
		fflag = 0;		/* synchronous overrides all */

	if (flags & (SYNC_DELWRI | SYNC_CLOSE)) {
		/*
		 * We need the I/O lock if we're going to call any of
		 * the flush/inval routines.
		 */
		lock_flags |= XFS_IOLOCK_SHARED;
	}

	do {
		struct inode	*inode;
		boolean_t	inode_refed;
		xfs_inode_t	*ip = NULL;

		/*
		 * use a gang lookup to find the next inode in the tree
		 * as the tree is sparse and a gang lookup walks to find
		 * the number of objects requested.
		 */
		read_lock(&pag->pag_ici_lock);
		nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
				(void**)&ip, first_index, 1);

		if (!nr_found) {
			read_unlock(&pag->pag_ici_lock);
			break;
		}

		/* update the index for the next lookup */
		first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);

		/*
		 * skip inodes in reclaim. Let xfs_syncsub do that for
		 * us so we don't need to worry.
		 */
		if (xfs_iflags_test(ip, (XFS_IRECLAIM|XFS_IRECLAIMABLE))) {
			read_unlock(&pag->pag_ici_lock);
			continue;
		}

		/* bad inodes are dealt with elsewhere */
		inode = VFS_I(ip);
		if (is_bad_inode(inode)) {
			read_unlock(&pag->pag_ici_lock);
			continue;
		}

		/* nothing to sync during shutdown */
		if (XFS_FORCED_SHUTDOWN(mp) && !(flags & SYNC_CLOSE)) {
			read_unlock(&pag->pag_ici_lock);
			return 0;
		}

		/*
		 * If we can't get a reference on the VFS_I, the inode must be
		 * in reclaim. If we can get the inode lock without blocking,
		 * it is safe to flush the inode because we hold the tree lock
		 * and xfs_iextract will block right now. Hence if we lock the
		 * inode while holding the tree lock, xfs_ireclaim() is
		 * guaranteed to block on the inode lock we now hold and hence
		 * it is safe to reference the inode until we drop the inode
		 * locks completely.
		 */
		inode_refed = B_FALSE;
		if (igrab(inode)) {
			read_unlock(&pag->pag_ici_lock);
			xfs_ilock(ip, lock_flags);
			inode_refed = B_TRUE;
		} else {
			if (!xfs_ilock_nowait(ip, lock_flags)) {
				/* leave it to reclaim */
				read_unlock(&pag->pag_ici_lock);
				continue;
			}
			read_unlock(&pag->pag_ici_lock);
		}

		/*
		 * If we have to flush data or wait for I/O completion
		 * we need to drop the ilock that we currently hold.
		 * If we need to drop the lock, insert a marker if we
		 * have not already done so.
		 */
		if (flags & SYNC_CLOSE) {
			xfs_iunlock(ip, XFS_ILOCK_SHARED);
			if (XFS_FORCED_SHUTDOWN(mp))
				xfs_tosspages(ip, 0, -1, FI_REMAPF);
			else
				error = xfs_flushinval_pages(ip, 0, -1,
							FI_REMAPF);
			/* wait for I/O on freeze */
			if (flags & SYNC_IOWAIT)
				vn_iowait(ip);

			xfs_ilock(ip, XFS_ILOCK_SHARED);
		}

		if ((flags & SYNC_DELWRI) && VN_DIRTY(inode)) {
			xfs_iunlock(ip, XFS_ILOCK_SHARED);
			error = xfs_flush_pages(ip, 0, -1, fflag, FI_NONE);
			if (flags & SYNC_IOWAIT)
				vn_iowait(ip);
			xfs_ilock(ip, XFS_ILOCK_SHARED);
		}

		if ((flags & SYNC_ATTR) && !xfs_inode_clean(ip)) {
			if (flags & SYNC_WAIT) {
				xfs_iflock(ip);
				if (!xfs_inode_clean(ip))
					error = xfs_iflush(ip, XFS_IFLUSH_SYNC);
				else
					xfs_ifunlock(ip);
			} else if (xfs_iflock_nowait(ip)) {
				if (!xfs_inode_clean(ip))
					error = xfs_iflush(ip, XFS_IFLUSH_DELWRI);
				else
					xfs_ifunlock(ip);
			}
		}

		if (lock_flags)
			xfs_iunlock(ip, lock_flags);

		if (inode_refed) {
			IRELE(ip);
		}

		if (error)
			last_error = error;
		/*
		 * bail out if the filesystem is corrupted.
		 */
		if (error == EFSCORRUPTED)
			return XFS_ERROR(error);

	} while (nr_found);

	return last_error;
}

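/*
 * Sync the inodes in every AG that has an initialised inode cache,
 * according to the given flags.  If an AG pass reports a corrupted
 * filesystem we stop immediately; otherwise the last error seen is
 * returned.  For SYNC_DELWRI callers the log is forced afterwards,
 * synchronously if SYNC_WAIT is also set.
 */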
int
xfs_sync_inodes(
	xfs_mount_t	*mp,
	int		flags)
{
	int		error;
	int		last_error;
	int		i;
	int		lflags = XFS_LOG_FORCE;

	if (mp->m_flags & XFS_MOUNT_RDONLY)
		return 0;
	error = 0;
	last_error = 0;

	if (flags & SYNC_WAIT)
		lflags |= XFS_LOG_SYNC;

	for (i = 0; i < mp->m_sb.sb_agcount; i++) {
		if (!mp->m_perag[i].pag_ici_init)
			continue;
		error = xfs_sync_inodes_ag(mp, i, flags);
		if (error)
			last_error = error;
		if (error == EFSCORRUPTED)
			break;
	}
	if (flags & SYNC_DELWRI)
		xfs_log_force(mp, 0, lflags);

	return XFS_ERROR(last_error);
}

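/*
 * Commit a dummy transaction that relogs the root inode core and then
 * force the log.  Callers use this when the log needs covering, i.e.
 * when it is otherwise idle, so that log recovery knows there are no
 * other pending changes.
 */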
STATIC int
xfs_commit_dummy_trans(
	struct xfs_mount	*mp,
	uint			log_flags)
{
	struct xfs_inode	*ip = mp->m_rootip;
	struct xfs_trans	*tp;
	int			error;

	/*
	 * Put a dummy transaction in the log to tell recovery
	 * that all others are OK.
	 */
	tp = xfs_trans_alloc(mp, XFS_TRANS_DUMMY1);
	error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_ihold(tp, ip);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	/* XXX(hch): ignoring the error here.. */
	error = xfs_trans_commit(tp, 0);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	xfs_log_force(mp, 0, log_flags);
	return 0;
}

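/*
 * Write the superblock buffer to disk.  When called from xfssyncd
 * (SYNC_BDFLUSH) this is best-effort only: the buffer is written only
 * if it can be locked without sleeping, is dirty and is not pinned.
 * Otherwise the write is always issued, asynchronously unless
 * SYNC_WAIT is set.
 */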
int
xfs_sync_fsdata(
	struct xfs_mount	*mp,
	int			flags)
{
	struct xfs_buf		*bp;
	struct xfs_buf_log_item	*bip;
	int			error = 0;

	/*
	 * If this is xfssyncd() then only sync the superblock if we can
	 * lock it without sleeping and it is not pinned.
	 */
	if (flags & SYNC_BDFLUSH) {
		ASSERT(!(flags & SYNC_WAIT));

		bp = xfs_getsb(mp, XFS_BUF_TRYLOCK);
		if (!bp)
			goto out;

		bip = XFS_BUF_FSPRIVATE(bp, struct xfs_buf_log_item *);
		if (!bip || !xfs_buf_item_dirty(bip) || XFS_BUF_ISPINNED(bp))
			goto out_brelse;
	} else {
		bp = xfs_getsb(mp, 0);

		/*
		 * If the buffer is pinned then push on the log so we won't
		 * get stuck waiting in the write for someone, maybe
		 * ourselves, to flush the log.
		 *
		 * Even though we just pushed the log above, we did not have
		 * the superblock buffer locked at that point so it can
		 * become pinned in between there and here.
		 */
		if (XFS_BUF_ISPINNED(bp))
			xfs_log_force(mp, 0, XFS_LOG_FORCE);
	}

	if (flags & SYNC_WAIT)
		XFS_BUF_UNASYNC(bp);
	else
		XFS_BUF_ASYNC(bp);

	return xfs_bwrite(mp, bp);

 out_brelse:
	xfs_buf_relse(bp);
 out:
	return error;
}

/*
 * First stage of freeze - no more writers will make progress now we are here,
 * so we flush delwri and delalloc buffers here, then wait for all I/O to
 * complete.  Data is frozen at that point. Metadata is not frozen,
 * transactions can still occur here so don't bother flushing the buftarg (i.e
 * SYNC_QUIESCE) because it'll just get dirty again.
 */
int
xfs_quiesce_data(
	struct xfs_mount	*mp)
{
	int error;

	/* push non-blocking */
	xfs_sync_inodes(mp, SYNC_DELWRI|SYNC_BDFLUSH);
	XFS_QM_DQSYNC(mp, SYNC_BDFLUSH);
	xfs_filestream_flush(mp);

	/* push and block */
	xfs_sync_inodes(mp, SYNC_DELWRI|SYNC_WAIT|SYNC_IOWAIT);
	XFS_QM_DQSYNC(mp, SYNC_WAIT);

	/* write superblock and hoover shutdown errors */
	error = xfs_sync_fsdata(mp, 0);

	/* flush devices */
	XFS_bflush(mp->m_ddev_targp);
	if (mp->m_rtdev_targp)
		XFS_bflush(mp->m_rtdev_targp);

	return error;
}

/*
 * xfs_sync flushes any pending I/O to file system vfsp.
 *
 * This routine is called by vfs_sync() to make sure that things make it
 * out to disk eventually, on sync() system calls to flush out everything,
 * and when the file system is unmounted.  For the vfs_sync() case, all
 * we really need to do is sync out the log to make all of our meta-data
 * updates permanent (except for timestamps).  For calls from pflushd(),
 * dirty pages are kept moving by calling pdflush() on the inodes
 * containing them.  We also flush the inodes that we can lock without
 * sleeping and the superblock if we can lock it without sleeping from
 * vfs_sync() so that items at the tail of the log are always moving out.
 *
 * Flags:
 *      SYNC_BDFLUSH - We're being called from vfs_sync() so we don't want
 *                     to sleep if we can help it.  All we really need
 *                     to do is ensure that the log is synced at least
 *                     periodically.  We also push the inodes and
 *                     superblock if we can lock them without sleeping
 *                     and they are not pinned.
 *      SYNC_ATTR    - We need to flush the inodes.  Now handled by direct
 *                     calls to xfs_sync_inodes().
 *      SYNC_WAIT    - All the flushes that take place in this call should
 *                     be synchronous.
 *      SYNC_DELWRI  - This tells us to push dirty pages associated with
 *                     inodes.  SYNC_WAIT and SYNC_BDFLUSH are used to
 *                     determine if they should be flushed sync, async, or
 *                     delwri.
 *      SYNC_CLOSE   - This flag is passed when the system is being
 *                     unmounted.  We should sync and invalidate everything.
 *      SYNC_FSDATA  - This indicates that the caller would like to make
 *                     sure the superblock is safe on disk.  We can ensure
 *                     this by simply making sure the log gets flushed
 *                     if SYNC_BDFLUSH is set, and by actually writing it
 *                     out otherwise.
 *      SYNC_IOWAIT  - The caller wants us to wait for all data I/O to
 *                     complete before we return (including direct I/O).
 *                     Forms the drain side of the write barrier needed to
 *                     safely quiesce the filesystem.
 *
 */
int
xfs_sync(
	xfs_mount_t	*mp,
	int		flags)
{
	int		error;
	int		last_error = 0;
	uint		log_flags = XFS_LOG_FORCE;

	ASSERT(!(flags & SYNC_ATTR));

	/*
	 * Get the Quota Manager to flush the dquots.
	 *
	 * If XFS quota support is not enabled or this filesystem
	 * instance does not use quotas XFS_QM_DQSYNC will always
	 * return zero.
	 */
	error = XFS_QM_DQSYNC(mp, flags);
	if (error) {
		/*
		 * If we got an IO error, we will be shutting down.
		 * So, there's nothing more for us to do here.
		 */
		ASSERT(error != EIO || XFS_FORCED_SHUTDOWN(mp));
		if (XFS_FORCED_SHUTDOWN(mp))
			return XFS_ERROR(error);
	}

	if (flags & SYNC_IOWAIT)
		xfs_filestream_flush(mp);

	/*
	 * Sync out the log.  This ensures that the log is periodically
	 * flushed even if there is not enough activity to fill it up.
	 */
	if (flags & SYNC_WAIT)
		log_flags |= XFS_LOG_SYNC;

	xfs_log_force(mp, (xfs_lsn_t)0, log_flags);

	if (flags & SYNC_DELWRI) {
		if (flags & SYNC_BDFLUSH)
			xfs_finish_reclaim_all(mp, 1, XFS_IFLUSH_DELWRI_ELSE_ASYNC);
		else
			error = xfs_sync_inodes(mp, flags);
		/*
		 * Flushing out dirty data above probably generated more
		 * log activity, so if this isn't vfs_sync() then flush
		 * the log again.
		 */
		xfs_log_force(mp, 0, log_flags);
	}

	if (flags & SYNC_FSDATA) {
		error = xfs_sync_fsdata(mp, flags);
		if (error)
			last_error = error;
	}

	/*
	 * Now check to see if the log needs a "dummy" transaction.
	 */
	if (!(flags & SYNC_REMOUNT) && xfs_log_need_covered(mp)) {
		error = xfs_commit_dummy_trans(mp, log_flags);
		if (error)
			return error;
	}

	/*
	 * When shutting down, we need to ensure that the AIL is pushed
	 * to disk or the filesystem can appear corrupt from the PROM.
	 */
	if ((flags & (SYNC_CLOSE|SYNC_WAIT)) == (SYNC_CLOSE|SYNC_WAIT)) {
		XFS_bflush(mp->m_ddev_targp);
		if (mp->m_rtdev_targp) {
			XFS_bflush(mp->m_rtdev_targp);
		}
	}

	return XFS_ERROR(last_error);
}

/*
 * Enqueue a work item to be picked up by the vfs xfssyncd thread.
 * Doing this has two advantages:
 * - It saves on stack space, which is tight in certain situations
 * - It can be used (with care) as a mechanism to avoid deadlocks.
 * Flushing while allocating in a full filesystem requires both.
 */
STATIC void
xfs_syncd_queue_work(
	struct xfs_mount *mp,
	void		*data,
	void		(*syncer)(struct xfs_mount *, void *))
{
	struct bhv_vfs_sync_work *work;

	work = kmem_alloc(sizeof(struct bhv_vfs_sync_work), KM_SLEEP);
	INIT_LIST_HEAD(&work->w_list);
	work->w_syncer = syncer;
	work->w_data = data;
	work->w_mount = mp;
	spin_lock(&mp->m_sync_lock);
	list_add_tail(&work->w_list, &mp->m_sync_list);
	spin_unlock(&mp->m_sync_lock);
	wake_up_process(mp->m_sync_task);
}

/*
 * Flush delayed allocate data, attempting to free up reserved space
 * from existing allocations.  At this point a new allocation attempt
 * has failed with ENOSPC and we are in the process of scratching our
 * heads, looking about for more room...
 */
STATIC void
xfs_flush_inode_work(
	struct xfs_mount *mp,
	void		*arg)
{
	struct inode	*inode = arg;
	filemap_flush(inode->i_mapping);
	iput(inode);
}

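/*
 * Ask xfssyncd to flush the dirty data of a single inode (see the
 * ENOSPC note above xfs_flush_inode_work()), then give the worker a
 * short (500ms) window to run before returning.  The inode reference
 * taken here is dropped by xfs_flush_inode_work().
 */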
void
xfs_flush_inode(
	xfs_inode_t	*ip)
{
	struct inode	*inode = VFS_I(ip);

	igrab(inode);
	xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inode_work);
	delay(msecs_to_jiffies(500));
}

/*
 * This is the "bigger hammer" version of xfs_flush_inode_work...
 * (IOW, "If at first you don't succeed, use a Bigger Hammer").
 */
STATIC void
xfs_flush_device_work(
	struct xfs_mount *mp,
	void		*arg)
{
	struct inode	*inode = arg;
	sync_blockdev(mp->m_super->s_bdev);
	iput(inode);
}

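/*
 * The heavier-weight variant of xfs_flush_inode(): queue a flush of
 * the whole block device, give the worker a short window to run, then
 * force the log synchronously.  The inode is only used to hold a
 * reference across the queued work.
 */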
void
xfs_flush_device(
	xfs_inode_t	*ip)
{
	struct inode	*inode = VFS_I(ip);

	igrab(inode);
	xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_device_work);
	delay(msecs_to_jiffies(500));
	xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
}

/*
 * Every sync period we need to unpin all items, reclaim inodes, sync
 * quota and write out the superblock. We might need to cover the log
 * to indicate it is idle.
 */
STATIC void
xfs_sync_worker(
	struct xfs_mount *mp,
	void		*unused)
{
	int		error;

	if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
		xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
		xfs_finish_reclaim_all(mp, 1, XFS_IFLUSH_DELWRI_ELSE_ASYNC);
		/* dgc: errors ignored here */
		error = XFS_QM_DQSYNC(mp, SYNC_BDFLUSH);
		error = xfs_sync_fsdata(mp, SYNC_BDFLUSH);
		if (xfs_log_need_covered(mp))
			error = xfs_commit_dummy_trans(mp, XFS_LOG_FORCE);
	}
	mp->m_sync_seq++;
	wake_up(&mp->m_wait_single_sync_task);
}

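/*
 * Main loop of the per-mount sync daemon.  Sleep for the configured
 * xfs_syncd_centisecs interval, requeue the periodic m_sync_work item
 * whenever the timeout expires (or the work list is otherwise empty),
 * then run and free the work items queued on m_sync_list.  Exits once
 * kthread_stop() is called and the work list has been drained.
 */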
STATIC int
xfssyncd(
	void			*arg)
{
	struct xfs_mount	*mp = arg;
	long			timeleft;
	bhv_vfs_sync_work_t	*work, *n;
	LIST_HEAD		(tmp);

	set_freezable();
	timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
	for (;;) {
		timeleft = schedule_timeout_interruptible(timeleft);
		/* swsusp */
		try_to_freeze();
		if (kthread_should_stop() && list_empty(&mp->m_sync_list))
			break;

		spin_lock(&mp->m_sync_lock);
		/*
		 * We can get woken by laptop mode, to do a sync -
		 * that's the (only!) case where the list would be
		 * empty with time remaining.
		 */
		if (!timeleft || list_empty(&mp->m_sync_list)) {
			if (!timeleft)
				timeleft = xfs_syncd_centisecs *
							msecs_to_jiffies(10);
			INIT_LIST_HEAD(&mp->m_sync_work.w_list);
			list_add_tail(&mp->m_sync_work.w_list,
					&mp->m_sync_list);
		}
		list_for_each_entry_safe(work, n, &mp->m_sync_list, w_list)
			list_move(&work->w_list, &tmp);
		spin_unlock(&mp->m_sync_lock);

		list_for_each_entry_safe(work, n, &tmp, w_list) {
			(*work->w_syncer)(mp, work->w_data);
			list_del(&work->w_list);
			if (work == &mp->m_sync_work)
				continue;
			kmem_free(work);
		}
	}

	return 0;
}

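/*
 * Start the xfssyncd thread for this mount and wire up the periodic
 * sync work item it runs.
 */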
int
xfs_syncd_init(
	struct xfs_mount	*mp)
{
	mp->m_sync_work.w_syncer = xfs_sync_worker;
	mp->m_sync_work.w_mount = mp;
	mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd");
	if (IS_ERR(mp->m_sync_task))
		return -PTR_ERR(mp->m_sync_task);
	return 0;
}

void
xfs_syncd_stop(
	struct xfs_mount	*mp)
{
	kthread_stop(mp->m_sync_task);
}