/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_inode.h"
#include "xfs_dinode.h"
#include "xfs_error.h"
#include "xfs_mru_cache.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_utils.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_rw.h"

#include <linux/kthread.h>
#include <linux/freezer.h>

/*
 * xfs_sync flushes any pending I/O to the filesystem described by mp.
 *
 * This routine is called by vfs_sync() to make sure that things make it
 * out to disk eventually, on sync() system calls to flush out everything,
 * and when the file system is unmounted.  For the vfs_sync() case, all
 * we really need to do is sync out the log to make all of our meta-data
 * updates permanent (except for timestamps).  For calls from pflushd(),
 * dirty pages are kept moving by calling pdflush() on the inodes
 * containing them.  We also flush the inodes that we can lock without
 * sleeping and the superblock if we can lock it without sleeping from
 * vfs_sync() so that items at the tail of the log are always moving out.
 *
 * Flags:
 *      SYNC_BDFLUSH - We're being called from vfs_sync() so we don't want
 *                     to sleep if we can help it.  All we really need
 *                     to do is ensure that the log is synced at least
 *                     periodically.  We also push the inodes and
 *                     superblock if we can lock them without sleeping
 *                     and they are not pinned.
 *      SYNC_ATTR    - We need to flush the inodes.  If SYNC_BDFLUSH is not
 *                     set, then we really want to lock each inode and flush
 *                     it.
 *      SYNC_WAIT    - All the flushes that take place in this call should
 *                     be synchronous.
 *      SYNC_DELWRI  - This tells us to push dirty pages associated with
 *                     inodes.  SYNC_WAIT and SYNC_BDFLUSH are used to
 *                     determine if they should be flushed sync, async, or
 *                     delwri.
 *      SYNC_CLOSE   - This flag is passed when the system is being
 *                     unmounted.  We should sync and invalidate everything.
 *      SYNC_FSDATA  - This indicates that the caller would like to make
 *                     sure the superblock is safe on disk.  We can ensure
 *                     this by simply making sure the log gets flushed
 *                     if SYNC_BDFLUSH is set, and by actually writing it
 *                     out otherwise.
 *      SYNC_IOWAIT  - The caller wants us to wait for all data I/O to
 *                     complete before we return (including direct I/O).
 *                     Forms the drain side of the write barrier needed to
 *                     safely quiesce the filesystem.
 */
int
xfs_sync(
	xfs_mount_t	*mp,
	int		flags)
{
	int		error;

	/*
	 * Get the Quota Manager to flush the dquots.
	 *
	 * If XFS quota support is not enabled or this filesystem
	 * instance does not use quotas XFS_QM_DQSYNC will always
	 * return zero.
	 */
	error = XFS_QM_DQSYNC(mp, flags);
	if (error) {
		/*
		 * If we got an IO error, we will be shutting down.
		 * So, there's nothing more for us to do here.
		 */
		ASSERT(error != EIO || XFS_FORCED_SHUTDOWN(mp));
		if (XFS_FORCED_SHUTDOWN(mp))
			return XFS_ERROR(error);
	}

	if (flags & SYNC_IOWAIT)
		xfs_filestream_flush(mp);

	return xfs_syncsub(mp, flags);
}

/*
 * Sync all the inodes in the given AG according to the
 * direction given by the flags.
 */
STATIC int
xfs_sync_inodes_ag(
	xfs_mount_t	*mp,
	int		ag,
	int		flags)
{
	xfs_perag_t	*pag = &mp->m_perag[ag];
	int		nr_found;
	int		first_index = 0;
	int		error = 0;
	int		last_error = 0;
	int		fflag = XFS_B_ASYNC;
	int		lock_flags = XFS_ILOCK_SHARED;

	if (flags & SYNC_DELWRI)
		fflag = XFS_B_DELWRI;
	if (flags & SYNC_WAIT)
		fflag = 0;		/* synchronous overrides all */

	if (flags & (SYNC_DELWRI | SYNC_CLOSE)) {
		/*
		 * We need the I/O lock if we're going to call any of
		 * the flush/inval routines.
		 */
		lock_flags |= XFS_IOLOCK_SHARED;
	}

	do {
		struct inode	*inode;
		boolean_t	inode_refed;
		xfs_inode_t	*ip = NULL;

		/*
		 * use a gang lookup to find the next inode in the tree
		 * as the tree is sparse and a gang lookup walks to find
		 * the number of objects requested.
		 */
		read_lock(&pag->pag_ici_lock);
		nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
				(void**)&ip, first_index, 1);

		if (!nr_found) {
			read_unlock(&pag->pag_ici_lock);
			break;
		}

		/* update the index for the next lookup */
		first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);

		/*
		 * skip inodes in reclaim. Let xfs_syncsub do that for
		 * us so we don't need to worry.
		 */
		if (xfs_iflags_test(ip, (XFS_IRECLAIM|XFS_IRECLAIMABLE))) {
			read_unlock(&pag->pag_ici_lock);
			continue;
		}

		/* bad inodes are dealt with elsewhere */
		inode = VFS_I(ip);
		if (is_bad_inode(inode)) {
			read_unlock(&pag->pag_ici_lock);
			continue;
		}

		/* nothing to sync during shutdown */
		if (XFS_FORCED_SHUTDOWN(mp) && !(flags & SYNC_CLOSE)) {
			read_unlock(&pag->pag_ici_lock);
			return 0;
		}

		/*
		 * If we can't get a reference on the VFS inode, the inode
		 * must be in reclaim. If we can get the inode lock without
		 * blocking, it is safe to flush the inode because we hold the
		 * tree lock and xfs_iextract will block right now. Hence if we
		 * lock the inode while holding the tree lock, xfs_ireclaim()
		 * is guaranteed to block on the inode lock we now hold and
		 * hence it is safe to reference the inode until we drop the
		 * inode locks completely.
		 */
		inode_refed = B_FALSE;
		if (igrab(inode)) {
			read_unlock(&pag->pag_ici_lock);
			xfs_ilock(ip, lock_flags);
			inode_refed = B_TRUE;
		} else {
			if (!xfs_ilock_nowait(ip, lock_flags)) {
				/* leave it to reclaim */
				read_unlock(&pag->pag_ici_lock);
				continue;
			}
			read_unlock(&pag->pag_ici_lock);
		}

		/*
		 * If we have to flush data or wait for I/O completion
		 * we need to drop the ilock that we currently hold.
		 * If we need to drop the lock, insert a marker if we
		 * have not already done so.
		 */
		if (flags & SYNC_CLOSE) {
			xfs_iunlock(ip, XFS_ILOCK_SHARED);
			if (XFS_FORCED_SHUTDOWN(mp))
				xfs_tosspages(ip, 0, -1, FI_REMAPF);
			else
				error = xfs_flushinval_pages(ip, 0, -1,
							FI_REMAPF);
			/* wait for I/O on freeze */
			if (flags & SYNC_IOWAIT)
				vn_iowait(ip);

			xfs_ilock(ip, XFS_ILOCK_SHARED);
		}

		if ((flags & SYNC_DELWRI) && VN_DIRTY(inode)) {
			xfs_iunlock(ip, XFS_ILOCK_SHARED);
			error = xfs_flush_pages(ip, 0, -1, fflag, FI_NONE);
			if (flags & SYNC_IOWAIT)
				vn_iowait(ip);
			xfs_ilock(ip, XFS_ILOCK_SHARED);
		}

		if ((flags & SYNC_ATTR) && !xfs_inode_clean(ip)) {
			if (flags & SYNC_WAIT) {
				xfs_iflock(ip);
				if (!xfs_inode_clean(ip))
					error = xfs_iflush(ip, XFS_IFLUSH_SYNC);
				else
					xfs_ifunlock(ip);
			} else if (xfs_iflock_nowait(ip)) {
				if (!xfs_inode_clean(ip))
					error = xfs_iflush(ip, XFS_IFLUSH_DELWRI);
				else
					xfs_ifunlock(ip);
			}
		}

		if (lock_flags)
			xfs_iunlock(ip, lock_flags);

		if (inode_refed) {
			IRELE(ip);
		}

		if (error)
			last_error = error;
		/*
		 * bail out if the filesystem is corrupted.
		 */
		if (error == EFSCORRUPTED)
			return XFS_ERROR(error);

	} while (nr_found);

	return last_error;
}

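/*
 * Walk every allocation group in the filesystem and sync its inodes,
 * skipping AGs whose inode radix trees have not been initialised.
 * The last error seen is returned, except that corruption aborts the
 * walk immediately.
 */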
int
xfs_sync_inodes(
	xfs_mount_t	*mp,
	int		flags)
{
	int		error;
	int		last_error;
	int		i;

	if (mp->m_flags & XFS_MOUNT_RDONLY)
		return 0;
	error = 0;
	last_error = 0;

	for (i = 0; i < mp->m_sb.sb_agcount; i++) {
		if (!mp->m_perag[i].pag_ici_init)
			continue;
		error = xfs_sync_inodes_ag(mp, i, flags);
		if (error)
			last_error = error;
		if (error == EFSCORRUPTED)
			break;
	}
	return XFS_ERROR(last_error);
}

STATIC int
xfs_commit_dummy_trans(
	struct xfs_mount	*mp,
	uint			log_flags)
{
	struct xfs_inode	*ip = mp->m_rootip;
	struct xfs_trans	*tp;
	int			error;

	/*
	 * Put a dummy transaction in the log to tell recovery
	 * that all others are OK.
	 */
	tp = xfs_trans_alloc(mp, XFS_TRANS_DUMMY1);
	error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_ihold(tp, ip);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	/* XXX(hch): ignoring the error here.. */
	error = xfs_trans_commit(tp, 0);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	xfs_log_force(mp, 0, log_flags);
	return 0;
}

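/*
 * Write out the superblock buffer. When called from xfssyncd
 * (SYNC_BDFLUSH) we only push the buffer if we can lock it without
 * sleeping and it is both dirty and unpinned; otherwise we take the
 * lock unconditionally and, if the buffer is pinned, force the log
 * first so the write cannot stall waiting on a log flush.
 */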
STATIC int
xfs_sync_fsdata(
	struct xfs_mount	*mp,
	int			flags)
{
	struct xfs_buf		*bp;
	struct xfs_buf_log_item	*bip;
	int			error = 0;

	/*
	 * If this is xfssyncd() then only sync the superblock if we can
	 * lock it without sleeping and it is not pinned.
	 */
	if (flags & SYNC_BDFLUSH) {
		ASSERT(!(flags & SYNC_WAIT));

		bp = xfs_getsb(mp, XFS_BUF_TRYLOCK);
		if (!bp)
			goto out;

		bip = XFS_BUF_FSPRIVATE(bp, struct xfs_buf_log_item *);
		if (!bip || !xfs_buf_item_dirty(bip) || XFS_BUF_ISPINNED(bp))
			goto out_brelse;
	} else {
		bp = xfs_getsb(mp, 0);

		/*
		 * If the buffer is pinned then push on the log so we won't
		 * get stuck waiting in the write for someone, maybe
		 * ourselves, to flush the log.
		 *
		 * Even though we just pushed the log above, we did not have
		 * the superblock buffer locked at that point so it can
		 * become pinned in between there and here.
		 */
		if (XFS_BUF_ISPINNED(bp))
			xfs_log_force(mp, 0, XFS_LOG_FORCE);
	}

	if (flags & SYNC_WAIT)
		XFS_BUF_UNASYNC(bp);
	else
		XFS_BUF_ASYNC(bp);

	return xfs_bwrite(mp, bp);

 out_brelse:
	xfs_buf_relse(bp);
 out:
	return error;
}

/*
 * xfs sync routine for internal use
 *
 * This routine supports all of the flags defined for the generic vfs_sync
 * interface as explained above under xfs_sync.
 */
STATIC int
xfs_syncsub(
	xfs_mount_t	*mp,
	int		flags)
{
	int		error = 0;
	int		last_error = 0;
	uint		log_flags = XFS_LOG_FORCE;

	/*
	 * Sync out the log.  This ensures that the log is periodically
	 * flushed even if there is not enough activity to fill it up.
	 */
	if (flags & SYNC_WAIT)
		log_flags |= XFS_LOG_SYNC;

	xfs_log_force(mp, (xfs_lsn_t)0, log_flags);

	if (flags & (SYNC_ATTR|SYNC_DELWRI)) {
		if (flags & SYNC_BDFLUSH)
			xfs_finish_reclaim_all(mp, 1, XFS_IFLUSH_DELWRI_ELSE_ASYNC);
		else
			error = xfs_sync_inodes(mp, flags);
	}

	/*
	 * Flushing out dirty data above probably generated more
	 * log activity, so if this isn't vfs_sync() then flush
	 * the log again.
	 */
	if (flags & SYNC_DELWRI)
		xfs_log_force(mp, 0, log_flags);

	if (flags & SYNC_FSDATA) {
		error = xfs_sync_fsdata(mp, flags);
		if (error)
			last_error = error;
	}

	/*
	 * Now check to see if the log needs a "dummy" transaction.
	 */
	if (!(flags & SYNC_REMOUNT) && xfs_log_need_covered(mp)) {
		error = xfs_commit_dummy_trans(mp, log_flags);
		if (error)
			return error;
	}

	/*
	 * When shutting down, we need to ensure that the AIL is pushed
	 * to disk or the filesystem can appear corrupt from the PROM.
	 */
	if ((flags & (SYNC_CLOSE|SYNC_WAIT)) == (SYNC_CLOSE|SYNC_WAIT)) {
		XFS_bflush(mp->m_ddev_targp);
		if (mp->m_rtdev_targp) {
			XFS_bflush(mp->m_rtdev_targp);
		}
	}

	return XFS_ERROR(last_error);
}

/*
 * Enqueue a work item to be picked up by the vfs xfssyncd thread.
 * Doing this has two advantages:
 * - It saves on stack space, which is tight in certain situations
 * - It can be used (with care) as a mechanism to avoid deadlocks.
 * Flushing while allocating in a full filesystem requires both.
 */
STATIC void
xfs_syncd_queue_work(
	struct xfs_mount *mp,
	void		*data,
	void		(*syncer)(struct xfs_mount *, void *))
{
	struct bhv_vfs_sync_work *work;

	work = kmem_alloc(sizeof(struct bhv_vfs_sync_work), KM_SLEEP);
	INIT_LIST_HEAD(&work->w_list);
	work->w_syncer = syncer;
	work->w_data = data;
	work->w_mount = mp;
	spin_lock(&mp->m_sync_lock);
	list_add_tail(&work->w_list, &mp->m_sync_list);
	spin_unlock(&mp->m_sync_lock);
	wake_up_process(mp->m_sync_task);
}

/*
 * Flush delayed allocate data, attempting to free up reserved space
 * from existing allocations.  At this point a new allocation attempt
 * has failed with ENOSPC and we are in the process of scratching our
 * heads, looking about for more room...
 */
STATIC void
xfs_flush_inode_work(
	struct xfs_mount *mp,
	void		*arg)
{
	struct inode	*inode = arg;
	filemap_flush(inode->i_mapping);
	iput(inode);
}

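/*
 * Queue a flush of the inode's dirty data via the syncd thread, then
 * give it half a second to make progress before returning to the
 * caller, who is presumably about to retry a failed allocation.
 */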
void
xfs_flush_inode(
	xfs_inode_t	*ip)
{
	struct inode	*inode = VFS_I(ip);

	igrab(inode);
	xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inode_work);
	delay(msecs_to_jiffies(500));
}

/*
 * This is the "bigger hammer" version of xfs_flush_inode_work...
 * (IOW, "If at first you don't succeed, use a Bigger Hammer").
 */
STATIC void
xfs_flush_device_work(
	struct xfs_mount *mp,
	void		*arg)
{
	struct inode	*inode = arg;
	sync_blockdev(mp->m_super->s_bdev);
	iput(inode);
}

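/*
 * As above, but flush the whole block device, and force the log out
 * synchronously once the 500ms grace period has expired.
 */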
void
xfs_flush_device(
	xfs_inode_t	*ip)
{
	struct inode	*inode = VFS_I(ip);

	igrab(inode);
	xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_device_work);
	delay(msecs_to_jiffies(500));
	xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
}

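/*
 * Default work item run on every xfssyncd timeout: a non-blocking
 * periodic sync of the superblock, log and inodes, followed by waking
 * anybody waiting on the sync sequence number.
 */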
STATIC void
xfs_sync_worker(
	struct xfs_mount *mp,
	void		*unused)
{
	int		error;

	if (!(mp->m_flags & XFS_MOUNT_RDONLY))
		error = xfs_sync(mp, SYNC_FSDATA | SYNC_BDFLUSH | SYNC_ATTR);
	mp->m_sync_seq++;
	wake_up(&mp->m_wait_single_sync_task);
}

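/*
 * The per-mount sync daemon.  It sleeps for xfs_syncd_centisecs between
 * passes; on each wakeup it queues the default sync work item if the
 * timer expired (or the list is empty), then runs every work item
 * currently on the mount's sync list.
 */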
STATIC int
xfssyncd(
	void			*arg)
{
	struct xfs_mount	*mp = arg;
	long			timeleft;
	bhv_vfs_sync_work_t	*work, *n;
	LIST_HEAD		(tmp);

	set_freezable();
	timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
	for (;;) {
		timeleft = schedule_timeout_interruptible(timeleft);
		/* swsusp */
		try_to_freeze();
		if (kthread_should_stop() && list_empty(&mp->m_sync_list))
			break;

		spin_lock(&mp->m_sync_lock);
		/*
		 * We can get woken by laptop mode, to do a sync -
		 * that's the (only!) case where the list would be
		 * empty with time remaining.
		 */
		if (!timeleft || list_empty(&mp->m_sync_list)) {
			if (!timeleft)
				timeleft = xfs_syncd_centisecs *
							msecs_to_jiffies(10);
			INIT_LIST_HEAD(&mp->m_sync_work.w_list);
			list_add_tail(&mp->m_sync_work.w_list,
					&mp->m_sync_list);
		}
		list_for_each_entry_safe(work, n, &mp->m_sync_list, w_list)
			list_move(&work->w_list, &tmp);
		spin_unlock(&mp->m_sync_lock);

		list_for_each_entry_safe(work, n, &tmp, w_list) {
			(*work->w_syncer)(mp, work->w_data);
			list_del(&work->w_list);
			if (work == &mp->m_sync_work)
				continue;
			kmem_free(work);
		}
	}

	return 0;
}

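/*
 * Start the per-mount sync thread.  kthread_run() returns an ERR_PTR
 * on failure, so convert that to the positive errno convention XFS
 * uses internally.
 */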
int
xfs_syncd_init(
	struct xfs_mount	*mp)
{
	mp->m_sync_work.w_syncer = xfs_sync_worker;
	mp->m_sync_work.w_mount = mp;
	mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd");
	if (IS_ERR(mp->m_sync_task))
		return -PTR_ERR(mp->m_sync_task);
	return 0;
}

void
xfs_syncd_stop(
	struct xfs_mount	*mp)
{
	kthread_stop(mp->m_sync_task);
}