[XFS] Add xfs_icsb_sync_counters_locked for when m_sb_lock already held
Add a new xfs_icsb_sync_counters_locked variant for callers that already
hold m_sb_lock, and add a flags argument to xfs_icsb_sync_counters so
that the separate xfs_icsb_sync_counters_flags helper is no longer needed.
SGI-PV: 976035
SGI-Modid: xfs-linux-melb:xfs-kern:30917a
Signed-off-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: David Chinner <dgc@sgi.com>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
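A minimal sketch of the resulting calling convention (the surrounding
context is illustrative, not taken from this patch): the plain variant
takes and drops m_sb_lock itself, while the _locked variant assumes the
caller already holds it. The flags value (0 here for an accurate count)
is forwarded to xfs_icsb_count().

	/* Caller does not hold m_sb_lock: the helper takes and drops it. */
	xfs_icsb_sync_counters(mp, 0);

	/*
	 * Caller already holds m_sb_lock, e.g. around other superblock
	 * updates: use the _locked variant to avoid self-deadlock on the
	 * non-recursive spinlock.
	 */
	spin_lock(&mp->m_sb_lock);
	xfs_icsb_sync_counters_locked(mp, 0);
	/* ... update other mp->m_sb fields under the same lock ... */
	spin_unlock(&mp->m_sb_lock);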
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 2fec452..a2fad07 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -55,7 +55,6 @@
STATIC void xfs_icsb_destroy_counters(xfs_mount_t *);
STATIC void xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t,
int, int);
-STATIC void xfs_icsb_sync_counters(xfs_mount_t *);
STATIC int xfs_icsb_modify_counters(xfs_mount_t *, xfs_sb_field_t,
int64_t, int);
STATIC void xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t);
@@ -64,7 +63,6 @@
#define xfs_icsb_destroy_counters(mp) do { } while (0)
#define xfs_icsb_balance_counter(mp, a, b, c) do { } while (0)
-#define xfs_icsb_sync_counters(mp) do { } while (0)
#define xfs_icsb_modify_counters(mp, a, b, c) do { } while (0)
#endif
@@ -1400,7 +1398,7 @@
if (!xfs_fs_writable(mp))
return 0;
- xfs_icsb_sync_counters(mp);
+ xfs_icsb_sync_counters(mp, 0);
/*
* we don't need to do this if we are updating the superblock
@@ -2278,38 +2276,33 @@
}
void
-xfs_icsb_sync_counters_flags(
+xfs_icsb_sync_counters_locked(
xfs_mount_t *mp,
int flags)
{
xfs_icsb_cnts_t cnt;
- /* Pass 1: lock all counters */
- if ((flags & XFS_ICSB_SB_LOCKED) == 0)
- spin_lock(&mp->m_sb_lock);
-
xfs_icsb_count(mp, &cnt, flags);
- /* Step 3: update mp->m_sb fields */
if (!xfs_icsb_counter_disabled(mp, XFS_SBS_ICOUNT))
mp->m_sb.sb_icount = cnt.icsb_icount;
if (!xfs_icsb_counter_disabled(mp, XFS_SBS_IFREE))
mp->m_sb.sb_ifree = cnt.icsb_ifree;
if (!xfs_icsb_counter_disabled(mp, XFS_SBS_FDBLOCKS))
mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
-
- if ((flags & XFS_ICSB_SB_LOCKED) == 0)
- spin_unlock(&mp->m_sb_lock);
}
/*
* Accurate update of per-cpu counters to incore superblock
*/
-STATIC void
+void
xfs_icsb_sync_counters(
- xfs_mount_t *mp)
+ xfs_mount_t *mp,
+ int flags)
{
- xfs_icsb_sync_counters_flags(mp, 0);
+ spin_lock(&mp->m_sb_lock);
+ xfs_icsb_sync_counters_locked(mp, flags);
+ spin_unlock(&mp->m_sb_lock);
}
/*