[XFS] Endianness annotations for various allocator data structures

SGI-PV: 943272
SGI-Modid: xfs-linux:xfs-kern:201006a

Signed-off-by: Christoph Hellwig <hch@sgi.com>
Signed-off-by: Nathan Scott <nathans@sgi.com>
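
The conversion is mechanical: the on-disk fields of xfs_agf, xfs_agi and the
allocation btree blocks become __be32/__be16, and the
INT_GET/INT_SET/INT_MOD(..., ARCH_CONVERT) accessors are replaced with
be32_to_cpu/cpu_to_be32 (plus the be32_add/be16_add helpers for in-place
arithmetic).  The fragment below is an illustrative sketch of that pattern
only, not part of the patch; the struct and function names are invented for
the example:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	/* on-disk layout: counters are always stored big-endian */
	struct example_agf {
		__be32	agf_freeblks;	/* total free blocks */
		__be32	agf_length;	/* size in blocks of a.g. */
	};

	/* take 'len' blocks off the free-space counter, allocator-style */
	static void example_take_blocks(struct example_agf *agf, __u32 len)
	{
		/* decode to CPU endianness only at the point of use ... */
		__u32 freeblks = be32_to_cpu(agf->agf_freeblks);

		/*
		 * ... and encode again when writing back; the patch uses the
		 * be32_add() shorthand for this read-modify-write sequence.
		 */
		agf->agf_freeblks = cpu_to_be32(freeblks - len);
	}
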
diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h
index 8d05559..a96e2ff 100644
--- a/fs/xfs/xfs_ag.h
+++ b/fs/xfs/xfs_ag.h
@@ -48,27 +48,26 @@
  * are > 64k, our value cannot be confused for an EFS superblock's.
  */
 
-typedef struct xfs_agf
-{
+typedef struct xfs_agf {
 	/*
 	 * Common allocation group header information
 	 */
-	__uint32_t	agf_magicnum;	/* magic number == XFS_AGF_MAGIC */
-	__uint32_t	agf_versionnum;	/* header version == XFS_AGF_VERSION */
-	xfs_agnumber_t	agf_seqno;	/* sequence # starting from 0 */
-	xfs_agblock_t	agf_length;	/* size in blocks of a.g. */
+	__be32		agf_magicnum;	/* magic number == XFS_AGF_MAGIC */
+	__be32		agf_versionnum;	/* header version == XFS_AGF_VERSION */
+	__be32		agf_seqno;	/* sequence # starting from 0 */
+	__be32		agf_length;	/* size in blocks of a.g. */
 	/*
 	 * Freespace information
 	 */
-	xfs_agblock_t	agf_roots[XFS_BTNUM_AGF];	/* root blocks */
-	__uint32_t	agf_spare0;	/* spare field */
-	__uint32_t	agf_levels[XFS_BTNUM_AGF];	/* btree levels */
-	__uint32_t	agf_spare1;	/* spare field */
-	__uint32_t	agf_flfirst;	/* first freelist block's index */
-	__uint32_t	agf_fllast;	/* last freelist block's index */
-	__uint32_t	agf_flcount;	/* count of blocks in freelist */
-	xfs_extlen_t	agf_freeblks;	/* total free blocks */
-	xfs_extlen_t	agf_longest;	/* longest free space */
+	__be32		agf_roots[XFS_BTNUM_AGF];	/* root blocks */
+	__be32		agf_spare0;	/* spare field */
+	__be32		agf_levels[XFS_BTNUM_AGF];	/* btree levels */
+	__be32		agf_spare1;	/* spare field */
+	__be32		agf_flfirst;	/* first freelist block's index */
+	__be32		agf_fllast;	/* last freelist block's index */
+	__be32		agf_flcount;	/* count of blocks in freelist */
+	__be32		agf_freeblks;	/* total free blocks */
+	__be32		agf_longest;	/* longest free space */
 } xfs_agf_t;
 
 #define	XFS_AGF_MAGICNUM	0x00000001
@@ -96,31 +95,30 @@
  */
 #define	XFS_AGI_UNLINKED_BUCKETS	64
 
-typedef struct xfs_agi
-{
+typedef struct xfs_agi {
 	/*
 	 * Common allocation group header information
 	 */
-	__uint32_t	agi_magicnum;	/* magic number == XFS_AGI_MAGIC */
-	__uint32_t	agi_versionnum;	/* header version == XFS_AGI_VERSION */
-	xfs_agnumber_t	agi_seqno;	/* sequence # starting from 0 */
-	xfs_agblock_t	agi_length;	/* size in blocks of a.g. */
+	__be32		agi_magicnum;	/* magic number == XFS_AGI_MAGIC */
+	__be32		agi_versionnum;	/* header version == XFS_AGI_VERSION */
+	__be32		agi_seqno;	/* sequence # starting from 0 */
+	__be32		agi_length;	/* size in blocks of a.g. */
 	/*
 	 * Inode information
 	 * Inodes are mapped by interpreting the inode number, so no
 	 * mapping data is needed here.
 	 */
-	xfs_agino_t	agi_count;	/* count of allocated inodes */
-	xfs_agblock_t	agi_root;	/* root of inode btree */
-	__uint32_t	agi_level;	/* levels in inode btree */
-	xfs_agino_t	agi_freecount;	/* number of free inodes */
-	xfs_agino_t	agi_newino;	/* new inode just allocated */
-	xfs_agino_t	agi_dirino;	/* last directory inode chunk */
+	__be32		agi_count;	/* count of allocated inodes */
+	__be32		agi_root;	/* root of inode btree */
+	__be32		agi_level;	/* levels in inode btree */
+	__be32		agi_freecount;	/* number of free inodes */
+	__be32		agi_newino;	/* new inode just allocated */
+	__be32		agi_dirino;	/* last directory inode chunk */
 	/*
 	 * Hash table of inodes which have been unlinked but are
 	 * still being referenced.
 	 */
-	xfs_agino_t	agi_unlinked[XFS_AGI_UNLINKED_BUCKETS];
+	__be32		agi_unlinked[XFS_AGI_UNLINKED_BUCKETS];
 } xfs_agi_t;
 
 #define	XFS_AGI_MAGICNUM	0x00000001
@@ -201,8 +199,8 @@
 	(MIN(bl + 1, XFS_AG_MAXLEVELS(mp)) + MIN(cl + 1, XFS_AG_MAXLEVELS(mp)))
 #define	XFS_MIN_FREELIST(a,mp)		\
 	(XFS_MIN_FREELIST_RAW(		\
-		INT_GET((a)->agf_levels[XFS_BTNUM_BNOi], ARCH_CONVERT), \
-		INT_GET((a)->agf_levels[XFS_BTNUM_CNTi], ARCH_CONVERT), mp))
+		be32_to_cpu((a)->agf_levels[XFS_BTNUM_BNOi]), \
+		be32_to_cpu((a)->agf_levels[XFS_BTNUM_CNTi]), mp))
 #define	XFS_MIN_FREELIST_PAG(pag,mp)	\
 	(XFS_MIN_FREELIST_RAW(		\
 		(uint_t)(pag)->pagf_levels[XFS_BTNUM_BNOi], \
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index a439e07..f4328e1 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -231,8 +231,8 @@
 	if (args->minleft == 0)
 		return 1;
 	agf = XFS_BUF_TO_AGF(args->agbp);
-	diff = INT_GET(agf->agf_freeblks, ARCH_CONVERT)
-		+ INT_GET(agf->agf_flcount, ARCH_CONVERT)
+	diff = be32_to_cpu(agf->agf_freeblks)
+		+ be32_to_cpu(agf->agf_flcount)
 		- args->len - args->minleft;
 	if (diff >= 0)
 		return 1;
@@ -307,7 +307,8 @@
 			bnoblock = XFS_BUF_TO_ALLOC_BLOCK(bno_cur->bc_bufs[0]);
 			cntblock = XFS_BUF_TO_ALLOC_BLOCK(cnt_cur->bc_bufs[0]);
 			XFS_WANT_CORRUPTED_RETURN(
-				INT_GET(bnoblock->bb_numrecs, ARCH_CONVERT) == INT_GET(cntblock->bb_numrecs, ARCH_CONVERT));
+				be16_to_cpu(bnoblock->bb_numrecs) ==
+				be16_to_cpu(cntblock->bb_numrecs));
 		}
 	}
 #endif
@@ -493,21 +494,17 @@
 		(void *)str,
 		(void *)mp,
 		(void *)(__psint_t)flags,
-		(void *)(__psunsigned_t)INT_GET(agf->agf_seqno, ARCH_CONVERT),
-		(void *)(__psunsigned_t)INT_GET(agf->agf_length, ARCH_CONVERT),
-		(void *)(__psunsigned_t)INT_GET(agf->agf_roots[XFS_BTNUM_BNO],
-						ARCH_CONVERT),
-		(void *)(__psunsigned_t)INT_GET(agf->agf_roots[XFS_BTNUM_CNT],
-						ARCH_CONVERT),
-		(void *)(__psunsigned_t)INT_GET(agf->agf_levels[XFS_BTNUM_BNO],
-						ARCH_CONVERT),
-		(void *)(__psunsigned_t)INT_GET(agf->agf_levels[XFS_BTNUM_CNT],
-						ARCH_CONVERT),
-		(void *)(__psunsigned_t)INT_GET(agf->agf_flfirst, ARCH_CONVERT),
-		(void *)(__psunsigned_t)INT_GET(agf->agf_fllast, ARCH_CONVERT),
-		(void *)(__psunsigned_t)INT_GET(agf->agf_flcount, ARCH_CONVERT),
-		(void *)(__psunsigned_t)INT_GET(agf->agf_freeblks, ARCH_CONVERT),
-		(void *)(__psunsigned_t)INT_GET(agf->agf_longest, ARCH_CONVERT));
+		(void *)(__psunsigned_t)be32_to_cpu(agf->agf_seqno),
+		(void *)(__psunsigned_t)be32_to_cpu(agf->agf_length),
+		(void *)(__psunsigned_t)be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]),
+		(void *)(__psunsigned_t)be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]),
+		(void *)(__psunsigned_t)be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]),
+		(void *)(__psunsigned_t)be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]),
+		(void *)(__psunsigned_t)be32_to_cpu(agf->agf_flfirst),
+		(void *)(__psunsigned_t)be32_to_cpu(agf->agf_fllast),
+		(void *)(__psunsigned_t)be32_to_cpu(agf->agf_flcount),
+		(void *)(__psunsigned_t)be32_to_cpu(agf->agf_freeblks),
+		(void *)(__psunsigned_t)be32_to_cpu(agf->agf_longest));
 }
 
 STATIC void
@@ -600,12 +597,12 @@
 		if (!(args->wasfromfl)) {
 
 			agf = XFS_BUF_TO_AGF(args->agbp);
-			INT_MOD(agf->agf_freeblks, ARCH_CONVERT, -(args->len));
+			be32_add(&agf->agf_freeblks, -(args->len));
 			xfs_trans_agblocks_delta(args->tp,
 						 -((long)(args->len)));
 			args->pag->pagf_freeblks -= args->len;
-			ASSERT(INT_GET(agf->agf_freeblks, ARCH_CONVERT)
-				<= INT_GET(agf->agf_length, ARCH_CONVERT));
+			ASSERT(be32_to_cpu(agf->agf_freeblks) <=
+				be32_to_cpu(agf->agf_length));
 			TRACE_MODAGF(NULL, agf, XFS_AGF_FREEBLKS);
 			xfs_alloc_log_agf(args->tp, args->agbp,
 						XFS_AGF_FREEBLKS);
@@ -711,8 +708,7 @@
 	cnt_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp,
 		args->agno, XFS_BTNUM_CNT, NULL, 0);
 	ASSERT(args->agbno + args->len <=
-		INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_length,
-			ARCH_CONVERT));
+		be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
 	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
 			args->agbno, args->len, XFSA_FIXUP_BNO_OK))) {
 		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
@@ -885,8 +881,7 @@
 			goto error0;
 		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
 		ltend = ltbno + ltlen;
-		ASSERT(ltend <= INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_length,
-				ARCH_CONVERT));
+		ASSERT(ltend <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
 		args->len = blen;
 		if (!xfs_alloc_fix_minleft(args)) {
 			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
@@ -1241,8 +1236,7 @@
 		ltlen, &ltnew);
 	ASSERT(ltnew >= ltbno);
 	ASSERT(ltnew + rlen <= ltend);
-	ASSERT(ltnew + rlen <= INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_length,
-		ARCH_CONVERT));
+	ASSERT(ltnew + rlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
 	args->agbno = ltnew;
 	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen,
 			ltnew, rlen, XFSA_FIXUP_BNO_OK)))
@@ -1405,8 +1399,7 @@
 	args->agbno = rbno;
 	XFS_WANT_CORRUPTED_GOTO(
 		args->agbno + args->len <=
-			INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_length,
-			ARCH_CONVERT),
+			be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
 		error0);
 	TRACE_ALLOC("normal", args);
 	return 0;
@@ -1454,8 +1447,8 @@
 	 * freelist.
 	 */
 	else if (args->minlen == 1 && args->alignment == 1 && !args->isfl &&
-		 (INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_flcount,
-			ARCH_CONVERT) > args->minleft)) {
+		 (be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_flcount)
+		  > args->minleft)) {
 		if ((error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno)))
 			goto error0;
 		if (fbno != NULLAGBLOCK) {
@@ -1470,8 +1463,7 @@
 			args->agbno = fbno;
 			XFS_WANT_CORRUPTED_GOTO(
 				args->agbno + args->len <=
-				INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_length,
-					ARCH_CONVERT),
+				be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
 				error0);
 			args->wasfromfl = 1;
 			TRACE_ALLOC("freelist", args);
@@ -1745,12 +1737,12 @@
 
 		agf = XFS_BUF_TO_AGF(agbp);
 		pag = &mp->m_perag[agno];
-		INT_MOD(agf->agf_freeblks, ARCH_CONVERT, len);
+		be32_add(&agf->agf_freeblks, len);
 		xfs_trans_agblocks_delta(tp, len);
 		pag->pagf_freeblks += len;
 		XFS_WANT_CORRUPTED_GOTO(
-			INT_GET(agf->agf_freeblks, ARCH_CONVERT)
-				<= INT_GET(agf->agf_length, ARCH_CONVERT),
+			be32_to_cpu(agf->agf_freeblks) <=
+			be32_to_cpu(agf->agf_length),
 			error0);
 		TRACE_MODAGF(NULL, agf, XFS_AGF_FREEBLKS);
 		xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
@@ -1897,18 +1889,18 @@
 	 */
 	agf = XFS_BUF_TO_AGF(agbp);
 	need = XFS_MIN_FREELIST(agf, mp);
-	delta = need > INT_GET(agf->agf_flcount, ARCH_CONVERT) ?
-		(need - INT_GET(agf->agf_flcount, ARCH_CONVERT)) : 0;
+	delta = need > be32_to_cpu(agf->agf_flcount) ?
+		(need - be32_to_cpu(agf->agf_flcount)) : 0;
 	/*
 	 * If there isn't enough total or single-extent, reject it.
 	 */
-	longest = INT_GET(agf->agf_longest, ARCH_CONVERT);
+	longest = be32_to_cpu(agf->agf_longest);
 	longest = (longest > delta) ? (longest - delta) :
-		(INT_GET(agf->agf_flcount, ARCH_CONVERT) > 0 || longest > 0);
+		(be32_to_cpu(agf->agf_flcount) > 0 || longest > 0);
 	if (args->minlen + args->alignment + args->minalignslop - 1 > longest ||
 	     (args->minleft &&
-		(int)(INT_GET(agf->agf_freeblks, ARCH_CONVERT) +
-		   INT_GET(agf->agf_flcount, ARCH_CONVERT) - need - args->total) <
+		(int)(be32_to_cpu(agf->agf_freeblks) +
+		   be32_to_cpu(agf->agf_flcount) - need - args->total) <
 	     (int)args->minleft)) {
 		xfs_trans_brelse(tp, agbp);
 		args->agbp = NULL;
@@ -1917,7 +1909,7 @@
 	/*
 	 * Make the freelist shorter if it's too long.
 	 */
-	while (INT_GET(agf->agf_flcount, ARCH_CONVERT) > need) {
+	while (be32_to_cpu(agf->agf_flcount) > need) {
 		xfs_buf_t	*bp;
 
 		if ((error = xfs_alloc_get_freelist(tp, agbp, &bno)))
@@ -1944,9 +1936,9 @@
 	/*
 	 * Make the freelist longer if it's too short.
 	 */
-	while (INT_GET(agf->agf_flcount, ARCH_CONVERT) < need) {
+	while (be32_to_cpu(agf->agf_flcount) < need) {
 		targs.agbno = 0;
-		targs.maxlen = need - INT_GET(agf->agf_flcount, ARCH_CONVERT);
+		targs.maxlen = need - be32_to_cpu(agf->agf_flcount);
 		/*
 		 * Allocate as many blocks as possible at once.
 		 */
@@ -2006,19 +1998,19 @@
 	 */
 	mp = tp->t_mountp;
 	if ((error = xfs_alloc_read_agfl(mp, tp,
-			INT_GET(agf->agf_seqno, ARCH_CONVERT), &agflbp)))
+			be32_to_cpu(agf->agf_seqno), &agflbp)))
 		return error;
 	agfl = XFS_BUF_TO_AGFL(agflbp);
 	/*
 	 * Get the block number and update the data structures.
 	 */
-	bno = INT_GET(agfl->agfl_bno[INT_GET(agf->agf_flfirst, ARCH_CONVERT)], ARCH_CONVERT);
-	INT_MOD(agf->agf_flfirst, ARCH_CONVERT, 1);
+	bno = INT_GET(agfl->agfl_bno[be32_to_cpu(agf->agf_flfirst)], ARCH_CONVERT);
+	be32_add(&agf->agf_flfirst, 1);
 	xfs_trans_brelse(tp, agflbp);
-	if (INT_GET(agf->agf_flfirst, ARCH_CONVERT) == XFS_AGFL_SIZE(mp))
+	if (be32_to_cpu(agf->agf_flfirst) == XFS_AGFL_SIZE(mp))
 		agf->agf_flfirst = 0;
-	pag = &mp->m_perag[INT_GET(agf->agf_seqno, ARCH_CONVERT)];
-	INT_MOD(agf->agf_flcount, ARCH_CONVERT, -1);
+	pag = &mp->m_perag[be32_to_cpu(agf->agf_seqno)];
+	be32_add(&agf->agf_flcount, -1);
 	xfs_trans_agflist_delta(tp, -1);
 	pag->pagf_flcount--;
 	TRACE_MODAGF(NULL, agf, XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT);
@@ -2033,7 +2025,7 @@
 	 * the freeing transaction must be pushed to disk NOW by forcing
 	 * to disk all iclogs up that transaction's LSN.
 	 */
-	xfs_alloc_search_busy(tp, INT_GET(agf->agf_seqno, ARCH_CONVERT), bno, 1);
+	xfs_alloc_search_busy(tp, be32_to_cpu(agf->agf_seqno), bno, 1);
 	return 0;
 }
 
@@ -2111,18 +2103,18 @@
 	mp = tp->t_mountp;
 
 	if (!agflbp && (error = xfs_alloc_read_agfl(mp, tp,
-			INT_GET(agf->agf_seqno, ARCH_CONVERT), &agflbp)))
+			be32_to_cpu(agf->agf_seqno), &agflbp)))
 		return error;
 	agfl = XFS_BUF_TO_AGFL(agflbp);
-	INT_MOD(agf->agf_fllast, ARCH_CONVERT, 1);
-	if (INT_GET(agf->agf_fllast, ARCH_CONVERT) == XFS_AGFL_SIZE(mp))
+	be32_add(&agf->agf_fllast, 1);
+	if (be32_to_cpu(agf->agf_fllast) == XFS_AGFL_SIZE(mp))
 		agf->agf_fllast = 0;
-	pag = &mp->m_perag[INT_GET(agf->agf_seqno, ARCH_CONVERT)];
-	INT_MOD(agf->agf_flcount, ARCH_CONVERT, 1);
+	pag = &mp->m_perag[be32_to_cpu(agf->agf_seqno)];
+	be32_add(&agf->agf_flcount, 1);
 	xfs_trans_agflist_delta(tp, 1);
 	pag->pagf_flcount++;
-	ASSERT(INT_GET(agf->agf_flcount, ARCH_CONVERT) <= XFS_AGFL_SIZE(mp));
-	blockp = &agfl->agfl_bno[INT_GET(agf->agf_fllast, ARCH_CONVERT)];
+	ASSERT(be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp));
+	blockp = &agfl->agfl_bno[be32_to_cpu(agf->agf_fllast)];
 	INT_SET(*blockp, ARCH_CONVERT, bno);
 	TRACE_MODAGF(NULL, agf, XFS_AGF_FLLAST | XFS_AGF_FLCOUNT);
 	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLLAST | XFS_AGF_FLCOUNT);
@@ -2169,14 +2161,12 @@
 	 */
 	agf = XFS_BUF_TO_AGF(bp);
 	agf_ok =
-		INT_GET(agf->agf_magicnum, ARCH_CONVERT) == XFS_AGF_MAGIC &&
-		XFS_AGF_GOOD_VERSION(
-			INT_GET(agf->agf_versionnum, ARCH_CONVERT)) &&
-		INT_GET(agf->agf_freeblks, ARCH_CONVERT) <=
-				INT_GET(agf->agf_length, ARCH_CONVERT) &&
-		INT_GET(agf->agf_flfirst, ARCH_CONVERT) < XFS_AGFL_SIZE(mp) &&
-		INT_GET(agf->agf_fllast,  ARCH_CONVERT) < XFS_AGFL_SIZE(mp) &&
-		INT_GET(agf->agf_flcount, ARCH_CONVERT) <= XFS_AGFL_SIZE(mp);
+		be32_to_cpu(agf->agf_magicnum) == XFS_AGF_MAGIC &&
+		XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
+		be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) &&
+		be32_to_cpu(agf->agf_flfirst) < XFS_AGFL_SIZE(mp) &&
+		be32_to_cpu(agf->agf_fllast) < XFS_AGFL_SIZE(mp) &&
+		be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp);
 	if (unlikely(XFS_TEST_ERROR(!agf_ok, mp, XFS_ERRTAG_ALLOC_READ_AGF,
 			XFS_RANDOM_ALLOC_READ_AGF))) {
 		XFS_CORRUPTION_ERROR("xfs_alloc_read_agf",
@@ -2186,13 +2176,13 @@
 	}
 	pag = &mp->m_perag[agno];
 	if (!pag->pagf_init) {
-		pag->pagf_freeblks = INT_GET(agf->agf_freeblks, ARCH_CONVERT);
-		pag->pagf_flcount = INT_GET(agf->agf_flcount, ARCH_CONVERT);
-		pag->pagf_longest = INT_GET(agf->agf_longest, ARCH_CONVERT);
+		pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
+		pag->pagf_flcount = be32_to_cpu(agf->agf_flcount);
+		pag->pagf_longest = be32_to_cpu(agf->agf_longest);
 		pag->pagf_levels[XFS_BTNUM_BNOi] =
-			INT_GET(agf->agf_levels[XFS_BTNUM_BNOi], ARCH_CONVERT);
+			be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]);
 		pag->pagf_levels[XFS_BTNUM_CNTi] =
-			INT_GET(agf->agf_levels[XFS_BTNUM_CNTi], ARCH_CONVERT);
+			be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
 		spinlock_init(&pag->pagb_lock, "xfspagb");
 		pag->pagb_list = kmem_zalloc(XFS_PAGB_NUM_SLOTS *
 					sizeof(xfs_perag_busy_t), KM_SLEEP);
@@ -2200,13 +2190,13 @@
 	}
 #ifdef DEBUG
 	else if (!XFS_FORCED_SHUTDOWN(mp)) {
-		ASSERT(pag->pagf_freeblks == INT_GET(agf->agf_freeblks, ARCH_CONVERT));
-		ASSERT(pag->pagf_flcount == INT_GET(agf->agf_flcount, ARCH_CONVERT));
-		ASSERT(pag->pagf_longest == INT_GET(agf->agf_longest, ARCH_CONVERT));
+		ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks));
+		ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount));
+		ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest));
 		ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] ==
-		       INT_GET(agf->agf_levels[XFS_BTNUM_BNOi], ARCH_CONVERT));
+		       be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]));
 		ASSERT(pag->pagf_levels[XFS_BTNUM_CNTi] ==
-		       INT_GET(agf->agf_levels[XFS_BTNUM_CNTi], ARCH_CONVERT));
+		       be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]));
 	}
 #endif
 	XFS_BUF_SET_VTYPE_REF(bp, B_FS_AGF, XFS_AGF_REF);
@@ -2455,7 +2445,7 @@
 #ifdef DEBUG
 	ASSERT(args.agbp != NULL);
 	agf = XFS_BUF_TO_AGF(args.agbp);
-	ASSERT(args.agbno + len <= INT_GET(agf->agf_length, ARCH_CONVERT));
+	ASSERT(args.agbno + len <= be32_to_cpu(agf->agf_length));
 #endif
 	error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno,
 		len, 0);
diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c
index 7ecc8c0..a1d92da 100644
--- a/fs/xfs/xfs_alloc_btree.c
+++ b/fs/xfs/xfs_alloc_btree.c
@@ -117,7 +117,7 @@
 	/*
 	 * Fail if we're off the end of the block.
 	 */
-	if (ptr > INT_GET(block->bb_numrecs, ARCH_CONVERT)) {
+	if (ptr > be16_to_cpu(block->bb_numrecs)) {
 		*stat = 0;
 		return 0;
 	}
@@ -131,18 +131,18 @@
 		lkp = XFS_ALLOC_KEY_ADDR(block, 1, cur);
 		lpp = XFS_ALLOC_PTR_ADDR(block, 1, cur);
 #ifdef DEBUG
-		for (i = ptr; i < INT_GET(block->bb_numrecs, ARCH_CONVERT); i++) {
-			if ((error = xfs_btree_check_sptr(cur, INT_GET(lpp[i], ARCH_CONVERT), level)))
+		for (i = ptr; i < be16_to_cpu(block->bb_numrecs); i++) {
+			if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(lpp[i]), level)))
 				return error;
 		}
 #endif
-		if (ptr < INT_GET(block->bb_numrecs, ARCH_CONVERT)) {
+		if (ptr < be16_to_cpu(block->bb_numrecs)) {
 			memmove(&lkp[ptr - 1], &lkp[ptr],
-				(INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr) * sizeof(*lkp)); /* INT_: mem copy */
+				(be16_to_cpu(block->bb_numrecs) - ptr) * sizeof(*lkp));
 			memmove(&lpp[ptr - 1], &lpp[ptr],
-				(INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr) * sizeof(*lpp)); /* INT_: mem copy */
-			xfs_alloc_log_ptrs(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT) - 1);
-			xfs_alloc_log_keys(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT) - 1);
+				(be16_to_cpu(block->bb_numrecs) - ptr) * sizeof(*lpp));
+			xfs_alloc_log_ptrs(cur, bp, ptr, be16_to_cpu(block->bb_numrecs) - 1);
+			xfs_alloc_log_keys(cur, bp, ptr, be16_to_cpu(block->bb_numrecs) - 1);
 		}
 	}
 	/*
@@ -151,25 +151,25 @@
 	 */
 	else {
 		lrp = XFS_ALLOC_REC_ADDR(block, 1, cur);
-		if (ptr < INT_GET(block->bb_numrecs, ARCH_CONVERT)) {
+		if (ptr < be16_to_cpu(block->bb_numrecs)) {
 			memmove(&lrp[ptr - 1], &lrp[ptr],
-				(INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr) * sizeof(*lrp));
-			xfs_alloc_log_recs(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT) - 1);
+				(be16_to_cpu(block->bb_numrecs) - ptr) * sizeof(*lrp));
+			xfs_alloc_log_recs(cur, bp, ptr, be16_to_cpu(block->bb_numrecs) - 1);
 		}
 		/*
 		 * If it's the first record in the block, we'll need a key
 		 * structure to pass up to the next level (updkey).
 		 */
 		if (ptr == 1) {
-			key.ar_startblock = lrp->ar_startblock; /* INT_: direct copy */
-			key.ar_blockcount = lrp->ar_blockcount; /* INT_: direct copy */
+			key.ar_startblock = lrp->ar_startblock;
+			key.ar_blockcount = lrp->ar_blockcount;
 			lkp = &key;
 		}
 	}
 	/*
 	 * Decrement and log the number of entries in the block.
 	 */
-	INT_MOD(block->bb_numrecs, ARCH_CONVERT, -1);
+	be16_add(&block->bb_numrecs, -1);
 	xfs_alloc_log_block(cur->bc_tp, bp, XFS_BB_NUMRECS);
 	/*
 	 * See if the longest free extent in the allocation group was
@@ -182,24 +182,24 @@
 
 	if (level == 0 &&
 	    cur->bc_btnum == XFS_BTNUM_CNT &&
-	    INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK &&
-	    ptr > INT_GET(block->bb_numrecs, ARCH_CONVERT)) {
-		ASSERT(ptr == INT_GET(block->bb_numrecs, ARCH_CONVERT) + 1);
+	    be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK &&
+	    ptr > be16_to_cpu(block->bb_numrecs)) {
+		ASSERT(ptr == be16_to_cpu(block->bb_numrecs) + 1);
 		/*
 		 * There are still records in the block.  Grab the size
 		 * from the last one.
 		 */
-		if (INT_GET(block->bb_numrecs, ARCH_CONVERT)) {
-			rrp = XFS_ALLOC_REC_ADDR(block, INT_GET(block->bb_numrecs, ARCH_CONVERT), cur);
-			INT_COPY(agf->agf_longest, rrp->ar_blockcount, ARCH_CONVERT);
+		if (be16_to_cpu(block->bb_numrecs)) {
+			rrp = XFS_ALLOC_REC_ADDR(block, be16_to_cpu(block->bb_numrecs), cur);
+			agf->agf_longest = rrp->ar_blockcount;
 		}
 		/*
 		 * No free extents left.
 		 */
 		else
 			agf->agf_longest = 0;
-		mp->m_perag[INT_GET(agf->agf_seqno, ARCH_CONVERT)].pagf_longest =
-			INT_GET(agf->agf_longest, ARCH_CONVERT);
+		mp->m_perag[be32_to_cpu(agf->agf_seqno)].pagf_longest =
+			be32_to_cpu(agf->agf_longest);
 		xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp,
 			XFS_AGF_LONGEST);
 	}
@@ -213,15 +213,15 @@
 		 * and it's NOT the leaf level,
 		 * then we can get rid of this level.
 		 */
-		if (INT_GET(block->bb_numrecs, ARCH_CONVERT) == 1 && level > 0) {
+		if (be16_to_cpu(block->bb_numrecs) == 1 && level > 0) {
 			/*
 			 * lpp is still set to the first pointer in the block.
 			 * Make it the new root of the btree.
 			 */
-			bno = INT_GET(agf->agf_roots[cur->bc_btnum], ARCH_CONVERT);
-			INT_COPY(agf->agf_roots[cur->bc_btnum], *lpp, ARCH_CONVERT);
-			INT_MOD(agf->agf_levels[cur->bc_btnum], ARCH_CONVERT, -1);
-			mp->m_perag[INT_GET(agf->agf_seqno, ARCH_CONVERT)].pagf_levels[cur->bc_btnum]--;
+			bno = be32_to_cpu(agf->agf_roots[cur->bc_btnum]);
+			agf->agf_roots[cur->bc_btnum] = *lpp;
+			be32_add(&agf->agf_levels[cur->bc_btnum], -1);
+			mp->m_perag[be32_to_cpu(agf->agf_seqno)].pagf_levels[cur->bc_btnum]--;
 			/*
 			 * Put this buffer/block on the ag's freelist.
 			 */
@@ -243,7 +243,7 @@
 			 * that freed the block.
 			 */
 			xfs_alloc_mark_busy(cur->bc_tp,
-				INT_GET(agf->agf_seqno, ARCH_CONVERT), bno, 1);
+				be32_to_cpu(agf->agf_seqno), bno, 1);
 
 			xfs_trans_agbtree_delta(cur->bc_tp, -1);
 			xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp,
@@ -269,7 +269,7 @@
 	 * If the number of records remaining in the block is at least
 	 * the minimum, we're done.
 	 */
-	if (INT_GET(block->bb_numrecs, ARCH_CONVERT) >= XFS_ALLOC_BLOCK_MINRECS(level, cur)) {
+	if (be16_to_cpu(block->bb_numrecs) >= XFS_ALLOC_BLOCK_MINRECS(level, cur)) {
 		if (level > 0 && (error = xfs_alloc_decrement(cur, level, &i)))
 			return error;
 		*stat = 1;
@@ -280,8 +280,8 @@
 	 * tree balanced.  Look at the left and right sibling blocks to
 	 * see if we can re-balance by moving only one record.
 	 */
-	rbno = INT_GET(block->bb_rightsib, ARCH_CONVERT);
-	lbno = INT_GET(block->bb_leftsib, ARCH_CONVERT);
+	rbno = be32_to_cpu(block->bb_rightsib);
+	lbno = be32_to_cpu(block->bb_leftsib);
 	bno = NULLAGBLOCK;
 	ASSERT(rbno != NULLAGBLOCK || lbno != NULLAGBLOCK);
 	/*
@@ -318,18 +318,18 @@
 		/*
 		 * Grab the current block number, for future use.
 		 */
-		bno = INT_GET(right->bb_leftsib, ARCH_CONVERT);
+		bno = be32_to_cpu(right->bb_leftsib);
 		/*
 		 * If right block is full enough so that removing one entry
 		 * won't make it too empty, and left-shifting an entry out
 		 * of right to us works, we're done.
 		 */
-		if (INT_GET(right->bb_numrecs, ARCH_CONVERT) - 1 >=
+		if (be16_to_cpu(right->bb_numrecs) - 1 >=
 		     XFS_ALLOC_BLOCK_MINRECS(level, cur)) {
 			if ((error = xfs_alloc_lshift(tcur, level, &i)))
 				goto error0;
 			if (i) {
-				ASSERT(INT_GET(block->bb_numrecs, ARCH_CONVERT) >=
+				ASSERT(be16_to_cpu(block->bb_numrecs) >=
 				       XFS_ALLOC_BLOCK_MINRECS(level, cur));
 				xfs_btree_del_cursor(tcur,
 						     XFS_BTREE_NOERROR);
@@ -346,7 +346,7 @@
 		 * future reference, and fix up the temp cursor to point
 		 * to our block again (last record).
 		 */
-		rrecs = INT_GET(right->bb_numrecs, ARCH_CONVERT);
+		rrecs = be16_to_cpu(right->bb_numrecs);
 		if (lbno != NULLAGBLOCK) {
 			i = xfs_btree_firstrec(tcur, level);
 			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
@@ -382,18 +382,18 @@
 		/*
 		 * Grab the current block number, for future use.
 		 */
-		bno = INT_GET(left->bb_rightsib, ARCH_CONVERT);
+		bno = be32_to_cpu(left->bb_rightsib);
 		/*
 		 * If left block is full enough so that removing one entry
 		 * won't make it too empty, and right-shifting an entry out
 		 * of left to us works, we're done.
 		 */
-		if (INT_GET(left->bb_numrecs, ARCH_CONVERT) - 1 >=
+		if (be16_to_cpu(left->bb_numrecs) - 1 >=
 		     XFS_ALLOC_BLOCK_MINRECS(level, cur)) {
 			if ((error = xfs_alloc_rshift(tcur, level, &i)))
 				goto error0;
 			if (i) {
-				ASSERT(INT_GET(block->bb_numrecs, ARCH_CONVERT) >=
+				ASSERT(be16_to_cpu(block->bb_numrecs) >=
 				       XFS_ALLOC_BLOCK_MINRECS(level, cur));
 				xfs_btree_del_cursor(tcur,
 						     XFS_BTREE_NOERROR);
@@ -407,7 +407,7 @@
 		 * Otherwise, grab the number of records in right for
 		 * future reference.
 		 */
-		lrecs = INT_GET(left->bb_numrecs, ARCH_CONVERT);
+		lrecs = be16_to_cpu(left->bb_numrecs);
 	}
 	/*
 	 * Delete the temp cursor, we're done with it.
@@ -421,7 +421,7 @@
 	 * See if we can join with the left neighbor block.
 	 */
 	if (lbno != NULLAGBLOCK &&
-	    lrecs + INT_GET(block->bb_numrecs, ARCH_CONVERT) <= XFS_ALLOC_BLOCK_MAXRECS(level, cur)) {
+	    lrecs + be16_to_cpu(block->bb_numrecs) <= XFS_ALLOC_BLOCK_MAXRECS(level, cur)) {
 		/*
 		 * Set "right" to be the starting block,
 		 * "left" to be the left neighbor.
@@ -441,7 +441,7 @@
 	 * If that won't work, see if we can join with the right neighbor block.
 	 */
 	else if (rbno != NULLAGBLOCK &&
-		 rrecs + INT_GET(block->bb_numrecs, ARCH_CONVERT) <=
+		 rrecs + be16_to_cpu(block->bb_numrecs) <=
 		  XFS_ALLOC_BLOCK_MAXRECS(level, cur)) {
 		/*
 		 * Set "left" to be the starting block,
@@ -476,31 +476,34 @@
 		/*
 		 * It's a non-leaf.  Move keys and pointers.
 		 */
-		lkp = XFS_ALLOC_KEY_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, cur);
-		lpp = XFS_ALLOC_PTR_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, cur);
+		lkp = XFS_ALLOC_KEY_ADDR(left, be16_to_cpu(left->bb_numrecs) + 1, cur);
+		lpp = XFS_ALLOC_PTR_ADDR(left, be16_to_cpu(left->bb_numrecs) + 1, cur);
 		rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur);
 		rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur);
 #ifdef DEBUG
-		for (i = 0; i < INT_GET(right->bb_numrecs, ARCH_CONVERT); i++) {
-			if ((error = xfs_btree_check_sptr(cur, INT_GET(rpp[i], ARCH_CONVERT), level)))
+		for (i = 0; i < be16_to_cpu(right->bb_numrecs); i++) {
+			if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(rpp[i]), level)))
 				return error;
 		}
 #endif
-		memcpy(lkp, rkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*lkp)); /* INT_: structure copy */
-		memcpy(lpp, rpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*lpp)); /* INT_: structure copy */
-		xfs_alloc_log_keys(cur, lbp, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1,
-				   INT_GET(left->bb_numrecs, ARCH_CONVERT) + INT_GET(right->bb_numrecs, ARCH_CONVERT));
-		xfs_alloc_log_ptrs(cur, lbp, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1,
-				   INT_GET(left->bb_numrecs, ARCH_CONVERT) + INT_GET(right->bb_numrecs, ARCH_CONVERT));
+		memcpy(lkp, rkp, be16_to_cpu(right->bb_numrecs) * sizeof(*lkp));
+		memcpy(lpp, rpp, be16_to_cpu(right->bb_numrecs) * sizeof(*lpp));
+		xfs_alloc_log_keys(cur, lbp, be16_to_cpu(left->bb_numrecs) + 1,
+				   be16_to_cpu(left->bb_numrecs) +
+				   be16_to_cpu(right->bb_numrecs));
+		xfs_alloc_log_ptrs(cur, lbp, be16_to_cpu(left->bb_numrecs) + 1,
+				   be16_to_cpu(left->bb_numrecs) +
+				   be16_to_cpu(right->bb_numrecs));
 	} else {
 		/*
 		 * It's a leaf.  Move records.
 		 */
-		lrp = XFS_ALLOC_REC_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, cur);
+		lrp = XFS_ALLOC_REC_ADDR(left, be16_to_cpu(left->bb_numrecs) + 1, cur);
 		rrp = XFS_ALLOC_REC_ADDR(right, 1, cur);
-		memcpy(lrp, rrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*lrp));
-		xfs_alloc_log_recs(cur, lbp, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1,
-				   INT_GET(left->bb_numrecs, ARCH_CONVERT) + INT_GET(right->bb_numrecs, ARCH_CONVERT));
+		memcpy(lrp, rrp, be16_to_cpu(right->bb_numrecs) * sizeof(*lrp));
+		xfs_alloc_log_recs(cur, lbp, be16_to_cpu(left->bb_numrecs) + 1,
+				   be16_to_cpu(left->bb_numrecs) +
+				   be16_to_cpu(right->bb_numrecs));
 	}
 	/*
 	 * If we joined with the left neighbor, set the buffer in the
@@ -508,7 +511,7 @@
 	 */
 	if (bp != lbp) {
 		xfs_btree_setbuf(cur, level, lbp);
-		cur->bc_ptrs[level] += INT_GET(left->bb_numrecs, ARCH_CONVERT);
+		cur->bc_ptrs[level] += be16_to_cpu(left->bb_numrecs);
 	}
 	/*
 	 * If we joined with the right neighbor and there's a level above
@@ -520,28 +523,28 @@
 	/*
 	 * Fix up the number of records in the surviving block.
 	 */
-	INT_MOD(left->bb_numrecs, ARCH_CONVERT, INT_GET(right->bb_numrecs, ARCH_CONVERT));
+	be16_add(&left->bb_numrecs, be16_to_cpu(right->bb_numrecs));
 	/*
 	 * Fix up the right block pointer in the surviving block, and log it.
 	 */
-	left->bb_rightsib = right->bb_rightsib; /* INT_: direct copy */
+	left->bb_rightsib = right->bb_rightsib;
 	xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
 	/*
 	 * If there is a right sibling now, make it point to the
 	 * remaining block.
 	 */
-	if (INT_GET(left->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) {
+	if (be32_to_cpu(left->bb_rightsib) != NULLAGBLOCK) {
 		xfs_alloc_block_t	*rrblock;
 		xfs_buf_t		*rrbp;
 
 		if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
-				cur->bc_private.a.agno, INT_GET(left->bb_rightsib, ARCH_CONVERT), 0,
+				cur->bc_private.a.agno, be32_to_cpu(left->bb_rightsib), 0,
 				&rrbp, XFS_ALLOC_BTREE_REF)))
 			return error;
 		rrblock = XFS_BUF_TO_ALLOC_BLOCK(rrbp);
 		if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp)))
 			return error;
-		INT_SET(rrblock->bb_leftsib, ARCH_CONVERT, lbno);
+		rrblock->bb_leftsib = cpu_to_be32(lbno);
 		xfs_alloc_log_block(cur->bc_tp, rrbp, XFS_BB_LEFTSIB);
 	}
 	/*
@@ -562,10 +565,9 @@
 	 * busy block is allocated, the iclog is pushed up to the
 	 * LSN that freed the block.
 	 */
-	xfs_alloc_mark_busy(cur->bc_tp,
-		INT_GET(agf->agf_seqno, ARCH_CONVERT), bno, 1);
-
+	xfs_alloc_mark_busy(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1);
 	xfs_trans_agbtree_delta(cur->bc_tp, -1);
+
 	/*
 	 * Adjust the current level's cursor so that we're left referring
 	 * to the right node, after we're done.
@@ -613,7 +615,7 @@
 	int			ptr;	/* index in btree block for this rec */
 	xfs_alloc_rec_t		*rp;	/* pointer to btree records */
 
-	ASSERT(INT_GET(recp->ar_blockcount, ARCH_CONVERT) > 0);
+	ASSERT(be32_to_cpu(recp->ar_blockcount) > 0);
 
 	/*
 	 * GCC doesn't understand the (arguably complex) control flow in
@@ -637,8 +639,8 @@
 	/*
 	 * Make a key out of the record data to be inserted, and save it.
 	 */
-	key.ar_startblock = recp->ar_startblock; /* INT_: direct copy */
-	key.ar_blockcount = recp->ar_blockcount; /* INT_: direct copy */
+	key.ar_startblock = recp->ar_startblock;
+	key.ar_blockcount = recp->ar_blockcount;
 	optr = ptr = cur->bc_ptrs[level];
 	/*
 	 * If we're off the left edge, return failure.
@@ -659,7 +661,7 @@
 	/*
 	 * Check that the new entry is being inserted in the right place.
 	 */
-	if (ptr <= INT_GET(block->bb_numrecs, ARCH_CONVERT)) {
+	if (ptr <= be16_to_cpu(block->bb_numrecs)) {
 		if (level == 0) {
 			rp = XFS_ALLOC_REC_ADDR(block, ptr, cur);
 			xfs_btree_check_rec(cur->bc_btnum, recp, rp);
@@ -675,7 +677,7 @@
 	 * If the block is full, we can't insert the new entry until we
 	 * make the block un-full.
 	 */
-	if (INT_GET(block->bb_numrecs, ARCH_CONVERT) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) {
+	if (be16_to_cpu(block->bb_numrecs) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) {
 		/*
 		 * First, try shifting an entry to the right neighbor.
 		 */
@@ -712,8 +714,8 @@
 						return error;
 #endif
 					ptr = cur->bc_ptrs[level];
-					nrec.ar_startblock = nkey.ar_startblock; /* INT_: direct copy */
-					nrec.ar_blockcount = nkey.ar_blockcount; /* INT_: direct copy */
+					nrec.ar_startblock = nkey.ar_startblock;
+					nrec.ar_blockcount = nkey.ar_blockcount;
 				}
 				/*
 				 * Otherwise the insert fails.
@@ -737,15 +739,15 @@
 		kp = XFS_ALLOC_KEY_ADDR(block, 1, cur);
 		pp = XFS_ALLOC_PTR_ADDR(block, 1, cur);
 #ifdef DEBUG
-		for (i = INT_GET(block->bb_numrecs, ARCH_CONVERT); i >= ptr; i--) {
-			if ((error = xfs_btree_check_sptr(cur, INT_GET(pp[i - 1], ARCH_CONVERT), level)))
+		for (i = be16_to_cpu(block->bb_numrecs); i >= ptr; i--) {
+			if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(pp[i - 1]), level)))
 				return error;
 		}
 #endif
 		memmove(&kp[ptr], &kp[ptr - 1],
-			(INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr + 1) * sizeof(*kp)); /* INT_: copy */
+			(be16_to_cpu(block->bb_numrecs) - ptr + 1) * sizeof(*kp));
 		memmove(&pp[ptr], &pp[ptr - 1],
-			(INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr + 1) * sizeof(*pp)); /* INT_: copy */
+			(be16_to_cpu(block->bb_numrecs) - ptr + 1) * sizeof(*pp));
 #ifdef DEBUG
 		if ((error = xfs_btree_check_sptr(cur, *bnop, level)))
 			return error;
@@ -754,12 +756,12 @@
 		 * Now stuff the new data in, bump numrecs and log the new data.
 		 */
 		kp[ptr - 1] = key;
-		INT_SET(pp[ptr - 1], ARCH_CONVERT, *bnop);
-		INT_MOD(block->bb_numrecs, ARCH_CONVERT, +1);
-		xfs_alloc_log_keys(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT));
-		xfs_alloc_log_ptrs(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT));
+		pp[ptr - 1] = cpu_to_be32(*bnop);
+		be16_add(&block->bb_numrecs, 1);
+		xfs_alloc_log_keys(cur, bp, ptr, be16_to_cpu(block->bb_numrecs));
+		xfs_alloc_log_ptrs(cur, bp, ptr, be16_to_cpu(block->bb_numrecs));
 #ifdef DEBUG
-		if (ptr < INT_GET(block->bb_numrecs, ARCH_CONVERT))
+		if (ptr < be16_to_cpu(block->bb_numrecs))
 			xfs_btree_check_key(cur->bc_btnum, kp + ptr - 1,
 				kp + ptr);
 #endif
@@ -769,16 +771,16 @@
 		 */
 		rp = XFS_ALLOC_REC_ADDR(block, 1, cur);
 		memmove(&rp[ptr], &rp[ptr - 1],
-			(INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr + 1) * sizeof(*rp));
+			(be16_to_cpu(block->bb_numrecs) - ptr + 1) * sizeof(*rp));
 		/*
 		 * Now stuff the new record in, bump numrecs
 		 * and log the new data.
 		 */
 		rp[ptr - 1] = *recp; /* INT_: struct copy */
-		INT_MOD(block->bb_numrecs, ARCH_CONVERT, +1);
-		xfs_alloc_log_recs(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT));
+		be16_add(&block->bb_numrecs, 1);
+		xfs_alloc_log_recs(cur, bp, ptr, be16_to_cpu(block->bb_numrecs));
 #ifdef DEBUG
-		if (ptr < INT_GET(block->bb_numrecs, ARCH_CONVERT))
+		if (ptr < be16_to_cpu(block->bb_numrecs))
 			xfs_btree_check_rec(cur->bc_btnum, rp + ptr - 1,
 				rp + ptr);
 #endif
@@ -800,16 +802,16 @@
 	agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
 	if (level == 0 &&
 	    cur->bc_btnum == XFS_BTNUM_CNT &&
-	    INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK &&
-	    INT_GET(recp->ar_blockcount, ARCH_CONVERT) > INT_GET(agf->agf_longest, ARCH_CONVERT)) {
+	    be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK &&
+	    be32_to_cpu(recp->ar_blockcount) > be32_to_cpu(agf->agf_longest)) {
 		/*
 		 * If this is a leaf in the by-size btree and there
 		 * is no right sibling block and this block is bigger
 		 * than the previous longest block, update it.
 		 */
-		INT_COPY(agf->agf_longest, recp->ar_blockcount, ARCH_CONVERT);
-		cur->bc_mp->m_perag[INT_GET(agf->agf_seqno, ARCH_CONVERT)].pagf_longest
-			= INT_GET(recp->ar_blockcount, ARCH_CONVERT);
+		agf->agf_longest = recp->ar_blockcount;
+		cur->bc_mp->m_perag[be32_to_cpu(agf->agf_seqno)].pagf_longest
+			= be32_to_cpu(recp->ar_blockcount);
 		xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp,
 			XFS_AGF_LONGEST);
 	}
@@ -919,8 +921,9 @@
 
 		agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
 		for (p = &rp[rfirst - 1]; p <= &rp[rlast - 1]; p++)
-			ASSERT(INT_GET(p->ar_startblock, ARCH_CONVERT) + INT_GET(p->ar_blockcount, ARCH_CONVERT) <=
-			       INT_GET(agf->agf_length, ARCH_CONVERT));
+			ASSERT(be32_to_cpu(p->ar_startblock) +
+			       be32_to_cpu(p->ar_blockcount) <=
+			       be32_to_cpu(agf->agf_length));
 	}
 #endif
 	first = (int)((xfs_caddr_t)&rp[rfirst - 1] - (xfs_caddr_t)block);
@@ -957,8 +960,8 @@
 		xfs_agf_t	*agf;	/* a.g. freespace header */
 
 		agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
-		agno = INT_GET(agf->agf_seqno, ARCH_CONVERT);
-		agbno = INT_GET(agf->agf_roots[cur->bc_btnum], ARCH_CONVERT);
+		agno = be32_to_cpu(agf->agf_seqno);
+		agbno = be32_to_cpu(agf->agf_roots[cur->bc_btnum]);
 	}
 	/*
 	 * Iterate over each level in the btree, starting at the root.
@@ -1025,7 +1028,7 @@
 			 * Set low and high entry numbers, 1-based.
 			 */
 			low = 1;
-			if (!(high = INT_GET(block->bb_numrecs, ARCH_CONVERT))) {
+			if (!(high = be16_to_cpu(block->bb_numrecs))) {
 				/*
 				 * If the block is empty, the tree must
 				 * be an empty leaf.
@@ -1054,14 +1057,14 @@
 					xfs_alloc_key_t	*kkp;
 
 					kkp = kkbase + keyno - 1;
-					startblock = INT_GET(kkp->ar_startblock, ARCH_CONVERT);
-					blockcount = INT_GET(kkp->ar_blockcount, ARCH_CONVERT);
+					startblock = be32_to_cpu(kkp->ar_startblock);
+					blockcount = be32_to_cpu(kkp->ar_blockcount);
 				} else {
 					xfs_alloc_rec_t	*krp;
 
 					krp = krbase + keyno - 1;
-					startblock = INT_GET(krp->ar_startblock, ARCH_CONVERT);
-					blockcount = INT_GET(krp->ar_blockcount, ARCH_CONVERT);
+					startblock = be32_to_cpu(krp->ar_startblock);
+					blockcount = be32_to_cpu(krp->ar_blockcount);
 				}
 				/*
 				 * Compute difference to get next direction.
@@ -1101,7 +1104,7 @@
 			 */
 			if (diff > 0 && --keyno < 1)
 				keyno = 1;
-			agbno = INT_GET(*XFS_ALLOC_PTR_ADDR(block, keyno, cur), ARCH_CONVERT);
+			agbno = be32_to_cpu(*XFS_ALLOC_PTR_ADDR(block, keyno, cur));
 #ifdef DEBUG
 			if ((error = xfs_btree_check_sptr(cur, agbno, level)))
 				return error;
@@ -1120,8 +1123,8 @@
 		 * not the last block, we're in the wrong block.
 		 */
 		if (dir == XFS_LOOKUP_GE &&
-		    keyno > INT_GET(block->bb_numrecs, ARCH_CONVERT) &&
-		    INT_GET(block->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) {
+		    keyno > be16_to_cpu(block->bb_numrecs) &&
+		    be32_to_cpu(block->bb_rightsib) != NULLAGBLOCK) {
 			int	i;
 
 			cur->bc_ptrs[0] = keyno;
@@ -1138,7 +1141,7 @@
 	/*
 	 * Return if we succeeded or not.
 	 */
-	if (keyno == 0 || keyno > INT_GET(block->bb_numrecs, ARCH_CONVERT))
+	if (keyno == 0 || keyno > be16_to_cpu(block->bb_numrecs))
 		*stat = 0;
 	else
 		*stat = ((dir != XFS_LOOKUP_EQ) || (diff == 0));
@@ -1181,7 +1184,7 @@
 	/*
 	 * If we've got no left sibling then we can't shift an entry left.
 	 */
-	if (INT_GET(right->bb_leftsib, ARCH_CONVERT) == NULLAGBLOCK) {
+	if (be32_to_cpu(right->bb_leftsib) == NULLAGBLOCK) {
 		*stat = 0;
 		return 0;
 	}
@@ -1197,8 +1200,8 @@
 	 * Set up the left neighbor as "left".
 	 */
 	if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
-			cur->bc_private.a.agno, INT_GET(right->bb_leftsib, ARCH_CONVERT), 0, &lbp,
-			XFS_ALLOC_BTREE_REF)))
+			cur->bc_private.a.agno, be32_to_cpu(right->bb_leftsib),
+			0, &lbp, XFS_ALLOC_BTREE_REF)))
 		return error;
 	left = XFS_BUF_TO_ALLOC_BLOCK(lbp);
 	if ((error = xfs_btree_check_sblock(cur, left, level, lbp)))
@@ -1206,11 +1209,11 @@
 	/*
 	 * If it's full, it can't take another entry.
 	 */
-	if (INT_GET(left->bb_numrecs, ARCH_CONVERT) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) {
+	if (be16_to_cpu(left->bb_numrecs) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) {
 		*stat = 0;
 		return 0;
 	}
-	nrec = INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1;
+	nrec = be16_to_cpu(left->bb_numrecs) + 1;
 	/*
 	 * If non-leaf, copy a key and a ptr to the left block.
 	 */
@@ -1225,7 +1228,7 @@
 		lpp = XFS_ALLOC_PTR_ADDR(left, nrec, cur);
 		rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur);
 #ifdef DEBUG
-		if ((error = xfs_btree_check_sptr(cur, INT_GET(*rpp, ARCH_CONVERT), level)))
+		if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(*rpp), level)))
 			return error;
 #endif
 		*lpp = *rpp; /* INT_: copy */
@@ -1247,30 +1250,30 @@
 	/*
 	 * Bump and log left's numrecs, decrement and log right's numrecs.
 	 */
-	INT_MOD(left->bb_numrecs, ARCH_CONVERT, +1);
+	be16_add(&left->bb_numrecs, 1);
 	xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS);
-	INT_MOD(right->bb_numrecs, ARCH_CONVERT, -1);
+	be16_add(&right->bb_numrecs, -1);
 	xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS);
 	/*
 	 * Slide the contents of right down one entry.
 	 */
 	if (level > 0) {
 #ifdef DEBUG
-		for (i = 0; i < INT_GET(right->bb_numrecs, ARCH_CONVERT); i++) {
-			if ((error = xfs_btree_check_sptr(cur, INT_GET(rpp[i + 1], ARCH_CONVERT),
+		for (i = 0; i < be16_to_cpu(right->bb_numrecs); i++) {
+			if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(rpp[i + 1]),
 					level)))
 				return error;
 		}
 #endif
-		memmove(rkp, rkp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp));
-		memmove(rpp, rpp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp));
-		xfs_alloc_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
-		xfs_alloc_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
+		memmove(rkp, rkp + 1, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp));
+		memmove(rpp, rpp + 1, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp));
+		xfs_alloc_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
+		xfs_alloc_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
 	} else {
-		memmove(rrp, rrp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp));
-		xfs_alloc_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
-		key.ar_startblock = rrp->ar_startblock; /* INT_: direct copy */
-		key.ar_blockcount = rrp->ar_blockcount; /* INT_: direct copy */
+		memmove(rrp, rrp + 1, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp));
+		xfs_alloc_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
+		key.ar_startblock = rrp->ar_startblock;
+		key.ar_blockcount = rrp->ar_blockcount;
 		rkp = &key;
 	}
 	/*
@@ -1335,9 +1338,9 @@
 		xfs_agnumber_t	seqno;
 
 		agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
-		INT_SET(agf->agf_roots[cur->bc_btnum], ARCH_CONVERT, nbno);
-		INT_MOD(agf->agf_levels[cur->bc_btnum], ARCH_CONVERT, 1);
-		seqno = INT_GET(agf->agf_seqno, ARCH_CONVERT);
+		agf->agf_roots[cur->bc_btnum] = cpu_to_be32(nbno);
+		be32_add(&agf->agf_levels[cur->bc_btnum], 1);
+		seqno = be32_to_cpu(agf->agf_seqno);
 		mp->m_perag[seqno].pagf_levels[cur->bc_btnum]++;
 		xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp,
 			XFS_AGF_ROOTS | XFS_AGF_LEVELS);
@@ -1354,12 +1357,12 @@
 	if ((error = xfs_btree_check_sblock(cur, left, cur->bc_nlevels - 1, lbp)))
 		return error;
 #endif
-	if (INT_GET(left->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) {
+	if (be32_to_cpu(left->bb_rightsib) != NULLAGBLOCK) {
 		/*
 		 * Our block is left, pick up the right block.
 		 */
 		lbno = XFS_DADDR_TO_AGBNO(mp, XFS_BUF_ADDR(lbp));
-		rbno = INT_GET(left->bb_rightsib, ARCH_CONVERT);
+		rbno = be32_to_cpu(left->bb_rightsib);
 		if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
 				cur->bc_private.a.agno, rbno, 0, &rbp,
 				XFS_ALLOC_BTREE_REF)))
@@ -1376,7 +1379,7 @@
 		rbp = lbp;
 		right = left;
 		rbno = XFS_DADDR_TO_AGBNO(mp, XFS_BUF_ADDR(rbp));
-		lbno = INT_GET(right->bb_leftsib, ARCH_CONVERT);
+		lbno = be32_to_cpu(right->bb_leftsib);
 		if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
 				cur->bc_private.a.agno, lbno, 0, &lbp,
 				XFS_ALLOC_BTREE_REF)))
@@ -1390,11 +1393,11 @@
 	/*
 	 * Fill in the new block's btree header and log it.
 	 */
-	INT_SET(new->bb_magic, ARCH_CONVERT, xfs_magics[cur->bc_btnum]);
-	INT_SET(new->bb_level, ARCH_CONVERT, (__uint16_t)cur->bc_nlevels);
-	INT_SET(new->bb_numrecs, ARCH_CONVERT, 2);
-	INT_SET(new->bb_leftsib, ARCH_CONVERT, NULLAGBLOCK);
-	INT_SET(new->bb_rightsib, ARCH_CONVERT, NULLAGBLOCK);
+	new->bb_magic = cpu_to_be32(xfs_magics[cur->bc_btnum]);
+	new->bb_level = cpu_to_be16(cur->bc_nlevels);
+	new->bb_numrecs = cpu_to_be16(2);
+	new->bb_leftsib = cpu_to_be32(NULLAGBLOCK);
+	new->bb_rightsib = cpu_to_be32(NULLAGBLOCK);
 	xfs_alloc_log_block(cur->bc_tp, nbp, XFS_BB_ALL_BITS);
 	ASSERT(lbno != NULLAGBLOCK && rbno != NULLAGBLOCK);
 	/*
@@ -1404,18 +1407,18 @@
 		xfs_alloc_key_t		*kp;	/* btree key pointer */
 
 		kp = XFS_ALLOC_KEY_ADDR(new, 1, cur);
-		if (INT_GET(left->bb_level, ARCH_CONVERT) > 0) {
+		if (be16_to_cpu(left->bb_level) > 0) {
 			kp[0] = *XFS_ALLOC_KEY_ADDR(left, 1, cur); /* INT_: structure copy */
 			kp[1] = *XFS_ALLOC_KEY_ADDR(right, 1, cur);/* INT_: structure copy */
 		} else {
 			xfs_alloc_rec_t	*rp;	/* btree record pointer */
 
 			rp = XFS_ALLOC_REC_ADDR(left, 1, cur);
-			kp[0].ar_startblock = rp->ar_startblock; /* INT_: direct copy */
-			kp[0].ar_blockcount = rp->ar_blockcount; /* INT_: direct copy */
+			kp[0].ar_startblock = rp->ar_startblock;
+			kp[0].ar_blockcount = rp->ar_blockcount;
 			rp = XFS_ALLOC_REC_ADDR(right, 1, cur);
-			kp[1].ar_startblock = rp->ar_startblock; /* INT_: direct copy */
-			kp[1].ar_blockcount = rp->ar_blockcount; /* INT_: direct copy */
+			kp[1].ar_startblock = rp->ar_startblock;
+			kp[1].ar_blockcount = rp->ar_blockcount;
 		}
 	}
 	xfs_alloc_log_keys(cur, nbp, 1, 2);
@@ -1426,8 +1429,8 @@
 		xfs_alloc_ptr_t		*pp;	/* btree address pointer */
 
 		pp = XFS_ALLOC_PTR_ADDR(new, 1, cur);
-		INT_SET(pp[0], ARCH_CONVERT, lbno);
-		INT_SET(pp[1], ARCH_CONVERT, rbno);
+		pp[0] = cpu_to_be32(lbno);
+		pp[1] = cpu_to_be32(rbno);
 	}
 	xfs_alloc_log_ptrs(cur, nbp, 1, 2);
 	/*
@@ -1472,7 +1475,7 @@
 	/*
 	 * If we've got no right sibling then we can't shift an entry right.
 	 */
-	if (INT_GET(left->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK) {
+	if (be32_to_cpu(left->bb_rightsib) == NULLAGBLOCK) {
 		*stat = 0;
 		return 0;
 	}
@@ -1480,7 +1483,7 @@
 	 * If the cursor entry is the one that would be moved, don't
 	 * do it... it's too complicated.
 	 */
-	if (cur->bc_ptrs[level] >= INT_GET(left->bb_numrecs, ARCH_CONVERT)) {
+	if (cur->bc_ptrs[level] >= be16_to_cpu(left->bb_numrecs)) {
 		*stat = 0;
 		return 0;
 	}
@@ -1488,8 +1491,8 @@
 	 * Set up the right neighbor as "right".
 	 */
 	if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
-			cur->bc_private.a.agno, INT_GET(left->bb_rightsib, ARCH_CONVERT), 0, &rbp,
-			XFS_ALLOC_BTREE_REF)))
+			cur->bc_private.a.agno, be32_to_cpu(left->bb_rightsib),
+			0, &rbp, XFS_ALLOC_BTREE_REF)))
 		return error;
 	right = XFS_BUF_TO_ALLOC_BLOCK(rbp);
 	if ((error = xfs_btree_check_sblock(cur, right, level, rbp)))
@@ -1497,7 +1500,7 @@
 	/*
 	 * If it's full, it can't take another entry.
 	 */
-	if (INT_GET(right->bb_numrecs, ARCH_CONVERT) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) {
+	if (be16_to_cpu(right->bb_numrecs) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) {
 		*stat = 0;
 		return 0;
 	}
@@ -1510,47 +1513,47 @@
 		xfs_alloc_ptr_t	*lpp;	/* address pointer for left block */
 		xfs_alloc_ptr_t	*rpp;	/* address pointer for right block */
 
-		lkp = XFS_ALLOC_KEY_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur);
-		lpp = XFS_ALLOC_PTR_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur);
+		lkp = XFS_ALLOC_KEY_ADDR(left, be16_to_cpu(left->bb_numrecs), cur);
+		lpp = XFS_ALLOC_PTR_ADDR(left, be16_to_cpu(left->bb_numrecs), cur);
 		rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur);
 		rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur);
 #ifdef DEBUG
-		for (i = INT_GET(right->bb_numrecs, ARCH_CONVERT) - 1; i >= 0; i--) {
-			if ((error = xfs_btree_check_sptr(cur, INT_GET(rpp[i], ARCH_CONVERT), level)))
+		for (i = be16_to_cpu(right->bb_numrecs) - 1; i >= 0; i--) {
+			if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(rpp[i]), level)))
 				return error;
 		}
 #endif
-		memmove(rkp + 1, rkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp));
-		memmove(rpp + 1, rpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp));
+		memmove(rkp + 1, rkp, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp));
+		memmove(rpp + 1, rpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp));
 #ifdef DEBUG
-		if ((error = xfs_btree_check_sptr(cur, INT_GET(*lpp, ARCH_CONVERT), level)))
+		if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(*lpp), level)))
 			return error;
 #endif
 		*rkp = *lkp; /* INT_: copy */
 		*rpp = *lpp; /* INT_: copy */
-		xfs_alloc_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1);
-		xfs_alloc_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1);
+		xfs_alloc_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1);
+		xfs_alloc_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1);
 		xfs_btree_check_key(cur->bc_btnum, rkp, rkp + 1);
 	} else {
 		xfs_alloc_rec_t	*lrp;	/* record pointer for left block */
 		xfs_alloc_rec_t	*rrp;	/* record pointer for right block */
 
-		lrp = XFS_ALLOC_REC_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur);
+		lrp = XFS_ALLOC_REC_ADDR(left, be16_to_cpu(left->bb_numrecs), cur);
 		rrp = XFS_ALLOC_REC_ADDR(right, 1, cur);
-		memmove(rrp + 1, rrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp));
+		memmove(rrp + 1, rrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp));
 		*rrp = *lrp;
-		xfs_alloc_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1);
-		key.ar_startblock = rrp->ar_startblock; /* INT_: direct copy */
-		key.ar_blockcount = rrp->ar_blockcount; /* INT_: direct copy */
+		xfs_alloc_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1);
+		key.ar_startblock = rrp->ar_startblock;
+		key.ar_blockcount = rrp->ar_blockcount;
 		rkp = &key;
 		xfs_btree_check_rec(cur->bc_btnum, rrp, rrp + 1);
 	}
 	/*
 	 * Decrement and log left's numrecs, bump and log right's numrecs.
 	 */
-	INT_MOD(left->bb_numrecs, ARCH_CONVERT, -1);
+	be16_add(&left->bb_numrecs, -1);
 	xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS);
-	INT_MOD(right->bb_numrecs, ARCH_CONVERT, +1);
+	be16_add(&right->bb_numrecs, 1);
 	xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS);
 	/*
 	 * Using a temporary cursor, update the parent key values of the
@@ -1623,17 +1626,17 @@
 	/*
 	 * Fill in the btree header for the new block.
 	 */
-	INT_SET(right->bb_magic, ARCH_CONVERT, xfs_magics[cur->bc_btnum]);
-	right->bb_level = left->bb_level; /* INT_: direct copy */
-	INT_SET(right->bb_numrecs, ARCH_CONVERT, (__uint16_t)(INT_GET(left->bb_numrecs, ARCH_CONVERT) / 2));
+	right->bb_magic = cpu_to_be32(xfs_magics[cur->bc_btnum]);
+	right->bb_level = left->bb_level;
+	right->bb_numrecs = cpu_to_be16(be16_to_cpu(left->bb_numrecs) / 2);
 	/*
 	 * Make sure that if there's an odd number of entries now, that
 	 * each new block will have the same number of entries.
 	 */
-	if ((INT_GET(left->bb_numrecs, ARCH_CONVERT) & 1) &&
-	    cur->bc_ptrs[level] <= INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1)
-		INT_MOD(right->bb_numrecs, ARCH_CONVERT, +1);
-	i = INT_GET(left->bb_numrecs, ARCH_CONVERT) - INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1;
+	if ((be16_to_cpu(left->bb_numrecs) & 1) &&
+	    cur->bc_ptrs[level] <= be16_to_cpu(right->bb_numrecs) + 1)
+		be16_add(&right->bb_numrecs, 1);
+	i = be16_to_cpu(left->bb_numrecs) - be16_to_cpu(right->bb_numrecs) + 1;
 	/*
 	 * For non-leaf blocks, copy keys and addresses over to the new block.
 	 */
@@ -1648,15 +1651,15 @@
 		rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur);
 		rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur);
 #ifdef DEBUG
-		for (i = 0; i < INT_GET(right->bb_numrecs, ARCH_CONVERT); i++) {
-			if ((error = xfs_btree_check_sptr(cur, INT_GET(lpp[i], ARCH_CONVERT), level)))
+		for (i = 0; i < be16_to_cpu(right->bb_numrecs); i++) {
+			if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(lpp[i]), level)))
 				return error;
 		}
 #endif
-		memcpy(rkp, lkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp)); /* INT_: copy */
-		memcpy(rpp, lpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp)); /* INT_: copy */
-		xfs_alloc_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
-		xfs_alloc_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
+		memcpy(rkp, lkp, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp));
+		memcpy(rpp, lpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp));
+		xfs_alloc_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
+		xfs_alloc_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
 		*keyp = *rkp;
 	}
 	/*
@@ -1668,38 +1671,38 @@
 
 		lrp = XFS_ALLOC_REC_ADDR(left, i, cur);
 		rrp = XFS_ALLOC_REC_ADDR(right, 1, cur);
-		memcpy(rrp, lrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp));
-		xfs_alloc_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
-		keyp->ar_startblock = rrp->ar_startblock; /* INT_: direct copy */
-		keyp->ar_blockcount = rrp->ar_blockcount; /* INT_: direct copy */
+		memcpy(rrp, lrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp));
+		xfs_alloc_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
+		keyp->ar_startblock = rrp->ar_startblock;
+		keyp->ar_blockcount = rrp->ar_blockcount;
 	}
 	/*
 	 * Find the left block number by looking in the buffer.
 	 * Adjust numrecs, sibling pointers.
 	 */
 	lbno = XFS_DADDR_TO_AGBNO(cur->bc_mp, XFS_BUF_ADDR(lbp));
-	INT_MOD(left->bb_numrecs, ARCH_CONVERT, -(INT_GET(right->bb_numrecs, ARCH_CONVERT)));
-	right->bb_rightsib = left->bb_rightsib; /* INT_: direct copy */
-	INT_SET(left->bb_rightsib, ARCH_CONVERT, rbno);
-	INT_SET(right->bb_leftsib, ARCH_CONVERT, lbno);
+	be16_add(&left->bb_numrecs, -(be16_to_cpu(right->bb_numrecs)));
+	right->bb_rightsib = left->bb_rightsib;
+	left->bb_rightsib = cpu_to_be32(rbno);
+	right->bb_leftsib = cpu_to_be32(lbno);
 	xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_ALL_BITS);
 	xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
 	/*
 	 * If there's a block to the new block's right, make that block
 	 * point back to right instead of to left.
 	 */
-	if (INT_GET(right->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) {
+	if (be32_to_cpu(right->bb_rightsib) != NULLAGBLOCK) {
 		xfs_alloc_block_t	*rrblock;	/* rr btree block */
 		xfs_buf_t		*rrbp;		/* buffer for rrblock */
 
 		if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
-				cur->bc_private.a.agno, INT_GET(right->bb_rightsib, ARCH_CONVERT), 0,
+				cur->bc_private.a.agno, be32_to_cpu(right->bb_rightsib), 0,
 				&rrbp, XFS_ALLOC_BTREE_REF)))
 			return error;
 		rrblock = XFS_BUF_TO_ALLOC_BLOCK(rrbp);
 		if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp)))
 			return error;
-		INT_SET(rrblock->bb_leftsib, ARCH_CONVERT, rbno);
+		rrblock->bb_leftsib = cpu_to_be32(rbno);
 		xfs_alloc_log_block(cur->bc_tp, rrbp, XFS_BB_LEFTSIB);
 	}
 	/*
@@ -1707,9 +1710,9 @@
 	 * If it's just pointing past the last entry in left, then we'll
 	 * insert there, so don't change anything in that case.
 	 */
-	if (cur->bc_ptrs[level] > INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1) {
+	if (cur->bc_ptrs[level] > be16_to_cpu(left->bb_numrecs) + 1) {
 		xfs_btree_setbuf(cur, level, rbp);
-		cur->bc_ptrs[level] -= INT_GET(left->bb_numrecs, ARCH_CONVERT);
+		cur->bc_ptrs[level] -= be16_to_cpu(left->bb_numrecs);
 	}
 	/*
 	 * If there are more levels, we'll need another cursor which refers to
@@ -1807,7 +1810,7 @@
 	/*
 	 * If we just went off the left edge of the tree, return failure.
 	 */
-	if (INT_GET(block->bb_leftsib, ARCH_CONVERT) == NULLAGBLOCK) {
+	if (be32_to_cpu(block->bb_leftsib) == NULLAGBLOCK) {
 		*stat = 0;
 		return 0;
 	}
@@ -1836,7 +1839,7 @@
 		xfs_agblock_t	agbno;	/* block number of btree block */
 		xfs_buf_t	*bp;	/* buffer pointer for block */
 
-		agbno = INT_GET(*XFS_ALLOC_PTR_ADDR(block, cur->bc_ptrs[lev], cur), ARCH_CONVERT);
+		agbno = be32_to_cpu(*XFS_ALLOC_PTR_ADDR(block, cur->bc_ptrs[lev], cur));
 		if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
 				cur->bc_private.a.agno, agbno, 0, &bp,
 				XFS_ALLOC_BTREE_REF)))
@@ -1846,7 +1849,7 @@
 		block = XFS_BUF_TO_ALLOC_BLOCK(bp);
 		if ((error = xfs_btree_check_sblock(cur, block, lev, bp)))
 			return error;
-		cur->bc_ptrs[lev] = INT_GET(block->bb_numrecs, ARCH_CONVERT);
+		cur->bc_ptrs[lev] = be16_to_cpu(block->bb_numrecs);
 	}
 	*stat = 1;
 	return 0;
@@ -1913,7 +1916,7 @@
 	/*
 	 * Off the right end or left end, return failure.
 	 */
-	if (ptr > INT_GET(block->bb_numrecs, ARCH_CONVERT) || ptr <= 0) {
+	if (ptr > be16_to_cpu(block->bb_numrecs) || ptr <= 0) {
 		*stat = 0;
 		return 0;
 	}
@@ -1924,8 +1927,8 @@
 		xfs_alloc_rec_t		*rec;	/* record data */
 
 		rec = XFS_ALLOC_REC_ADDR(block, ptr, cur);
-		*bno = INT_GET(rec->ar_startblock, ARCH_CONVERT);
-		*len = INT_GET(rec->ar_blockcount, ARCH_CONVERT);
+		*bno = be32_to_cpu(rec->ar_startblock);
+		*len = be32_to_cpu(rec->ar_blockcount);
 	}
 	*stat = 1;
 	return 0;
@@ -1964,14 +1967,14 @@
 	 * Increment the ptr at this level.  If we're still in the block
 	 * then we're done.
 	 */
-	if (++cur->bc_ptrs[level] <= INT_GET(block->bb_numrecs, ARCH_CONVERT)) {
+	if (++cur->bc_ptrs[level] <= be16_to_cpu(block->bb_numrecs)) {
 		*stat = 1;
 		return 0;
 	}
 	/*
 	 * If we just went off the right edge of the tree, return failure.
 	 */
-	if (INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK) {
+	if (be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK) {
 		*stat = 0;
 		return 0;
 	}
@@ -1986,7 +1989,7 @@
 		if ((error = xfs_btree_check_sblock(cur, block, lev, bp)))
 			return error;
 #endif
-		if (++cur->bc_ptrs[lev] <= INT_GET(block->bb_numrecs, ARCH_CONVERT))
+		if (++cur->bc_ptrs[lev] <= be16_to_cpu(block->bb_numrecs))
 			break;
 		/*
 		 * Read-ahead the right block, we're going to read it
@@ -2006,7 +2009,7 @@
 	     lev > level; ) {
 		xfs_agblock_t	agbno;	/* block number of btree block */
 
-		agbno = INT_GET(*XFS_ALLOC_PTR_ADDR(block, cur->bc_ptrs[lev], cur), ARCH_CONVERT);
+		agbno = be32_to_cpu(*XFS_ALLOC_PTR_ADDR(block, cur->bc_ptrs[lev], cur));
 		if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
 				cur->bc_private.a.agno, agbno, 0, &bp,
 				XFS_ALLOC_BTREE_REF)))
@@ -2041,8 +2044,8 @@
 
 	level = 0;
 	nbno = NULLAGBLOCK;
-	INT_SET(nrec.ar_startblock, ARCH_CONVERT, cur->bc_rec.a.ar_startblock);
-	INT_SET(nrec.ar_blockcount, ARCH_CONVERT, cur->bc_rec.a.ar_blockcount);
+	nrec.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock);
+	nrec.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount);
 	ncur = (xfs_btree_cur_t *)0;
 	pcur = cur;
 	/*
@@ -2163,8 +2166,8 @@
 		/*
 		 * Fill in the new contents and log them.
 		 */
-		INT_SET(rp->ar_startblock, ARCH_CONVERT, bno);
-		INT_SET(rp->ar_blockcount, ARCH_CONVERT, len);
+		rp->ar_startblock = cpu_to_be32(bno);
+		rp->ar_blockcount = cpu_to_be32(len);
 		xfs_alloc_log_recs(cur, cur->bc_bufs[0], ptr, ptr);
 	}
 	/*
@@ -2173,15 +2176,15 @@
 	 * extent in the a.g., which we cache in the a.g. freelist header.
 	 */
 	if (cur->bc_btnum == XFS_BTNUM_CNT &&
-	    INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK &&
-	    ptr == INT_GET(block->bb_numrecs, ARCH_CONVERT)) {
+	    be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK &&
+	    ptr == be16_to_cpu(block->bb_numrecs)) {
 		xfs_agf_t	*agf;	/* a.g. freespace header */
 		xfs_agnumber_t	seqno;
 
 		agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
-		seqno = INT_GET(agf->agf_seqno, ARCH_CONVERT);
+		seqno = be32_to_cpu(agf->agf_seqno);
 		cur->bc_mp->m_perag[seqno].pagf_longest = len;
-		INT_SET(agf->agf_longest, ARCH_CONVERT, len);
+		agf->agf_longest = cpu_to_be32(len);
 		xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp,
 			XFS_AGF_LONGEST);
 	}
@@ -2191,8 +2194,8 @@
 	if (ptr == 1) {
 		xfs_alloc_key_t	key;	/* key containing [bno, len] */
 
-		INT_SET(key.ar_startblock, ARCH_CONVERT, bno);
-		INT_SET(key.ar_blockcount, ARCH_CONVERT, len);
+		key.ar_startblock = cpu_to_be32(bno);
+		key.ar_blockcount = cpu_to_be32(len);
 		if ((error = xfs_alloc_updkey(cur, &key, 1)))
 			return error;
 	}
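
The converted allocator code above leans on be16_add()/be32_add() helpers in place of the old INT_MOD() macro. For reference, a minimal sketch of what such helpers look like, assuming they sit alongside the other endian wrappers (the names and exact signatures here are illustrative, not taken from this patch):

	/* Sketch: add a signed delta to a big-endian on-disk field in place. */
	static inline void be16_add(__be16 *a, __s16 b)
	{
		*a = cpu_to_be16(be16_to_cpu(*a) + b);
	}

	static inline void be32_add(__be32 *a, __s32 b)
	{
		*a = cpu_to_be32(be32_to_cpu(*a) + b);
	}

So be16_add(&left->bb_numrecs, -(be16_to_cpu(right->bb_numrecs))) reads the on-disk count, adjusts it in CPU byte order, and writes it back big-endian.
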
diff --git a/fs/xfs/xfs_alloc_btree.h b/fs/xfs/xfs_alloc_btree.h
index 5615ebb..bce81c7 100644
--- a/fs/xfs/xfs_alloc_btree.h
+++ b/fs/xfs/xfs_alloc_btree.h
@@ -38,14 +38,19 @@
 /*
  * Data record/key structure
  */
-typedef struct xfs_alloc_rec
-{
-	xfs_agblock_t	ar_startblock;	/* starting block number */
-	xfs_extlen_t	ar_blockcount;	/* count of free blocks */
+typedef struct xfs_alloc_rec {
+	__be32		ar_startblock;	/* starting block number */
+	__be32		ar_blockcount;	/* count of free blocks */
 } xfs_alloc_rec_t, xfs_alloc_key_t;
 
-typedef xfs_agblock_t xfs_alloc_ptr_t;	/* btree pointer type */
-					/* btree block header type */
+typedef struct xfs_alloc_rec_incore {
+	xfs_agblock_t	ar_startblock;	/* starting block number */
+	xfs_extlen_t	ar_blockcount;	/* count of free blocks */
+} xfs_alloc_rec_incore_t;
+
+/* btree pointer type */
+typedef __be32 xfs_alloc_ptr_t;
+/* btree block header type */
 typedef	struct xfs_btree_sblock xfs_alloc_block_t;
 
 #define	XFS_BUF_TO_ALLOC_BLOCK(bp)	((xfs_alloc_block_t *)XFS_BUF_PTR(bp))
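
This header split matters for the allocator cursor: the on-disk xfs_alloc_rec_t now holds __be32 fields, while xfs_alloc_rec_incore_t keeps native-endian values for in-memory use (see the bc_rec union change in xfs_btree.h further down). A minimal sketch of the conversion this implies, using a hypothetical helper name; the patch itself open-codes the same two assignments where nrec is filled from cur->bc_rec.a above:

	/* Sketch only: translate an incore record to its on-disk form. */
	static inline void
	xfs_alloc_rec_from_incore(
		xfs_alloc_rec_t			*rec,	/* out: big-endian */
		const xfs_alloc_rec_incore_t	*irec)	/* in: CPU-endian */
	{
		rec->ar_startblock = cpu_to_be32(irec->ar_startblock);
		rec->ar_blockcount = cpu_to_be32(irec->ar_blockcount);
	}
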
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 9f635f0..e415a46 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -2763,8 +2763,8 @@
 	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
 	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
 	rblock = ifp->if_broot;
-	ASSERT(INT_GET(rblock->bb_level, ARCH_CONVERT) == 1);
-	ASSERT(INT_GET(rblock->bb_numrecs, ARCH_CONVERT) == 1);
+	ASSERT(be16_to_cpu(rblock->bb_level) == 1);
+	ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
 	ASSERT(XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes) == 1);
 	mp = ip->i_mount;
 	pp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, ifp->if_broot_bytes);
@@ -3207,11 +3207,11 @@
 	 * Fill in the root.
 	 */
 	block = ifp->if_broot;
-	INT_SET(block->bb_magic, ARCH_CONVERT, XFS_BMAP_MAGIC);
-	INT_SET(block->bb_level, ARCH_CONVERT, 1);
-	INT_SET(block->bb_numrecs, ARCH_CONVERT, 1);
-	INT_SET(block->bb_leftsib, ARCH_CONVERT, NULLDFSBNO);
-	INT_SET(block->bb_rightsib, ARCH_CONVERT, NULLDFSBNO);
+	block->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC);
+	block->bb_level = cpu_to_be16(1);
+	block->bb_numrecs = cpu_to_be16(1);
+	block->bb_leftsib = cpu_to_be64(NULLDFSBNO);
+	block->bb_rightsib = cpu_to_be64(NULLDFSBNO);
 	/*
 	 * Need a cursor.  Can't allocate until bb_level is filled in.
 	 */
@@ -3264,10 +3264,10 @@
 	 * Fill in the child block.
 	 */
 	ablock = XFS_BUF_TO_BMBT_BLOCK(abp);
-	INT_SET(ablock->bb_magic, ARCH_CONVERT, XFS_BMAP_MAGIC);
+	ablock->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC);
 	ablock->bb_level = 0;
-	INT_SET(ablock->bb_leftsib, ARCH_CONVERT, NULLDFSBNO);
-	INT_SET(ablock->bb_rightsib, ARCH_CONVERT, NULLDFSBNO);
+	ablock->bb_leftsib = cpu_to_be64(NULLDFSBNO);
+	ablock->bb_rightsib = cpu_to_be64(NULLDFSBNO);
 	arp = XFS_BMAP_REC_IADDR(ablock, 1, cur);
 	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
 	for (ep = ifp->if_u1.if_extents, cnt = i = 0; i < nextents; i++, ep++) {
@@ -3277,8 +3277,8 @@
 			arp++; cnt++;
 		}
 	}
-	INT_SET(ablock->bb_numrecs, ARCH_CONVERT, cnt);
-	ASSERT(INT_GET(ablock->bb_numrecs, ARCH_CONVERT) == XFS_IFORK_NEXTENTS(ip, whichfork));
+	ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork));
+	ablock->bb_numrecs = cpu_to_be16(cnt);
 	/*
 	 * Fill in the root key and pointer.
 	 */
@@ -3292,7 +3292,7 @@
 	 * the root is at the right level.
 	 */
 	xfs_bmbt_log_block(cur, abp, XFS_BB_ALL_BITS);
-	xfs_bmbt_log_recs(cur, abp, 1, INT_GET(ablock->bb_numrecs, ARCH_CONVERT));
+	xfs_bmbt_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
 	ASSERT(*curp == NULL);
 	*curp = cur;
 	*logflagsp = XFS_ILOG_CORE | XFS_ILOG_FBROOT(whichfork);
@@ -4371,8 +4371,8 @@
 	/*
 	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
 	 */
-	ASSERT(INT_GET(block->bb_level, ARCH_CONVERT) > 0);
-	level = INT_GET(block->bb_level, ARCH_CONVERT);
+	level = be16_to_cpu(block->bb_level);
+	ASSERT(level > 0);
 	pp = XFS_BMAP_BROOT_PTR_ADDR(block, 1, ifp->if_broot_bytes);
 	ASSERT(INT_GET(*pp, ARCH_CONVERT) != NULLDFSBNO);
 	ASSERT(XFS_FSB_TO_AGNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agcount);
@@ -4415,7 +4415,7 @@
 		xfs_extnum_t	num_recs;
 
 
-		num_recs = INT_GET(block->bb_numrecs, ARCH_CONVERT);
+		num_recs = be16_to_cpu(block->bb_numrecs);
 		if (unlikely(i + num_recs > room)) {
 			ASSERT(i + num_recs <= room);
 			xfs_fs_cmn_err(CE_WARN, ip->i_mount,
@@ -4432,7 +4432,7 @@
 		/*
 		 * Read-ahead the next leaf block, if any.
 		 */
-		nextbno = INT_GET(block->bb_rightsib, ARCH_CONVERT);
+		nextbno = be64_to_cpu(block->bb_rightsib);
 		if (nextbno != NULLFSBLOCK)
 			xfs_btree_reada_bufl(mp, nextbno, 1);
 		/*
@@ -4689,7 +4689,7 @@
 	}
 	if (wr && *firstblock == NULLFSBLOCK) {
 		if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE)
-			minleft = INT_GET(ifp->if_broot->bb_level, ARCH_CONVERT) + 1;
+			minleft = be16_to_cpu(ifp->if_broot->bb_level) + 1;
 		else
 			minleft = 1;
 	} else
@@ -5967,10 +5967,10 @@
 	xfs_bmbt_ptr_t		*pp, *thispa;	/* pointer to block address */
 	xfs_bmbt_key_t		*prevp, *keyp;
 
-	ASSERT(INT_GET(block->bb_level, ARCH_CONVERT) > 0);
+	ASSERT(be16_to_cpu(block->bb_level) > 0);
 
 	prevp = NULL;
-	for( i = 1; i <= INT_GET(block->bb_numrecs, ARCH_CONVERT);i++) {
+	for( i = 1; i <= be16_to_cpu(block->bb_numrecs); i++) {
 		dmxr = mp->m_bmap_dmxr[0];
 
 		if (root) {
@@ -5995,7 +5995,7 @@
 			pp = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize,
 				xfs_bmbt, block, i, dmxr);
 		}
-		for (j = i+1; j <= INT_GET(block->bb_numrecs, ARCH_CONVERT); j++) {
+		for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
 			if (root) {
 				thispa = XFS_BMAP_BROOT_PTR_ADDR(block, j, sz);
 			} else {
@@ -6048,8 +6048,8 @@
 	/*
 	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
 	 */
-	ASSERT(INT_GET(block->bb_level, ARCH_CONVERT) > 0);
-	level = INT_GET(block->bb_level, ARCH_CONVERT);
+	level = be16_to_cpu(block->bb_level);
+	ASSERT(level > 0);
 	xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
 	pp = XFS_BMAP_BROOT_PTR_ADDR(block, 1, ifp->if_broot_bytes);
 	ASSERT(INT_GET(*pp, ARCH_CONVERT) != NULLDFSBNO);
@@ -6109,13 +6109,13 @@
 		xfs_extnum_t	num_recs;
 
 
-		num_recs = INT_GET(block->bb_numrecs, ARCH_CONVERT);
+		num_recs = be16_to_cpu(block->bb_numrecs);
 
 		/*
 		 * Read-ahead the next leaf block, if any.
 		 */
 
-		nextbno = INT_GET(block->bb_rightsib, ARCH_CONVERT);
+		nextbno = be64_to_cpu(block->bb_rightsib);
 
 		/*
 		 * Check all the extents to make sure they are OK.
@@ -6212,8 +6212,8 @@
 	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
 	 */
 	block = ifp->if_broot;
-	ASSERT(INT_GET(block->bb_level, ARCH_CONVERT) > 0);
-	level = INT_GET(block->bb_level, ARCH_CONVERT);
+	level = be16_to_cpu(block->bb_level);
+	ASSERT(level > 0);
 	pp = XFS_BMAP_BROOT_PTR_ADDR(block, 1, ifp->if_broot_bytes);
 	ASSERT(INT_GET(*pp, ARCH_CONVERT) != NULLDFSBNO);
 	ASSERT(XFS_FSB_TO_AGNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agcount);
@@ -6258,14 +6258,14 @@
 
 	if (--level) {
 		/* Not at node above leafs, count this level of nodes */
-		nextbno = INT_GET(block->bb_rightsib, ARCH_CONVERT);
+		nextbno = be64_to_cpu(block->bb_rightsib);
 		while (nextbno != NULLFSBLOCK) {
 			if ((error = xfs_btree_read_bufl(mp, tp, nextbno,
 				0, &nbp, XFS_BMAP_BTREE_REF)))
 				return error;
 			*count += 1;
 			nextblock = XFS_BUF_TO_BMBT_BLOCK(nbp);
-			nextbno = INT_GET(nextblock->bb_rightsib, ARCH_CONVERT);
+			nextbno = be64_to_cpu(nextblock->bb_rightsib);
 			xfs_trans_brelse(tp, nbp);
 		}
 
@@ -6284,8 +6284,8 @@
 	} else {
 		/* count all level 1 nodes and their leaves */
 		for (;;) {
-			nextbno = INT_GET(block->bb_rightsib, ARCH_CONVERT);
-			numrecs = INT_GET(block->bb_numrecs, ARCH_CONVERT);
+			nextbno = be64_to_cpu(block->bb_rightsib);
+			numrecs = be16_to_cpu(block->bb_numrecs);
 			frp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize,
 				xfs_bmbt, block, 1, mp->m_bmap_dmxr[0]);
 			if (unlikely(xfs_bmap_disk_count_leaves(frp, numrecs, count) < 0)) {
diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c
index 3bf70ec..3f1383d 100644
--- a/fs/xfs/xfs_bmap_btree.c
+++ b/fs/xfs/xfs_bmap_btree.c
@@ -366,7 +366,7 @@
 		return 0;
 	}
 	block = xfs_bmbt_get_block(cur, level, &bp);
-	numrecs = INT_GET(block->bb_numrecs, ARCH_CONVERT);
+	numrecs = be16_to_cpu(block->bb_numrecs);
 #ifdef DEBUG
 	if ((error = xfs_btree_check_lblock(cur, block, level, bp))) {
 		XFS_BMBT_TRACE_CURSOR(cur, ERROR);
@@ -411,7 +411,7 @@
 		}
 	}
 	numrecs--;
-	INT_SET(block->bb_numrecs, ARCH_CONVERT, numrecs);
+	block->bb_numrecs = cpu_to_be16(numrecs);
 	xfs_bmbt_log_block(cur, bp, XFS_BB_NUMRECS);
 	/*
 	 * We're at the root level.
@@ -447,8 +447,8 @@
 		*stat = 1;
 		return 0;
 	}
-	rbno = INT_GET(block->bb_rightsib, ARCH_CONVERT);
-	lbno = INT_GET(block->bb_leftsib, ARCH_CONVERT);
+	rbno = be64_to_cpu(block->bb_rightsib);
+	lbno = be64_to_cpu(block->bb_leftsib);
 	/*
 	 * One child of root, need to get a chance to copy its contents
 	 * into the root and delete it. Can't go up to next level,
@@ -492,15 +492,15 @@
 			goto error0;
 		}
 #endif
-		bno = INT_GET(right->bb_leftsib, ARCH_CONVERT);
-		if (INT_GET(right->bb_numrecs, ARCH_CONVERT) - 1 >=
+		bno = be64_to_cpu(right->bb_leftsib);
+		if (be16_to_cpu(right->bb_numrecs) - 1 >=
 		    XFS_BMAP_BLOCK_IMINRECS(level, cur)) {
 			if ((error = xfs_bmbt_lshift(tcur, level, &i))) {
 				XFS_BMBT_TRACE_CURSOR(cur, ERROR);
 				goto error0;
 			}
 			if (i) {
-				ASSERT(INT_GET(block->bb_numrecs, ARCH_CONVERT) >=
+				ASSERT(be16_to_cpu(block->bb_numrecs) >=
 				       XFS_BMAP_BLOCK_IMINRECS(level, tcur));
 				xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
 				tcur = NULL;
@@ -517,7 +517,7 @@
 				return 0;
 			}
 		}
-		rrecs = INT_GET(right->bb_numrecs, ARCH_CONVERT);
+		rrecs = be16_to_cpu(right->bb_numrecs);
 		if (lbno != NULLFSBLOCK) {
 			i = xfs_btree_firstrec(tcur, level);
 			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
@@ -548,15 +548,15 @@
 			goto error0;
 		}
 #endif
-		bno = INT_GET(left->bb_rightsib, ARCH_CONVERT);
-		if (INT_GET(left->bb_numrecs, ARCH_CONVERT) - 1 >=
+		bno = be64_to_cpu(left->bb_rightsib);
+		if (be16_to_cpu(left->bb_numrecs) - 1 >=
 		    XFS_BMAP_BLOCK_IMINRECS(level, cur)) {
 			if ((error = xfs_bmbt_rshift(tcur, level, &i))) {
 				XFS_BMBT_TRACE_CURSOR(cur, ERROR);
 				goto error0;
 			}
 			if (i) {
-				ASSERT(INT_GET(block->bb_numrecs, ARCH_CONVERT) >=
+				ASSERT(be16_to_cpu(block->bb_numrecs) >=
 				       XFS_BMAP_BLOCK_IMINRECS(level, tcur));
 				xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
 				tcur = NULL;
@@ -567,14 +567,14 @@
 				return 0;
 			}
 		}
-		lrecs = INT_GET(left->bb_numrecs, ARCH_CONVERT);
+		lrecs = be16_to_cpu(left->bb_numrecs);
 	}
 	xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
 	tcur = NULL;
 	mp = cur->bc_mp;
 	ASSERT(bno != NULLFSBLOCK);
 	if (lbno != NULLFSBLOCK &&
-	    lrecs + INT_GET(block->bb_numrecs, ARCH_CONVERT) <= XFS_BMAP_BLOCK_IMAXRECS(level, cur)) {
+	    lrecs + be16_to_cpu(block->bb_numrecs) <= XFS_BMAP_BLOCK_IMAXRECS(level, cur)) {
 		rbno = bno;
 		right = block;
 		rbp = bp;
@@ -589,7 +589,7 @@
 			goto error0;
 		}
 	} else if (rbno != NULLFSBLOCK &&
-		   rrecs + INT_GET(block->bb_numrecs, ARCH_CONVERT) <=
+		   rrecs + be16_to_cpu(block->bb_numrecs) <=
 		   XFS_BMAP_BLOCK_IMAXRECS(level, cur)) {
 		lbno = bno;
 		left = block;
@@ -604,7 +604,7 @@
 			XFS_BMBT_TRACE_CURSOR(cur, ERROR);
 			goto error0;
 		}
-		lrecs = INT_GET(left->bb_numrecs, ARCH_CONVERT);
+		lrecs = be16_to_cpu(left->bb_numrecs);
 	} else {
 		if (level > 0 && (error = xfs_bmbt_decrement(cur, level, &i))) {
 			XFS_BMBT_TRACE_CURSOR(cur, ERROR);
@@ -614,8 +614,8 @@
 		*stat = 1;
 		return 0;
 	}
-	numlrecs = INT_GET(left->bb_numrecs, ARCH_CONVERT);
-	numrrecs = INT_GET(right->bb_numrecs, ARCH_CONVERT);
+	numlrecs = be16_to_cpu(left->bb_numrecs);
+	numrrecs = be16_to_cpu(right->bb_numrecs);
 	if (level > 0) {
 		lkp = XFS_BMAP_KEY_IADDR(left, numlrecs + 1, cur);
 		lpp = XFS_BMAP_PTR_IADDR(left, numlrecs + 1, cur);
@@ -639,12 +639,12 @@
 		memcpy(lrp, rrp, numrrecs * sizeof(*lrp));
 		xfs_bmbt_log_recs(cur, lbp, numlrecs + 1, numlrecs + numrrecs);
 	}
-	INT_MOD(left->bb_numrecs, ARCH_CONVERT, numrrecs);
-	left->bb_rightsib = right->bb_rightsib; /* INT_: direct copy */
+	be16_add(&left->bb_numrecs, numrrecs);
+	left->bb_rightsib = right->bb_rightsib;
 	xfs_bmbt_log_block(cur, lbp, XFS_BB_RIGHTSIB | XFS_BB_NUMRECS);
-	if (INT_GET(left->bb_rightsib, ARCH_CONVERT) != NULLDFSBNO) {
+	if (be64_to_cpu(left->bb_rightsib) != NULLDFSBNO) {
 		if ((error = xfs_btree_read_bufl(mp, cur->bc_tp,
-				INT_GET(left->bb_rightsib, ARCH_CONVERT),
+				be64_to_cpu(left->bb_rightsib),
 				0, &rrbp, XFS_BMAP_BTREE_REF))) {
 			XFS_BMBT_TRACE_CURSOR(cur, ERROR);
 			goto error0;
@@ -654,7 +654,7 @@
 			XFS_BMBT_TRACE_CURSOR(cur, ERROR);
 			goto error0;
 		}
-		INT_SET(rrblock->bb_leftsib, ARCH_CONVERT, lbno);
+		rrblock->bb_leftsib = cpu_to_be64(lbno);
 		xfs_bmbt_log_block(cur, rrbp, XFS_BB_LEFTSIB);
 	}
 	xfs_bmap_add_free(XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(rbp)), 1,
@@ -711,7 +711,7 @@
 	if ((error = xfs_btree_check_lblock(cur, block, 0, bp)))
 		return error;
 #endif
-	if (ptr > INT_GET(block->bb_numrecs, ARCH_CONVERT) || ptr <= 0) {
+	if (ptr > be16_to_cpu(block->bb_numrecs) || ptr <= 0) {
 		*stat = 0;
 		return 0;
 	}
@@ -772,7 +772,7 @@
 	}
 	XFS_STATS_INC(xs_bmbt_insrec);
 	block = xfs_bmbt_get_block(cur, level, &bp);
-	numrecs = INT_GET(block->bb_numrecs, ARCH_CONVERT);
+	numrecs = be16_to_cpu(block->bb_numrecs);
 #ifdef DEBUG
 	if ((error = xfs_btree_check_lblock(cur, block, level, bp))) {
 		XFS_BMBT_TRACE_CURSOR(cur, ERROR);
@@ -854,7 +854,7 @@
 			}
 		}
 	}
-	numrecs = INT_GET(block->bb_numrecs, ARCH_CONVERT);
+	numrecs = be16_to_cpu(block->bb_numrecs);
 	if (level > 0) {
 		kp = XFS_BMAP_KEY_IADDR(block, 1, cur);
 		pp = XFS_BMAP_PTR_IADDR(block, 1, cur);
@@ -881,7 +881,7 @@
 		kp[ptr - 1] = key;
 		INT_SET(pp[ptr - 1], ARCH_CONVERT, *bnop);
 		numrecs++;
-		INT_SET(block->bb_numrecs, ARCH_CONVERT, numrecs);
+		block->bb_numrecs = cpu_to_be16(numrecs);
 		xfs_bmbt_log_keys(cur, bp, ptr, numrecs);
 		xfs_bmbt_log_ptrs(cur, bp, ptr, numrecs);
 	} else {
@@ -890,7 +890,7 @@
 			(numrecs - ptr + 1) * sizeof(*rp));
 		rp[ptr - 1] = *recp;
 		numrecs++;
-		INT_SET(block->bb_numrecs, ARCH_CONVERT, numrecs);
+		block->bb_numrecs = cpu_to_be16(numrecs);
 		xfs_bmbt_log_recs(cur, bp, ptr, numrecs);
 	}
 	xfs_bmbt_log_block(cur, bp, XFS_BB_NUMRECS);
@@ -955,7 +955,7 @@
 	/*
 	 * Give up if the root has multiple children.
 	 */
-	if (INT_GET(block->bb_numrecs, ARCH_CONVERT) != 1) {
+	if (be16_to_cpu(block->bb_numrecs) != 1) {
 		XFS_BMBT_TRACE_CURSOR(cur, EXIT);
 		return 0;
 	}
@@ -966,37 +966,37 @@
 	 */
 	cbp = cur->bc_bufs[level - 1];
 	cblock = XFS_BUF_TO_BMBT_BLOCK(cbp);
-	if (INT_GET(cblock->bb_numrecs, ARCH_CONVERT) > XFS_BMAP_BLOCK_DMAXRECS(level, cur)) {
+	if (be16_to_cpu(cblock->bb_numrecs) > XFS_BMAP_BLOCK_DMAXRECS(level, cur)) {
 		XFS_BMBT_TRACE_CURSOR(cur, EXIT);
 		return 0;
 	}
-	ASSERT(INT_GET(cblock->bb_leftsib, ARCH_CONVERT) == NULLDFSBNO);
-	ASSERT(INT_GET(cblock->bb_rightsib, ARCH_CONVERT) == NULLDFSBNO);
+	ASSERT(be64_to_cpu(cblock->bb_leftsib) == NULLDFSBNO);
+	ASSERT(be64_to_cpu(cblock->bb_rightsib) == NULLDFSBNO);
 	ip = cur->bc_private.b.ip;
 	ifp = XFS_IFORK_PTR(ip, cur->bc_private.b.whichfork);
 	ASSERT(XFS_BMAP_BLOCK_IMAXRECS(level, cur) ==
 	       XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes));
-	i = (int)(INT_GET(cblock->bb_numrecs, ARCH_CONVERT) - XFS_BMAP_BLOCK_IMAXRECS(level, cur));
+	i = (int)(be16_to_cpu(cblock->bb_numrecs) - XFS_BMAP_BLOCK_IMAXRECS(level, cur));
 	if (i) {
 		xfs_iroot_realloc(ip, i, cur->bc_private.b.whichfork);
 		block = ifp->if_broot;
 	}
-	INT_MOD(block->bb_numrecs, ARCH_CONVERT, i);
-	ASSERT(INT_GET(block->bb_numrecs, ARCH_CONVERT) == INT_GET(cblock->bb_numrecs, ARCH_CONVERT));
+	be16_add(&block->bb_numrecs, i);
+	ASSERT(block->bb_numrecs == cblock->bb_numrecs);
 	kp = XFS_BMAP_KEY_IADDR(block, 1, cur);
 	ckp = XFS_BMAP_KEY_IADDR(cblock, 1, cur);
-	memcpy(kp, ckp, INT_GET(block->bb_numrecs, ARCH_CONVERT) * sizeof(*kp));
+	memcpy(kp, ckp, be16_to_cpu(block->bb_numrecs) * sizeof(*kp));
 	pp = XFS_BMAP_PTR_IADDR(block, 1, cur);
 	cpp = XFS_BMAP_PTR_IADDR(cblock, 1, cur);
 #ifdef DEBUG
-	for (i = 0; i < INT_GET(cblock->bb_numrecs, ARCH_CONVERT); i++) {
+	for (i = 0; i < be16_to_cpu(cblock->bb_numrecs); i++) {
 		if ((error = xfs_btree_check_lptr(cur, INT_GET(cpp[i], ARCH_CONVERT), level - 1))) {
 			XFS_BMBT_TRACE_CURSOR(cur, ERROR);
 			return error;
 		}
 	}
 #endif
-	memcpy(pp, cpp, INT_GET(block->bb_numrecs, ARCH_CONVERT) * sizeof(*pp));
+	memcpy(pp, cpp, be16_to_cpu(block->bb_numrecs) * sizeof(*pp));
 	xfs_bmap_add_free(XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(cbp)), 1,
 			cur->bc_private.b.flist, cur->bc_mp);
 	ip->i_d.di_nblocks--;
@@ -1004,7 +1004,7 @@
 			XFS_TRANS_DQ_BCOUNT, -1L);
 	xfs_trans_binval(cur->bc_tp, cbp);
 	cur->bc_bufs[level - 1] = NULL;
-	INT_MOD(block->bb_level, ARCH_CONVERT, -1);
+	be16_add(&block->bb_level, -1);
 	xfs_trans_log_inode(cur->bc_tp, ip,
 		XFS_ILOG_CORE | XFS_ILOG_FBROOT(cur->bc_private.b.whichfork));
 	cur->bc_nlevels--;
@@ -1160,7 +1160,7 @@
 			else
 				krbase = XFS_BMAP_REC_IADDR(block, 1, cur);
 			low = 1;
-			if (!(high = INT_GET(block->bb_numrecs, ARCH_CONVERT))) {
+			if (!(high = be16_to_cpu(block->bb_numrecs))) {
 				ASSERT(level == 0);
 				cur->bc_ptrs[0] = dir != XFS_LOOKUP_LE;
 				XFS_BMBT_TRACE_CURSOR(cur, EXIT);
@@ -1207,8 +1207,8 @@
 		 * If ge search and we went off the end of the block, but it's
 		 * not the last block, we're in the wrong block.
 		 */
-		if (dir == XFS_LOOKUP_GE && keyno > INT_GET(block->bb_numrecs, ARCH_CONVERT) &&
-		    INT_GET(block->bb_rightsib, ARCH_CONVERT) != NULLDFSBNO) {
+		if (dir == XFS_LOOKUP_GE && keyno > be16_to_cpu(block->bb_numrecs) &&
+		    be64_to_cpu(block->bb_rightsib) != NULLDFSBNO) {
 			cur->bc_ptrs[0] = keyno;
 			if ((error = xfs_bmbt_increment(cur, 0, &i))) {
 				XFS_BMBT_TRACE_CURSOR(cur, ERROR);
@@ -1223,7 +1223,7 @@
 	else if (dir == XFS_LOOKUP_LE && diff > 0)
 		keyno--;
 	cur->bc_ptrs[0] = keyno;
-	if (keyno == 0 || keyno > INT_GET(block->bb_numrecs, ARCH_CONVERT)) {
+	if (keyno == 0 || keyno > be16_to_cpu(block->bb_numrecs)) {
 		XFS_BMBT_TRACE_CURSOR(cur, EXIT);
 		*stat = 0;
 	} else {
@@ -1280,7 +1280,7 @@
 		return error;
 	}
 #endif
-	if (INT_GET(right->bb_leftsib, ARCH_CONVERT) == NULLDFSBNO) {
+	if (be64_to_cpu(right->bb_leftsib) == NULLDFSBNO) {
 		XFS_BMBT_TRACE_CURSOR(cur, EXIT);
 		*stat = 0;
 		return 0;
@@ -1291,7 +1291,7 @@
 		return 0;
 	}
 	mp = cur->bc_mp;
-	if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, INT_GET(right->bb_leftsib, ARCH_CONVERT), 0,
+	if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, be64_to_cpu(right->bb_leftsib), 0,
 			&lbp, XFS_BMAP_BTREE_REF))) {
 		XFS_BMBT_TRACE_CURSOR(cur, ERROR);
 		return error;
@@ -1301,12 +1301,12 @@
 		XFS_BMBT_TRACE_CURSOR(cur, ERROR);
 		return error;
 	}
-	if (INT_GET(left->bb_numrecs, ARCH_CONVERT) == XFS_BMAP_BLOCK_IMAXRECS(level, cur)) {
+	if (be16_to_cpu(left->bb_numrecs) == XFS_BMAP_BLOCK_IMAXRECS(level, cur)) {
 		XFS_BMBT_TRACE_CURSOR(cur, EXIT);
 		*stat = 0;
 		return 0;
 	}
-	lrecs = INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1;
+	lrecs = be16_to_cpu(left->bb_numrecs) + 1;
 	if (level > 0) {
 		lkp = XFS_BMAP_KEY_IADDR(left, lrecs, cur);
 		rkp = XFS_BMAP_KEY_IADDR(right, 1, cur);
@@ -1328,7 +1328,7 @@
 		*lrp = *rrp;
 		xfs_bmbt_log_recs(cur, lbp, lrecs, lrecs);
 	}
-	INT_SET(left->bb_numrecs, ARCH_CONVERT, lrecs);
+	left->bb_numrecs = cpu_to_be16(lrecs);
 	xfs_bmbt_log_block(cur, lbp, XFS_BB_NUMRECS);
 #ifdef DEBUG
 	if (level > 0)
@@ -1336,8 +1336,8 @@
 	else
 		xfs_btree_check_rec(XFS_BTNUM_BMAP, lrp - 1, lrp);
 #endif
-	rrecs = INT_GET(right->bb_numrecs, ARCH_CONVERT) - 1;
-	INT_SET(right->bb_numrecs, ARCH_CONVERT, rrecs);
+	rrecs = be16_to_cpu(right->bb_numrecs) - 1;
+	right->bb_numrecs = cpu_to_be16(rrecs);
 	xfs_bmbt_log_block(cur, rbp, XFS_BB_NUMRECS);
 	if (level > 0) {
 #ifdef DEBUG
@@ -1414,18 +1414,18 @@
 		return error;
 	}
 #endif
-	if (INT_GET(left->bb_rightsib, ARCH_CONVERT) == NULLDFSBNO) {
+	if (be64_to_cpu(left->bb_rightsib) == NULLDFSBNO) {
 		XFS_BMBT_TRACE_CURSOR(cur, EXIT);
 		*stat = 0;
 		return 0;
 	}
-	if (cur->bc_ptrs[level] >= INT_GET(left->bb_numrecs, ARCH_CONVERT)) {
+	if (cur->bc_ptrs[level] >= be16_to_cpu(left->bb_numrecs)) {
 		XFS_BMBT_TRACE_CURSOR(cur, EXIT);
 		*stat = 0;
 		return 0;
 	}
 	mp = cur->bc_mp;
-	if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, INT_GET(left->bb_rightsib, ARCH_CONVERT), 0,
+	if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, be64_to_cpu(left->bb_rightsib), 0,
 			&rbp, XFS_BMAP_BTREE_REF))) {
 		XFS_BMBT_TRACE_CURSOR(cur, ERROR);
 		return error;
@@ -1435,26 +1435,26 @@
 		XFS_BMBT_TRACE_CURSOR(cur, ERROR);
 		return error;
 	}
-	if (INT_GET(right->bb_numrecs, ARCH_CONVERT) == XFS_BMAP_BLOCK_IMAXRECS(level, cur)) {
+	if (be16_to_cpu(right->bb_numrecs) == XFS_BMAP_BLOCK_IMAXRECS(level, cur)) {
 		XFS_BMBT_TRACE_CURSOR(cur, EXIT);
 		*stat = 0;
 		return 0;
 	}
 	if (level > 0) {
-		lkp = XFS_BMAP_KEY_IADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur);
-		lpp = XFS_BMAP_PTR_IADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur);
+		lkp = XFS_BMAP_KEY_IADDR(left, be16_to_cpu(left->bb_numrecs), cur);
+		lpp = XFS_BMAP_PTR_IADDR(left, be16_to_cpu(left->bb_numrecs), cur);
 		rkp = XFS_BMAP_KEY_IADDR(right, 1, cur);
 		rpp = XFS_BMAP_PTR_IADDR(right, 1, cur);
 #ifdef DEBUG
-		for (i = INT_GET(right->bb_numrecs, ARCH_CONVERT) - 1; i >= 0; i--) {
+		for (i = be16_to_cpu(right->bb_numrecs) - 1; i >= 0; i--) {
 			if ((error = xfs_btree_check_lptr(cur, INT_GET(rpp[i], ARCH_CONVERT), level))) {
 				XFS_BMBT_TRACE_CURSOR(cur, ERROR);
 				return error;
 			}
 		}
 #endif
-		memmove(rkp + 1, rkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp));
-		memmove(rpp + 1, rpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp));
+		memmove(rkp + 1, rkp, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp));
+		memmove(rpp + 1, rpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp));
 #ifdef DEBUG
 		if ((error = xfs_btree_check_lptr(cur, INT_GET(*lpp, ARCH_CONVERT), level))) {
 			XFS_BMBT_TRACE_CURSOR(cur, ERROR);
@@ -1463,21 +1463,21 @@
 #endif
 		*rkp = *lkp;
 		*rpp = *lpp; /* INT_: direct copy */
-		xfs_bmbt_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1);
-		xfs_bmbt_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1);
+		xfs_bmbt_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1);
+		xfs_bmbt_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1);
 	} else {
-		lrp = XFS_BMAP_REC_IADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur);
+		lrp = XFS_BMAP_REC_IADDR(left, be16_to_cpu(left->bb_numrecs), cur);
 		rrp = XFS_BMAP_REC_IADDR(right, 1, cur);
-		memmove(rrp + 1, rrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp));
+		memmove(rrp + 1, rrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp));
 		*rrp = *lrp;
-		xfs_bmbt_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1);
+		xfs_bmbt_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1);
 		INT_SET(key.br_startoff, ARCH_CONVERT,
 			xfs_bmbt_disk_get_startoff(rrp));
 		rkp = &key;
 	}
-	INT_MOD(left->bb_numrecs, ARCH_CONVERT, -1);
+	be16_add(&left->bb_numrecs, -1);
 	xfs_bmbt_log_block(cur, lbp, XFS_BB_NUMRECS);
-	INT_MOD(right->bb_numrecs, ARCH_CONVERT, +1);
+	be16_add(&right->bb_numrecs, 1);
 #ifdef DEBUG
 	if (level > 0)
 		xfs_btree_check_key(XFS_BTNUM_BMAP, rkp, rkp + 1);
@@ -1608,47 +1608,47 @@
 		return error;
 	}
 #endif
-	INT_SET(right->bb_magic, ARCH_CONVERT, XFS_BMAP_MAGIC);
-	right->bb_level = left->bb_level; /* INT_: direct copy */
-	INT_SET(right->bb_numrecs, ARCH_CONVERT, (__uint16_t)(INT_GET(left->bb_numrecs, ARCH_CONVERT) / 2));
-	if ((INT_GET(left->bb_numrecs, ARCH_CONVERT) & 1) &&
-	    cur->bc_ptrs[level] <= INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1)
-		INT_MOD(right->bb_numrecs, ARCH_CONVERT, +1);
-	i = INT_GET(left->bb_numrecs, ARCH_CONVERT) - INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1;
+	right->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC);
+	right->bb_level = left->bb_level;
+	right->bb_numrecs = cpu_to_be16(be16_to_cpu(left->bb_numrecs) / 2);
+	if ((be16_to_cpu(left->bb_numrecs) & 1) &&
+	    cur->bc_ptrs[level] <= be16_to_cpu(right->bb_numrecs) + 1)
+		be16_add(&right->bb_numrecs, 1);
+	i = be16_to_cpu(left->bb_numrecs) - be16_to_cpu(right->bb_numrecs) + 1;
 	if (level > 0) {
 		lkp = XFS_BMAP_KEY_IADDR(left, i, cur);
 		lpp = XFS_BMAP_PTR_IADDR(left, i, cur);
 		rkp = XFS_BMAP_KEY_IADDR(right, 1, cur);
 		rpp = XFS_BMAP_PTR_IADDR(right, 1, cur);
 #ifdef DEBUG
-		for (i = 0; i < INT_GET(right->bb_numrecs, ARCH_CONVERT); i++) {
+		for (i = 0; i < be16_to_cpu(right->bb_numrecs); i++) {
 			if ((error = xfs_btree_check_lptr(cur, INT_GET(lpp[i], ARCH_CONVERT), level))) {
 				XFS_BMBT_TRACE_CURSOR(cur, ERROR);
 				return error;
 			}
 		}
 #endif
-		memcpy(rkp, lkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp));
-		memcpy(rpp, lpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp));
-		xfs_bmbt_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
-		xfs_bmbt_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
+		memcpy(rkp, lkp, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp));
+		memcpy(rpp, lpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp));
+		xfs_bmbt_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
+		xfs_bmbt_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
 		keyp->br_startoff = INT_GET(rkp->br_startoff, ARCH_CONVERT);
 	} else {
 		lrp = XFS_BMAP_REC_IADDR(left, i, cur);
 		rrp = XFS_BMAP_REC_IADDR(right, 1, cur);
-		memcpy(rrp, lrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp));
-		xfs_bmbt_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
+		memcpy(rrp, lrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp));
+		xfs_bmbt_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
 		keyp->br_startoff = xfs_bmbt_disk_get_startoff(rrp);
 	}
-	INT_MOD(left->bb_numrecs, ARCH_CONVERT, -(INT_GET(right->bb_numrecs, ARCH_CONVERT)));
-	right->bb_rightsib = left->bb_rightsib; /* INT_: direct copy */
-	INT_SET(left->bb_rightsib, ARCH_CONVERT, args.fsbno);
-	INT_SET(right->bb_leftsib, ARCH_CONVERT, lbno);
+	be16_add(&left->bb_numrecs, -(be16_to_cpu(right->bb_numrecs)));
+	right->bb_rightsib = left->bb_rightsib;
+	left->bb_rightsib = cpu_to_be64(args.fsbno);
+	right->bb_leftsib = cpu_to_be64(lbno);
 	xfs_bmbt_log_block(cur, rbp, XFS_BB_ALL_BITS);
 	xfs_bmbt_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
-	if (INT_GET(right->bb_rightsib, ARCH_CONVERT) != NULLDFSBNO) {
+	if (be64_to_cpu(right->bb_rightsib) != NULLDFSBNO) {
 		if ((error = xfs_btree_read_bufl(args.mp, args.tp,
-				INT_GET(right->bb_rightsib, ARCH_CONVERT), 0, &rrbp,
+				be64_to_cpu(right->bb_rightsib), 0, &rrbp,
 				XFS_BMAP_BTREE_REF))) {
 			XFS_BMBT_TRACE_CURSOR(cur, ERROR);
 			return error;
@@ -1658,12 +1658,12 @@
 			XFS_BMBT_TRACE_CURSOR(cur, ERROR);
 			return error;
 		}
-		INT_SET(rrblock->bb_leftsib, ARCH_CONVERT, args.fsbno);
+		rrblock->bb_leftsib = cpu_to_be64(args.fsbno);
 		xfs_bmbt_log_block(cur, rrbp, XFS_BB_LEFTSIB);
 	}
-	if (cur->bc_ptrs[level] > INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1) {
+	if (cur->bc_ptrs[level] > be16_to_cpu(left->bb_numrecs) + 1) {
 		xfs_btree_setbuf(cur, level, rbp);
-		cur->bc_ptrs[level] -= INT_GET(left->bb_numrecs, ARCH_CONVERT);
+		cur->bc_ptrs[level] -= be16_to_cpu(left->bb_numrecs);
 	}
 	if (level + 1 < cur->bc_nlevels) {
 		if ((error = xfs_btree_dup_cursor(cur, curp))) {
@@ -1735,18 +1735,18 @@
 	xfs_bmbt_key_t		*tkp;
 	xfs_bmbt_ptr_t		*tpp;
 
-	INT_SET(rblock->bb_magic, ARCH_CONVERT, XFS_BMAP_MAGIC);
-	rblock->bb_level = dblock->bb_level;	/* both in on-disk format */
-	ASSERT(INT_GET(rblock->bb_level, ARCH_CONVERT) > 0);
-	rblock->bb_numrecs = dblock->bb_numrecs;/* both in on-disk format */
-	INT_SET(rblock->bb_leftsib, ARCH_CONVERT, NULLDFSBNO);
-	INT_SET(rblock->bb_rightsib, ARCH_CONVERT, NULLDFSBNO);
+	rblock->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC);
+	rblock->bb_level = dblock->bb_level;
+	ASSERT(be16_to_cpu(rblock->bb_level) > 0);
+	rblock->bb_numrecs = dblock->bb_numrecs;
+	rblock->bb_leftsib = cpu_to_be64(NULLDFSBNO);
+	rblock->bb_rightsib = cpu_to_be64(NULLDFSBNO);
 	dmxr = (int)XFS_BTREE_BLOCK_MAXRECS(dblocklen, xfs_bmdr, 0);
 	fkp = XFS_BTREE_KEY_ADDR(dblocklen, xfs_bmdr, dblock, 1, dmxr);
 	tkp = XFS_BMAP_BROOT_KEY_ADDR(rblock, 1, rblocklen);
 	fpp = XFS_BTREE_PTR_ADDR(dblocklen, xfs_bmdr, dblock, 1, dmxr);
 	tpp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, rblocklen);
-	dmxr = INT_GET(dblock->bb_numrecs, ARCH_CONVERT);
+	dmxr = be16_to_cpu(dblock->bb_numrecs);
 	memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
 	memcpy(tpp, fpp, sizeof(*fpp) * dmxr); /* INT_: direct copy */
 }
@@ -1789,7 +1789,7 @@
 		return error;
 	}
 #endif
-	if (INT_GET(block->bb_leftsib, ARCH_CONVERT) == NULLDFSBNO) {
+	if (be64_to_cpu(block->bb_leftsib) == NULLDFSBNO) {
 		XFS_BMBT_TRACE_CURSOR(cur, EXIT);
 		*stat = 0;
 		return 0;
@@ -1821,7 +1821,7 @@
 			XFS_BMBT_TRACE_CURSOR(cur, ERROR);
 			return error;
 		}
-		cur->bc_ptrs[lev] = INT_GET(block->bb_numrecs, ARCH_CONVERT);
+		cur->bc_ptrs[lev] = be16_to_cpu(block->bb_numrecs);
 	}
 	XFS_BMBT_TRACE_CURSOR(cur, EXIT);
 	*stat = 1;
@@ -2107,12 +2107,12 @@
 		return error;
 	}
 #endif
-	if (++cur->bc_ptrs[level] <= INT_GET(block->bb_numrecs, ARCH_CONVERT)) {
+	if (++cur->bc_ptrs[level] <= be16_to_cpu(block->bb_numrecs)) {
 		XFS_BMBT_TRACE_CURSOR(cur, EXIT);
 		*stat = 1;
 		return 0;
 	}
-	if (INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLDFSBNO) {
+	if (be64_to_cpu(block->bb_rightsib) == NULLDFSBNO) {
 		XFS_BMBT_TRACE_CURSOR(cur, EXIT);
 		*stat = 0;
 		return 0;
@@ -2125,7 +2125,7 @@
 			return error;
 		}
 #endif
-		if (++cur->bc_ptrs[lev] <= INT_GET(block->bb_numrecs, ARCH_CONVERT))
+		if (++cur->bc_ptrs[lev] <= be16_to_cpu(block->bb_numrecs))
 			break;
 		if (lev < cur->bc_nlevels - 1)
 			xfs_btree_readahead(cur, lev, XFS_BTCUR_RIGHTRA);
@@ -2387,23 +2387,23 @@
 	bp = xfs_btree_get_bufl(args.mp, cur->bc_tp, args.fsbno, 0);
 	cblock = XFS_BUF_TO_BMBT_BLOCK(bp);
 	*cblock = *block;
-	INT_MOD(block->bb_level, ARCH_CONVERT, +1);
-	INT_SET(block->bb_numrecs, ARCH_CONVERT, 1);
+	be16_add(&block->bb_level, 1);
+	block->bb_numrecs = cpu_to_be16(1);
 	cur->bc_nlevels++;
 	cur->bc_ptrs[level + 1] = 1;
 	kp = XFS_BMAP_KEY_IADDR(block, 1, cur);
 	ckp = XFS_BMAP_KEY_IADDR(cblock, 1, cur);
-	memcpy(ckp, kp, INT_GET(cblock->bb_numrecs, ARCH_CONVERT) * sizeof(*kp));
+	memcpy(ckp, kp, be16_to_cpu(cblock->bb_numrecs) * sizeof(*kp));
 	cpp = XFS_BMAP_PTR_IADDR(cblock, 1, cur);
 #ifdef DEBUG
-	for (i = 0; i < INT_GET(cblock->bb_numrecs, ARCH_CONVERT); i++) {
+	for (i = 0; i < be16_to_cpu(cblock->bb_numrecs); i++) {
 		if ((error = xfs_btree_check_lptr(cur, INT_GET(pp[i], ARCH_CONVERT), level))) {
 			XFS_BMBT_TRACE_CURSOR(cur, ERROR);
 			return error;
 		}
 	}
 #endif
-	memcpy(cpp, pp, INT_GET(cblock->bb_numrecs, ARCH_CONVERT) * sizeof(*pp));
+	memcpy(cpp, pp, be16_to_cpu(cblock->bb_numrecs) * sizeof(*pp));
 #ifdef DEBUG
 	if ((error = xfs_btree_check_lptr(cur, (xfs_bmbt_ptr_t)args.fsbno,
 			level))) {
@@ -2412,7 +2412,7 @@
 	}
 #endif
 	INT_SET(*pp, ARCH_CONVERT, args.fsbno);
-	xfs_iroot_realloc(cur->bc_private.b.ip, 1 - INT_GET(cblock->bb_numrecs, ARCH_CONVERT),
+	xfs_iroot_realloc(cur->bc_private.b.ip, 1 - be16_to_cpu(cblock->bb_numrecs),
 		cur->bc_private.b.whichfork);
 	xfs_btree_setbuf(cur, level, bp);
 	/*
@@ -2420,8 +2420,8 @@
 	 * the root is at the right level.
 	 */
 	xfs_bmbt_log_block(cur, bp, XFS_BB_ALL_BITS);
-	xfs_bmbt_log_keys(cur, bp, 1, INT_GET(cblock->bb_numrecs, ARCH_CONVERT));
-	xfs_bmbt_log_ptrs(cur, bp, 1, INT_GET(cblock->bb_numrecs, ARCH_CONVERT));
+	xfs_bmbt_log_keys(cur, bp, 1, be16_to_cpu(cblock->bb_numrecs));
+	xfs_bmbt_log_ptrs(cur, bp, 1, be16_to_cpu(cblock->bb_numrecs));
 	XFS_BMBT_TRACE_CURSOR(cur, EXIT);
 	*logflags |=
 		XFS_ILOG_CORE | XFS_ILOG_FBROOT(cur->bc_private.b.whichfork);
@@ -2689,18 +2689,18 @@
 	xfs_bmbt_key_t		*tkp;
 	xfs_bmbt_ptr_t		*tpp;
 
-	ASSERT(INT_GET(rblock->bb_magic, ARCH_CONVERT) == XFS_BMAP_MAGIC);
-	ASSERT(INT_GET(rblock->bb_leftsib, ARCH_CONVERT) == NULLDFSBNO);
-	ASSERT(INT_GET(rblock->bb_rightsib, ARCH_CONVERT) == NULLDFSBNO);
-	ASSERT(INT_GET(rblock->bb_level, ARCH_CONVERT) > 0);
-	dblock->bb_level = rblock->bb_level;	/* both in on-disk format */
-	dblock->bb_numrecs = rblock->bb_numrecs;/* both in on-disk format */
+	ASSERT(be32_to_cpu(rblock->bb_magic) == XFS_BMAP_MAGIC);
+	ASSERT(be64_to_cpu(rblock->bb_leftsib) == NULLDFSBNO);
+	ASSERT(be64_to_cpu(rblock->bb_rightsib) == NULLDFSBNO);
+	ASSERT(be16_to_cpu(rblock->bb_level) > 0);
+	dblock->bb_level = rblock->bb_level;
+	dblock->bb_numrecs = rblock->bb_numrecs;
 	dmxr = (int)XFS_BTREE_BLOCK_MAXRECS(dblocklen, xfs_bmdr, 0);
 	fkp = XFS_BMAP_BROOT_KEY_ADDR(rblock, 1, rblocklen);
 	tkp = XFS_BTREE_KEY_ADDR(dblocklen, xfs_bmdr, dblock, 1, dmxr);
 	fpp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, rblocklen);
 	tpp = XFS_BTREE_PTR_ADDR(dblocklen, xfs_bmdr, dblock, 1, dmxr);
-	dmxr = INT_GET(dblock->bb_numrecs, ARCH_CONVERT);
+	dmxr = be16_to_cpu(dblock->bb_numrecs);
 	memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
 	memcpy(tpp, fpp, sizeof(*fpp) * dmxr); /* INT_: direct copy */
 }
diff --git a/fs/xfs/xfs_bmap_btree.h b/fs/xfs/xfs_bmap_btree.h
index 7478b1b..e095a2d 100644
--- a/fs/xfs/xfs_bmap_btree.h
+++ b/fs/xfs/xfs_bmap_btree.h
@@ -28,10 +28,9 @@
 /*
  * Bmap root header, on-disk form only.
  */
-typedef struct xfs_bmdr_block
-{
-	__uint16_t	bb_level;	/* 0 is a leaf */
-	__uint16_t	bb_numrecs;	/* current # of data records */
+typedef struct xfs_bmdr_block {
+	__be16		bb_level;	/* 0 is a leaf */
+	__be16		bb_numrecs;	/* current # of data records */
 } xfs_bmdr_block_t;
 
 /*
@@ -212,36 +211,36 @@
 
 #define XFS_BMAP_REC_DADDR(bb,i,cur)	\
 	(XFS_BTREE_REC_ADDR(XFS_BMAP_BLOCK_DSIZE(			\
-			INT_GET((bb)->bb_level, ARCH_CONVERT), cur),	\
+			be16_to_cpu((bb)->bb_level), cur),		\
 			xfs_bmbt, bb, i, XFS_BMAP_BLOCK_DMAXRECS(	\
-				INT_GET((bb)->bb_level, ARCH_CONVERT), cur)))
+				be16_to_cpu((bb)->bb_level), cur)))
 #define XFS_BMAP_REC_IADDR(bb,i,cur)	\
 	(XFS_BTREE_REC_ADDR(XFS_BMAP_BLOCK_ISIZE(			\
-			INT_GET((bb)->bb_level, ARCH_CONVERT), cur),	\
+			be16_to_cpu((bb)->bb_level), cur),		\
 			xfs_bmbt, bb, i, XFS_BMAP_BLOCK_IMAXRECS(	\
-				INT_GET((bb)->bb_level, ARCH_CONVERT), cur)))
+				be16_to_cpu((bb)->bb_level), cur)))
 
 #define XFS_BMAP_KEY_DADDR(bb,i,cur)	\
 	(XFS_BTREE_KEY_ADDR(XFS_BMAP_BLOCK_DSIZE(			\
-			INT_GET((bb)->bb_level, ARCH_CONVERT), cur),	\
+			be16_to_cpu((bb)->bb_level), cur),		\
 			xfs_bmbt, bb, i, XFS_BMAP_BLOCK_DMAXRECS(	\
-				INT_GET((bb)->bb_level, ARCH_CONVERT), cur)))
+				be16_to_cpu((bb)->bb_level), cur)))
 #define XFS_BMAP_KEY_IADDR(bb,i,cur)	\
 	(XFS_BTREE_KEY_ADDR(XFS_BMAP_BLOCK_ISIZE(			\
-			INT_GET((bb)->bb_level, ARCH_CONVERT), cur),	\
+			be16_to_cpu((bb)->bb_level), cur),		\
 			xfs_bmbt, bb, i, XFS_BMAP_BLOCK_IMAXRECS(	\
-				INT_GET((bb)->bb_level, ARCH_CONVERT), cur)))
+				be16_to_cpu((bb)->bb_level), cur)))
 
 #define XFS_BMAP_PTR_DADDR(bb,i,cur)	\
 	(XFS_BTREE_PTR_ADDR(XFS_BMAP_BLOCK_DSIZE(			\
-			INT_GET((bb)->bb_level, ARCH_CONVERT), cur),	\
+			be16_to_cpu((bb)->bb_level), cur),		\
 			xfs_bmbt, bb, i, XFS_BMAP_BLOCK_DMAXRECS(	\
-				INT_GET((bb)->bb_level, ARCH_CONVERT), cur)))
+				be16_to_cpu((bb)->bb_level), cur)))
 #define XFS_BMAP_PTR_IADDR(bb,i,cur)	\
 	(XFS_BTREE_PTR_ADDR(XFS_BMAP_BLOCK_ISIZE(			\
-			INT_GET((bb)->bb_level, ARCH_CONVERT), cur),	\
+			be16_to_cpu((bb)->bb_level), cur),		\
 			xfs_bmbt, bb, i, XFS_BMAP_BLOCK_IMAXRECS(	\
-				INT_GET((bb)->bb_level, ARCH_CONVERT), cur)))
+				be16_to_cpu((bb)->bb_level), cur)))
 
 /*
  * These are to be used when we know the size of the block and
@@ -254,7 +253,7 @@
 #define XFS_BMAP_BROOT_PTR_ADDR(bb,i,sz) \
 	(XFS_BTREE_PTR_ADDR(sz,xfs_bmbt,bb,i,XFS_BMAP_BROOT_MAXRECS(sz)))
 
-#define XFS_BMAP_BROOT_NUMRECS(bb)	INT_GET((bb)->bb_numrecs, ARCH_CONVERT)
+#define XFS_BMAP_BROOT_NUMRECS(bb)	be16_to_cpu((bb)->bb_numrecs)
 #define XFS_BMAP_BROOT_MAXRECS(sz)	XFS_BTREE_BLOCK_MAXRECS(sz,xfs_bmbt,0)
 
 #define XFS_BMAP_BROOT_SPACE_CALC(nrecs) \
@@ -262,7 +261,7 @@
 	       ((nrecs) * (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t))))
 
 #define XFS_BMAP_BROOT_SPACE(bb) \
-	(XFS_BMAP_BROOT_SPACE_CALC(INT_GET((bb)->bb_numrecs, ARCH_CONVERT)))
+	(XFS_BMAP_BROOT_SPACE_CALC(be16_to_cpu((bb)->bb_numrecs)))
 #define XFS_BMDR_SPACE_CALC(nrecs) \
 	(int)(sizeof(xfs_bmdr_block_t) + \
 	       ((nrecs) * (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t))))
@@ -273,11 +272,10 @@
 #define XFS_BM_MAXLEVELS(mp,w)		((mp)->m_bm_maxlevels[(w)])
 
 #define XFS_BMAP_SANITY_CHECK(mp,bb,level) \
-	(INT_GET((bb)->bb_magic, ARCH_CONVERT) == XFS_BMAP_MAGIC && \
-	 INT_GET((bb)->bb_level, ARCH_CONVERT) == level && \
-	 INT_GET((bb)->bb_numrecs, ARCH_CONVERT) > 0 && \
-	 INT_GET((bb)->bb_numrecs, ARCH_CONVERT) <= \
-			 (mp)->m_bmap_dmxr[(level) != 0])
+	(be32_to_cpu((bb)->bb_magic) == XFS_BMAP_MAGIC && \
+	 be16_to_cpu((bb)->bb_level) == level && \
+	 be16_to_cpu((bb)->bb_numrecs) > 0 && \
+	 be16_to_cpu((bb)->bb_numrecs) <= (mp)->m_bmap_dmxr[(level) != 0])
 
 
 #ifdef __KERNEL__
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index 9de5a1f..52d5d09 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -90,11 +90,14 @@
 	switch (cur->bc_btnum) {
 	case XFS_BTNUM_BNO:
 	case XFS_BTNUM_CNT:
-		return (int)XFS_ALLOC_BLOCK_MAXRECS(INT_GET(block->bb_h.bb_level, ARCH_CONVERT), cur);
+		return (int)XFS_ALLOC_BLOCK_MAXRECS(
+				be16_to_cpu(block->bb_h.bb_level), cur);
 	case XFS_BTNUM_BMAP:
-		return (int)XFS_BMAP_BLOCK_IMAXRECS(INT_GET(block->bb_h.bb_level, ARCH_CONVERT), cur);
+		return (int)XFS_BMAP_BLOCK_IMAXRECS(
+				be16_to_cpu(block->bb_h.bb_level), cur);
 	case XFS_BTNUM_INO:
-		return (int)XFS_INOBT_BLOCK_MAXRECS(INT_GET(block->bb_h.bb_level, ARCH_CONVERT), cur);
+		return (int)XFS_INOBT_BLOCK_MAXRECS(
+				be16_to_cpu(block->bb_h.bb_level), cur);
 	default:
 		ASSERT(0);
 		return 0;
@@ -140,7 +143,7 @@
 
 		k1 = ak1;
 		k2 = ak2;
-		ASSERT(INT_GET(k1->ar_startblock, ARCH_CONVERT) < INT_GET(k2->ar_startblock, ARCH_CONVERT));
+		ASSERT(be32_to_cpu(k1->ar_startblock) < be32_to_cpu(k2->ar_startblock));
 		break;
 	    }
 	case XFS_BTNUM_CNT: {
@@ -149,9 +152,9 @@
 
 		k1 = ak1;
 		k2 = ak2;
-		ASSERT(INT_GET(k1->ar_blockcount, ARCH_CONVERT) < INT_GET(k2->ar_blockcount, ARCH_CONVERT) ||
-		       (INT_GET(k1->ar_blockcount, ARCH_CONVERT) == INT_GET(k2->ar_blockcount, ARCH_CONVERT) &&
-			INT_GET(k1->ar_startblock, ARCH_CONVERT) < INT_GET(k2->ar_startblock, ARCH_CONVERT)));
+		ASSERT(be32_to_cpu(k1->ar_blockcount) < be32_to_cpu(k2->ar_blockcount) ||
+		       (k1->ar_blockcount == k2->ar_blockcount &&
+			be32_to_cpu(k1->ar_startblock) < be32_to_cpu(k2->ar_startblock)));
 		break;
 	    }
 	case XFS_BTNUM_BMAP: {
@@ -194,16 +197,16 @@
 
 	mp = cur->bc_mp;
 	lblock_ok =
-		INT_GET(block->bb_magic, ARCH_CONVERT) == xfs_magics[cur->bc_btnum] &&
-		INT_GET(block->bb_level, ARCH_CONVERT) == level &&
-		INT_GET(block->bb_numrecs, ARCH_CONVERT) <=
+		be32_to_cpu(block->bb_magic) == xfs_magics[cur->bc_btnum] &&
+		be16_to_cpu(block->bb_level) == level &&
+		be16_to_cpu(block->bb_numrecs) <=
 			xfs_btree_maxrecs(cur, (xfs_btree_block_t *)block) &&
 		block->bb_leftsib &&
-		(INT_GET(block->bb_leftsib, ARCH_CONVERT) == NULLDFSBNO ||
-		 XFS_FSB_SANITY_CHECK(mp, INT_GET(block->bb_leftsib, ARCH_CONVERT))) &&
+		(be64_to_cpu(block->bb_leftsib) == NULLDFSBNO ||
+		 XFS_FSB_SANITY_CHECK(mp, be64_to_cpu(block->bb_leftsib))) &&
 		block->bb_rightsib &&
-		(INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLDFSBNO ||
-		 XFS_FSB_SANITY_CHECK(mp, INT_GET(block->bb_rightsib, ARCH_CONVERT)));
+		(be64_to_cpu(block->bb_rightsib) == NULLDFSBNO ||
+		 XFS_FSB_SANITY_CHECK(mp, be64_to_cpu(block->bb_rightsib)));
 	if (unlikely(XFS_TEST_ERROR(!lblock_ok, mp, XFS_ERRTAG_BTREE_CHECK_LBLOCK,
 			XFS_RANDOM_BTREE_CHECK_LBLOCK))) {
 		if (bp)
@@ -251,8 +254,9 @@
 
 		r1 = ar1;
 		r2 = ar2;
-		ASSERT(INT_GET(r1->ar_startblock, ARCH_CONVERT) + INT_GET(r1->ar_blockcount, ARCH_CONVERT) <=
-		       INT_GET(r2->ar_startblock, ARCH_CONVERT));
+		ASSERT(be32_to_cpu(r1->ar_startblock) +
+		       be32_to_cpu(r1->ar_blockcount) <=
+		       be32_to_cpu(r2->ar_startblock));
 		break;
 	    }
 	case XFS_BTNUM_CNT: {
@@ -261,9 +265,9 @@
 
 		r1 = ar1;
 		r2 = ar2;
-		ASSERT(INT_GET(r1->ar_blockcount, ARCH_CONVERT) < INT_GET(r2->ar_blockcount, ARCH_CONVERT) ||
-		       (INT_GET(r1->ar_blockcount, ARCH_CONVERT) == INT_GET(r2->ar_blockcount, ARCH_CONVERT) &&
-			INT_GET(r1->ar_startblock, ARCH_CONVERT) < INT_GET(r2->ar_startblock, ARCH_CONVERT)));
+		ASSERT(be32_to_cpu(r1->ar_blockcount) < be32_to_cpu(r2->ar_blockcount) ||
+		       (r1->ar_blockcount == r2->ar_blockcount &&
+			be32_to_cpu(r1->ar_startblock) < be32_to_cpu(r2->ar_startblock)));
 		break;
 	    }
 	case XFS_BTNUM_BMAP: {
@@ -311,17 +315,17 @@
 
 	agbp = cur->bc_private.a.agbp;
 	agf = XFS_BUF_TO_AGF(agbp);
-	agflen = INT_GET(agf->agf_length, ARCH_CONVERT);
+	agflen = be32_to_cpu(agf->agf_length);
 	sblock_ok =
-		INT_GET(block->bb_magic, ARCH_CONVERT) == xfs_magics[cur->bc_btnum] &&
-		INT_GET(block->bb_level, ARCH_CONVERT) == level &&
-		INT_GET(block->bb_numrecs, ARCH_CONVERT) <=
+		be32_to_cpu(block->bb_magic) == xfs_magics[cur->bc_btnum] &&
+		be16_to_cpu(block->bb_level) == level &&
+		be16_to_cpu(block->bb_numrecs) <=
 			xfs_btree_maxrecs(cur, (xfs_btree_block_t *)block) &&
-		(INT_GET(block->bb_leftsib, ARCH_CONVERT) == NULLAGBLOCK ||
-		 INT_GET(block->bb_leftsib, ARCH_CONVERT) < agflen) &&
+		(be32_to_cpu(block->bb_leftsib) == NULLAGBLOCK ||
+		 be32_to_cpu(block->bb_leftsib) < agflen) &&
 		block->bb_leftsib &&
-		(INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK ||
-		 INT_GET(block->bb_rightsib, ARCH_CONVERT) < agflen) &&
+		(be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK ||
+		 be32_to_cpu(block->bb_rightsib) < agflen) &&
 		block->bb_rightsib;
 	if (unlikely(XFS_TEST_ERROR(!sblock_ok, cur->bc_mp,
 			XFS_ERRTAG_BTREE_CHECK_SBLOCK,
@@ -352,7 +356,7 @@
 	XFS_WANT_CORRUPTED_RETURN(
 		level > 0 &&
 		ptr != NULLAGBLOCK && ptr != 0 &&
-		ptr < INT_GET(agf->agf_length, ARCH_CONVERT));
+		ptr < be32_to_cpu(agf->agf_length));
 	return 0;
 }
 
@@ -591,15 +595,15 @@
 	case XFS_BTNUM_BNO:
 	case XFS_BTNUM_CNT:
 		agf = XFS_BUF_TO_AGF(agbp);
-		nlevels = INT_GET(agf->agf_levels[btnum], ARCH_CONVERT);
+		nlevels = be32_to_cpu(agf->agf_levels[btnum]);
 		break;
 	case XFS_BTNUM_BMAP:
 		ifp = XFS_IFORK_PTR(ip, whichfork);
-		nlevels = INT_GET(ifp->if_broot->bb_level, ARCH_CONVERT) + 1;
+		nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1;
 		break;
 	case XFS_BTNUM_INO:
 		agi = XFS_BUF_TO_AGI(agbp);
-		nlevels = INT_GET(agi->agi_level, ARCH_CONVERT);
+		nlevels = be32_to_cpu(agi->agi_level);
 		break;
 	default:
 		ASSERT(0);
@@ -663,9 +667,9 @@
 	block = xfs_btree_get_block(cur, level, &bp);
 	xfs_btree_check_block(cur, block, level, bp);
 	if (XFS_BTREE_LONG_PTRS(cur->bc_btnum))
-		return INT_GET(block->bb_u.l.bb_rightsib, ARCH_CONVERT) == NULLDFSBNO;
+		return be64_to_cpu(block->bb_u.l.bb_rightsib) == NULLDFSBNO;
 	else
-		return INT_GET(block->bb_u.s.bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK;
+		return be32_to_cpu(block->bb_u.s.bb_rightsib) == NULLAGBLOCK;
 }
 
 /*
@@ -693,7 +697,7 @@
 	/*
 	 * Set the ptr value to numrecs, that's the last record/key.
 	 */
-	cur->bc_ptrs[level] = INT_GET(block->bb_h.bb_numrecs, ARCH_CONVERT);
+	cur->bc_ptrs[level] = be16_to_cpu(block->bb_h.bb_numrecs);
 	return 1;
 }
 
@@ -863,38 +867,38 @@
 	case XFS_BTNUM_BNO:
 	case XFS_BTNUM_CNT:
 		a = XFS_BUF_TO_ALLOC_BLOCK(cur->bc_bufs[lev]);
-		if ((lr & XFS_BTCUR_LEFTRA) && INT_GET(a->bb_leftsib, ARCH_CONVERT) != NULLAGBLOCK) {
+		if ((lr & XFS_BTCUR_LEFTRA) && be32_to_cpu(a->bb_leftsib) != NULLAGBLOCK) {
 			xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno,
-				INT_GET(a->bb_leftsib, ARCH_CONVERT), 1);
+				be32_to_cpu(a->bb_leftsib), 1);
 			rval++;
 		}
-		if ((lr & XFS_BTCUR_RIGHTRA) && INT_GET(a->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) {
+		if ((lr & XFS_BTCUR_RIGHTRA) && be32_to_cpu(a->bb_rightsib) != NULLAGBLOCK) {
 			xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno,
-				INT_GET(a->bb_rightsib, ARCH_CONVERT), 1);
+				be32_to_cpu(a->bb_rightsib), 1);
 			rval++;
 		}
 		break;
 	case XFS_BTNUM_BMAP:
 		b = XFS_BUF_TO_BMBT_BLOCK(cur->bc_bufs[lev]);
-		if ((lr & XFS_BTCUR_LEFTRA) && INT_GET(b->bb_leftsib, ARCH_CONVERT) != NULLDFSBNO) {
-			xfs_btree_reada_bufl(cur->bc_mp, INT_GET(b->bb_leftsib, ARCH_CONVERT), 1);
+		if ((lr & XFS_BTCUR_LEFTRA) && be64_to_cpu(b->bb_leftsib) != NULLDFSBNO) {
+			xfs_btree_reada_bufl(cur->bc_mp, be64_to_cpu(b->bb_leftsib), 1);
 			rval++;
 		}
-		if ((lr & XFS_BTCUR_RIGHTRA) && INT_GET(b->bb_rightsib, ARCH_CONVERT) != NULLDFSBNO) {
-			xfs_btree_reada_bufl(cur->bc_mp, INT_GET(b->bb_rightsib, ARCH_CONVERT), 1);
+		if ((lr & XFS_BTCUR_RIGHTRA) && be64_to_cpu(b->bb_rightsib) != NULLDFSBNO) {
+			xfs_btree_reada_bufl(cur->bc_mp, be64_to_cpu(b->bb_rightsib), 1);
 			rval++;
 		}
 		break;
 	case XFS_BTNUM_INO:
 		i = XFS_BUF_TO_INOBT_BLOCK(cur->bc_bufs[lev]);
-		if ((lr & XFS_BTCUR_LEFTRA) && INT_GET(i->bb_leftsib, ARCH_CONVERT) != NULLAGBLOCK) {
+		if ((lr & XFS_BTCUR_LEFTRA) && be32_to_cpu(i->bb_leftsib) != NULLAGBLOCK) {
 			xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.i.agno,
-				INT_GET(i->bb_leftsib, ARCH_CONVERT), 1);
+				be32_to_cpu(i->bb_leftsib), 1);
 			rval++;
 		}
-		if ((lr & XFS_BTCUR_RIGHTRA) && INT_GET(i->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) {
+		if ((lr & XFS_BTCUR_RIGHTRA) && be32_to_cpu(i->bb_rightsib) != NULLAGBLOCK) {
 			xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.i.agno,
-				INT_GET(i->bb_rightsib, ARCH_CONVERT), 1);
+				be32_to_cpu(i->bb_rightsib), 1);
 			rval++;
 		}
 		break;
@@ -926,14 +930,14 @@
 		return;
 	b = XFS_BUF_TO_BLOCK(bp);
 	if (XFS_BTREE_LONG_PTRS(cur->bc_btnum)) {
-		if (INT_GET(b->bb_u.l.bb_leftsib, ARCH_CONVERT) == NULLDFSBNO)
+		if (be64_to_cpu(b->bb_u.l.bb_leftsib) == NULLDFSBNO)
 			cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA;
-		if (INT_GET(b->bb_u.l.bb_rightsib, ARCH_CONVERT) == NULLDFSBNO)
+		if (be64_to_cpu(b->bb_u.l.bb_rightsib) == NULLDFSBNO)
 			cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA;
 	} else {
-		if (INT_GET(b->bb_u.s.bb_leftsib, ARCH_CONVERT) == NULLAGBLOCK)
+		if (be32_to_cpu(b->bb_u.s.bb_leftsib) == NULLAGBLOCK)
 			cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA;
-		if (INT_GET(b->bb_u.s.bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK)
+		if (be32_to_cpu(b->bb_u.s.bb_rightsib) == NULLAGBLOCK)
 			cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA;
 	}
 }
diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h
index e6b2a09..44f1bd9 100644
--- a/fs/xfs/xfs_btree.h
+++ b/fs/xfs/xfs_btree.h
@@ -39,25 +39,23 @@
 /*
  * Short form header: space allocation btrees.
  */
-typedef struct xfs_btree_sblock
-{
-	__uint32_t	bb_magic;	/* magic number for block type */
-	__uint16_t	bb_level;	/* 0 is a leaf */
-	__uint16_t	bb_numrecs;	/* current # of data records */
-	xfs_agblock_t	bb_leftsib;	/* left sibling block or NULLAGBLOCK */
-	xfs_agblock_t	bb_rightsib;	/* right sibling block or NULLAGBLOCK */
+typedef struct xfs_btree_sblock {
+	__be32		bb_magic;	/* magic number for block type */
+	__be16		bb_level;	/* 0 is a leaf */
+	__be16		bb_numrecs;	/* current # of data records */
+	__be32		bb_leftsib;	/* left sibling block or NULLAGBLOCK */
+	__be32		bb_rightsib;	/* right sibling block or NULLAGBLOCK */
 } xfs_btree_sblock_t;
 
 /*
  * Long form header: bmap btrees.
  */
-typedef struct xfs_btree_lblock
-{
-	__uint32_t	bb_magic;	/* magic number for block type */
-	__uint16_t	bb_level;	/* 0 is a leaf */
-	__uint16_t	bb_numrecs;	/* current # of data records */
-	xfs_dfsbno_t	bb_leftsib;	/* left sibling block or NULLDFSBNO */
-	xfs_dfsbno_t	bb_rightsib;	/* right sibling block or NULLDFSBNO */
+typedef struct xfs_btree_lblock {
+	__be32		bb_magic;	/* magic number for block type */
+	__be16		bb_level;	/* 0 is a leaf */
+	__be16		bb_numrecs;	/* current # of data records */
+	__be64		bb_leftsib;	/* left sibling block or NULLDFSBNO */
+	__be64		bb_rightsib;	/* right sibling block or NULLDFSBNO */
 } xfs_btree_lblock_t;
 
 /*
@@ -65,24 +63,23 @@
  */
 typedef struct xfs_btree_hdr
 {
-	__uint32_t	bb_magic;	/* magic number for block type */
-	__uint16_t	bb_level;	/* 0 is a leaf */
-	__uint16_t	bb_numrecs;	/* current # of data records */
+	__be32		bb_magic;	/* magic number for block type */
+	__be16		bb_level;	/* 0 is a leaf */
+	__be16		bb_numrecs;	/* current # of data records */
 } xfs_btree_hdr_t;
 
-typedef struct xfs_btree_block
-{
+typedef struct xfs_btree_block {
 	xfs_btree_hdr_t	bb_h;		/* header */
-	union		{
+	union {
+		struct {
+			__be32		bb_leftsib;
+			__be32		bb_rightsib;
+		} s;			/* short form pointers */
 		struct	{
-			xfs_agblock_t	bb_leftsib;
-			xfs_agblock_t	bb_rightsib;
-		}	s;		/* short form pointers */
-		struct	{
-			xfs_dfsbno_t	bb_leftsib;
-			xfs_dfsbno_t	bb_rightsib;
-		}	l;		/* long form pointers */
-	}		bb_u;		/* rest */
+			__be64		bb_leftsib;
+			__be64		bb_rightsib;
+		} l;			/* long form pointers */
+	} bb_u;				/* rest */
 } xfs_btree_block_t;
 
 /*
@@ -146,7 +143,7 @@
 	struct xfs_trans	*bc_tp;	/* transaction we're in, if any */
 	struct xfs_mount	*bc_mp;	/* file system mount struct */
 	union {
-		xfs_alloc_rec_t		a;
+		xfs_alloc_rec_incore_t	a;
 		xfs_bmbt_irec_t		b;
 		xfs_inobt_rec_t		i;
 	}		bc_rec;		/* current insert/search record value */
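
The short/long split above is why the conversions in this patch alternate widths: short form (space allocation) blocks carry __be32 sibling pointers checked against NULLAGBLOCK, while long form (bmap) blocks carry __be64 siblings checked against NULLDFSBNO. A sketch of the pattern, with an illustrative helper name that is not part of the patch:

	/* Sketch: fetch the right sibling in CPU byte order for either form. */
	static inline xfs_dfsbno_t
	xfs_btree_rightsib(xfs_btree_block_t *block, int long_ptrs)
	{
		if (long_ptrs)
			return be64_to_cpu(block->bb_u.l.bb_rightsib);
		return (xfs_dfsbno_t)be32_to_cpu(block->bb_u.s.bb_rightsib);
	}

The real code does this inline; xfs_btree_islastblock() earlier in xfs_btree.c, for example, compares bb_u.l.bb_rightsib against NULLDFSBNO for long-pointer trees and bb_u.s.bb_rightsib against NULLAGBLOCK otherwise.
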
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 239e701..0ca597b 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -191,28 +191,26 @@
 				  XFS_FSS_TO_BB(mp, 1), 0);
 		agf = XFS_BUF_TO_AGF(bp);
 		memset(agf, 0, mp->m_sb.sb_sectsize);
-		INT_SET(agf->agf_magicnum, ARCH_CONVERT, XFS_AGF_MAGIC);
-		INT_SET(agf->agf_versionnum, ARCH_CONVERT, XFS_AGF_VERSION);
-		INT_SET(agf->agf_seqno, ARCH_CONVERT, agno);
+		agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
+		agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
+		agf->agf_seqno = cpu_to_be32(agno);
 		if (agno == nagcount - 1)
 			agsize =
 				nb -
 				(agno * (xfs_rfsblock_t)mp->m_sb.sb_agblocks);
 		else
 			agsize = mp->m_sb.sb_agblocks;
-		INT_SET(agf->agf_length, ARCH_CONVERT, agsize);
-		INT_SET(agf->agf_roots[XFS_BTNUM_BNOi], ARCH_CONVERT,
-			XFS_BNO_BLOCK(mp));
-		INT_SET(agf->agf_roots[XFS_BTNUM_CNTi], ARCH_CONVERT,
-			XFS_CNT_BLOCK(mp));
-		INT_SET(agf->agf_levels[XFS_BTNUM_BNOi], ARCH_CONVERT, 1);
-		INT_SET(agf->agf_levels[XFS_BTNUM_CNTi], ARCH_CONVERT, 1);
+		agf->agf_length = cpu_to_be32(agsize);
+		agf->agf_roots[XFS_BTNUM_BNOi] = cpu_to_be32(XFS_BNO_BLOCK(mp));
+		agf->agf_roots[XFS_BTNUM_CNTi] = cpu_to_be32(XFS_CNT_BLOCK(mp));
+		agf->agf_levels[XFS_BTNUM_BNOi] = cpu_to_be32(1);
+		agf->agf_levels[XFS_BTNUM_CNTi] = cpu_to_be32(1);
 		agf->agf_flfirst = 0;
-		INT_SET(agf->agf_fllast, ARCH_CONVERT, XFS_AGFL_SIZE(mp) - 1);
+		agf->agf_fllast = cpu_to_be32(XFS_AGFL_SIZE(mp) - 1);
 		agf->agf_flcount = 0;
 		tmpsize = agsize - XFS_PREALLOC_BLOCKS(mp);
-		INT_SET(agf->agf_freeblks, ARCH_CONVERT, tmpsize);
-		INT_SET(agf->agf_longest, ARCH_CONVERT, tmpsize);
+		agf->agf_freeblks = cpu_to_be32(tmpsize);
+		agf->agf_longest = cpu_to_be32(tmpsize);
 		error = xfs_bwrite(mp, bp);
 		if (error) {
 			goto error0;
@@ -225,19 +223,18 @@
 				  XFS_FSS_TO_BB(mp, 1), 0);
 		agi = XFS_BUF_TO_AGI(bp);
 		memset(agi, 0, mp->m_sb.sb_sectsize);
-		INT_SET(agi->agi_magicnum, ARCH_CONVERT, XFS_AGI_MAGIC);
-		INT_SET(agi->agi_versionnum, ARCH_CONVERT, XFS_AGI_VERSION);
-		INT_SET(agi->agi_seqno, ARCH_CONVERT, agno);
-		INT_SET(agi->agi_length, ARCH_CONVERT, agsize);
+		agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
+		agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
+		agi->agi_seqno = cpu_to_be32(agno);
+		agi->agi_length = cpu_to_be32(agsize);
 		agi->agi_count = 0;
-		INT_SET(agi->agi_root, ARCH_CONVERT, XFS_IBT_BLOCK(mp));
-		INT_SET(agi->agi_level, ARCH_CONVERT, 1);
+		agi->agi_root = cpu_to_be32(XFS_IBT_BLOCK(mp));
+		agi->agi_level = cpu_to_be32(1);
 		agi->agi_freecount = 0;
-		INT_SET(agi->agi_newino, ARCH_CONVERT, NULLAGINO);
-		INT_SET(agi->agi_dirino, ARCH_CONVERT, NULLAGINO);
+		agi->agi_newino = cpu_to_be32(NULLAGINO);
+		agi->agi_dirino = cpu_to_be32(NULLAGINO);
 		for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++)
-			INT_SET(agi->agi_unlinked[bucket], ARCH_CONVERT,
-				NULLAGINO);
+			agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
 		error = xfs_bwrite(mp, bp);
 		if (error) {
 			goto error0;
@@ -250,17 +247,16 @@
 			BTOBB(mp->m_sb.sb_blocksize), 0);
 		block = XFS_BUF_TO_SBLOCK(bp);
 		memset(block, 0, mp->m_sb.sb_blocksize);
-		INT_SET(block->bb_magic, ARCH_CONVERT, XFS_ABTB_MAGIC);
+		block->bb_magic = cpu_to_be32(XFS_ABTB_MAGIC);
 		block->bb_level = 0;
-		INT_SET(block->bb_numrecs, ARCH_CONVERT, 1);
-		INT_SET(block->bb_leftsib, ARCH_CONVERT, NULLAGBLOCK);
-		INT_SET(block->bb_rightsib, ARCH_CONVERT, NULLAGBLOCK);
+		block->bb_numrecs = cpu_to_be16(1);
+		block->bb_leftsib = cpu_to_be32(NULLAGBLOCK);
+		block->bb_rightsib = cpu_to_be32(NULLAGBLOCK);
 		arec = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_alloc,
 			block, 1, mp->m_alloc_mxr[0]);
-		INT_SET(arec->ar_startblock, ARCH_CONVERT,
-			XFS_PREALLOC_BLOCKS(mp));
-		INT_SET(arec->ar_blockcount, ARCH_CONVERT,
-			agsize - INT_GET(arec->ar_startblock, ARCH_CONVERT));
+		arec->ar_startblock = cpu_to_be32(XFS_PREALLOC_BLOCKS(mp));
+		arec->ar_blockcount = cpu_to_be32(
+			agsize - be32_to_cpu(arec->ar_startblock));
 		error = xfs_bwrite(mp, bp);
 		if (error) {
 			goto error0;
@@ -273,18 +269,17 @@
 			BTOBB(mp->m_sb.sb_blocksize), 0);
 		block = XFS_BUF_TO_SBLOCK(bp);
 		memset(block, 0, mp->m_sb.sb_blocksize);
-		INT_SET(block->bb_magic, ARCH_CONVERT, XFS_ABTC_MAGIC);
+		block->bb_magic = cpu_to_be32(XFS_ABTC_MAGIC);
 		block->bb_level = 0;
-		INT_SET(block->bb_numrecs, ARCH_CONVERT, 1);
-		INT_SET(block->bb_leftsib, ARCH_CONVERT, NULLAGBLOCK);
-		INT_SET(block->bb_rightsib, ARCH_CONVERT, NULLAGBLOCK);
+		block->bb_numrecs = cpu_to_be16(1);
+		block->bb_leftsib = cpu_to_be32(NULLAGBLOCK);
+		block->bb_rightsib = cpu_to_be32(NULLAGBLOCK);
 		arec = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_alloc,
 			block, 1, mp->m_alloc_mxr[0]);
-		INT_SET(arec->ar_startblock, ARCH_CONVERT,
-			XFS_PREALLOC_BLOCKS(mp));
-		INT_SET(arec->ar_blockcount, ARCH_CONVERT,
-			agsize - INT_GET(arec->ar_startblock, ARCH_CONVERT));
-		nfree += INT_GET(arec->ar_blockcount, ARCH_CONVERT);
+		arec->ar_startblock = cpu_to_be32(XFS_PREALLOC_BLOCKS(mp));
+		arec->ar_blockcount = cpu_to_be32(
+			agsize - be32_to_cpu(arec->ar_startblock));
+		nfree += be32_to_cpu(arec->ar_blockcount);
 		error = xfs_bwrite(mp, bp);
 		if (error) {
 			goto error0;
@@ -297,11 +292,11 @@
 			BTOBB(mp->m_sb.sb_blocksize), 0);
 		block = XFS_BUF_TO_SBLOCK(bp);
 		memset(block, 0, mp->m_sb.sb_blocksize);
-		INT_SET(block->bb_magic, ARCH_CONVERT, XFS_IBT_MAGIC);
+		block->bb_magic = cpu_to_be32(XFS_IBT_MAGIC);
 		block->bb_level = 0;
 		block->bb_numrecs = 0;
-		INT_SET(block->bb_leftsib, ARCH_CONVERT, NULLAGBLOCK);
-		INT_SET(block->bb_rightsib, ARCH_CONVERT, NULLAGBLOCK);
+		block->bb_leftsib = cpu_to_be32(NULLAGBLOCK);
+		block->bb_rightsib = cpu_to_be32(NULLAGBLOCK);
 		error = xfs_bwrite(mp, bp);
 		if (error) {
 			goto error0;
@@ -321,10 +316,9 @@
 		}
 		ASSERT(bp);
 		agi = XFS_BUF_TO_AGI(bp);
-		INT_MOD(agi->agi_length, ARCH_CONVERT, new);
+		be32_add(&agi->agi_length, new);
 		ASSERT(nagcount == oagcount ||
-		       INT_GET(agi->agi_length, ARCH_CONVERT) ==
-				mp->m_sb.sb_agblocks);
+		       be32_to_cpu(agi->agi_length) == mp->m_sb.sb_agblocks);
 		xfs_ialloc_log_agi(tp, bp, XFS_AGI_LENGTH);
 		/*
 		 * Change agf length.
@@ -335,14 +329,14 @@
 		}
 		ASSERT(bp);
 		agf = XFS_BUF_TO_AGF(bp);
-		INT_MOD(agf->agf_length, ARCH_CONVERT, new);
-		ASSERT(INT_GET(agf->agf_length, ARCH_CONVERT) ==
-				INT_GET(agi->agi_length, ARCH_CONVERT));
+		be32_add(&agf->agf_length, new);
+		ASSERT(be32_to_cpu(agf->agf_length) ==
+		       be32_to_cpu(agi->agi_length));
 		/*
 		 * Free the new space.
 		 */
 		error = xfs_free_extent(tp, XFS_AGB_TO_FSB(mp, agno,
-			INT_GET(agf->agf_length, ARCH_CONVERT) - new), new);
+			be32_to_cpu(agf->agf_length) - new), new);
 		if (error) {
 			goto error0;
 		}
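
The growfs hunks above are a mechanical conversion: each on-disk AGF/AGI field is
now declared __be32, so stores go through cpu_to_be32() and loads through
be32_to_cpu() instead of the INT_SET()/INT_GET() macros with ARCH_CONVERT.  A
minimal sketch of the two styles, for illustration only (not part of the patch):

	/* old style: the byte swap is hidden inside the macro */
	INT_SET(agf->agf_length, ARCH_CONVERT, agsize);
	agsize = INT_GET(agf->agf_length, ARCH_CONVERT);

	/* new style: the field is typed __be32, so the swap is explicit
	 * and a static checker can see that no native-endian value is
	 * ever assigned to the field directly */
	agf->agf_length = cpu_to_be32(agsize);
	agsize = be32_to_cpu(agf->agf_length);
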
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c
index d8ceb3d..8f3fae1 100644
--- a/fs/xfs/xfs_ialloc.c
+++ b/fs/xfs/xfs_ialloc.c
@@ -178,8 +178,8 @@
 	 * Ideally they should be spaced out through the a.g.
 	 * For now, just allocate blocks up front.
 	 */
-	args.agbno = INT_GET(agi->agi_root, ARCH_CONVERT);
-	args.fsbno = XFS_AGB_TO_FSB(args.mp, INT_GET(agi->agi_seqno, ARCH_CONVERT),
+	args.agbno = be32_to_cpu(agi->agi_root);
+	args.fsbno = XFS_AGB_TO_FSB(args.mp, be32_to_cpu(agi->agi_seqno),
 				    args.agbno);
 	/*
 	 * Allocate a fixed-size extent of inodes.
@@ -201,9 +201,9 @@
 	 */
 	if (isaligned && args.fsbno == NULLFSBLOCK) {
 		args.type = XFS_ALLOCTYPE_NEAR_BNO;
-		args.agbno = INT_GET(agi->agi_root, ARCH_CONVERT);
+		args.agbno = be32_to_cpu(agi->agi_root);
 		args.fsbno = XFS_AGB_TO_FSB(args.mp,
-				INT_GET(agi->agi_seqno, ARCH_CONVERT), args.agbno);
+				be32_to_cpu(agi->agi_seqno), args.agbno);
 		if (XFS_SB_VERSION_HASALIGN(&args.mp->m_sb) &&
 			args.mp->m_sb.sb_inoalignmt >=
 			XFS_B_TO_FSBT(args.mp, XFS_INODE_CLUSTER_SIZE(args.mp)))
@@ -258,7 +258,7 @@
 		/*
 		 * Get the block.
 		 */
-		d = XFS_AGB_TO_DADDR(args.mp, INT_GET(agi->agi_seqno, ARCH_CONVERT),
+		d = XFS_AGB_TO_DADDR(args.mp, be32_to_cpu(agi->agi_seqno),
 				     args.agbno + (j * blks_per_cluster));
 		fbuf = xfs_trans_get_buf(tp, args.mp->m_ddev_targp, d,
 					 args.mp->m_bsize * blks_per_cluster,
@@ -278,17 +278,17 @@
 		}
 		xfs_trans_inode_alloc_buf(tp, fbuf);
 	}
-	INT_MOD(agi->agi_count, ARCH_CONVERT, newlen);
-	INT_MOD(agi->agi_freecount, ARCH_CONVERT, newlen);
+	be32_add(&agi->agi_count, newlen);
+	be32_add(&agi->agi_freecount, newlen);
 	down_read(&args.mp->m_peraglock);
-	args.mp->m_perag[INT_GET(agi->agi_seqno, ARCH_CONVERT)].pagi_freecount += newlen;
+	args.mp->m_perag[be32_to_cpu(agi->agi_seqno)].pagi_freecount += newlen;
 	up_read(&args.mp->m_peraglock);
-	INT_SET(agi->agi_newino, ARCH_CONVERT, newino);
+	agi->agi_newino = cpu_to_be32(newino);
 	/*
 	 * Insert records describing the new inode chunk into the btree.
 	 */
 	cur = xfs_btree_init_cursor(args.mp, tp, agbp,
-			INT_GET(agi->agi_seqno, ARCH_CONVERT),
+			be32_to_cpu(agi->agi_seqno),
 			XFS_BTNUM_INO, (xfs_inode_t *)0, 0);
 	for (thisino = newino;
 	     thisino < newino + newlen;
@@ -528,7 +528,7 @@
 			return 0;
 		}
 		agi = XFS_BUF_TO_AGI(agbp);
-		ASSERT(INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC);
+		ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC);
 	} else {
 		/*
 		 * Continue where we left off before.  In this case, we
@@ -536,12 +536,12 @@
 		 */
 		agbp = *IO_agbp;
 		agi = XFS_BUF_TO_AGI(agbp);
-		ASSERT(INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC);
-		ASSERT(INT_GET(agi->agi_freecount, ARCH_CONVERT) > 0);
+		ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC);
+		ASSERT(be32_to_cpu(agi->agi_freecount) > 0);
 	}
 	mp = tp->t_mountp;
 	agcount = mp->m_sb.sb_agcount;
-	agno = INT_GET(agi->agi_seqno, ARCH_CONVERT);
+	agno = be32_to_cpu(agi->agi_seqno);
 	tagno = agno;
 	pagno = XFS_INO_TO_AGNO(mp, parent);
 	pagino = XFS_INO_TO_AGINO(mp, parent);
@@ -589,7 +589,7 @@
 				 * can commit the current transaction and call
 				 * us again where we left off.
 				 */
-				ASSERT(INT_GET(agi->agi_freecount, ARCH_CONVERT) > 0);
+				ASSERT(be32_to_cpu(agi->agi_freecount) > 0);
 				*alloc_done = B_TRUE;
 				*IO_agbp = agbp;
 				*inop = NULLFSINO;
@@ -620,7 +620,7 @@
 		if (error)
 			goto nextag;
 		agi = XFS_BUF_TO_AGI(agbp);
-		ASSERT(INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC);
+		ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC);
 	}
 	/*
 	 * Here with an allocation group that has a free inode.
@@ -629,14 +629,14 @@
 	 */
 	agno = tagno;
 	*IO_agbp = NULL;
-	cur = xfs_btree_init_cursor(mp, tp, agbp, INT_GET(agi->agi_seqno, ARCH_CONVERT),
+	cur = xfs_btree_init_cursor(mp, tp, agbp, be32_to_cpu(agi->agi_seqno),
 				    XFS_BTNUM_INO, (xfs_inode_t *)0, 0);
 	/*
 	 * If pagino is 0 (this is the root inode allocation) use newino.
 	 * This must work because we've just allocated some.
 	 */
 	if (!pagino)
-		pagino = INT_GET(agi->agi_newino, ARCH_CONVERT);
+		pagino = be32_to_cpu(agi->agi_newino);
 #ifdef DEBUG
 	if (cur->bc_nlevels == 1) {
 		int	freecount = 0;
@@ -654,7 +654,7 @@
 				goto error0;
 		} while (i == 1);
 
-		ASSERT(freecount == INT_GET(agi->agi_freecount, ARCH_CONVERT) ||
+		ASSERT(freecount == be32_to_cpu(agi->agi_freecount) ||
 		       XFS_FORCED_SHUTDOWN(mp));
 	}
 #endif
@@ -813,9 +813,9 @@
 	 * In a different a.g. from the parent.
 	 * See if the most recently allocated block has any free.
 	 */
-	else if (INT_GET(agi->agi_newino, ARCH_CONVERT) != NULLAGINO) {
+	else if (be32_to_cpu(agi->agi_newino) != NULLAGINO) {
 		if ((error = xfs_inobt_lookup_eq(cur,
-				INT_GET(agi->agi_newino, ARCH_CONVERT), 0, 0, &i)))
+				be32_to_cpu(agi->agi_newino), 0, 0, &i)))
 			goto error0;
 		if (i == 1 &&
 		    (error = xfs_inobt_get_rec(cur, &rec.ir_startino,
@@ -862,7 +862,7 @@
 	if ((error = xfs_inobt_update(cur, rec.ir_startino, rec.ir_freecount,
 			rec.ir_free)))
 		goto error0;
-	INT_MOD(agi->agi_freecount, ARCH_CONVERT, -1);
+	be32_add(&agi->agi_freecount, -1);
 	xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
 	down_read(&mp->m_peraglock);
 	mp->m_perag[tagno].pagi_freecount--;
@@ -882,7 +882,7 @@
 			if ((error = xfs_inobt_increment(cur, 0, &i)))
 				goto error0;
 		} while (i == 1);
-		ASSERT(freecount == INT_GET(agi->agi_freecount, ARCH_CONVERT) ||
+		ASSERT(freecount == be32_to_cpu(agi->agi_freecount) ||
 		       XFS_FORCED_SHUTDOWN(mp));
 	}
 #endif
@@ -970,8 +970,8 @@
 		return error;
 	}
 	agi = XFS_BUF_TO_AGI(agbp);
-	ASSERT(INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC);
-	ASSERT(agbno < INT_GET(agi->agi_length, ARCH_CONVERT));
+	ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC);
+	ASSERT(agbno < be32_to_cpu(agi->agi_length));
 	/*
 	 * Initialize the cursor.
 	 */
@@ -993,7 +993,7 @@
 					goto error0;
 			}
 		} while (i == 1);
-		ASSERT(freecount == INT_GET(agi->agi_freecount, ARCH_CONVERT) ||
+		ASSERT(freecount == be32_to_cpu(agi->agi_freecount) ||
 		       XFS_FORCED_SHUTDOWN(mp));
 	}
 #endif
@@ -1042,8 +1042,8 @@
 		 * to be freed when the transaction is committed.
 		 */
 		ilen = XFS_IALLOC_INODES(mp);
-		INT_MOD(agi->agi_count, ARCH_CONVERT, -ilen);
-		INT_MOD(agi->agi_freecount, ARCH_CONVERT, -(ilen - 1));
+		be32_add(&agi->agi_count, -ilen);
+		be32_add(&agi->agi_freecount, -(ilen - 1));
 		xfs_ialloc_log_agi(tp, agbp, XFS_AGI_COUNT | XFS_AGI_FREECOUNT);
 		down_read(&mp->m_peraglock);
 		mp->m_perag[agno].pagi_freecount -= ilen - 1;
@@ -1072,7 +1072,7 @@
 		/* 
 		 * Change the inode free counts and log the ag/sb changes.
 		 */
-		INT_MOD(agi->agi_freecount, ARCH_CONVERT, 1);
+		be32_add(&agi->agi_freecount, 1);
 		xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
 		down_read(&mp->m_peraglock);
 		mp->m_perag[agno].pagi_freecount++;
@@ -1098,7 +1098,7 @@
 					goto error0;
 			}
 		} while (i == 1);
-		ASSERT(freecount == INT_GET(agi->agi_freecount, ARCH_CONVERT) ||
+		ASSERT(freecount == be32_to_cpu(agi->agi_freecount) ||
 		       XFS_FORCED_SHUTDOWN(mp));
 	}
 #endif
@@ -1307,7 +1307,7 @@
 	xfs_agi_t		*agi;	/* allocation group header */
 
 	agi = XFS_BUF_TO_AGI(bp);
-	ASSERT(INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC);
+	ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC);
 #endif
 	/*
 	 * Compute byte offsets for the first and last fields.
@@ -1349,9 +1349,8 @@
 	 */
 	agi = XFS_BUF_TO_AGI(bp);
 	agi_ok =
-		INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC &&
-		XFS_AGI_GOOD_VERSION(
-			INT_GET(agi->agi_versionnum, ARCH_CONVERT));
+		be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC &&
+		XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum));
 	if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IALLOC_READ_AGI,
 			XFS_RANDOM_IALLOC_READ_AGI))) {
 		XFS_CORRUPTION_ERROR("xfs_ialloc_read_agi", XFS_ERRLEVEL_LOW,
@@ -1361,16 +1360,15 @@
 	}
 	pag = &mp->m_perag[agno];
 	if (!pag->pagi_init) {
-		pag->pagi_freecount = INT_GET(agi->agi_freecount, ARCH_CONVERT);
+		pag->pagi_freecount = be32_to_cpu(agi->agi_freecount);
 		pag->pagi_init = 1;
 	} else {
 		/*
 		 * It's possible for these to be out of sync if
 		 * we are in the middle of a forced shutdown.
 		 */
-		ASSERT(pag->pagi_freecount ==
-				INT_GET(agi->agi_freecount, ARCH_CONVERT)
-			|| XFS_FORCED_SHUTDOWN(mp));
+		ASSERT(pag->pagi_freecount == be32_to_cpu(agi->agi_freecount) ||
+			XFS_FORCED_SHUTDOWN(mp));
 	}
 
 #ifdef DEBUG
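
In-place counter updates in xfs_ialloc.c no longer use INT_MOD(); they go through
the be32_add()/be16_add() helpers used throughout this series.  Assuming the
usual inline definitions (a sketch of what the helpers do, not a quote of the
tree):

	static inline void be16_add(__be16 *a, __s16 b)
	{
		*a = cpu_to_be16(be16_to_cpu(*a) + b);
	}

	static inline void be32_add(__be32 *a, __s32 b)
	{
		*a = cpu_to_be32(be32_to_cpu(*a) + b);
	}

So be32_add(&agi->agi_freecount, -1) decrements the on-disk free-inode count in
place while keeping the field big-endian throughout.
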
diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c
index 6912143..60c65683 100644
--- a/fs/xfs/xfs_ialloc_btree.c
+++ b/fs/xfs/xfs_ialloc_btree.c
@@ -118,7 +118,7 @@
 	 * Fail if we're off the end of the block.
 	 */
 
-	numrecs = INT_GET(block->bb_numrecs, ARCH_CONVERT);
+	numrecs = be16_to_cpu(block->bb_numrecs);
 	if (ptr > numrecs) {
 		*stat = 0;
 		return 0;
@@ -133,7 +133,7 @@
 		pp = XFS_INOBT_PTR_ADDR(block, 1, cur);
 #ifdef DEBUG
 		for (i = ptr; i < numrecs; i++) {
-			if ((error = xfs_btree_check_sptr(cur, INT_GET(pp[i], ARCH_CONVERT), level)))
+			if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(pp[i]), level)))
 				return error;
 		}
 #endif
@@ -170,7 +170,7 @@
 	 * Decrement and log the number of entries in the block.
 	 */
 	numrecs--;
-	INT_SET(block->bb_numrecs, ARCH_CONVERT, numrecs);
+	block->bb_numrecs = cpu_to_be16(numrecs);
 	xfs_inobt_log_block(cur->bc_tp, bp, XFS_BB_NUMRECS);
 	/*
 	 * Is this the root level?  If so, we're almost done.
@@ -189,9 +189,9 @@
 			 * pp is still set to the first pointer in the block.
 			 * Make it the new root of the btree.
 			 */
-			bno = INT_GET(agi->agi_root, ARCH_CONVERT);
+			bno = be32_to_cpu(agi->agi_root);
 			agi->agi_root = *pp;
-			INT_MOD(agi->agi_level, ARCH_CONVERT, -1);
+			be32_add(&agi->agi_level, -1);
 			/*
 			 * Free the block.
 			 */
@@ -234,8 +234,8 @@
 	 * tree balanced.  Look at the left and right sibling blocks to
 	 * see if we can re-balance by moving only one record.
 	 */
-	rbno = INT_GET(block->bb_rightsib, ARCH_CONVERT);
-	lbno = INT_GET(block->bb_leftsib, ARCH_CONVERT);
+	rbno = be32_to_cpu(block->bb_rightsib);
+	lbno = be32_to_cpu(block->bb_leftsib);
 	bno = NULLAGBLOCK;
 	ASSERT(rbno != NULLAGBLOCK || lbno != NULLAGBLOCK);
 	/*
@@ -272,18 +272,18 @@
 		/*
 		 * Grab the current block number, for future use.
 		 */
-		bno = INT_GET(right->bb_leftsib, ARCH_CONVERT);
+		bno = be32_to_cpu(right->bb_leftsib);
 		/*
 		 * If right block is full enough so that removing one entry
 		 * won't make it too empty, and left-shifting an entry out
 		 * of right to us works, we're done.
 		 */
-		if (INT_GET(right->bb_numrecs, ARCH_CONVERT) - 1 >=
+		if (be16_to_cpu(right->bb_numrecs) - 1 >=
 		     XFS_INOBT_BLOCK_MINRECS(level, cur)) {
 			if ((error = xfs_inobt_lshift(tcur, level, &i)))
 				goto error0;
 			if (i) {
-				ASSERT(INT_GET(block->bb_numrecs, ARCH_CONVERT) >=
+				ASSERT(be16_to_cpu(block->bb_numrecs) >=
 				       XFS_INOBT_BLOCK_MINRECS(level, cur));
 				xfs_btree_del_cursor(tcur,
 						     XFS_BTREE_NOERROR);
@@ -300,7 +300,7 @@
 		 * future reference, and fix up the temp cursor to point
 		 * to our block again (last record).
 		 */
-		rrecs = INT_GET(right->bb_numrecs, ARCH_CONVERT);
+		rrecs = be16_to_cpu(right->bb_numrecs);
 		if (lbno != NULLAGBLOCK) {
 			xfs_btree_firstrec(tcur, level);
 			if ((error = xfs_inobt_decrement(tcur, level, &i)))
@@ -332,18 +332,18 @@
 		/*
 		 * Grab the current block number, for future use.
 		 */
-		bno = INT_GET(left->bb_rightsib, ARCH_CONVERT);
+		bno = be32_to_cpu(left->bb_rightsib);
 		/*
 		 * If left block is full enough so that removing one entry
 		 * won't make it too empty, and right-shifting an entry out
 		 * of left to us works, we're done.
 		 */
-		if (INT_GET(left->bb_numrecs, ARCH_CONVERT) - 1 >=
+		if (be16_to_cpu(left->bb_numrecs) - 1 >=
 		     XFS_INOBT_BLOCK_MINRECS(level, cur)) {
 			if ((error = xfs_inobt_rshift(tcur, level, &i)))
 				goto error0;
 			if (i) {
-				ASSERT(INT_GET(block->bb_numrecs, ARCH_CONVERT) >=
+				ASSERT(be16_to_cpu(block->bb_numrecs) >=
 				       XFS_INOBT_BLOCK_MINRECS(level, cur));
 				xfs_btree_del_cursor(tcur,
 						     XFS_BTREE_NOERROR);
@@ -357,7 +357,7 @@
 		 * Otherwise, grab the number of records in right for
 		 * future reference.
 		 */
-		lrecs = INT_GET(left->bb_numrecs, ARCH_CONVERT);
+		lrecs = be16_to_cpu(left->bb_numrecs);
 	}
 	/*
 	 * Delete the temp cursor, we're done with it.
@@ -378,14 +378,14 @@
 		 */
 		rbno = bno;
 		right = block;
-		rrecs = INT_GET(right->bb_numrecs, ARCH_CONVERT);
+		rrecs = be16_to_cpu(right->bb_numrecs);
 		rbp = bp;
 		if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
 				cur->bc_private.i.agno, lbno, 0, &lbp,
 				XFS_INO_BTREE_REF)))
 			return error;
 		left = XFS_BUF_TO_INOBT_BLOCK(lbp);
-		lrecs = INT_GET(left->bb_numrecs, ARCH_CONVERT);
+		lrecs = be16_to_cpu(left->bb_numrecs);
 		if ((error = xfs_btree_check_sblock(cur, left, level, lbp)))
 			return error;
 	}
@@ -400,14 +400,14 @@
 		 */
 		lbno = bno;
 		left = block;
-		lrecs = INT_GET(left->bb_numrecs, ARCH_CONVERT);
+		lrecs = be16_to_cpu(left->bb_numrecs);
 		lbp = bp;
 		if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
 				cur->bc_private.i.agno, rbno, 0, &rbp,
 				XFS_INO_BTREE_REF)))
 			return error;
 		right = XFS_BUF_TO_INOBT_BLOCK(rbp);
-		rrecs = INT_GET(right->bb_numrecs, ARCH_CONVERT);
+		rrecs = be16_to_cpu(right->bb_numrecs);
 		if ((error = xfs_btree_check_sblock(cur, right, level, rbp)))
 			return error;
 	}
@@ -435,7 +435,7 @@
 		rpp = XFS_INOBT_PTR_ADDR(right, 1, cur);
 #ifdef DEBUG
 		for (i = 0; i < rrecs; i++) {
-			if ((error = xfs_btree_check_sptr(cur, INT_GET(rpp[i], ARCH_CONVERT), level)))
+			if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(rpp[i]), level)))
 				return error;
 		}
 #endif
@@ -471,7 +471,7 @@
 	 * Fix up the number of records in the surviving block.
 	 */
 	lrecs += rrecs;
-	INT_SET(left->bb_numrecs, ARCH_CONVERT, lrecs);
+	left->bb_numrecs = cpu_to_be16(lrecs);
 	/*
 	 * Fix up the right block pointer in the surviving block, and log it.
 	 */
@@ -481,18 +481,18 @@
 	 * If there is a right sibling now, make it point to the
 	 * remaining block.
 	 */
-	if (INT_GET(left->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) {
+	if (be32_to_cpu(left->bb_rightsib) != NULLAGBLOCK) {
 		xfs_inobt_block_t	*rrblock;
 		xfs_buf_t		*rrbp;
 
 		if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
-				cur->bc_private.i.agno, INT_GET(left->bb_rightsib, ARCH_CONVERT), 0,
+				cur->bc_private.i.agno, be32_to_cpu(left->bb_rightsib), 0,
 				&rrbp, XFS_INO_BTREE_REF)))
 			return error;
 		rrblock = XFS_BUF_TO_INOBT_BLOCK(rrbp);
 		if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp)))
 			return error;
-		INT_SET(rrblock->bb_leftsib, ARCH_CONVERT, lbno);
+		rrblock->bb_leftsib = cpu_to_be32(lbno);
 		xfs_inobt_log_block(cur->bc_tp, rrbp, XFS_BB_LEFTSIB);
 	}
 	/*
@@ -584,7 +584,7 @@
 	 */
 	bp = cur->bc_bufs[level];
 	block = XFS_BUF_TO_INOBT_BLOCK(bp);
-	numrecs = INT_GET(block->bb_numrecs, ARCH_CONVERT);
+	numrecs = be16_to_cpu(block->bb_numrecs);
 #ifdef DEBUG
 	if ((error = xfs_btree_check_sblock(cur, block, level, bp)))
 		return error;
@@ -658,7 +658,7 @@
 	 * At this point we know there's room for our new entry in the block
 	 * we're pointing at.
 	 */
-	numrecs = INT_GET(block->bb_numrecs, ARCH_CONVERT);
+	numrecs = be16_to_cpu(block->bb_numrecs);
 	if (level > 0) {
 		/*
 		 * It's a non-leaf entry.  Make a hole for the new data
@@ -668,7 +668,7 @@
 		pp = XFS_INOBT_PTR_ADDR(block, 1, cur);
 #ifdef DEBUG
 		for (i = numrecs; i >= ptr; i--) {
-			if ((error = xfs_btree_check_sptr(cur, INT_GET(pp[i - 1], ARCH_CONVERT), level)))
+			if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(pp[i - 1]), level)))
 				return error;
 		}
 #endif
@@ -684,9 +684,9 @@
 			return error;
 #endif
 		kp[ptr - 1] = key; /* INT_: struct copy */
-		INT_SET(pp[ptr - 1], ARCH_CONVERT, *bnop);
+		pp[ptr - 1] = cpu_to_be32(*bnop);
 		numrecs++;
-		INT_SET(block->bb_numrecs, ARCH_CONVERT, numrecs);
+		block->bb_numrecs = cpu_to_be16(numrecs);
 		xfs_inobt_log_keys(cur, bp, ptr, numrecs);
 		xfs_inobt_log_ptrs(cur, bp, ptr, numrecs);
 	} else {
@@ -702,7 +702,7 @@
 		 */
 		rp[ptr - 1] = *recp; /* INT_: struct copy */
 		numrecs++;
-		INT_SET(block->bb_numrecs, ARCH_CONVERT, numrecs);
+		block->bb_numrecs = cpu_to_be16(numrecs);
 		xfs_inobt_log_recs(cur, bp, ptr, numrecs);
 	}
 	/*
@@ -857,8 +857,8 @@
 		xfs_agi_t	*agi;	/* a.g. inode header */
 
 		agi = XFS_BUF_TO_AGI(cur->bc_private.i.agbp);
-		agno = INT_GET(agi->agi_seqno, ARCH_CONVERT);
-		agbno = INT_GET(agi->agi_root, ARCH_CONVERT);
+		agno = be32_to_cpu(agi->agi_seqno);
+		agbno = be32_to_cpu(agi->agi_root);
 	}
 	/*
 	 * Iterate over each level in the btree, starting at the root.
@@ -925,7 +925,7 @@
 			 * Set low and high entry numbers, 1-based.
 			 */
 			low = 1;
-			if (!(high = INT_GET(block->bb_numrecs, ARCH_CONVERT))) {
+			if (!(high = be16_to_cpu(block->bb_numrecs))) {
 				/*
 				 * If the block is empty, the tree must
 				 * be an empty leaf.
@@ -992,7 +992,7 @@
 			 */
 			if (diff > 0 && --keyno < 1)
 				keyno = 1;
-			agbno = INT_GET(*XFS_INOBT_PTR_ADDR(block, keyno, cur), ARCH_CONVERT);
+			agbno = be32_to_cpu(*XFS_INOBT_PTR_ADDR(block, keyno, cur));
 #ifdef DEBUG
 			if ((error = xfs_btree_check_sptr(cur, agbno, level)))
 				return error;
@@ -1011,8 +1011,8 @@
 		 * not the last block, we're in the wrong block.
 		 */
 		if (dir == XFS_LOOKUP_GE &&
-		    keyno > INT_GET(block->bb_numrecs, ARCH_CONVERT) &&
-		    INT_GET(block->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) {
+		    keyno > be16_to_cpu(block->bb_numrecs) &&
+		    be32_to_cpu(block->bb_rightsib) != NULLAGBLOCK) {
 			int	i;
 
 			cur->bc_ptrs[0] = keyno;
@@ -1029,7 +1029,7 @@
 	/*
 	 * Return if we succeeded or not.
 	 */
-	if (keyno == 0 || keyno > INT_GET(block->bb_numrecs, ARCH_CONVERT))
+	if (keyno == 0 || keyno > be16_to_cpu(block->bb_numrecs))
 		*stat = 0;
 	else
 		*stat = ((dir != XFS_LOOKUP_EQ) || (diff == 0));
@@ -1075,7 +1075,7 @@
 	/*
 	 * If we've got no left sibling then we can't shift an entry left.
 	 */
-	if (INT_GET(right->bb_leftsib, ARCH_CONVERT) == NULLAGBLOCK) {
+	if (be32_to_cpu(right->bb_leftsib) == NULLAGBLOCK) {
 		*stat = 0;
 		return 0;
 	}
@@ -1091,8 +1091,8 @@
 	 * Set up the left neighbor as "left".
 	 */
 	if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
-			cur->bc_private.i.agno, INT_GET(right->bb_leftsib, ARCH_CONVERT), 0, &lbp,
-			XFS_INO_BTREE_REF)))
+			cur->bc_private.i.agno, be32_to_cpu(right->bb_leftsib),
+			0, &lbp, XFS_INO_BTREE_REF)))
 		return error;
 	left = XFS_BUF_TO_INOBT_BLOCK(lbp);
 	if ((error = xfs_btree_check_sblock(cur, left, level, lbp)))
@@ -1100,11 +1100,11 @@
 	/*
 	 * If it's full, it can't take another entry.
 	 */
-	if (INT_GET(left->bb_numrecs, ARCH_CONVERT) == XFS_INOBT_BLOCK_MAXRECS(level, cur)) {
+	if (be16_to_cpu(left->bb_numrecs) == XFS_INOBT_BLOCK_MAXRECS(level, cur)) {
 		*stat = 0;
 		return 0;
 	}
-	nrec = INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1;
+	nrec = be16_to_cpu(left->bb_numrecs) + 1;
 	/*
 	 * If non-leaf, copy a key and a ptr to the left block.
 	 */
@@ -1116,7 +1116,7 @@
 		lpp = XFS_INOBT_PTR_ADDR(left, nrec, cur);
 		rpp = XFS_INOBT_PTR_ADDR(right, 1, cur);
 #ifdef DEBUG
-		if ((error = xfs_btree_check_sptr(cur, INT_GET(*rpp, ARCH_CONVERT), level)))
+		if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(*rpp), level)))
 			return error;
 #endif
 		*lpp = *rpp; /* INT_: no-change copy */
@@ -1134,7 +1134,7 @@
 	/*
 	 * Bump and log left's numrecs, decrement and log right's numrecs.
 	 */
-	INT_MOD(left->bb_numrecs, ARCH_CONVERT, +1);
+	be16_add(&left->bb_numrecs, 1);
 	xfs_inobt_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS);
 #ifdef DEBUG
 	if (level > 0)
@@ -1142,26 +1142,26 @@
 	else
 		xfs_btree_check_rec(cur->bc_btnum, lrp - 1, lrp);
 #endif
-	INT_MOD(right->bb_numrecs, ARCH_CONVERT, -1);
+	be16_add(&right->bb_numrecs, -1);
 	xfs_inobt_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS);
 	/*
 	 * Slide the contents of right down one entry.
 	 */
 	if (level > 0) {
 #ifdef DEBUG
-		for (i = 0; i < INT_GET(right->bb_numrecs, ARCH_CONVERT); i++) {
-			if ((error = xfs_btree_check_sptr(cur, INT_GET(rpp[i + 1], ARCH_CONVERT),
+		for (i = 0; i < be16_to_cpu(right->bb_numrecs); i++) {
+			if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(rpp[i + 1]),
 					level)))
 				return error;
 		}
 #endif
-		memmove(rkp, rkp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp));
-		memmove(rpp, rpp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp));
-		xfs_inobt_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
-		xfs_inobt_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
+		memmove(rkp, rkp + 1, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp));
+		memmove(rpp, rpp + 1, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp));
+		xfs_inobt_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
+		xfs_inobt_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
 	} else {
-		memmove(rrp, rrp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp));
-		xfs_inobt_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
+		memmove(rrp, rrp + 1, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp));
+		xfs_inobt_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
 		key.ir_startino = rrp->ir_startino; /* INT_: direct copy */
 		rkp = &key;
 	}
@@ -1213,7 +1213,7 @@
 	args.tp = cur->bc_tp;
 	args.mp = cur->bc_mp;
 	args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.i.agno,
-		INT_GET(agi->agi_root, ARCH_CONVERT));
+		be32_to_cpu(agi->agi_root));
 	args.mod = args.minleft = args.alignment = args.total = args.wasdel =
 		args.isfl = args.userdata = args.minalignslop = 0;
 	args.minlen = args.maxlen = args.prod = 1;
@@ -1233,8 +1233,8 @@
 	/*
 	 * Set the root data in the a.g. inode structure.
 	 */
-	INT_SET(agi->agi_root, ARCH_CONVERT, args.agbno);
-	INT_MOD(agi->agi_level, ARCH_CONVERT, 1);
+	agi->agi_root = cpu_to_be32(args.agbno);
+	be32_add(&agi->agi_level, 1);
 	xfs_ialloc_log_agi(args.tp, cur->bc_private.i.agbp,
 		XFS_AGI_ROOT | XFS_AGI_LEVEL);
 	/*
@@ -1249,14 +1249,14 @@
 	if ((error = xfs_btree_check_sblock(cur, block, cur->bc_nlevels - 1, bp)))
 		return error;
 #endif
-	if (INT_GET(block->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) {
+	if (be32_to_cpu(block->bb_rightsib) != NULLAGBLOCK) {
 		/*
 		 * Our block is left, pick up the right block.
 		 */
 		lbp = bp;
 		lbno = XFS_DADDR_TO_AGBNO(args.mp, XFS_BUF_ADDR(lbp));
 		left = block;
-		rbno = INT_GET(left->bb_rightsib, ARCH_CONVERT);
+		rbno = be32_to_cpu(left->bb_rightsib);
 		if ((error = xfs_btree_read_bufs(args.mp, args.tp, args.agno,
 				rbno, 0, &rbp, XFS_INO_BTREE_REF)))
 			return error;
@@ -1273,7 +1273,7 @@
 		rbp = bp;
 		rbno = XFS_DADDR_TO_AGBNO(args.mp, XFS_BUF_ADDR(rbp));
 		right = block;
-		lbno = INT_GET(right->bb_leftsib, ARCH_CONVERT);
+		lbno = be32_to_cpu(right->bb_leftsib);
 		if ((error = xfs_btree_read_bufs(args.mp, args.tp, args.agno,
 				lbno, 0, &lbp, XFS_INO_BTREE_REF)))
 			return error;
@@ -1287,18 +1287,18 @@
 	/*
 	 * Fill in the new block's btree header and log it.
 	 */
-	INT_SET(new->bb_magic, ARCH_CONVERT, xfs_magics[cur->bc_btnum]);
-	INT_SET(new->bb_level, ARCH_CONVERT, (__uint16_t)cur->bc_nlevels);
-	INT_SET(new->bb_numrecs, ARCH_CONVERT, 2);
-	INT_SET(new->bb_leftsib, ARCH_CONVERT, NULLAGBLOCK);
-	INT_SET(new->bb_rightsib, ARCH_CONVERT, NULLAGBLOCK);
+	new->bb_magic = cpu_to_be32(xfs_magics[cur->bc_btnum]);
+	new->bb_level = cpu_to_be16(cur->bc_nlevels);
+	new->bb_numrecs = cpu_to_be16(2);
+	new->bb_leftsib = cpu_to_be32(NULLAGBLOCK);
+	new->bb_rightsib = cpu_to_be32(NULLAGBLOCK);
 	xfs_inobt_log_block(args.tp, nbp, XFS_BB_ALL_BITS);
 	ASSERT(lbno != NULLAGBLOCK && rbno != NULLAGBLOCK);
 	/*
 	 * Fill in the key data in the new root.
 	 */
 	kp = XFS_INOBT_KEY_ADDR(new, 1, cur);
-	if (INT_GET(left->bb_level, ARCH_CONVERT) > 0) {
+	if (be16_to_cpu(left->bb_level) > 0) {
 		kp[0] = *XFS_INOBT_KEY_ADDR(left, 1, cur); /* INT_: struct copy */
 		kp[1] = *XFS_INOBT_KEY_ADDR(right, 1, cur); /* INT_: struct copy */
 	} else {
@@ -1312,8 +1312,8 @@
 	 * Fill in the pointer data in the new root.
 	 */
 	pp = XFS_INOBT_PTR_ADDR(new, 1, cur);
-	INT_SET(pp[0], ARCH_CONVERT, lbno);
-	INT_SET(pp[1], ARCH_CONVERT, rbno);
+	pp[0] = cpu_to_be32(lbno);
+	pp[1] = cpu_to_be32(rbno);
 	xfs_inobt_log_ptrs(cur, nbp, 1, 2);
 	/*
 	 * Fix up the cursor.
@@ -1362,7 +1362,7 @@
 	/*
 	 * If we've got no right sibling then we can't shift an entry right.
 	 */
-	if (INT_GET(left->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK) {
+	if (be32_to_cpu(left->bb_rightsib) == NULLAGBLOCK) {
 		*stat = 0;
 		return 0;
 	}
@@ -1370,7 +1370,7 @@
 	 * If the cursor entry is the one that would be moved, don't
 	 * do it... it's too complicated.
 	 */
-	if (cur->bc_ptrs[level] >= INT_GET(left->bb_numrecs, ARCH_CONVERT)) {
+	if (cur->bc_ptrs[level] >= be16_to_cpu(left->bb_numrecs)) {
 		*stat = 0;
 		return 0;
 	}
@@ -1378,8 +1378,8 @@
 	 * Set up the right neighbor as "right".
 	 */
 	if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
-			cur->bc_private.i.agno, INT_GET(left->bb_rightsib, ARCH_CONVERT), 0, &rbp,
-			XFS_INO_BTREE_REF)))
+			cur->bc_private.i.agno, be32_to_cpu(left->bb_rightsib),
+			0, &rbp, XFS_INO_BTREE_REF)))
 		return error;
 	right = XFS_BUF_TO_INOBT_BLOCK(rbp);
 	if ((error = xfs_btree_check_sblock(cur, right, level, rbp)))
@@ -1387,7 +1387,7 @@
 	/*
 	 * If it's full, it can't take another entry.
 	 */
-	if (INT_GET(right->bb_numrecs, ARCH_CONVERT) == XFS_INOBT_BLOCK_MAXRECS(level, cur)) {
+	if (be16_to_cpu(right->bb_numrecs) == XFS_INOBT_BLOCK_MAXRECS(level, cur)) {
 		*stat = 0;
 		return 0;
 	}
@@ -1396,41 +1396,41 @@
 	 * copy the last left block entry to the hole.
 	 */
 	if (level > 0) {
-		lkp = XFS_INOBT_KEY_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur);
-		lpp = XFS_INOBT_PTR_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur);
+		lkp = XFS_INOBT_KEY_ADDR(left, be16_to_cpu(left->bb_numrecs), cur);
+		lpp = XFS_INOBT_PTR_ADDR(left, be16_to_cpu(left->bb_numrecs), cur);
 		rkp = XFS_INOBT_KEY_ADDR(right, 1, cur);
 		rpp = XFS_INOBT_PTR_ADDR(right, 1, cur);
 #ifdef DEBUG
-		for (i = INT_GET(right->bb_numrecs, ARCH_CONVERT) - 1; i >= 0; i--) {
-			if ((error = xfs_btree_check_sptr(cur, INT_GET(rpp[i], ARCH_CONVERT), level)))
+		for (i = be16_to_cpu(right->bb_numrecs) - 1; i >= 0; i--) {
+			if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(rpp[i]), level)))
 				return error;
 		}
 #endif
-		memmove(rkp + 1, rkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp));
-		memmove(rpp + 1, rpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp));
+		memmove(rkp + 1, rkp, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp));
+		memmove(rpp + 1, rpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp));
 #ifdef DEBUG
-		if ((error = xfs_btree_check_sptr(cur, INT_GET(*lpp, ARCH_CONVERT), level)))
+		if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(*lpp), level)))
 			return error;
 #endif
 		*rkp = *lkp; /* INT_: no change copy */
 		*rpp = *lpp; /* INT_: no change copy */
-		xfs_inobt_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1);
-		xfs_inobt_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1);
+		xfs_inobt_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1);
+		xfs_inobt_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1);
 	} else {
-		lrp = XFS_INOBT_REC_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur);
+		lrp = XFS_INOBT_REC_ADDR(left, be16_to_cpu(left->bb_numrecs), cur);
 		rrp = XFS_INOBT_REC_ADDR(right, 1, cur);
-		memmove(rrp + 1, rrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp));
+		memmove(rrp + 1, rrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp));
 		*rrp = *lrp;
-		xfs_inobt_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1);
+		xfs_inobt_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1);
 		key.ir_startino = rrp->ir_startino; /* INT_: direct copy */
 		rkp = &key;
 	}
 	/*
 	 * Decrement and log left's numrecs, bump and log right's numrecs.
 	 */
-	INT_MOD(left->bb_numrecs, ARCH_CONVERT, -1);
+	be16_add(&left->bb_numrecs, -1);
 	xfs_inobt_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS);
-	INT_MOD(right->bb_numrecs, ARCH_CONVERT, +1);
+	be16_add(&right->bb_numrecs, 1);
 #ifdef DEBUG
 	if (level > 0)
 		xfs_btree_check_key(cur->bc_btnum, rkp, rkp + 1);
@@ -1522,17 +1522,17 @@
 	/*
 	 * Fill in the btree header for the new block.
 	 */
-	INT_SET(right->bb_magic, ARCH_CONVERT, xfs_magics[cur->bc_btnum]);
-	right->bb_level = left->bb_level; /* INT_: direct copy */
-	INT_SET(right->bb_numrecs, ARCH_CONVERT, (__uint16_t)(INT_GET(left->bb_numrecs, ARCH_CONVERT) / 2));
+	right->bb_magic = cpu_to_be32(xfs_magics[cur->bc_btnum]);
+	right->bb_level = left->bb_level;
+	right->bb_numrecs = cpu_to_be16(be16_to_cpu(left->bb_numrecs) / 2);
 	/*
 	 * Make sure that if there's an odd number of entries now, that
 	 * each new block will have the same number of entries.
 	 */
-	if ((INT_GET(left->bb_numrecs, ARCH_CONVERT) & 1) &&
-	    cur->bc_ptrs[level] <= INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1)
-		INT_MOD(right->bb_numrecs, ARCH_CONVERT, +1);
-	i = INT_GET(left->bb_numrecs, ARCH_CONVERT) - INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1;
+	if ((be16_to_cpu(left->bb_numrecs) & 1) &&
+	    cur->bc_ptrs[level] <= be16_to_cpu(right->bb_numrecs) + 1)
+		be16_add(&right->bb_numrecs, 1);
+	i = be16_to_cpu(left->bb_numrecs) - be16_to_cpu(right->bb_numrecs) + 1;
 	/*
 	 * For non-leaf blocks, copy keys and addresses over to the new block.
 	 */
@@ -1542,15 +1542,15 @@
 		rkp = XFS_INOBT_KEY_ADDR(right, 1, cur);
 		rpp = XFS_INOBT_PTR_ADDR(right, 1, cur);
 #ifdef DEBUG
-		for (i = 0; i < INT_GET(right->bb_numrecs, ARCH_CONVERT); i++) {
-			if ((error = xfs_btree_check_sptr(cur, INT_GET(lpp[i], ARCH_CONVERT), level)))
+		for (i = 0; i < be16_to_cpu(right->bb_numrecs); i++) {
+			if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(lpp[i]), level)))
 				return error;
 		}
 #endif
-		memcpy(rkp, lkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp));
-		memcpy(rpp, lpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp));
-		xfs_inobt_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
-		xfs_inobt_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
+		memcpy(rkp, lkp, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp));
+		memcpy(rpp, lpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp));
+		xfs_inobt_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
+		xfs_inobt_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
 		*keyp = *rkp;
 	}
 	/*
@@ -1559,36 +1559,36 @@
 	else {
 		lrp = XFS_INOBT_REC_ADDR(left, i, cur);
 		rrp = XFS_INOBT_REC_ADDR(right, 1, cur);
-		memcpy(rrp, lrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp));
-		xfs_inobt_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT));
+		memcpy(rrp, lrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp));
+		xfs_inobt_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
 		keyp->ir_startino = rrp->ir_startino; /* INT_: direct copy */
 	}
 	/*
 	 * Find the left block number by looking in the buffer.
 	 * Adjust numrecs, sibling pointers.
 	 */
-	INT_MOD(left->bb_numrecs, ARCH_CONVERT, -(INT_GET(right->bb_numrecs, ARCH_CONVERT)));
-	right->bb_rightsib = left->bb_rightsib; /* INT_: direct copy */
-	INT_SET(left->bb_rightsib, ARCH_CONVERT, args.agbno);
-	INT_SET(right->bb_leftsib, ARCH_CONVERT, lbno);
+	be16_add(&left->bb_numrecs, -(be16_to_cpu(right->bb_numrecs)));
+	right->bb_rightsib = left->bb_rightsib;
+	left->bb_rightsib = cpu_to_be32(args.agbno);
+	right->bb_leftsib = cpu_to_be32(lbno);
 	xfs_inobt_log_block(args.tp, rbp, XFS_BB_ALL_BITS);
 	xfs_inobt_log_block(args.tp, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
 	/*
 	 * If there's a block to the new block's right, make that block
 	 * point back to right instead of to left.
 	 */
-	if (INT_GET(right->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) {
+	if (be32_to_cpu(right->bb_rightsib) != NULLAGBLOCK) {
 		xfs_inobt_block_t	*rrblock;	/* rr btree block */
 		xfs_buf_t		*rrbp;		/* buffer for rrblock */
 
 		if ((error = xfs_btree_read_bufs(args.mp, args.tp, args.agno,
-				INT_GET(right->bb_rightsib, ARCH_CONVERT), 0, &rrbp,
+				be32_to_cpu(right->bb_rightsib), 0, &rrbp,
 				XFS_INO_BTREE_REF)))
 			return error;
 		rrblock = XFS_BUF_TO_INOBT_BLOCK(rrbp);
 		if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp)))
 			return error;
-		INT_SET(rrblock->bb_leftsib, ARCH_CONVERT, args.agbno);
+		rrblock->bb_leftsib = cpu_to_be32(args.agbno);
 		xfs_inobt_log_block(args.tp, rrbp, XFS_BB_LEFTSIB);
 	}
 	/*
@@ -1596,9 +1596,9 @@
 	 * If it's just pointing past the last entry in left, then we'll
 	 * insert there, so don't change anything in that case.
 	 */
-	if (cur->bc_ptrs[level] > INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1) {
+	if (cur->bc_ptrs[level] > be16_to_cpu(left->bb_numrecs) + 1) {
 		xfs_btree_setbuf(cur, level, rbp);
-		cur->bc_ptrs[level] -= INT_GET(left->bb_numrecs, ARCH_CONVERT);
+		cur->bc_ptrs[level] -= be16_to_cpu(left->bb_numrecs);
 	}
 	/*
 	 * If there are more levels, we'll need another cursor which refers
@@ -1696,7 +1696,7 @@
 	/*
 	 * If we just went off the left edge of the tree, return failure.
 	 */
-	if (INT_GET(block->bb_leftsib, ARCH_CONVERT) == NULLAGBLOCK) {
+	if (be32_to_cpu(block->bb_leftsib) == NULLAGBLOCK) {
 		*stat = 0;
 		return 0;
 	}
@@ -1725,7 +1725,7 @@
 		xfs_agblock_t	agbno;	/* block number of btree block */
 		xfs_buf_t	*bp;	/* buffer containing btree block */
 
-		agbno = INT_GET(*XFS_INOBT_PTR_ADDR(block, cur->bc_ptrs[lev], cur), ARCH_CONVERT);
+		agbno = be32_to_cpu(*XFS_INOBT_PTR_ADDR(block, cur->bc_ptrs[lev], cur));
 		if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
 				cur->bc_private.i.agno, agbno, 0, &bp,
 				XFS_INO_BTREE_REF)))
@@ -1735,7 +1735,7 @@
 		block = XFS_BUF_TO_INOBT_BLOCK(bp);
 		if ((error = xfs_btree_check_sblock(cur, block, lev, bp)))
 			return error;
-		cur->bc_ptrs[lev] = INT_GET(block->bb_numrecs, ARCH_CONVERT);
+		cur->bc_ptrs[lev] = be16_to_cpu(block->bb_numrecs);
 	}
 	*stat = 1;
 	return 0;
@@ -1807,7 +1807,7 @@
 	/*
 	 * Off the right end or left end, return failure.
 	 */
-	if (ptr > INT_GET(block->bb_numrecs, ARCH_CONVERT) || ptr <= 0) {
+	if (ptr > be16_to_cpu(block->bb_numrecs) || ptr <= 0) {
 		*stat = 0;
 		return 0;
 	}
@@ -1855,14 +1855,14 @@
 	 * Increment the ptr at this level.  If we're still in the block
 	 * then we're done.
 	 */
-	if (++cur->bc_ptrs[level] <= INT_GET(block->bb_numrecs, ARCH_CONVERT)) {
+	if (++cur->bc_ptrs[level] <= be16_to_cpu(block->bb_numrecs)) {
 		*stat = 1;
 		return 0;
 	}
 	/*
 	 * If we just went off the right edge of the tree, return failure.
 	 */
-	if (INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK) {
+	if (be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK) {
 		*stat = 0;
 		return 0;
 	}
@@ -1877,7 +1877,7 @@
 		if ((error = xfs_btree_check_sblock(cur, block, lev, bp)))
 			return error;
 #endif
-		if (++cur->bc_ptrs[lev] <= INT_GET(block->bb_numrecs, ARCH_CONVERT))
+		if (++cur->bc_ptrs[lev] <= be16_to_cpu(block->bb_numrecs))
 			break;
 		/*
 		 * Read-ahead the right block, we're going to read it
@@ -1897,7 +1897,7 @@
 	     lev > level; ) {
 		xfs_agblock_t	agbno;	/* block number of btree block */
 
-		agbno = INT_GET(*XFS_INOBT_PTR_ADDR(block, cur->bc_ptrs[lev], cur), ARCH_CONVERT);
+		agbno = be32_to_cpu(*XFS_INOBT_PTR_ADDR(block, cur->bc_ptrs[lev], cur));
 		if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
 				cur->bc_private.i.agno, agbno, 0, &bp,
 				XFS_INO_BTREE_REF)))
diff --git a/fs/xfs/xfs_ialloc_btree.h b/fs/xfs/xfs_ialloc_btree.h
index 86ed749..ae3904c 100644
--- a/fs/xfs/xfs_ialloc_btree.h
+++ b/fs/xfs/xfs_ialloc_btree.h
@@ -62,8 +62,10 @@
 	xfs_agino_t	ir_startino;	/* starting inode number */
 } xfs_inobt_key_t;
 
-typedef xfs_agblock_t xfs_inobt_ptr_t;	/* btree pointer type */
-					/* btree block header type */
+/* btree pointer type */
+typedef __be32 xfs_inobt_ptr_t;
+
+/* btree block header type */
 typedef	struct xfs_btree_sblock xfs_inobt_block_t;
 
 #define	XFS_BUF_TO_INOBT_BLOCK(bp)	((xfs_inobt_block_t *)XFS_BUF_PTR(bp))
@@ -86,7 +88,7 @@
 #define	XFS_INOBT_BLOCK_MAXRECS(lev,cur) ((cur)->bc_mp->m_inobt_mxr[lev != 0])
 #define	XFS_INOBT_BLOCK_MINRECS(lev,cur) ((cur)->bc_mp->m_inobt_mnr[lev != 0])
 #define	XFS_INOBT_IS_LAST_REC(cur)	\
-	((cur)->bc_ptrs[0] == INT_GET(XFS_BUF_TO_INOBT_BLOCK((cur)->bc_bufs[0])->bb_numrecs, ARCH_CONVERT))
+	((cur)->bc_ptrs[0] == be16_to_cpu(XFS_BUF_TO_INOBT_BLOCK((cur)->bc_bufs[0])->bb_numrecs))
 
 /*
  * Maximum number of inode btree levels.
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 281cbd4..df0d457 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1864,8 +1864,8 @@
 	 */
 	agi = XFS_BUF_TO_AGI(agibp);
 	agi_ok =
-		INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC &&
-		XFS_AGI_GOOD_VERSION(INT_GET(agi->agi_versionnum, ARCH_CONVERT));
+		be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC &&
+		XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum));
 	if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IUNLINK,
 			XFS_RANDOM_IUNLINK))) {
 		XFS_CORRUPTION_ERROR("xfs_iunlink", XFS_ERRLEVEL_LOW, mp, agi);
@@ -1880,9 +1880,9 @@
 	ASSERT(agino != 0);
 	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
 	ASSERT(agi->agi_unlinked[bucket_index]);
-	ASSERT(INT_GET(agi->agi_unlinked[bucket_index], ARCH_CONVERT) != agino);
+	ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino);
 
-	if (INT_GET(agi->agi_unlinked[bucket_index], ARCH_CONVERT) != NULLAGINO) {
+	if (be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO) {
 		/*
 		 * There is already another inode in the bucket we need
 		 * to add ourselves to.  Add us at the front of the list.
@@ -1909,7 +1909,7 @@
 	 * Point the bucket head pointer at the inode being inserted.
 	 */
 	ASSERT(agino != 0);
-	INT_SET(agi->agi_unlinked[bucket_index], ARCH_CONVERT, agino);
+	agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
 	offset = offsetof(xfs_agi_t, agi_unlinked) +
 		(sizeof(xfs_agino_t) * bucket_index);
 	xfs_trans_log_buf(tp, agibp, offset,
@@ -1967,8 +1967,8 @@
 	 */
 	agi = XFS_BUF_TO_AGI(agibp);
 	agi_ok =
-		INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC &&
-		XFS_AGI_GOOD_VERSION(INT_GET(agi->agi_versionnum, ARCH_CONVERT));
+		be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC &&
+		XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum));
 	if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IUNLINK_REMOVE,
 			XFS_RANDOM_IUNLINK_REMOVE))) {
 		XFS_CORRUPTION_ERROR("xfs_iunlink_remove", XFS_ERRLEVEL_LOW,
@@ -1986,10 +1986,10 @@
 	agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
 	ASSERT(agino != 0);
 	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
-	ASSERT(INT_GET(agi->agi_unlinked[bucket_index], ARCH_CONVERT) != NULLAGINO);
+	ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO);
 	ASSERT(agi->agi_unlinked[bucket_index]);
 
-	if (INT_GET(agi->agi_unlinked[bucket_index], ARCH_CONVERT) == agino) {
+	if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) {
 		/*
 		 * We're at the head of the list.  Get the inode's
 		 * on-disk buffer to see if there is anyone after us
@@ -2023,7 +2023,7 @@
 		 */
 		ASSERT(next_agino != 0);
 		ASSERT(next_agino != agino);
-		INT_SET(agi->agi_unlinked[bucket_index], ARCH_CONVERT, next_agino);
+		agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
 		offset = offsetof(xfs_agi_t, agi_unlinked) +
 			(sizeof(xfs_agino_t) * bucket_index);
 		xfs_trans_log_buf(tp, agibp, offset,
@@ -2032,7 +2032,7 @@
 		/*
 		 * We need to search the list for the inode being freed.
 		 */
-		next_agino = INT_GET(agi->agi_unlinked[bucket_index], ARCH_CONVERT);
+		next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
 		last_ibp = NULL;
 		while (next_agino != agino) {
 			/*
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index a4d186d..f63646e 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -448,7 +448,7 @@
 			while (error) {
 				agino += XFS_INODES_PER_CHUNK;
 				if (XFS_AGINO_TO_AGBNO(mp, agino) >=
-						INT_GET(agi->agi_length, ARCH_CONVERT))
+						be32_to_cpu(agi->agi_length))
 					break;
 				error = xfs_inobt_lookup_ge(cur, agino, 0, 0,
 							    &tmp);
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 6326898..8ab7df7 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -3160,13 +3160,12 @@
 	}
 
 	agi = XFS_BUF_TO_AGI(agibp);
-	if (INT_GET(agi->agi_magicnum, ARCH_CONVERT) != XFS_AGI_MAGIC) {
+	if (be32_to_cpu(agi->agi_magicnum) != XFS_AGI_MAGIC) {
 		xfs_trans_cancel(tp, XFS_TRANS_ABORT);
 		return;
 	}
-	ASSERT(INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC);
 
-	INT_SET(agi->agi_unlinked[bucket], ARCH_CONVERT, NULLAGINO);
+	agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
 	offset = offsetof(xfs_agi_t, agi_unlinked) +
 		 (sizeof(xfs_agino_t) * bucket);
 	xfs_trans_log_buf(tp, agibp, offset,
@@ -3225,12 +3224,11 @@
 				XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)));
 		}
 		agi = XFS_BUF_TO_AGI(agibp);
-		ASSERT(XFS_AGI_MAGIC ==
-			INT_GET(agi->agi_magicnum, ARCH_CONVERT));
+		ASSERT(XFS_AGI_MAGIC == be32_to_cpu(agi->agi_magicnum));
 
 		for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
 
-			agino = INT_GET(agi->agi_unlinked[bucket], ARCH_CONVERT);
+			agino = be32_to_cpu(agi->agi_unlinked[bucket]);
 			while (agino != NULLAGINO) {
 
 				/*
@@ -3318,8 +3316,8 @@
 							XFS_AGI_DADDR(mp)));
 				}
 				agi = XFS_BUF_TO_AGI(agibp);
-				ASSERT(XFS_AGI_MAGIC == INT_GET(
-					agi->agi_magicnum, ARCH_CONVERT));
+				ASSERT(XFS_AGI_MAGIC == be32_to_cpu(
+					agi->agi_magicnum));
 			}
 		}
 
@@ -4022,14 +4020,12 @@
 						mp, agfbp, agfdaddr);
 		}
 		agfp = XFS_BUF_TO_AGF(agfbp);
-		ASSERT(XFS_AGF_MAGIC ==
-			INT_GET(agfp->agf_magicnum, ARCH_CONVERT));
-		ASSERT(XFS_AGF_GOOD_VERSION(
-			INT_GET(agfp->agf_versionnum, ARCH_CONVERT)));
-		ASSERT(INT_GET(agfp->agf_seqno, ARCH_CONVERT) == agno);
+		ASSERT(XFS_AGF_MAGIC == be32_to_cpu(agfp->agf_magicnum));
+		ASSERT(XFS_AGF_GOOD_VERSION(be32_to_cpu(agfp->agf_versionnum)));
+		ASSERT(be32_to_cpu(agfp->agf_seqno) == agno);
 
-		freeblks += INT_GET(agfp->agf_freeblks, ARCH_CONVERT) +
-			    INT_GET(agfp->agf_flcount, ARCH_CONVERT);
+		freeblks += be32_to_cpu(agfp->agf_freeblks) +
+			    be32_to_cpu(agfp->agf_flcount);
 		xfs_buf_relse(agfbp);
 
 		agidaddr = XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp));
@@ -4040,14 +4036,12 @@
 					  mp, agibp, agidaddr);
 		}
 		agip = XFS_BUF_TO_AGI(agibp);
-		ASSERT(XFS_AGI_MAGIC ==
-			INT_GET(agip->agi_magicnum, ARCH_CONVERT));
-		ASSERT(XFS_AGI_GOOD_VERSION(
-			INT_GET(agip->agi_versionnum, ARCH_CONVERT)));
-		ASSERT(INT_GET(agip->agi_seqno, ARCH_CONVERT) == agno);
+		ASSERT(XFS_AGI_MAGIC == be32_to_cpu(agip->agi_magicnum));
+		ASSERT(XFS_AGI_GOOD_VERSION(be32_to_cpu(agip->agi_versionnum)));
+		ASSERT(be32_to_cpu(agip->agi_seqno) == agno);
 
-		itotal += INT_GET(agip->agi_count, ARCH_CONVERT);
-		ifree += INT_GET(agip->agi_freecount, ARCH_CONVERT);
+		itotal += be32_to_cpu(agip->agi_count);
+		ifree += be32_to_cpu(agip->agi_freecount);
 		xfs_buf_relse(agibp);
 	}
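
The payoff of typing these fields __be32/__be16 is that sparse can verify the
conversions.  Running sparse with endian checking enabled (roughly, make C=2
CF=-D__CHECK_ENDIAN__ on kernels of this vintage) flags any assignment that
mixes a native-endian integer with an annotated on-disk field; for example,
with a hypothetical native-endian count in "value":

	agf->agf_flcount = 0;			/* fine: zero is byte-order invariant */
	agf->agf_flcount = value;		/* sparse warns: __be32 assigned from int */
	agf->agf_flcount = cpu_to_be32(value);	/* correct */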