Btrfs: unaligned access fixes

Btrfs set/get macros lose type information needed to avoid
unaligned accesses on sparc64.
Here is a patch for the kernel bits which fixes most of those
unaligned accesses.
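
As an illustration (not part of the patch), a minimal sketch of the
difference, using a made-up packed structure in the style of the btrfs
on-disk items:

	struct demo_item {
		u8	pad;
		__le64	size;			/* offset 1: unaligned */
	} __attribute__ ((__packed__));

	/* old style: the cast throws away the packed attribute, so the
	 * compiler emits a plain 64-bit load that can trap on sparc64 */
	static u64 read_size_cast(char *kaddr)
	{
		__le64 *tmp = (__le64 *)(kaddr + offsetof(struct demo_item, size));
		return le64_to_cpu(*tmp);
	}

	/* new style: member access through the packed type lets the
	 * compiler generate alignment-safe accesses */
	static u64 read_size_member(char *kaddr)
	{
		struct demo_item *p = (struct demo_item *)kaddr;
		return le64_to_cpu(p->size);
	}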

btrfs_name_hash is modified to return the hash value instead of
filling in a result location via a (potentially unaligned)
pointer.
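
On the caller side this turns the old pattern (sketch only; struct
btrfs_key is packed, so &key.offset may be an unaligned address):

	ret = btrfs_name_hash(name, name_len, &key.offset);
	BUG_ON(ret);

into a plain assignment through the packed struct member, which the
compiler can lower to alignment-safe stores:

	key.offset = btrfs_name_hash(name, name_len);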

Signed-off-by: Chris Mason <chris.mason@oracle.com>
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 52144b0..086cb05 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -495,22 +495,17 @@
 #define BTRFS_SETGET_HEADER_FUNCS(name, type, member, bits)		\
 static inline u##bits btrfs_##name(struct extent_buffer *eb)		\
 {									\
-	char *kaddr = kmap_atomic(eb->first_page, KM_USER0);		\
-	unsigned long offset = offsetof(type, member);			\
-	u##bits res;							\
-	__le##bits *tmp = (__le##bits *)(kaddr + offset);		\
-	res = le##bits##_to_cpu(*tmp);					\
-	kunmap_atomic(kaddr, KM_USER0);					\
+	type *p = kmap_atomic(eb->first_page, KM_USER0);		\
+	u##bits res = le##bits##_to_cpu(p->member);			\
+	kunmap_atomic(p, KM_USER0);					\
 	return res;							\
 }									\
 static inline void btrfs_set_##name(struct extent_buffer *eb,		\
 				    u##bits val)			\
 {									\
-	char *kaddr = kmap_atomic(eb->first_page, KM_USER0);		\
-	unsigned long offset = offsetof(type, member);			\
-	__le##bits *tmp = (__le##bits *)(kaddr + offset);		\
-	*tmp = cpu_to_le##bits(val);					\
-	kunmap_atomic(kaddr, KM_USER0);					\
+	type *p = kmap_atomic(eb->first_page, KM_USER0);		\
+	p->member = cpu_to_le##bits(val);				\
+	kunmap_atomic(p, KM_USER0);					\
 }
 
 #define BTRFS_SETGET_STACK_FUNCS(name, type, member, bits)		\
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index 5247a9a..7a73dc5 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -71,8 +71,7 @@
 
 	key.objectid = dir;
 	btrfs_set_key_type(&key, BTRFS_XATTR_ITEM_KEY);
-	ret = btrfs_name_hash(name, name_len, &key.offset);
-	BUG_ON(ret);
+	key.offset = btrfs_name_hash(name, name_len);
 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
@@ -125,8 +124,7 @@
 
 	key.objectid = dir;
 	btrfs_set_key_type(&key, BTRFS_DIR_ITEM_KEY);
-	ret = btrfs_name_hash(name, name_len, &key.offset);
-	BUG_ON(ret);
+	key.offset = btrfs_name_hash(name, name_len);
 	path = btrfs_alloc_path();
 	data_size = sizeof(*dir_item) + name_len;
 	dir_item = insert_with_overflow(trans, root, path, &key, data_size,
@@ -199,8 +197,7 @@
 	key.objectid = dir;
 	btrfs_set_key_type(&key, BTRFS_DIR_ITEM_KEY);
 
-	ret = btrfs_name_hash(name, name_len, &key.offset);
-	BUG_ON(ret);
+	key.offset = btrfs_name_hash(name, name_len);
 
 	ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow);
 	if (ret < 0)
@@ -261,8 +258,7 @@
 
 	key.objectid = dir;
 	btrfs_set_key_type(&key, BTRFS_XATTR_ITEM_KEY);
-	ret = btrfs_name_hash(name, name_len, &key.offset);
-	BUG_ON(ret);
+	key.offset = btrfs_name_hash(name, name_len);
 	ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow);
 	if (ret < 0)
 		return ERR_PTR(ret);
diff --git a/fs/btrfs/hash.c b/fs/btrfs/hash.c
index e5c7690..d5252f5 100644
--- a/fs/btrfs/hash.c
+++ b/fs/btrfs/hash.c
@@ -76,19 +76,18 @@
 		*buf++ = pad;
 }
 
-int btrfs_name_hash(const char *name, int len, u64 *hash_result)
+u64 btrfs_name_hash(const char *name, int len)
 {
 	__u32	hash;
 	__u32	minor_hash = 0;
 	const char	*p;
 	__u32		in[8], buf[2];
+	u64		hash_result;
 
 	if (len == 1 && *name == '.') {
-		*hash_result = 1;
-		return 0;
+		return 1;
 	} else if (len == 2 && name[0] == '.' && name[1] == '.') {
-		*hash_result = 2;
-		return 0;
+		return 2;
 	}
 
 	/* Initialize the default seed for the hash checksum functions */
@@ -106,8 +105,8 @@
 	}
 	hash = buf[0];
 	minor_hash = buf[1];
-	*hash_result = buf[0];
-	*hash_result <<= 32;
-	*hash_result |= buf[1];
-	return 0;
+	hash_result = buf[0];
+	hash_result <<= 32;
+	hash_result |= buf[1];
+	return hash_result;
 }
diff --git a/fs/btrfs/hash.h b/fs/btrfs/hash.h
index d3be026..868ee17 100644
--- a/fs/btrfs/hash.h
+++ b/fs/btrfs/hash.h
@@ -18,5 +18,5 @@
 
 #ifndef __HASH__
 #define __HASH__
-int btrfs_name_hash(const char *name, int len, u64 *hash_result);
+u64 btrfs_name_hash(const char *name, int len);
 #endif
diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c
index c5715a6..ad03a32 100644
--- a/fs/btrfs/struct-funcs.c
+++ b/fs/btrfs/struct-funcs.c
@@ -21,16 +21,15 @@
 u##bits btrfs_##name(struct extent_buffer *eb,				\
 				   type *s)				\
 {									\
-	unsigned long offset = (unsigned long)s +			\
-				offsetof(type, member);			\
-	__le##bits *tmp;						\
+	unsigned long part_offset = (unsigned long)s;			\
+	unsigned long offset = part_offset + offsetof(type, member);	\
+	type *p;							\
 	/* ugly, but we want the fast path here */			\
 	if (eb->map_token && offset >= eb->map_start &&			\
 	    offset + sizeof(((type *)0)->member) <= eb->map_start +	\
 	    eb->map_len) {						\
-		tmp = (__le##bits *)(eb->kaddr + offset -		\
-				     eb->map_start);			\
-		return le##bits##_to_cpu(*tmp);				\
+		p = (type *)(eb->kaddr + part_offset - eb->map_start);	\
+		return le##bits##_to_cpu(p->member);			\
 	}								\
 	{								\
 		int err;						\
@@ -48,8 +47,8 @@
 			read_eb_member(eb, s, type, member, &res);	\
 			return le##bits##_to_cpu(res);			\
 		}							\
-		tmp = (__le##bits *)(kaddr + offset - map_start);	\
-		res = le##bits##_to_cpu(*tmp);				\
+		p = (type *)(kaddr + part_offset - map_start);		\
+		res = le##bits##_to_cpu(p->member);			\
 		if (unmap_on_exit)					\
 			unmap_extent_buffer(eb, map_token, KM_USER1);	\
 		return res;						\
@@ -58,16 +57,15 @@
 void btrfs_set_##name(struct extent_buffer *eb,				\
 				    type *s, u##bits val)		\
 {									\
-	unsigned long offset = (unsigned long)s +			\
-				offsetof(type, member);			\
-	__le##bits *tmp;						\
+	unsigned long part_offset = (unsigned long)s;			\
+	unsigned long offset = part_offset + offsetof(type, member);	\
+	type *p;							\
 	/* ugly, but we want the fast path here */			\
 	if (eb->map_token && offset >= eb->map_start &&			\
 	    offset + sizeof(((type *)0)->member) <= eb->map_start +	\
 	    eb->map_len) {						\
-		tmp = (__le##bits *)(eb->kaddr + offset -		\
-				     eb->map_start);			\
-		*tmp = cpu_to_le##bits(val);				\
+		p = (type *)(eb->kaddr + part_offset - eb->map_start);	\
+		p->member = cpu_to_le##bits(val);			\
 		return;							\
 	}								\
 	{								\
@@ -86,8 +84,8 @@
 			write_eb_member(eb, s, type, member, &val);	\
 			return;						\
 		}							\
-		tmp = (__le##bits *)(kaddr + offset - map_start);	\
-		*tmp = cpu_to_le##bits(val);				\
+		p = (type *)(kaddr + part_offset - map_start);		\
+		p->member = cpu_to_le##bits(val);			\
 		if (unmap_on_exit)					\
 			unmap_extent_buffer(eb, map_token, KM_USER1);	\
 	}								\