hfsplus: over 80 character lines clean-up

Wrap the lines for which checkpatch.pl reported over-80-character
warnings, so that the code matches the coding style line length limit.
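
The warnings in question are of the following form (illustrative
checkpatch.pl invocation, not part of the original report; exact
output depends on the tree):

    $ scripts/checkpatch.pl -f fs/hfsplus/bnode.c
    WARNING: line over 80 characters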

Signed-off-by: Anton Salikhmetov <alexo@tuxera.com>
Signed-off-by: Christoph Hellwig <hch@tuxera.com>
diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
index c8aa165..cf7dc8d 100644
--- a/fs/hfsplus/bnode.c
+++ b/fs/hfsplus/bnode.c
@@ -212,7 +212,8 @@
 				dst_page--;
 			}
 			src -= len;
-			memmove(kmap(*dst_page) + src, kmap(*src_page) + src, len);
+			memmove(kmap(*dst_page) + src,
+				kmap(*src_page) + src, len);
 			kunmap(*src_page);
 			set_page_dirty(*dst_page);
 			kunmap(*dst_page);
@@ -250,14 +251,16 @@
 
 		if (src == dst) {
 			l = min(len, (int)PAGE_CACHE_SIZE - src);
-			memmove(kmap(*dst_page) + src, kmap(*src_page) + src, l);
+			memmove(kmap(*dst_page) + src,
+				kmap(*src_page) + src, l);
 			kunmap(*src_page);
 			set_page_dirty(*dst_page);
 			kunmap(*dst_page);
 
 			while ((len -= l) != 0) {
 				l = min(len, (int)PAGE_CACHE_SIZE);
-				memmove(kmap(*++dst_page), kmap(*++src_page), l);
+				memmove(kmap(*++dst_page),
+					kmap(*++src_page), l);
 				kunmap(*src_page);
 				set_page_dirty(*dst_page);
 				kunmap(*dst_page);
@@ -268,7 +271,8 @@
 			do {
 				src_ptr = kmap(*src_page) + src;
 				dst_ptr = kmap(*dst_page) + dst;
-				if (PAGE_CACHE_SIZE - src < PAGE_CACHE_SIZE - dst) {
+				if (PAGE_CACHE_SIZE - src <
+						PAGE_CACHE_SIZE - dst) {
 					l = PAGE_CACHE_SIZE - src;
 					src = 0;
 					dst += l;
@@ -340,7 +344,8 @@
 			return;
 		tmp->next = node->next;
 		cnid = cpu_to_be32(tmp->next);
-		hfs_bnode_write(tmp, &cnid, offsetof(struct hfs_bnode_desc, next), 4);
+		hfs_bnode_write(tmp, &cnid,
+			offsetof(struct hfs_bnode_desc, next), 4);
 		hfs_bnode_put(tmp);
 	} else if (node->type == HFS_NODE_LEAF)
 		tree->leaf_head = node->next;
@@ -351,7 +356,8 @@
 			return;
 		tmp->prev = node->prev;
 		cnid = cpu_to_be32(tmp->prev);
-		hfs_bnode_write(tmp, &cnid, offsetof(struct hfs_bnode_desc, prev), 4);
+		hfs_bnode_write(tmp, &cnid,
+			offsetof(struct hfs_bnode_desc, prev), 4);
 		hfs_bnode_put(tmp);
 	} else if (node->type == HFS_NODE_LEAF)
 		tree->leaf_tail = node->prev;
@@ -379,7 +385,9 @@
 	struct hfs_bnode *node;
 
 	if (cnid >= tree->node_count) {
-		printk(KERN_ERR "hfs: request for non-existent node %d in B*Tree\n", cnid);
+		printk(KERN_ERR "hfs: request for non-existent node "
+				"%d in B*Tree\n",
+			cnid);
 		return NULL;
 	}
 
@@ -402,7 +410,9 @@
 	loff_t off;
 
 	if (cnid >= tree->node_count) {
-		printk(KERN_ERR "hfs: request for non-existent node %d in B*Tree\n", cnid);
+		printk(KERN_ERR "hfs: request for non-existent node "
+				"%d in B*Tree\n",
+			cnid);
 		return NULL;
 	}
 
@@ -429,7 +439,8 @@
 	} else {
 		spin_unlock(&tree->hash_lock);
 		kfree(node);
-		wait_event(node2->lock_wq, !test_bit(HFS_BNODE_NEW, &node2->flags));
+		wait_event(node2->lock_wq,
+			!test_bit(HFS_BNODE_NEW, &node2->flags));
 		return node2;
 	}
 	spin_unlock(&tree->hash_lock);
@@ -483,7 +494,8 @@
 	if (node) {
 		hfs_bnode_get(node);
 		spin_unlock(&tree->hash_lock);
-		wait_event(node->lock_wq, !test_bit(HFS_BNODE_NEW, &node->flags));
+		wait_event(node->lock_wq,
+			!test_bit(HFS_BNODE_NEW, &node->flags));
 		if (test_bit(HFS_BNODE_ERROR, &node->flags))
 			goto node_error;
 		return node;
@@ -497,7 +509,8 @@
 	if (!test_bit(HFS_BNODE_NEW, &node->flags))
 		return node;
 
-	desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) + node->page_offset);
+	desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) +
+			node->page_offset);
 	node->prev = be32_to_cpu(desc->prev);
 	node->next = be32_to_cpu(desc->next);
 	node->num_recs = be16_to_cpu(desc->num_recs);
@@ -607,7 +620,8 @@
 	if (node) {
 		atomic_inc(&node->refcnt);
 		dprint(DBG_BNODE_REFS, "get_node(%d:%d): %d\n",
-		       node->tree->cnid, node->this, atomic_read(&node->refcnt));
+			node->tree->cnid, node->this,
+			atomic_read(&node->refcnt));
 	}
 }
 
@@ -619,7 +633,8 @@
 		int i;
 
 		dprint(DBG_BNODE_REFS, "put_node(%d:%d): %d\n",
-		       node->tree->cnid, node->this, atomic_read(&node->refcnt));
+			node->tree->cnid, node->this,
+			atomic_read(&node->refcnt));
 		BUG_ON(!atomic_read(&node->refcnt));
 		if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
 			return;