block: make core bits checkpatch compliant

Fix the style issues scripts/checkpatch.pl flags here: wrap the
conditionals that run past 80 columns and drop the blank line between
each function's closing brace and its EXPORT_SYMBOL() annotation.
No functional change.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
diff --git a/block/blk-map.c b/block/blk-map.c
index 916cfc9..955d75c 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -53,7 +53,8 @@
 	 * direct dma. else, set up kernel bounce buffers
 	 */
 	uaddr = (unsigned long) ubuf;
-	if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
+	if (!(uaddr & queue_dma_alignment(q)) &&
+	    !(len & queue_dma_alignment(q)))
 		bio = bio_map_user(q, NULL, uaddr, len, reading);
 	else
 		bio = bio_copy_user(q, uaddr, len, reading);
@@ -144,7 +145,6 @@
 	blk_rq_unmap_user(bio);
 	return ret;
 }
-
 EXPORT_SYMBOL(blk_rq_map_user);
 
 /**
@@ -179,7 +179,8 @@
 	/* we don't allow misaligned data like bio_map_user() does.  If the
 	 * user is using sg, they're expected to know the alignment constraints
 	 * and respect them accordingly */
-	bio = bio_map_user_iov(q, NULL, iov, iov_count, rq_data_dir(rq)== READ);
+	bio = bio_map_user_iov(q, NULL, iov, iov_count,
+				rq_data_dir(rq) == READ);
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
 
@@ -194,7 +195,6 @@
 	rq->buffer = rq->data = NULL;
 	return 0;
 }
-
 EXPORT_SYMBOL(blk_rq_map_user_iov);
 
 /**
@@ -227,7 +227,6 @@
 
 	return ret;
 }
-
 EXPORT_SYMBOL(blk_rq_unmap_user);
 
 /**
@@ -260,5 +259,4 @@
 	rq->buffer = rq->data = NULL;
 	return 0;
 }
-
 EXPORT_SYMBOL(blk_rq_map_kern);
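
Note for reviewers unfamiliar with this path (not part of the patch): the
condition rewrapped in the first hunk is what chooses between mapping the
user buffer directly and copying it through a bounce buffer.
queue_dma_alignment() returns an alignment mask, and the direct path is
taken only when both the user address and the length are aligned to it.
A minimal user-space sketch of the same mask test; the mask, address and
length values below are made up purely for illustration:

#include <stdio.h>

int main(void)
{
	/* Stand-in for queue_dma_alignment(q): an alignment mask such as
	 * 511 for 512-byte alignment. Illustrative value only. */
	unsigned long mask = 511;
	/* Hypothetical user buffer address and length. */
	unsigned long uaddr = 0x12340200UL;
	unsigned long len = 4096;

	/* Same shape as the check in blk_rq_map_user(): take the direct
	 * mapping only if both address and length are aligned, otherwise
	 * fall back to the copy (bounce buffer) path. */
	if (!(uaddr & mask) && !(len & mask))
		printf("aligned: bio_map_user() path\n");
	else
		printf("misaligned: bio_copy_user() path\n");

	return 0;
}

The iov variant touched in the third hunk has no such fallback, which is
why its comment asks sg users to respect the alignment constraints
themselves.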