block: make core bits checkpatch compliant

Fixes the most common checkpatch complaints in the core block layer:
missing KERN_* levels on printk() calls, open-coded function names in
messages instead of __FUNCTION__, a stray blank line between function
bodies and their EXPORT_SYMBOL() markers, needless NULL initialization
of a global, assignments inside if() conditions, switch/case
indentation, over-long lines, and assorted whitespace damage. No
functional change.

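The warnings addressed here are the kind reported by checkpatch; a
typical run against one of the touched files would be something like:

    $ scripts/checkpatch.pl -f block/blk-core.c
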
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 5f74fec..6901eed 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -26,7 +26,8 @@
 {
 	if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
 	    prepare_flush_fn == NULL) {
-		printk(KERN_ERR "blk_queue_ordered: prepare_flush_fn required\n");
+		printk(KERN_ERR "%s: prepare_flush_fn required\n",
+								__FUNCTION__);
 		return -EINVAL;
 	}
 
@@ -47,7 +48,6 @@
 
 	return 0;
 }
-
 EXPORT_SYMBOL(blk_queue_ordered);
 
 /*
@@ -315,5 +315,4 @@
 	bio_put(bio);
 	return ret;
 }
-
 EXPORT_SYMBOL(blkdev_issue_flush);
diff --git a/block/blk-core.c b/block/blk-core.c
index 55cf293..4afb39c 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -3,7 +3,8 @@
  * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
  * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
  * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
- * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au> -  July2000
+ * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
+ *	-  July 2000
  * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
  */
 
@@ -42,7 +43,7 @@
 /*
  * For queue allocation
  */
-struct kmem_cache *blk_requestq_cachep = NULL;
+struct kmem_cache *blk_requestq_cachep;
 
 /*
  * Controlling structure to kblockd
@@ -137,7 +138,7 @@
 			error = -EIO;
 
 		if (unlikely(nbytes > bio->bi_size)) {
-			printk("%s: want %u bytes done, only %u left\n",
+			printk(KERN_ERR "%s: want %u bytes done, %u left\n",
 			       __FUNCTION__, nbytes, bio->bi_size);
 			nbytes = bio->bi_size;
 		}
@@ -161,23 +162,26 @@
 {
 	int bit;
 
-	printk("%s: dev %s: type=%x, flags=%x\n", msg,
+	printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
 		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
 		rq->cmd_flags);
 
-	printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector,
-						       rq->nr_sectors,
-						       rq->current_nr_sectors);
-	printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len);
+	printk(KERN_INFO "  sector %llu, nr/cnr %lu/%u\n",
+						(unsigned long long)rq->sector,
+						rq->nr_sectors,
+						rq->current_nr_sectors);
+	printk(KERN_INFO "  bio %p, biotail %p, buffer %p, data %p, len %u\n",
+						rq->bio, rq->biotail,
+						rq->buffer, rq->data,
+						rq->data_len);
 
 	if (blk_pc_request(rq)) {
-		printk("cdb: ");
+		printk(KERN_INFO "  cdb: ");
 		for (bit = 0; bit < sizeof(rq->cmd); bit++)
 			printk("%02x ", rq->cmd[bit]);
 		printk("\n");
 	}
 }
-
 EXPORT_SYMBOL(blk_dump_rq_flags);
 
 /*
@@ -204,7 +208,6 @@
 		blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
 	}
 }
-
 EXPORT_SYMBOL(blk_plug_device);
 
 /*
@@ -221,7 +224,6 @@
 	del_timer(&q->unplug_timer);
 	return 1;
 }
-
 EXPORT_SYMBOL(blk_remove_plug);
 
 /*
@@ -328,7 +330,6 @@
 		kblockd_schedule_work(&q->unplug_work);
 	}
 }
-
 EXPORT_SYMBOL(blk_start_queue);
 
 /**
@@ -408,7 +409,7 @@
 }
 EXPORT_SYMBOL(blk_put_queue);
 
-void blk_cleanup_queue(struct request_queue * q)
+void blk_cleanup_queue(struct request_queue *q)
 {
 	mutex_lock(&q->sysfs_lock);
 	set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
@@ -419,7 +420,6 @@
 
 	blk_put_queue(q);
 }
-
 EXPORT_SYMBOL(blk_cleanup_queue);
 
 static int blk_init_free_list(struct request_queue *q)
@@ -575,7 +575,6 @@
 
 	return 1;
 }
-
 EXPORT_SYMBOL(blk_get_queue);
 
 static inline void blk_free_request(struct request_queue *q, struct request *rq)
@@ -774,7 +773,7 @@
 	 */
 	if (ioc_batching(q, ioc))
 		ioc->nr_batch_requests--;
-	
+
 	rq_init(q, rq);
 
 	blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
@@ -888,7 +887,6 @@
 
 	elv_requeue_request(q, rq);
 }
-
 EXPORT_SYMBOL(blk_requeue_request);
 
 /**
@@ -939,7 +937,6 @@
 	blk_start_queueing(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
-
 EXPORT_SYMBOL(blk_insert_request);
 
 /*
@@ -947,7 +944,7 @@
  * queue lock is held and interrupts disabled, as we muck with the
  * request queue list.
  */
-static inline void add_request(struct request_queue * q, struct request * req)
+static inline void add_request(struct request_queue *q, struct request *req)
 {
 	drive_stat_acct(req, 1);
 
@@ -957,7 +954,7 @@
 	 */
 	__elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
 }
- 
+
 /*
  * disk_round_stats()	- Round off the performance stats on a struct
  * disk_stats.
@@ -987,7 +984,6 @@
 	}
 	disk->stamp = now;
 }
-
 EXPORT_SYMBOL_GPL(disk_round_stats);
 
 /*
@@ -1017,7 +1013,6 @@
 		freed_request(q, rw, priv);
 	}
 }
-
 EXPORT_SYMBOL_GPL(__blk_put_request);
 
 void blk_put_request(struct request *req)
@@ -1035,7 +1030,6 @@
 		spin_unlock_irqrestore(q->queue_lock, flags);
 	}
 }
-
 EXPORT_SYMBOL(blk_put_request);
 
 void init_request_from_bio(struct request *req, struct bio *bio)
@@ -1096,53 +1090,53 @@
 
 	el_ret = elv_merge(q, &req, bio);
 	switch (el_ret) {
-		case ELEVATOR_BACK_MERGE:
-			BUG_ON(!rq_mergeable(req));
+	case ELEVATOR_BACK_MERGE:
+		BUG_ON(!rq_mergeable(req));
 
-			if (!ll_back_merge_fn(q, req, bio))
-				break;
+		if (!ll_back_merge_fn(q, req, bio))
+			break;
 
-			blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
+		blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
 
-			req->biotail->bi_next = bio;
-			req->biotail = bio;
-			req->nr_sectors = req->hard_nr_sectors += nr_sectors;
-			req->ioprio = ioprio_best(req->ioprio, prio);
-			drive_stat_acct(req, 0);
-			if (!attempt_back_merge(q, req))
-				elv_merged_request(q, req, el_ret);
-			goto out;
+		req->biotail->bi_next = bio;
+		req->biotail = bio;
+		req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+		req->ioprio = ioprio_best(req->ioprio, prio);
+		drive_stat_acct(req, 0);
+		if (!attempt_back_merge(q, req))
+			elv_merged_request(q, req, el_ret);
+		goto out;
 
-		case ELEVATOR_FRONT_MERGE:
-			BUG_ON(!rq_mergeable(req));
+	case ELEVATOR_FRONT_MERGE:
+		BUG_ON(!rq_mergeable(req));
 
-			if (!ll_front_merge_fn(q, req, bio))
-				break;
+		if (!ll_front_merge_fn(q, req, bio))
+			break;
 
-			blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
+		blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
 
-			bio->bi_next = req->bio;
-			req->bio = bio;
+		bio->bi_next = req->bio;
+		req->bio = bio;
 
-			/*
-			 * may not be valid. if the low level driver said
-			 * it didn't need a bounce buffer then it better
-			 * not touch req->buffer either...
-			 */
-			req->buffer = bio_data(bio);
-			req->current_nr_sectors = bio_cur_sectors(bio);
-			req->hard_cur_sectors = req->current_nr_sectors;
-			req->sector = req->hard_sector = bio->bi_sector;
-			req->nr_sectors = req->hard_nr_sectors += nr_sectors;
-			req->ioprio = ioprio_best(req->ioprio, prio);
-			drive_stat_acct(req, 0);
-			if (!attempt_front_merge(q, req))
-				elv_merged_request(q, req, el_ret);
-			goto out;
+		/*
+		 * may not be valid. if the low level driver said
+		 * it didn't need a bounce buffer then it better
+		 * not touch req->buffer either...
+		 */
+		req->buffer = bio_data(bio);
+		req->current_nr_sectors = bio_cur_sectors(bio);
+		req->hard_cur_sectors = req->current_nr_sectors;
+		req->sector = req->hard_sector = bio->bi_sector;
+		req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+		req->ioprio = ioprio_best(req->ioprio, prio);
+		drive_stat_acct(req, 0);
+		if (!attempt_front_merge(q, req))
+			elv_merged_request(q, req, el_ret);
+		goto out;
 
-		/* ELV_NO_MERGE: elevator says don't/can't merge. */
-		default:
-			;
+	/* ELV_NO_MERGE: elevator says don't/can't merge. */
+	default:
+		;
 	}
 
 get_rq:
@@ -1350,7 +1344,7 @@
 		}
 
 		if (unlikely(nr_sectors > q->max_hw_sectors)) {
-			printk("bio too big device %s (%u > %u)\n", 
+			printk(KERN_ERR "bio too big device %s (%u > %u)\n",
 				bdevname(bio->bi_bdev, b),
 				bio_sectors(bio),
 				q->max_hw_sectors);
@@ -1439,7 +1433,6 @@
 	} while (bio);
 	current->bio_tail = NULL; /* deactivate */
 }
-
 EXPORT_SYMBOL(generic_make_request);
 
 /**
@@ -1480,13 +1473,12 @@
 			current->comm, task_pid_nr(current),
 				(rw & WRITE) ? "WRITE" : "READ",
 				(unsigned long long)bio->bi_sector,
-				bdevname(bio->bi_bdev,b));
+				bdevname(bio->bi_bdev, b));
 		}
 	}
 
 	generic_make_request(bio);
 }
-
 EXPORT_SYMBOL(submit_bio);
 
 /**
@@ -1518,9 +1510,8 @@
 	if (!blk_pc_request(req))
 		req->errors = 0;
 
-	if (error) {
-		if (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))
-			printk("end_request: I/O error, dev %s, sector %llu\n",
+	if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
+		printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
 				req->rq_disk ? req->rq_disk->disk_name : "?",
 				(unsigned long long)req->sector);
 	}
@@ -1554,9 +1545,9 @@
 
 			if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
 				blk_dump_rq_flags(req, "__end_that");
-				printk("%s: bio idx %d >= vcnt %d\n",
-						__FUNCTION__,
-						bio->bi_idx, bio->bi_vcnt);
+				printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
+						__FUNCTION__, bio->bi_idx,
+						bio->bi_vcnt);
 				break;
 			}
 
@@ -1582,7 +1573,8 @@
 		total_bytes += nbytes;
 		nr_bytes -= nbytes;
 
-		if ((bio = req->bio)) {
+		bio = req->bio;
+		if (bio) {
 			/*
 			 * end more in this run, or just return 'not-done'
 			 */
@@ -1626,15 +1618,16 @@
 	local_irq_enable();
 
 	while (!list_empty(&local_list)) {
-		struct request *rq = list_entry(local_list.next, struct request, donelist);
+		struct request *rq;
 
+		rq = list_entry(local_list.next, struct request, donelist);
 		list_del_init(&rq->donelist);
 		rq->q->softirq_done_fn(rq);
 	}
 }
 
-static int __cpuinit blk_cpu_notify(struct notifier_block *self, unsigned long action,
-			  void *hcpu)
+static int __cpuinit blk_cpu_notify(struct notifier_block *self,
+				    unsigned long action, void *hcpu)
 {
 	/*
 	 * If a CPU goes away, splice its entries to the current CPU
@@ -1676,7 +1669,7 @@
 	unsigned long flags;
 
 	BUG_ON(!req->q->softirq_done_fn);
-		
+
 	local_irq_save(flags);
 
 	cpu_list = &__get_cpu_var(blk_cpu_done);
@@ -1685,9 +1678,8 @@
 
 	local_irq_restore(flags);
 }
-
 EXPORT_SYMBOL(blk_complete_request);
-	
+
 /*
  * queue lock must be held
  */
@@ -2002,7 +1994,6 @@
 {
 	return queue_work(kblockd_workqueue, work);
 }
-
 EXPORT_SYMBOL(kblockd_schedule_work);
 
 void kblockd_flush_work(struct work_struct *work)
diff --git a/block/blk-exec.c b/block/blk-exec.c
index ebfb44e..391dd62 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -101,5 +101,4 @@
 
 	return err;
 }
-
 EXPORT_SYMBOL(blk_execute_rq);
diff --git a/block/blk-map.c b/block/blk-map.c
index 916cfc9..955d75c 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -53,7 +53,8 @@
 	 * direct dma. else, set up kernel bounce buffers
 	 */
 	uaddr = (unsigned long) ubuf;
-	if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
+	if (!(uaddr & queue_dma_alignment(q)) &&
+	    !(len & queue_dma_alignment(q)))
 		bio = bio_map_user(q, NULL, uaddr, len, reading);
 	else
 		bio = bio_copy_user(q, uaddr, len, reading);
@@ -144,7 +145,6 @@
 	blk_rq_unmap_user(bio);
 	return ret;
 }
-
 EXPORT_SYMBOL(blk_rq_map_user);
 
 /**
@@ -179,7 +179,8 @@
 	/* we don't allow misaligned data like bio_map_user() does.  If the
 	 * user is using sg, they're expected to know the alignment constraints
 	 * and respect them accordingly */
-	bio = bio_map_user_iov(q, NULL, iov, iov_count, rq_data_dir(rq)== READ);
+	bio = bio_map_user_iov(q, NULL, iov, iov_count,
+				rq_data_dir(rq) == READ);
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
 
@@ -194,7 +195,6 @@
 	rq->buffer = rq->data = NULL;
 	return 0;
 }
-
 EXPORT_SYMBOL(blk_rq_map_user_iov);
 
 /**
@@ -227,7 +227,6 @@
 
 	return ret;
 }
-
 EXPORT_SYMBOL(blk_rq_unmap_user);
 
 /**
@@ -260,5 +259,4 @@
 	rq->buffer = rq->data = NULL;
 	return 0;
 }
-
 EXPORT_SYMBOL(blk_rq_map_kern);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 5023f0b..845ef81 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -32,7 +32,7 @@
 		 * size, something has gone terribly wrong
 		 */
 		if (rq->nr_sectors < rq->current_nr_sectors) {
-			printk("blk: request botched\n");
+			printk(KERN_ERR "blk: request botched\n");
 			rq->nr_sectors = rq->current_nr_sectors;
 		}
 	}
@@ -235,7 +235,6 @@
 
 	return nsegs;
 }
-
 EXPORT_SYMBOL(blk_rq_map_sg);
 
 static inline int ll_new_mergeable(struct request_queue *q,
@@ -305,8 +304,8 @@
 	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
 		blk_recount_segments(q, bio);
 	len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
-	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio)) &&
-	    !BIOVEC_VIRT_OVERSIZE(len)) {
+	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio))
+	    && !BIOVEC_VIRT_OVERSIZE(len)) {
 		int mergeable =  ll_new_mergeable(q, req, bio);
 
 		if (mergeable) {
@@ -321,7 +320,7 @@
 	return ll_new_hw_segment(q, req, bio);
 }
 
-int ll_front_merge_fn(struct request_queue *q, struct request *req, 
+int ll_front_merge_fn(struct request_queue *q, struct request *req,
 		      struct bio *bio)
 {
 	unsigned short max_sectors;
@@ -388,7 +387,8 @@
 
 	total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
 	if (blk_hw_contig_segment(q, req->biotail, next->bio)) {
-		int len = req->biotail->bi_hw_back_size + next->bio->bi_hw_front_size;
+		int len = req->biotail->bi_hw_back_size +
+				next->bio->bi_hw_front_size;
 		/*
 		 * propagate the combined length to the end of the requests
 		 */
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 4df09a1..c8d0c57 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -10,8 +10,10 @@
 
 #include "blk.h"
 
-unsigned long blk_max_low_pfn, blk_max_pfn;
+unsigned long blk_max_low_pfn;
 EXPORT_SYMBOL(blk_max_low_pfn);
+
+unsigned long blk_max_pfn;
 EXPORT_SYMBOL(blk_max_pfn);
 
 /**
@@ -29,7 +31,6 @@
 {
 	q->prep_rq_fn = pfn;
 }
-
 EXPORT_SYMBOL(blk_queue_prep_rq);
 
 /**
@@ -52,14 +53,12 @@
 {
 	q->merge_bvec_fn = mbfn;
 }
-
 EXPORT_SYMBOL(blk_queue_merge_bvec);
 
 void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
 {
 	q->softirq_done_fn = fn;
 }
-
 EXPORT_SYMBOL(blk_queue_softirq_done);
 
 /**
@@ -84,7 +83,7 @@
  *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
  *    blk_queue_bounce() to create a buffer in normal memory.
  **/
-void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn)
+void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
 {
 	/*
 	 * set defaults
@@ -93,7 +92,8 @@
 	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
 	blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
 	q->make_request_fn = mfn;
-	q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
+	q->backing_dev_info.ra_pages =
+			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
 	q->backing_dev_info.state = 0;
 	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
 	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
@@ -117,7 +117,6 @@
 	 */
 	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
 }
-
 EXPORT_SYMBOL(blk_queue_make_request);
 
 /**
@@ -133,7 +132,7 @@
  **/
 void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
 {
-	unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT;
+	unsigned long b_pfn = dma_addr >> PAGE_SHIFT;
 	int dma = 0;
 
 	q->bounce_gfp = GFP_NOIO;
@@ -141,21 +140,20 @@
 	/* Assume anything <= 4GB can be handled by IOMMU.
 	   Actually some IOMMUs can handle everything, but I don't
 	   know of a way to test this here. */
-	if (bounce_pfn < (min_t(u64,0xffffffff,BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
+	if (b_pfn < (min_t(u64, 0xffffffff, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
 		dma = 1;
 	q->bounce_pfn = max_low_pfn;
 #else
-	if (bounce_pfn < blk_max_low_pfn)
+	if (b_pfn < blk_max_low_pfn)
 		dma = 1;
-	q->bounce_pfn = bounce_pfn;
+	q->bounce_pfn = b_pfn;
 #endif
 	if (dma) {
 		init_emergency_isa_pool();
 		q->bounce_gfp = GFP_NOIO | GFP_DMA;
-		q->bounce_pfn = bounce_pfn;
+		q->bounce_pfn = b_pfn;
 	}
 }
-
 EXPORT_SYMBOL(blk_queue_bounce_limit);
 
 /**
@@ -171,7 +169,8 @@
 {
 	if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
 		max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
-		printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors);
+		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
+							max_sectors);
 	}
 
 	if (BLK_DEF_MAX_SECTORS > max_sectors)
@@ -181,7 +180,6 @@
 		q->max_hw_sectors = max_sectors;
 	}
 }
-
 EXPORT_SYMBOL(blk_queue_max_sectors);
 
 /**
@@ -199,12 +197,12 @@
 {
 	if (!max_segments) {
 		max_segments = 1;
-		printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
+		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
+							max_segments);
 	}
 
 	q->max_phys_segments = max_segments;
 }
-
 EXPORT_SYMBOL(blk_queue_max_phys_segments);
 
 /**
@@ -223,12 +221,12 @@
 {
 	if (!max_segments) {
 		max_segments = 1;
-		printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
+		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
+							max_segments);
 	}
 
 	q->max_hw_segments = max_segments;
 }
-
 EXPORT_SYMBOL(blk_queue_max_hw_segments);
 
 /**
@@ -244,12 +242,12 @@
 {
 	if (max_size < PAGE_CACHE_SIZE) {
 		max_size = PAGE_CACHE_SIZE;
-		printk("%s: set to minimum %d\n", __FUNCTION__, max_size);
+		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
+							max_size);
 	}
 
 	q->max_segment_size = max_size;
 }
-
 EXPORT_SYMBOL(blk_queue_max_segment_size);
 
 /**
@@ -267,7 +265,6 @@
 {
 	q->hardsect_size = size;
 }
-
 EXPORT_SYMBOL(blk_queue_hardsect_size);
 
 /*
@@ -283,17 +280,16 @@
 void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 {
 	/* zero is "infinity" */
-	t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors);
-	t->max_hw_sectors = min_not_zero(t->max_hw_sectors,b->max_hw_sectors);
+	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
+	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
 
-	t->max_phys_segments = min(t->max_phys_segments,b->max_phys_segments);
-	t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments);
-	t->max_segment_size = min(t->max_segment_size,b->max_segment_size);
-	t->hardsect_size = max(t->hardsect_size,b->hardsect_size);
+	t->max_phys_segments = min(t->max_phys_segments, b->max_phys_segments);
+	t->max_hw_segments = min(t->max_hw_segments, b->max_hw_segments);
+	t->max_segment_size = min(t->max_segment_size, b->max_segment_size);
+	t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
 	if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
 		clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags);
 }
-
 EXPORT_SYMBOL(blk_queue_stack_limits);
 
 /**
@@ -332,7 +328,6 @@
 
 	return 0;
 }
-
 EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
 
 /**
@@ -344,12 +339,12 @@
 {
 	if (mask < PAGE_CACHE_SIZE - 1) {
 		mask = PAGE_CACHE_SIZE - 1;
-		printk("%s: set to minimum %lx\n", __FUNCTION__, mask);
+		printk(KERN_INFO "%s: set to minimum %lx\n", __FUNCTION__,
+							mask);
 	}
 
 	q->seg_boundary_mask = mask;
 }
-
 EXPORT_SYMBOL(blk_queue_segment_boundary);
 
 /**
@@ -366,7 +361,6 @@
 {
 	q->dma_alignment = mask;
 }
-
 EXPORT_SYMBOL(blk_queue_dma_alignment);
 
 /**
@@ -390,7 +384,6 @@
 	if (mask > q->dma_alignment)
 		q->dma_alignment = mask;
 }
-
 EXPORT_SYMBOL(blk_queue_update_dma_alignment);
 
 int __init blk_settings_init(void)
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index bc28776..54d0db1 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -207,12 +207,13 @@
 		    const char *page, size_t length)
 {
 	struct queue_sysfs_entry *entry = to_queue(attr);
-	struct request_queue *q = container_of(kobj, struct request_queue, kobj);
-
+	struct request_queue *q;
 	ssize_t res;
 
 	if (!entry->store)
 		return -EIO;
+
+	q = container_of(kobj, struct request_queue, kobj);
 	mutex_lock(&q->sysfs_lock);
 	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
 		mutex_unlock(&q->sysfs_lock);
diff --git a/block/blk-tag.c b/block/blk-tag.c
index d1fd300..a8c37d4 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -21,7 +21,6 @@
 {
 	return blk_map_queue_find_tag(q->queue_tags, tag);
 }
-
 EXPORT_SYMBOL(blk_queue_find_tag);
 
 /**
@@ -99,7 +98,6 @@
 {
 	clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
 }
-
 EXPORT_SYMBOL(blk_queue_free_tags);
 
 static int
@@ -185,7 +183,8 @@
 		if (!tags)
 			goto fail;
 	} else if (q->queue_tags) {
-		if ((rc = blk_queue_resize_tags(q, depth)))
+		rc = blk_queue_resize_tags(q, depth);
+		if (rc)
 			return rc;
 		set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
 		return 0;
@@ -203,7 +202,6 @@
 	kfree(tags);
 	return -ENOMEM;
 }
-
 EXPORT_SYMBOL(blk_queue_init_tags);
 
 /**
@@ -260,7 +258,6 @@
 	kfree(tag_map);
 	return 0;
 }
-
 EXPORT_SYMBOL(blk_queue_resize_tags);
 
 /**
@@ -313,7 +310,6 @@
 	clear_bit_unlock(tag, bqt->tag_map);
 	bqt->busy--;
 }
-
 EXPORT_SYMBOL(blk_queue_end_tag);
 
 /**
@@ -340,7 +336,7 @@
 	int tag;
 
 	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
-		printk(KERN_ERR 
+		printk(KERN_ERR
 		       "%s: request %p for device [%s] already tagged %d",
 		       __FUNCTION__, rq,
 		       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
@@ -370,7 +366,6 @@
 	bqt->busy++;
 	return 0;
 }
-
 EXPORT_SYMBOL(blk_queue_start_tag);
 
 /**
@@ -392,5 +387,4 @@
 	list_for_each_safe(tmp, n, &q->tag_busy_list)
 		blk_requeue_request(q, list_entry_rq(tmp));
 }
-
 EXPORT_SYMBOL(blk_queue_invalidate_tags);