Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block

* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  Add UNPLUG traces to all appropriate places
  block: fix requeue handling in blk_queue_invalidate_tags()
  mmc: Fix sg helper copy-and-paste error
  pktcdvd: fix BUG caused by sysfs module reference semantics change
  ioprio: allow sys_ioprio_set() value of 0 to reset ioprio setting
  cfq_idle_class_timer: add paranoid checks for jiffies overflow
  cfq: fix IOPRIO_CLASS_IDLE delays
  cfq: fix IOPRIO_CLASS_IDLE accounting
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index e47a930..0b4a479 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -789,6 +789,20 @@
 		__cfq_slice_expired(cfqd, cfqq, timed_out);
 }
 
+static int start_idle_class_timer(struct cfq_data *cfqd)
+{
+	unsigned long end = cfqd->last_end_request + CFQ_IDLE_GRACE;
+	unsigned long now = jiffies;
+
+	if (time_before(now, end) &&
+	    time_after_eq(now, cfqd->last_end_request)) {
+		mod_timer(&cfqd->idle_class_timer, end);
+		return 1;
+	}
+
+	return 0;
+}
+
 /*
  * Get next queue for service. Unless we have a queue preemption,
  * we'll simply select the first cfqq in the service tree.
@@ -805,19 +819,14 @@
 	cfqq = rb_entry(n, struct cfq_queue, rb_node);
 
 	if (cfq_class_idle(cfqq)) {
-		unsigned long end;
-
 		/*
 		 * if we have idle queues and no rt or be queues had
 		 * pending requests, either allow immediate service if
 		 * the grace period has passed or arm the idle grace
 		 * timer
 		 */
-		end = cfqd->last_end_request + CFQ_IDLE_GRACE;
-		if (time_before(jiffies, end)) {
-			mod_timer(&cfqd->idle_class_timer, end);
+		if (start_idle_class_timer(cfqd))
 			cfqq = NULL;
-		}
 	}
 
 	return cfqq;
@@ -2036,17 +2045,14 @@
 static void cfq_idle_class_timer(unsigned long data)
 {
 	struct cfq_data *cfqd = (struct cfq_data *) data;
-	unsigned long flags, end;
+	unsigned long flags;
 
 	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
 
 	/*
 	 * race with a non-idle queue, reset timer
 	 */
-	end = cfqd->last_end_request + CFQ_IDLE_GRACE;
-	if (!time_after_eq(jiffies, end))
-		mod_timer(&cfqd->idle_class_timer, end);
-	else
+	if (!start_idle_class_timer(cfqd))
 		cfq_schedule_dispatch(cfqd);
 
 	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
@@ -2068,9 +2074,10 @@
 			cfq_put_queue(cfqd->async_cfqq[0][i]);
 		if (cfqd->async_cfqq[1][i])
 			cfq_put_queue(cfqd->async_cfqq[1][i]);
-		if (cfqd->async_idle_cfqq)
-			cfq_put_queue(cfqd->async_idle_cfqq);
 	}
+
+	if (cfqd->async_idle_cfqq)
+		cfq_put_queue(cfqd->async_idle_cfqq);
 }
 
 static void cfq_exit_queue(elevator_t *e)
@@ -2125,6 +2132,7 @@
 
 	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
 
+	cfqd->last_end_request = jiffies;
 	cfqd->cfq_quantum = cfq_quantum;
 	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
 	cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
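
The cfq hunks above consolidate the idle-class grace handling into start_idle_class_timer(): the timer is armed only while jiffies lies inside the window [last_end_request, last_end_request + CFQ_IDLE_GRACE), and the init hunk now seeds last_end_request with the current jiffies so the very first comparison is sane. A rough userspace model of that window test, with the jiffies macros simplified from <linux/jiffies.h> and an arbitrary grace value, is:

#include <stdio.h>
#include <limits.h>

/* simplified userspace stand-ins for the jiffies helpers in <linux/jiffies.h>
 * (the real macros also carry typecheck machinery) */
#define time_after(a, b)	((long)((b) - (a)) < 0)
#define time_before(a, b)	time_after(b, a)
#define time_after_eq(a, b)	((long)((a) - (b)) >= 0)

#define GRACE	100UL	/* arbitrary stand-in for CFQ_IDLE_GRACE */

/* mirrors the condition start_idle_class_timer() checks before mod_timer() */
static int within_grace_window(unsigned long now, unsigned long last_end)
{
	unsigned long end = last_end + GRACE;

	return time_before(now, end) && time_after_eq(now, last_end);
}

int main(void)
{
	/* request finished 10 ticks ago: still inside the grace window */
	printf("%d\n", within_grace_window(1000UL, 990UL));		/* 1 */

	/* stale last_end of 0 while the clock sits just below the wrap point:
	 * time_before(now, end) alone evaluates true here and would re-arm
	 * the timer; the extra time_after_eq(now, last_end) check rejects it */
	printf("%d\n", within_grace_window(ULONG_MAX - 15UL, 0UL));	/* 0 */

	return 0;
}

The second comparison is what rejects a stale or wrapped last_end_request; with time_before() alone, such a value could keep re-arming the timer and leave IOPRIO_CLASS_IDLE queues waiting far longer than the grace period.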
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 75c98d5..3b927be 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1143,22 +1143,9 @@
 void blk_queue_invalidate_tags(struct request_queue *q)
 {
 	struct list_head *tmp, *n;
-	struct request *rq;
 
-	list_for_each_safe(tmp, n, &q->tag_busy_list) {
-		rq = list_entry_rq(tmp);
-
-		if (rq->tag == -1) {
-			printk(KERN_ERR
-			       "%s: bad tag found on list\n", __FUNCTION__);
-			list_del_init(&rq->queuelist);
-			rq->cmd_flags &= ~REQ_QUEUED;
-		} else
-			blk_queue_end_tag(q, rq);
-
-		rq->cmd_flags &= ~REQ_STARTED;
-		__elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
-	}
+	list_for_each_safe(tmp, n, &q->tag_busy_list)
+		blk_requeue_request(q, list_entry_rq(tmp));
 }
 
 EXPORT_SYMBOL(blk_queue_invalidate_tags);
@@ -1634,15 +1621,7 @@
 {
 	struct request_queue *q = bdi->unplug_io_data;
 
-	/*
-	 * devices don't necessarily have an ->unplug_fn defined
-	 */
-	if (q->unplug_fn) {
-		blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
-					q->rq.count[READ] + q->rq.count[WRITE]);
-
-		q->unplug_fn(q);
-	}
+	blk_unplug(q);
 }
 
 static void blk_unplug_work(struct work_struct *work)
@@ -1666,6 +1645,20 @@
 	kblockd_schedule_work(&q->unplug_work);
 }
 
+void blk_unplug(struct request_queue *q)
+{
+	/*
+	 * devices don't necessarily have an ->unplug_fn defined
+	 */
+	if (q->unplug_fn) {
+		blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
+					q->rq.count[READ] + q->rq.count[WRITE]);
+
+		q->unplug_fn(q);
+	}
+}
+EXPORT_SYMBOL(blk_unplug);
+
 /**
  * blk_start_queue - restart a previously stopped queue
  * @q:    The &struct request_queue in question
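
The new blk_unplug() helper above folds the ->unplug_fn check and the BLK_TA_UNPLUG_IO trace into one exported call, which is what lets every md/dm/raid hunk further down shrink to a single line. A hedged sketch of the calling pattern those drivers switch to (the function and its arguments are illustrative, not taken from a real driver):

#include <linux/blkdev.h>

/* illustrative only: kick the unplug path of every backing device of some
 * hypothetical stacked driver; blk_unplug() itself copes with queues that
 * have no ->unplug_fn and emits the unplug trace event */
static void example_unplug_backing_devs(struct block_device **bdevs, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		blk_unplug(bdev_get_queue(bdevs[i]));
}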
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index a8130a4..a5ee213 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -358,10 +358,19 @@
 					size_t count)
 {
 	unsigned int major, minor;
+
 	if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
+		/* pkt_setup_dev() expects caller to hold reference to self */
+		if (!try_module_get(THIS_MODULE))
+			return -ENODEV;
+
 		pkt_setup_dev(MKDEV(major, minor), NULL);
+
+		module_put(THIS_MODULE);
+
 		return count;
 	}
+
 	return -EINVAL;
 }
 
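The pktcdvd hunk above wraps pkt_setup_dev() in a temporary reference on the module itself, because the changed sysfs semantics no longer guarantee that the store handler runs with its own module pinned, while pkt_setup_dev() assumes the caller holds such a reference. The same bracket works for any callee with that expectation; a minimal sketch, where do_setup() is a made-up placeholder:

#include <linux/module.h>

/* placeholder for a helper that assumes its caller pinned this module */
static int do_setup(void)
{
	return 0;
}

static int call_with_self_reference(void)
{
	int ret;

	/* try_module_get() only fails while the module is already going away */
	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	ret = do_setup();

	module_put(THIS_MODULE);
	return ret;
}
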
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 7c426d0..1b1ef31 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1207,8 +1207,7 @@
 			prepare_to_wait(&bitmap->overflow_wait, &__wait,
 					TASK_UNINTERRUPTIBLE);
 			spin_unlock_irq(&bitmap->lock);
-			bitmap->mddev->queue
-				->unplug_fn(bitmap->mddev->queue);
+			blk_unplug(bitmap->mddev->queue);
 			schedule();
 			finish_wait(&bitmap->overflow_wait, &__wait);
 			continue;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 5a7eb65..e298d8d 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1000,8 +1000,7 @@
 		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
 		struct request_queue *q = bdev_get_queue(dd->bdev);
 
-		if (q->unplug_fn)
-			q->unplug_fn(q);
+		blk_unplug(q);
 	}
 }
 
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 56a11f6..3dac1cf 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -87,8 +87,7 @@
 
 	for (i=0; i < mddev->raid_disks; i++) {
 		struct request_queue *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev);
-		if (r_queue->unplug_fn)
-			r_queue->unplug_fn(r_queue);
+		blk_unplug(r_queue);
 	}
 }
 
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 808cd95..cef9ebd 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5445,7 +5445,7 @@
 		 * about not overloading the IO subsystem. (things like an
 		 * e2fsck being done on the RAID array should execute fast)
 		 */
-		mddev->queue->unplug_fn(mddev->queue);
+		blk_unplug(mddev->queue);
 		cond_resched();
 
 		currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
@@ -5464,7 +5464,7 @@
 	 * this also signals 'finished resyncing' to md_stop
 	 */
  out:
-	mddev->queue->unplug_fn(mddev->queue);
+	blk_unplug(mddev->queue);
 
 	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
 
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index b35731c..eb631eb 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -125,8 +125,7 @@
 			atomic_inc(&rdev->nr_pending);
 			rcu_read_unlock();
 
-			if (r_queue->unplug_fn)
-				r_queue->unplug_fn(r_queue);
+			blk_unplug(r_queue);
 
 			rdev_dec_pending(rdev, mddev);
 			rcu_read_lock();
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index c111105..f8e5917 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -35,8 +35,7 @@
 	for (i=0; i<mddev->raid_disks; i++) {
 		struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);
 
-		if (r_queue->unplug_fn)
-			r_queue->unplug_fn(r_queue);
+		blk_unplug(r_queue);
 	}
 }
 
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 85478d6..4a69c41 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -549,8 +549,7 @@
 			atomic_inc(&rdev->nr_pending);
 			rcu_read_unlock();
 
-			if (r_queue->unplug_fn)
-				r_queue->unplug_fn(r_queue);
+			blk_unplug(r_queue);
 
 			rdev_dec_pending(rdev, mddev);
 			rcu_read_lock();
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index fc6607a..5cdcc93 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -593,8 +593,7 @@
 			atomic_inc(&rdev->nr_pending);
 			rcu_read_unlock();
 
-			if (r_queue->unplug_fn)
-				r_queue->unplug_fn(r_queue);
+			blk_unplug(r_queue);
 
 			rdev_dec_pending(rdev, mddev);
 			rcu_read_lock();
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 82af346..1cfc984 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3186,8 +3186,7 @@
 			atomic_inc(&rdev->nr_pending);
 			rcu_read_unlock();
 
-			if (r_queue->unplug_fn)
-				r_queue->unplug_fn(r_queue);
+			blk_unplug(r_queue);
 
 			rdev_dec_pending(rdev, mddev);
 			rcu_read_lock();
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 9203a0b..1b9c9b6 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -310,7 +310,7 @@
 		}
 
 		if (src_size == 0) {
-			src_buf = sg_virt(dst);
+			src_buf = sg_virt(src);
 			src_size = src->length;
 		}
 
diff --git a/fs/ioprio.c b/fs/ioprio.c
index d6ff77e..e4e01bc 100644
--- a/fs/ioprio.c
+++ b/fs/ioprio.c
@@ -78,6 +78,10 @@
 			if (!capable(CAP_SYS_ADMIN))
 				return -EPERM;
 			break;
+		case IOPRIO_CLASS_NONE:
+			if (data)
+				return -EINVAL;
+			break;
 		default:
 			return -EINVAL;
 	}
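
With the ioprio.c change above, passing a value whose class is IOPRIO_CLASS_NONE and whose data is 0 (i.e. a raw value of 0) is accepted and resets the task back to its default, nice-derived I/O priority instead of returning -EINVAL. A hedged userspace sketch, assuming the libc headers expose SYS_ioprio_set and with the constants copied to match include/linux/ioprio.h:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

/* there is no glibc wrapper for ioprio_set(), so spell out the constants;
 * the values mirror include/linux/ioprio.h */
#define IOPRIO_WHO_PROCESS	1
#define IOPRIO_CLASS_SHIFT	13
#define IOPRIO_CLASS_NONE	0
#define IOPRIO_CLASS_BE		2
#define IOPRIO_PRIO_VALUE(class, data)	(((class) << IOPRIO_CLASS_SHIFT) | (data))

int main(void)
{
	/* give the current task a best-effort priority of 4 ... */
	if (syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0,
		    IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4)) < 0)
		perror("ioprio_set(BE, 4)");

	/* ... then reset it: class NONE with data 0 is now accepted */
	if (syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0,
		    IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0)) < 0)
		perror("ioprio_set(NONE, 0)");

	return 0;
}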
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 8396db2..d18ee67 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -697,6 +697,7 @@
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
 				  struct request *, int, rq_end_io_fn *);
 extern int blk_verify_command(unsigned char *, int);
+extern void blk_unplug(struct request_queue *q);
 
 static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
 {