[PATCH] cfq-iosched: many performance fixes

This is a collection of patches that greatly improve CFQ performance
under some workloads, in particular seeky sync I/O on hardware that
does command queueing (TCQ/NCQ).

- Change the idling logic to only kick in after a request completes and
  we are deciding what to do. Previously the idle period included the
  request service time, which made it hard to tune; now it measures
  true think/idle time.

- Take advantage of TCQ/NCQ/queueing for seeky sync workloads, but keep
  idling in place for sequential (or close to sequential) sync workloads
  so CFQ stays in control there (see the sketch below this list).

- Expire queues immediately and move on to other busy queues if we are
  not going to idle after the current one finishes.

- Don't rearm the idle timer if there are no busy queues; just leave
  the system idle.
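
The net effect is that a queue keeps its idle window only when idling
is likely to pay off. Below is a minimal userspace sketch of that
decision, mirroring the idle-window update in the diff; the 128 KiB
mean-seek cutoff is the new CIC_SEEKY() threshold, everything else
(names, units, the inputs in main()) is illustrative rather than
kernel API:

    #include <stdbool.h>
    #include <stdio.h>

    /* mean seek distance in bytes, same cutoff as CIC_SEEKY() */
    #define SEEKY_THRESH (128 * 1024)

    /*
     * Don't idle for a seeky process on a drive that does command
     * queueing: the hardware can overlap its seeks with requests
     * from other queues. Also don't idle for a process whose mean
     * think time already exceeds the idle window we would grant.
     */
    static bool enable_idle(bool hw_tag, unsigned long seek_mean,
                            unsigned long ttime_mean,
                            unsigned long slice_idle)
    {
            if (!slice_idle || (hw_tag && seek_mean > SEEKY_THRESH))
                    return false;
            return ttime_mean <= slice_idle;
    }

    int main(void)
    {
            /* with hw_tag set, only the sequential process keeps idling */
            printf("seeky:      %d\n", enable_idle(true, 512 * 1024, 1, 8));
            printf("sequential: %d\n", enable_idle(true, 4 * 1024, 1, 8));
            return 0;
    }

Before this change, hw_tag alone disabled idling; now a sequential
process idles even on queueing hardware, while a seeky one lets the
drive fill its queue instead.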

Signed-off-by: Jens Axboe <axboe@suse.de>
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 84b75f8..13c4793 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -26,7 +26,7 @@
 static const int cfq_slice_sync = HZ / 10;
 static int cfq_slice_async = HZ / 25;
 static const int cfq_slice_async_rq = 2;
-static int cfq_slice_idle = HZ / 70;
+static int cfq_slice_idle = HZ / 125;
 
 #define CFQ_IDLE_GRACE		(HZ / 10)
 #define CFQ_SLICE_SCALE		(5)
@@ -906,6 +906,8 @@
 	return cfqq;
 }
 
+#define CIC_SEEKY(cic) ((cic)->seek_mean > (128 * 1024))
+
 static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 
 {
@@ -939,7 +941,7 @@
 	 * fair distribution of slice time for a process doing back-to-back
 	 * seeks. so allow a little bit of time for him to submit a new rq
 	 */
-	if (sample_valid(cic->seek_samples) && cic->seek_mean > 131072)
+	if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
 		sl = 2;
 
 	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
@@ -1038,8 +1040,10 @@
 	 */
 	if (!RB_EMPTY(&cfqq->sort_list))
 		goto keep_queue;
-	else if (cfq_cfqq_class_sync(cfqq) &&
-		 time_before(now, cfqq->slice_end)) {
+	else if (cfq_cfqq_dispatched(cfqq)) {
+		cfqq = NULL;
+		goto keep_queue;
+	} else if (cfq_cfqq_class_sync(cfqq)) {
 		if (cfq_arm_slice_timer(cfqd, cfqq))
 			return NULL;
 	}
@@ -1088,8 +1092,7 @@
 	} while (dispatched < max_dispatch);
 
 	/*
-	 * if slice end isn't set yet, set it. if at least one request was
-	 * sync, use the sync time slice value
+	 * if slice end isn't set yet, set it.
 	 */
 	if (!cfqq->slice_end)
 		cfq_set_prio_slice(cfqd, cfqq);
@@ -1100,7 +1103,8 @@
 	 */
 	if ((!cfq_cfqq_sync(cfqq) &&
 	    cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
-	    cfq_class_idle(cfqq))
+	    cfq_class_idle(cfqq) ||
+	    !cfq_cfqq_idle_window(cfqq))
 		cfq_slice_expired(cfqd, 0);
 
 	return dispatched;
@@ -1109,10 +1113,11 @@
 static int
 cfq_forced_dispatch_cfqqs(struct list_head *list)
 {
-	int dispatched = 0;
 	struct cfq_queue *cfqq, *next;
 	struct cfq_rq *crq;
+	int dispatched;
 
+	dispatched = 0;
 	list_for_each_entry_safe(cfqq, next, list, cfq_list) {
 		while ((crq = cfqq->next_crq)) {
 			cfq_dispatch_insert(cfqq->cfqd->queue, crq);
@@ -1120,6 +1125,7 @@
 		}
 		BUG_ON(!list_empty(&cfqq->fifo));
 	}
+
 	return dispatched;
 }
 
@@ -1146,7 +1152,8 @@
 cfq_dispatch_requests(request_queue_t *q, int force)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
-	struct cfq_queue *cfqq;
+	struct cfq_queue *cfqq, *prev_cfqq;
+	int dispatched;
 
 	if (!cfqd->busy_queues)
 		return 0;
@@ -1154,10 +1161,17 @@
 	if (unlikely(force))
 		return cfq_forced_dispatch(cfqd);
 
-	cfqq = cfq_select_queue(cfqd);
-	if (cfqq) {
+	dispatched = 0;
+	prev_cfqq = NULL;
+	while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
 		int max_dispatch;
 
+		/*
+		 * Don't repeat dispatch from the previous queue.
+		 */
+		if (prev_cfqq == cfqq)
+			break;
+
 		cfq_clear_cfqq_must_dispatch(cfqq);
 		cfq_clear_cfqq_wait_request(cfqq);
 		del_timer(&cfqd->idle_slice_timer);
@@ -1166,10 +1180,19 @@
 		if (cfq_class_idle(cfqq))
 			max_dispatch = 1;
 
-		return __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
+		dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
+
+		/*
+		 * If the dispatch cfqq has idling enabled and is still
+		 * the active queue, break out.
+		 */
+		if (cfq_cfqq_idle_window(cfqq) && cfqd->active_queue)
+			break;
+
+		prev_cfqq = cfqq;
 	}
 
-	return 0;
+	return dispatched;
 }
 
 /*
@@ -1375,24 +1398,28 @@
 {
 	struct cfq_data *cfqd = cic->key;
 	struct cfq_queue *cfqq;
-	if (cfqd) {
-		spin_lock(cfqd->queue->queue_lock);
-		cfqq = cic->cfqq[ASYNC];
-		if (cfqq) {
-			struct cfq_queue *new_cfqq;
-			new_cfqq = cfq_get_queue(cfqd, CFQ_KEY_ASYNC,
-						cic->ioc->task, GFP_ATOMIC);
-			if (new_cfqq) {
-				cic->cfqq[ASYNC] = new_cfqq;
-				cfq_put_queue(cfqq);
-			}
-		}
-		cfqq = cic->cfqq[SYNC];
-		if (cfqq)
-			cfq_mark_cfqq_prio_changed(cfqq);
 
-		spin_unlock(cfqd->queue->queue_lock);
+	if (unlikely(!cfqd))
+		return;
+
+	spin_lock(cfqd->queue->queue_lock);
+
+	cfqq = cic->cfqq[ASYNC];
+	if (cfqq) {
+		struct cfq_queue *new_cfqq;
+		new_cfqq = cfq_get_queue(cfqd, CFQ_KEY_ASYNC, cic->ioc->task,
+					 GFP_ATOMIC);
+		if (new_cfqq) {
+			cic->cfqq[ASYNC] = new_cfqq;
+			cfq_put_queue(cfqq);
+		}
 	}
+
+	cfqq = cic->cfqq[SYNC];
+	if (cfqq)
+		cfq_mark_cfqq_prio_changed(cfqq);
+
+	spin_unlock(cfqd->queue->queue_lock);
 }
 
 /*
@@ -1461,8 +1488,7 @@
 		 * set ->slice_left to allow preemption for a new process
 		 */
 		cfqq->slice_left = 2 * cfqd->cfq_slice_idle;
-		if (!cfqd->hw_tag)
-			cfq_mark_cfqq_idle_window(cfqq);
+		cfq_mark_cfqq_idle_window(cfqq);
 		cfq_mark_cfqq_prio_changed(cfqq);
 		cfq_init_prio_data(cfqq);
 	}
@@ -1653,7 +1679,8 @@
 {
 	int enable_idle = cfq_cfqq_idle_window(cfqq);
 
-	if (!cic->ioc->task || !cfqd->cfq_slice_idle || cfqd->hw_tag)
+	if (!cic->ioc->task || !cfqd->cfq_slice_idle ||
+	    (cfqd->hw_tag && CIC_SEEKY(cic)))
 		enable_idle = 0;
 	else if (sample_valid(cic->ttime_samples)) {
 		if (cic->ttime_mean > cfqd->cfq_slice_idle)
@@ -1683,7 +1710,7 @@
 		return 0;
 
 	if (!cfqq)
-		return 1;
+		return 0;
 
 	if (cfq_class_idle(cfqq))
 		return 1;
@@ -1715,7 +1742,7 @@
 		cfqq->slice_left = cfq_prio_to_slice(cfqd, cfqq) / 2;
 
 	cfqq->slice_end = cfqq->slice_left + jiffies;
-	__cfq_slice_expired(cfqd, cfqq, 1);
+	cfq_slice_expired(cfqd, 1);
 	__cfq_set_active_queue(cfqd, cfqq);
 }
 
@@ -1834,11 +1861,23 @@
 			cfqq->service_last = now;
 			cfq_resort_rr_list(cfqq, 0);
 		}
-		cfq_schedule_dispatch(cfqd);
 	}
 
-	if (cfq_crq_is_sync(crq))
+	if (sync)
 		crq->io_context->last_end_request = now;
+
+	/*
+	 * If this is the active queue, check if it needs to be expired,
+	 * or if we want to idle in case it has no pending requests.
+	 */
+	if (cfqd->active_queue == cfqq) {
+		if (time_after(now, cfqq->slice_end))
+			cfq_slice_expired(cfqd, 0);
+		else if (sync && RB_EMPTY(&cfqq->sort_list)) {
+			if (!cfq_arm_slice_timer(cfqd, cfqq))
+				cfq_schedule_dispatch(cfqd);
+		}
+	}
 }
 
 static struct request *
@@ -2106,11 +2145,8 @@
 		 * only expire and reinvoke request handler, if there are
 		 * other queues with pending requests
 		 */
-		if (!cfqd->busy_queues) {
-			cfqd->idle_slice_timer.expires = min(now + cfqd->cfq_slice_idle, cfqq->slice_end);
-			add_timer(&cfqd->idle_slice_timer);
+		if (!cfqd->busy_queues)
 			goto out_cont;
-		}
 
 		/*
 		 * not expired and it has a request pending, let it dispatch