block: row: Move dispatch quantum into struct row_queue

There is no point in keeping the dispatch quantum of a queue
outside of it. Moving it into the row_queue structure removes
an extra level of indirection on every access.
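For illustration, the access patterns below are taken from the
hunks in this patch; the rqueue fields previously sat one level
deeper than the quantum:

	/* before: per-queue state behind an anonymous wrapper */
	rd->row_queues[i].rqueue.nr_dispatched = 0;
	rd->row_queues[i].disp_quantum = queue_quantum[i];

	/* after: everything lives directly in struct row_queue */
	rd->row_queues[i].nr_dispatched = 0;
	rd->row_queues[i].disp_quantum = queue_quantum[i];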

Change-Id: Ic77571818b643e71f9aafbb2ca93d0a92158b199
Signed-off-by: Tatyana Brokhman <tlinder@codeaurora.org>
diff --git a/block/row-iosched.c b/block/row-iosched.c
index f610a39..bf9ad05 100644
--- a/block/row-iosched.c
+++ b/block/row-iosched.c
@@ -105,6 +105,8 @@
  *			the current dispatch cycle
  * @slice:		number of requests to dispatch in a cycle
  * @nr_req:		number of requests in queue
+ * @disp_quantum:	number of requests this queue may
+ *			dispatch in a dispatch cycle
  * @idle_data:		data for idling on queues
  *
  */
@@ -117,6 +119,7 @@
 	unsigned int		slice;
 
 	unsigned int		nr_req;
+	int			disp_quantum;
 
 	/* used only for READ queues */
 	struct rowq_idling_data	idle_data;
@@ -141,8 +144,7 @@
 /**
 * struct row_data - Per block device rqueue structure
  * @dispatch_queue:	dispatch rqueue
- * @row_queues:		array of priority request queues with
- *			dispatch quantum per rqueue
+ * @row_queues:		array of priority request queues
  * @curr_queue:		index in the row_queues array of the
  *			currently serviced rqueue
  * @read_idle:		data for idling after READ request
@@ -155,10 +157,7 @@
 struct row_data {
 	struct request_queue		*dispatch_queue;
 
-	struct {
-		struct row_queue	rqueue;
-		int			disp_quantum;
-	} row_queues[ROWQ_MAX_PRIO];
+	struct row_queue row_queues[ROWQ_MAX_PRIO];
 
 	enum row_queue_prio		curr_queue;
 
@@ -198,8 +197,7 @@
 {
 	int i;
 
-	row_log(rd->dispatch_queue, " Queues status (curr_queue=%d):",
-			rd->curr_queue);
+	row_log(rd->dispatch_queue, " Queues status:");
 	for (i = 0; i < ROWQ_MAX_PRIO; i++)
 		row_log(rd->dispatch_queue,
 			"queue%d: dispatched= %d, nr_req=%d", i,
@@ -226,7 +224,7 @@
 
 	row_log_rowq(rd, rd->curr_queue, "Performing delayed work");
 	/* Mark idling process as done */
-	rd->row_queues[rd->curr_queue].rqueue.idle_data.begin_idling = false;
+	rd->row_queues[rd->curr_queue].idle_data.begin_idling = false;
 
 	if (!(rd->nr_reqs[0] + rd->nr_reqs[1]))
 		row_log(rd->dispatch_queue, "No requests in scheduler");
@@ -251,7 +249,7 @@
 	int i;
 
 	for (i = 0; i < ROWQ_MAX_PRIO; i++)
-		rd->row_queues[i].rqueue.nr_dispatched = 0;
+		rd->row_queues[i].nr_dispatched = 0;
 
 	rd->curr_queue = ROWQ_PRIO_HIGH_READ;
 	row_log(rd->dispatch_queue, "Restarting cycle");
@@ -355,7 +353,7 @@
 
 	for (i = 0; i < ROWQ_MAX_PRIO; i++)
 		if (urgent_queues[i] && row_rowq_unserved(rd, i) &&
-		    !list_empty(&rd->row_queues[i].rqueue.fifo)) {
+		    !list_empty(&rd->row_queues[i].fifo)) {
 			row_log_rowq(rd, i,
 				     "Urgent request pending (curr=%i)",
 				     rd->curr_queue);
@@ -394,13 +392,13 @@
 {
 	struct request *rq;
 
-	rq = rq_entry_fifo(rd->row_queues[rd->curr_queue].rqueue.fifo.next);
+	rq = rq_entry_fifo(rd->row_queues[rd->curr_queue].fifo.next);
 	row_remove_request(rd->dispatch_queue, rq);
 	elv_dispatch_add_tail(rd->dispatch_queue, rq);
-	rd->row_queues[rd->curr_queue].rqueue.nr_dispatched++;
+	rd->row_queues[rd->curr_queue].nr_dispatched++;
 	row_clear_rowq_unserved(rd, rd->curr_queue);
 	row_log_rowq(rd, rd->curr_queue, " Dispatched request nr_disp = %d",
-		     rd->row_queues[rd->curr_queue].rqueue.nr_dispatched);
+		     rd->row_queues[rd->curr_queue].nr_dispatched);
 }
 
 /*
@@ -426,7 +424,7 @@
 	 * Loop over all queues to find the next queue that is not empty.
 	 * Stop when you get back to curr_queue
 	 */
-	while (list_empty(&rd->row_queues[rd->curr_queue].rqueue.fifo)
+	while (list_empty(&rd->row_queues[rd->curr_queue].fifo)
 	       && rd->curr_queue != prev_curr_queue) {
 		/* Mark rqueue as unserved */
 		row_mark_rowq_unserved(rd, rd->curr_queue);
@@ -458,10 +456,10 @@
 	 */
 	for (i = 0; i < currq; i++) {
 		if (row_rowq_unserved(rd, i) &&
-		    !list_empty(&rd->row_queues[i].rqueue.fifo)) {
+		    !list_empty(&rd->row_queues[i].fifo)) {
 			row_log_rowq(rd, currq,
 				" Preemting for unserved rowq%d. (nr_req=%u)",
-				i, rd->row_queues[currq].rqueue.nr_req);
+				i, rd->row_queues[currq].nr_req);
 			rd->curr_queue = i;
 			row_dispatch_insert(rd);
 			ret = 1;
@@ -469,9 +467,9 @@
 		}
 	}
 
-	if (rd->row_queues[currq].rqueue.nr_dispatched >=
+	if (rd->row_queues[currq].nr_dispatched >=
 	    rd->row_queues[currq].disp_quantum) {
-		rd->row_queues[currq].rqueue.nr_dispatched = 0;
+		rd->row_queues[currq].nr_dispatched = 0;
 		row_log_rowq(rd, currq, "Expiring rqueue");
 		ret = row_choose_queue(rd);
 		if (ret)
@@ -480,7 +478,7 @@
 	}
 
 	/* Dispatch from curr_queue */
-	if (list_empty(&rd->row_queues[currq].rqueue.fifo)) {
+	if (list_empty(&rd->row_queues[currq].fifo)) {
 		/* check idling */
 		if (delayed_work_pending(&rd->read_idle.idle_work)) {
 			if (force) {
@@ -496,7 +494,7 @@
 		}
 
 		if (!force && queue_idling_enabled[currq] &&
-		    rd->row_queues[currq].rqueue.idle_data.begin_idling) {
+		    rd->row_queues[currq].idle_data.begin_idling) {
 			if (!queue_delayed_work(rd->read_idle.idle_workqueue,
 						&rd->read_idle.idle_work,
 						rd->read_idle.idle_time)) {
@@ -543,12 +541,12 @@
 		return NULL;
 
 	for (i = 0; i < ROWQ_MAX_PRIO; i++) {
-		INIT_LIST_HEAD(&rdata->row_queues[i].rqueue.fifo);
+		INIT_LIST_HEAD(&rdata->row_queues[i].fifo);
 		rdata->row_queues[i].disp_quantum = queue_quantum[i];
-		rdata->row_queues[i].rqueue.rdata = rdata;
-		rdata->row_queues[i].rqueue.prio = i;
-		rdata->row_queues[i].rqueue.idle_data.begin_idling = false;
-		rdata->row_queues[i].rqueue.idle_data.last_insert_time =
+		rdata->row_queues[i].rdata = rdata;
+		rdata->row_queues[i].prio = i;
+		rdata->row_queues[i].idle_data.begin_idling = false;
+		rdata->row_queues[i].idle_data.last_insert_time =
 			ktime_set(0, 0);
 	}
 
@@ -587,7 +585,7 @@
 	int i;
 
 	for (i = 0; i < ROWQ_MAX_PRIO; i++)
-		BUG_ON(!list_empty(&rd->row_queues[i].rqueue.fifo));
+		BUG_ON(!list_empty(&rd->row_queues[i].fifo));
 	(void)cancel_delayed_work_sync(&rd->read_idle.idle_work);
 	BUG_ON(delayed_work_pending(&rd->read_idle.idle_work));
 	destroy_workqueue(rd->read_idle.idle_workqueue);