Revert "mmc: add new request stats infrastructure"

This reverts commit 66842b019fba9e31b4232809f95f8cb86b30954d
and parts of commit I05db0959ded400e292eb5e84e1ecfc579b78ee62.

This patch removes the async_request statistics from the operational
code, including the counters and the functions that modify them. Any
code that depends on these statistics is removed as well.
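
For reference, the counters dropped from queue.c live in the per-card
structure accessed as card->async_event_stats. A rough sketch of that
structure, reconstructed only from the fields referenced in this file
(field types and layout are guesses; the real definition, removed
elsewhere in this revert, may differ):

	/*
	 * Hypothetical sketch of the dropped statistics structure.
	 * Field names come from the references removed below; the
	 * counter types are assumed.
	 */
	struct mmc_async_event_stats {
		bool enabled;			/* gates all counter updates */
		u32 fetch_due_to_new_req;	/* re-fetch after MMC_QUEUE_NEW_REQUEST */
		u32 wakeup_new;			/* woke a waiter on a new request */
		u32 new_req_when_new_marked;	/* new request while one already marked */
		u32 q_no_waiting;		/* nobody waiting on the last request */
		u32 wakeup_mq_thread;		/* woke the mmc queue thread */
		u32 no_mmc_request_action;	/* request arrived, no action taken */
	};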

Change-Id: Id4b92a242615afc54647b5b6f057f7d7b88175f1
Signed-off-by: Lee Susman <lsusman@codeaurora.org>
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 9babeab..c4b2d16 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -59,19 +59,12 @@
 	struct request_queue *q = mq->queue;
 	struct request *req;
 	struct mmc_card *card = mq->card;
-	struct mmc_async_event_stats *stats;
-	struct mmc_queue_req *tmp;
-
-	if (!card)
-		return 0;
-
-	stats = &mq->card->async_event_stats;
 
 	current->flags |= PF_MEMALLOC;
 
 	down(&mq->thread_sem);
 	do {
-
+		struct mmc_queue_req *tmp;
 		req = NULL;	/* Must be set to NULL at each iteration */
 
 		spin_lock_irq(q->queue_lock);
@@ -85,8 +78,6 @@
 			mq->issue_fn(mq, req);
 			if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
 				mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
-				if (stats && stats->enabled)
-					stats->fetch_due_to_new_req++;
 				continue; /* fetch again */
 			}
 
@@ -124,7 +115,6 @@
 static void mmc_request(struct request_queue *q)
 {
 	struct mmc_queue *mq = q->queuedata;
-	struct mmc_async_event_stats *stats;
 	struct request *req;
 	unsigned long flags;
 	struct mmc_context_info *cntx;
@@ -136,39 +126,22 @@
 		}
 		return;
 	}
-	if (mq->card) {
-		cntx = &mq->card->host->context_info;
-		stats = &mq->card->async_event_stats;
-	} else
-		return;
 
 	cntx = &mq->card->host->context_info;
-	stats = &mq->card->async_event_stats;
 	if (!mq->mqrq_cur->req && mq->mqrq_prev->req) {
 		/*
 		 * New MMC request arrived when MMC thread may be
 		 * blocked on the previous request to be complete
 		 * with no current request fetched
 		 */
-
 		spin_lock_irqsave(&cntx->lock, flags);
 		if (cntx->is_waiting_last_req) {
-			if (stats && stats->enabled)
-				stats->wakeup_new++;
-			if (cntx->is_new_req)
-				if (stats->enabled)
-					stats->new_req_when_new_marked++;
 			cntx->is_new_req = true;
 			wake_up_interruptible(&cntx->wait);
-		} else if (stats->enabled)
-			stats->q_no_waiting++;
+		}
 		spin_unlock_irqrestore(&cntx->lock, flags);
-	} else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req) {
+	} else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
 		wake_up_process(mq->thread);
-		if (stats->enabled)
-			stats->wakeup_mq_thread++;
-	} else if (stats->enabled)
-			stats->no_mmc_request_action++;
 }
 
 static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)