mmc: fix read latency of urgent request

In order to improve read request latency, an urgent notification is allowed
at any time while current or previous requests exist in the mmc layer.
It is not allowed to stop the following running requests:
- urgent request: block layer should serialize urgent request notification.
- read request: it can't be packed, therefore it is better
to wait for its completion.
- REQ_FUA: interruption and delay in such request will impact sync process
of upper layers.

Change-Id: Id7d1480cce2059c1f23a5f29ad8f74e858be1ee6
Signed-off-by: Konstantin Dorfman <kdorfman@codeaurora.org>
Signed-off-by: Maya Erez <merez@codeaurora.org>
Signed-off-by: Tatyana Brokhman <tlinder@codeaurora.org>
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index cf6f97c..834e0e2 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1831,6 +1831,7 @@
 	}
 
 	mqrq->mmc_active.mrq = &brq->mrq;
+	mqrq->mmc_active.cmd_flags = req->cmd_flags;
 	if (mq->err_check_fn)
 		mqrq->mmc_active.err_check = mq->err_check_fn;
 	else
@@ -2168,6 +2169,7 @@
 	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
 
 	mqrq->mmc_active.mrq = &brq->mrq;
+	mqrq->mmc_active.cmd_flags = req->cmd_flags;
 
 	/*
 	 * This is intended for packed commands tests usage - in case these
@@ -2342,6 +2344,7 @@
 			mq->flags |= MMC_QUEUE_URGENT_REQUEST;
 			ret = 0;
 			break;
+		case MMC_BLK_URGENT_DONE:
 		case MMC_BLK_SUCCESS:
 		case MMC_BLK_PARTIAL:
 			/*
@@ -2542,8 +2545,15 @@
 	}
 
 out:
+	/*
+	 * packet burst is over, when one of the following occurs:
+	 * - no more requests and new request notification is not in progress
+	 * - urgent notification in progress and current request is not urgent
+	 *   (all existing requests completed or reinserted to the block layer)
+	 */
 	if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
-			(mq->flags & MMC_QUEUE_URGENT_REQUEST)) {
+			((mq->flags & MMC_QUEUE_URGENT_REQUEST) &&
+				!(mq->mqrq_cur->req->cmd_flags & REQ_URGENT))) {
 		if (mmc_card_need_bkops(card))
 			mmc_start_bkops(card, false);
 		/* release host only when there are no more requests */
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 65a1322..169ccf1 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -78,7 +78,15 @@
 			mq->issue_fn(mq, req);
 			if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
 				continue; /* fetch again */
-			} else if (mq->flags & MMC_QUEUE_URGENT_REQUEST) {
+			} else if ((mq->flags & MMC_QUEUE_URGENT_REQUEST) &&
+				   (mq->mqrq_cur->req &&
+				!(mq->mqrq_cur->req->cmd_flags & REQ_URGENT))) {
 +				/*
 +				 * clean current request when urgent request
 +				 * processing in progress and current request is
 +				 * not urgent (all existing requests completed
 +				 * or reinserted to the block layer)
 +				 */
 				mq->mqrq_cur->brq.mrq.data = NULL;
 				mq->mqrq_cur->req = NULL;
 			}
@@ -98,6 +106,7 @@
 				break;
 			}
 			mmc_start_delayed_bkops(card);
+			mq->card->host->context_info.is_urgent = false;
 			up(&mq->thread_sem);
 			schedule();
 			down(&mq->thread_sem);
@@ -172,7 +181,7 @@
 	spin_lock_irqsave(&cntx->lock, flags);
 
 	/* do stop flow only when mmc thread is waiting for done */
-	if (cntx->is_waiting) {
+	if (mq->mqrq_cur->req || mq->mqrq_prev->req) {
 		/*
 		 * Urgent request must be executed alone
 		 * so disable the write packing
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index b395fc8..a3c3967 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -646,6 +646,11 @@
 {
 	int remainder;
 
+	if (host->areq->cmd_flags & REQ_URGENT ||
+	    !(host->areq->cmd_flags & REQ_WRITE) ||
+	    (host->areq->cmd_flags & REQ_FUA))
+		return false;
+
 	remainder = (host->ops->get_xfer_remain) ?
 		host->ops->get_xfer_remain(host) : -1;
 	return (remainder > 0);
@@ -732,14 +737,12 @@
 	unsigned long flags;
 
 	while (1) {
-		context_info->is_waiting = true;
 		wait_io_event_interruptible(context_info->wait,
 				(context_info->is_done_rcv ||
 				 context_info->is_new_req  ||
 				 context_info->is_urgent));
 		spin_lock_irqsave(&context_info->lock, flags);
 		is_urgent = context_info->is_urgent;
-		context_info->is_waiting = false;
 		context_info->is_waiting_last_req = false;
 		spin_unlock_irqrestore(&context_info->lock, flags);
 		if (context_info->is_done_rcv) {
@@ -758,7 +761,10 @@
 					 */
 					if ((err == MMC_BLK_PARTIAL) ||
 						(err == MMC_BLK_SUCCESS))
-						err = MMC_BLK_URGENT;
+						err = pending_is_urgent ?
+						       MMC_BLK_URGENT_DONE
+						       : MMC_BLK_URGENT;
+
 					/* reset is_urgent for next request */
 					context_info->is_urgent = false;
 				}
@@ -931,6 +937,8 @@
 	int err = 0;
 	int start_err = 0;
 	struct mmc_async_req *data = host->areq;
+	unsigned long flags;
+	bool is_urgent;
 
 	/* Prepare a new request */
 	if (areq) {
@@ -938,20 +946,29 @@
 		 * start waiting here for possible interrupt
 		 * because mmc_pre_req() taking long time
 		 */
-		host->context_info.is_waiting = true;
 		mmc_pre_req(host, areq->mrq, !host->areq);
 	}
 
 	if (host->areq) {
 		err = mmc_wait_for_data_req_done(host, host->areq->mrq,
 				areq);
-		if (err == MMC_BLK_URGENT) {
+		if (err == MMC_BLK_URGENT || err == MMC_BLK_URGENT_DONE) {
 			mmc_post_req(host, host->areq->mrq, 0);
-			if (areq) { /* reinsert ready request */
-				areq->reinsert_req(areq);
-				mmc_post_req(host, areq->mrq, 0);
-			}
 			host->areq = NULL;
+			if (areq) {
+				if (!(areq->cmd_flags & REQ_URGENT)) {
+					areq->reinsert_req(areq);
+					mmc_post_req(host, areq->mrq, 0);
+				} else {
+					start_err = __mmc_start_data_req(host,
+							areq->mrq);
+					if (start_err)
+						mmc_post_req(host, areq->mrq,
+								-EINVAL);
+					else
+						host->areq = areq;
+				}
+			}
 			goto exit;
 		} else if (err == MMC_BLK_NEW_REQUEST) {
 			if (error)
@@ -974,9 +991,27 @@
 				 mmc_hostname(host), __func__);
 		}
 	}
+	if (!err && areq) {
+		/* urgent notification may come again */
+		spin_lock_irqsave(&host->context_info.lock, flags);
+		is_urgent = host->context_info.is_urgent;
+		host->context_info.is_urgent = false;
+		spin_unlock_irqrestore(&host->context_info.lock, flags);
 
-	if (!err && areq)
-		start_err = __mmc_start_data_req(host, areq->mrq);
+		if (!is_urgent || (areq->cmd_flags & REQ_URGENT)) {
+			start_err = __mmc_start_data_req(host, areq->mrq);
+		} else {
+			/* previous request was done */
+			err = MMC_BLK_URGENT_DONE;
+			if (host->areq) {
+				mmc_post_req(host, host->areq->mrq, 0);
+				host->areq = NULL;
+			}
+			areq->reinsert_req(areq);
+			mmc_post_req(host, areq->mrq, 0);
+			goto exit;
+		}
+	}
 
 	if (host->areq)
 		mmc_post_req(host, host->areq->mrq, 0);
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index dd61824..e4aab43 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -215,6 +215,7 @@
 	MMC_BLK_NOMEDIUM,
 	MMC_BLK_NEW_REQUEST,
 	MMC_BLK_URGENT,
+	MMC_BLK_URGENT_DONE,
 };
 
 struct mmc_wr_pack_stats {
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 1a3c662..2bfd9a2 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -150,6 +150,8 @@
 struct mmc_async_req {
 	/* active mmc request */
 	struct mmc_request	*mrq;
+	unsigned int cmd_flags; /* copied from struct request */
+
 	/*
 	 * Check error status of completed mmc request.
 	 * Returns 0 if success otherwise non zero.
@@ -170,10 +172,6 @@
  *			NULL fetched as second request. MMC_BLK_NEW_REQUEST
  *			notification will wake up mmc thread from waiting.
  * @is_urgent		wake up reason was urgent request
- * @is_waiting		is true, when first request is running on the bus,
- *			second request preparation started or mmc thread is
- *			waiting for the completion of the current request
- *			(latter case is like @is_waiting_last_req)
  * @wait		wait queue
  * @lock		lock to protect data fields
  */
@@ -182,7 +180,6 @@
 	bool			is_new_req;
 	bool			is_waiting_last_req;
 	bool			is_urgent;
-	bool			is_waiting;
 	wait_queue_head_t	wait;
 	spinlock_t		lock;
 };