mmc: block: Add cache barrier support

The eMMC cache barrier provides a way to perform delayed,
in-order flushing of cached data. The host can use it to avoid
flushing delays while still maintaining the order of the data
requests in the cache.

If the device receives more barrier requests than it supports,
it treats the excess barrier requests as normal flush requests.

If the eMMC cache flushing policy is set to 1, the device
inherently flushes all cached requests in FIFO order. For such
devices, per the spec, barrier requests are redundant and would
only add unnecessary overhead to both the host and the device,
so make sure not to send barrier requests in that case.
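
For illustration, the dispatch added by this patch behaves
roughly as in the user-space sketch below. handle_cache_req(),
send_cache_barrier() and send_cache_flush() are illustrative
stand-ins for the driver's mmc_blk_issue_flush() path,
mmc_cache_barrier() and mmc_flush_cache(); they are not real
kernel APIs:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins for the eMMC cache ops; always succeed here. */
    static int send_cache_barrier(void) { return 0; }
    static int send_cache_flush(void)   { return 0; }

    static int handle_cache_req(bool is_barrier, bool is_flush,
                                unsigned int cache_flush_policy)
    {
            int ret = 0;

            if (is_barrier) {
                    /*
                     * FIFO flush policy (bit 0 set): the device
                     * already flushes in order, so complete the
                     * barrier without sending any command.
                     */
                    if (cache_flush_policy & 1)
                            return 0;

                    /* Fall back to a full flush if the barrier fails. */
                    ret = send_cache_barrier();
                    if (ret)
                            ret = send_cache_flush();
            } else if (is_flush) {
                    ret = send_cache_flush();
            }

            return ret;
    }

    int main(void)
    {
            /* Barrier to a FIFO-policy device: completed early. */
            printf("%d\n", handle_cache_req(true, false, 1));
            /* Plain flush request: flushed as before. */
            printf("%d\n", handle_cache_req(false, true, 0));
            return 0;
    }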

Change-Id: Ia7af316800a6895942d3cabcd64600d56fab25a6
Signed-off-by: Sahitya Tummala <stummala@codeaurora.org>
[subhashj@codeaurora.org: fixed trivial merge conflicts]
Signed-off-by: Subhash Jadavani <subhashj@codeaurora.org>
[xiaonian@codeaurora.org: fixed trivial merge conflicts,
changed cmd_flags to req_op() in the mmc_blk_issue_flush()
and mmc_blk_issue_rq() functions to judge whether a req
requires a flush]
Signed-off-by: Xiaonian Wang <xiaonian@codeaurora.org>
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 8ba15a6..47fe11a 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -2063,7 +2063,30 @@
 	struct mmc_card *card = md->queue.card;
 	int ret = 0;
 
-	ret = mmc_flush_cache(card);
+	if (!req)
+		return 0;
+
+	if (req->cmd_flags & REQ_BARRIER) {
+		/*
+		 * If eMMC cache flush policy is set to 1, then the device
+		 * shall flush the requests in First-In-First-Out (FIFO) order.
+		 * In this case, as per spec, the host must not send any cache
+		 * barrier requests as they are redundant and add unnecessary
+		 * overhead to both device and host.
+		 */
+		if (card->ext_csd.cache_flush_policy & 1)
+			goto end_req;
+
+		/*
+		 * In case barrier is not supported or enabled in the device,
+		 * use flush as a fallback option.
+		 */
+		ret = mmc_cache_barrier(card);
+		if (ret)
+			ret = mmc_flush_cache(card);
+	} else if (req_op(req) == REQ_OP_FLUSH) {
+		ret = mmc_flush_cache(card);
+	}
 	if (ret == -ENODEV) {
 		pr_err("%s: %s: restart mmc card",
 				req->rq_disk->disk_name, __func__);
@@ -2097,6 +2120,7 @@
 		}
 	}
 #endif
+end_req:
 	blk_end_request_all(req, ret);
 
 	return ret ? 0 : 1;
@@ -3671,6 +3695,7 @@
 	struct mmc_card *card = md->queue.card;
 	struct mmc_host *host = card->host;
 	unsigned long flags;
+	unsigned int cmd_flags = req ? req->cmd_flags : 0;
 	bool req_is_special = mmc_req_is_special(req);
 
 	if (req && !mq->mqrq_prev->req)
@@ -3702,7 +3727,8 @@
 			ret = mmc_blk_issue_secdiscard_rq(mq, req);
 		else
 			ret = mmc_blk_issue_discard_rq(mq, req);
-	} else if (req && req_op(req) == REQ_OP_FLUSH) {
+	} else if ((req && req_op(req) == REQ_OP_FLUSH) ||
+			(cmd_flags & REQ_BARRIER)) {
 		/* complete ongoing async transfer before issuing flush */
 		if (card->host->areq)
 			mmc_blk_issue_rw_rq(mq, NULL);