Merge "mmc: block: Add error handling to command queue host" into msm-4.9
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 4fdc4eb..8373903 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -65,6 +65,7 @@
 #define MMC_BLK_TIMEOUT_MS  (30 * 1000)        /* 30 sec timeout */
 #define MMC_SANITIZE_REQ_TIMEOUT 240000
 #define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
+#define MMC_CMDQ_STOP_TIMEOUT_MS 100
 
 #define mmc_req_rel_wr(req)	((req->cmd_flags & REQ_FUA) && \
 				  (rq_data_dir(req) == WRITE))
@@ -79,6 +80,7 @@
 			stats->pack_stop_reason[reason]++;		\
 	} while (0)
 
+#define MAX_RETRIES 5
 #define PCKD_TRGR_INIT_MEAN_POTEN	17
 #define PCKD_TRGR_POTEN_LOWER_BOUND	5
 #define PCKD_TRGR_URGENT_PENALTY	2
@@ -3004,6 +3006,103 @@
 }
 EXPORT_SYMBOL(mmc_blk_cmdq_issue_flush_rq);
 
+static void mmc_blk_cmdq_reset(struct mmc_host *host, bool clear_all)
+{
+	if (!host->cmdq_ops->reset)
+		return;
+
+	if (!test_bit(CMDQ_STATE_HALT, &host->cmdq_ctx.curr_state)) {
+		if (mmc_cmdq_halt(host, true)) {
+			pr_err("%s: halt failed\n", mmc_hostname(host));
+			goto reset;
+		}
+	}
+
+	if (clear_all)
+		mmc_cmdq_discard_queue(host, 0);
+reset:
+	mmc_hw_reset(host);
+	host->cmdq_ops->reset(host, true);
+	clear_bit(CMDQ_STATE_HALT, &host->cmdq_ctx.curr_state);
+}
+
+static void mmc_blk_cmdq_err(struct mmc_queue *mq)
+{
+	int err;
+	int retry = 0;
+	int gen_err;
+	u32 status;
+
+	struct mmc_host *host = mq->card->host;
+	struct mmc_request *mrq = host->err_mrq;
+	struct mmc_card *card = mq->card;
+	struct mmc_cmdq_context_info *ctx_info = &host->cmdq_ctx;
+
+	err = mmc_cmdq_halt(host, true);
+	if (err) {
+		pr_err("%s: halt failed: %d\n", mmc_hostname(host), err);
+		goto reset;
+	}
+
+	/* RED error - Fatal: requires reset */
+	if (mrq->cmdq_req->resp_err) {
+		pr_crit("%s: Response error detected: Device in bad state\n",
+			mmc_hostname(host));
+		blk_end_request_all(mrq->req, -EIO);
+		goto reset;
+	}
+
+	if (mrq->data->error) {
+		blk_end_request_all(mrq->req, mrq->data->error);
+		for (; retry < MAX_RETRIES; retry++) {
+			err = get_card_status(card, &status, 0);
+			if (!err)
+				break;
+		}
+
+		if (err) {
+			pr_err("%s: No response from card !!!\n",
+			       mmc_hostname(host));
+			goto reset;
+		}
+
+		if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
+		    R1_CURRENT_STATE(status) == R1_STATE_RCV) {
+			err = send_stop(card, MMC_CMDQ_STOP_TIMEOUT_MS,
+					mrq->req, &gen_err, &status);
+			if (err) {
+				pr_err("%s: error %d sending stop command, card status 0x%08x\n",
+					mrq->req->rq_disk->disk_name,
+					err, status);
+				goto reset;
+			}
+		}
+
+		if (mmc_cmdq_discard_queue(host, mrq->req->tag))
+			goto reset;
+		else
+			goto unhalt;
+	}
+
+	/* DCMD commands */
+	if (mrq->cmd->error)
+		blk_end_request_all(mrq->req, mrq->cmd->error);
+
+reset:
+	spin_lock_irq(mq->queue->queue_lock);
+	blk_queue_invalidate_tags(mrq->req->q);
+	spin_unlock_irq(mq->queue->queue_lock);
+	mmc_blk_cmdq_reset(host, true);
+	goto out;
+
+unhalt:
+	mmc_cmdq_halt(host, false);
+
+out:
+	if (test_and_clear_bit(0, &ctx_info->req_starved))
+		blk_run_queue(mrq->req->q);
+}
+
 /* invoked by block layer in softirq context */
 void mmc_blk_cmdq_complete_rq(struct request *rq)
 {
@@ -3020,26 +3119,36 @@
 	else if (mrq->data && mrq->data->error)
 		err = mrq->data->error;
 
+	/* clear pending request */
+	BUG_ON(!test_and_clear_bit(cmdq_req->tag,
+				   &ctx_info->active_reqs));
+
 	mmc_cmdq_post_req(host, mrq, err);
 	if (err) {
 		pr_err("%s: %s: txfr error: %d\n", mmc_hostname(mrq->host),
 		       __func__, err);
-		set_bit(CMDQ_STATE_ERR, &ctx_info->curr_state);
-		WARN_ON(1);
+		if (test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state)) {
+			pr_err("%s: CQ in error state, ending current req: %d\n",
+				__func__, err);
+			blk_end_request_all(rq, err);
+		} else {
+			set_bit(CMDQ_STATE_ERR, &ctx_info->curr_state);
+			schedule_work(&mq->cmdq_err_work);
+		}
+		goto out;
 	}
 
-	BUG_ON(!test_and_clear_bit(cmdq_req->tag,
-				   &ctx_info->active_reqs));
 	if (cmdq_req->cmdq_req_flags & DCMD) {
 		clear_bit(CMDQ_STATE_DCMD_ACTIVE, &ctx_info->curr_state);
-		blk_end_request_all(rq, 0);
+		blk_end_request_all(rq, err);
 		goto out;
 	}
 
 	blk_end_request(rq, err, cmdq_req->data.bytes_xfered);
 
 out:
-	if (test_and_clear_bit(0, &ctx_info->req_starved))
+	if (!test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state) &&
+			test_and_clear_bit(0, &ctx_info->req_starved))
 		blk_run_queue(mq->queue);
 	mmc_release_host(host);
 	return;
@@ -3509,6 +3618,7 @@
 		md->flags |= MMC_BLK_CMD_QUEUE;
 		md->queue.cmdq_complete_fn = mmc_blk_cmdq_complete_rq;
 		md->queue.cmdq_issue_fn = mmc_blk_cmdq_issue_rq;
+		md->queue.cmdq_error_fn = mmc_blk_cmdq_err;
 	}
 
 	if (mmc_card_mmc(card) && !card->cmdq_init &&
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index f2d3356..94c6035 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -573,6 +573,14 @@
 	mq->cmdq_complete_fn(rq);
 }
 
+static void mmc_cmdq_error_work(struct work_struct *work)
+{
+	struct mmc_queue *mq = container_of(work, struct mmc_queue,
+					    cmdq_err_work);
+
+	mq->cmdq_error_fn(mq);
+}
+
 int mmc_cmdq_init(struct mmc_queue *mq, struct mmc_card *card)
 {
 	int i, ret = 0;
@@ -613,7 +621,10 @@
 	}
 
 	blk_queue_softirq_done(mq->queue, mmc_cmdq_softirq_done);
+	INIT_WORK(&mq->cmdq_err_work, mmc_cmdq_error_work);
+
 	card->cmdq_init = true;
+
 	goto out;
 
 free_mqrq_sg:
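
The error recovery cannot run where the failure is detected: mmc_blk_cmdq_complete_rq() runs in softirq context (see block.c above), while the recovery path sleeps in mmc_cmdq_halt(), while polling the card status (get_card_status()) and during the reset. That is why the completion handler only sets CMDQ_STATE_ERR and calls schedule_work() on the cmdq_err_work item initialized here. A minimal, self-contained sketch of this deferral pattern is shown below; every name in it is hypothetical and the module is illustrative only, not part of the patch.

/* deferral sketch: atomic context flags an error, process context recovers */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>

struct demo_ctx {
	unsigned long state;			/* bit 0: error pending */
	struct work_struct err_work;		/* mirrors mq->cmdq_err_work */
};

static struct demo_ctx demo;

static void demo_err_work(struct work_struct *work)
{
	struct demo_ctx *ctx = container_of(work, struct demo_ctx, err_work);

	/* process context: sleeping recovery (halt, discard, reset) goes here */
	pr_info("demo: recovering, state=0x%lx\n", ctx->state);
	clear_bit(0, &ctx->state);
}

/* would be called from the softirq/IRQ completion path in a real driver */
static void demo_complete_with_error(void)
{
	if (!test_and_set_bit(0, &demo.state))
		schedule_work(&demo.err_work);
}

static int __init demo_init(void)
{
	INIT_WORK(&demo.err_work, demo_err_work);
	demo_complete_with_error();
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_work_sync(&demo.err_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
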
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index ebe4a08..7562f03 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -62,6 +62,7 @@
 	int (*cmdq_issue_fn)(struct mmc_queue *,
 			     struct request *);
 	void (*cmdq_complete_fn)(struct request *);
+	void (*cmdq_error_fn)(struct mmc_queue *);
 	void			*data;
 	struct request_queue	*queue;
 	struct mmc_queue_req	mqrq[2];
@@ -72,6 +73,8 @@
 	int			num_of_potential_packed_wr_reqs;
 	int			num_wr_reqs_to_start_packing;
 	bool			no_pack_for_random;
+	struct work_struct	cmdq_err_work;
+
 	int (*err_check_fn)(struct mmc_card *, struct mmc_async_req *);
 	void (*packed_test_fn)(struct request_queue *, struct mmc_queue_req *);
 #ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index a774e8a..2ac25738 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1285,6 +1285,19 @@
 }
 
 /**
+ *	mmc_cmdq_discard_queue - discard the task(s) in the device
+ *	@host: host instance
+ *	@tasks: ID of the task to be discarded
+ *		0: discard all queued tasks
+ */
+int mmc_cmdq_discard_queue(struct mmc_host *host, u32 tasks)
+{
+	return mmc_discard_queue(host, tasks);
+}
+EXPORT_SYMBOL(mmc_cmdq_discard_queue);
+
+
+/**
  *	mmc_cmdq_post_req - post process of a completed request
  *	@host: host instance
  *	@mrq: the request to be processed
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 508c81a..b41d910 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -837,3 +837,21 @@
 {
 	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
 }
+
+int mmc_discard_queue(struct mmc_host *host, u32 tasks)
+{
+	struct mmc_command cmd = {0};
+
+	cmd.opcode = MMC_CMDQ_TASK_MGMT;
+	if (tasks) {
+		cmd.arg = DISCARD_TASK;
+		cmd.arg |= (tasks << 16);
+	} else {
+		cmd.arg = DISCARD_QUEUE;
+	}
+
+	cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
+
+	return mmc_wait_for_cmd(host, &cmd, 0);
+}
+EXPORT_SYMBOL(mmc_discard_queue);
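
The argument built by mmc_discard_queue() above is assumed here to follow the eMMC 5.1 CMD48 (task management) layout: TM op-code in bits [3:0] (1 = discard entire queue, 2 = discard a single task) and the task ID in bits [20:16]. The standalone program below reproduces that encoding for a few inputs; it is illustrative only and not part of the patch.

/* CMD48 argument encoding sketch (userspace, illustrative only) */
#include <stdint.h>
#include <stdio.h>

#define DISCARD_QUEUE	0x1
#define DISCARD_TASK	0x2

static uint32_t cmd48_arg(uint32_t task)
{
	/* task == 0 discards the whole queue, matching mmc_discard_queue() */
	return task ? (DISCARD_TASK | (task << 16)) : DISCARD_QUEUE;
}

int main(void)
{
	printf("discard all     : 0x%08x\n", cmd48_arg(0));	/* 0x00000001 */
	printf("discard task  5 : 0x%08x\n", cmd48_arg(5));	/* 0x00050002 */
	printf("discard task 31 : 0x%08x\n", cmd48_arg(31));	/* 0x001f0002 */
	return 0;
}
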
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index f1b8e81..1eea7bd 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -27,6 +27,7 @@
 int mmc_bus_test(struct mmc_card *card, u8 bus_width);
 int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status);
 int mmc_can_ext_csd(struct mmc_card *card);
+int mmc_discard_queue(struct mmc_host *host, u32 tasks);
 int mmc_switch_status_error(struct mmc_host *host, u32 status);
 int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
 		unsigned int timeout_ms, bool use_busy_signal, bool send_status,
diff --git a/drivers/mmc/host/cmdq_hci.c b/drivers/mmc/host/cmdq_hci.c
index 64be0ce..bc913b6 100644
--- a/drivers/mmc/host/cmdq_hci.c
+++ b/drivers/mmc/host/cmdq_hci.c
@@ -32,6 +32,12 @@
 /* 1 sec */
 #define HALT_TIMEOUT_MS 1000
 
+static inline struct mmc_request *get_req_by_tag(struct cmdq_host *cq_host,
+					  unsigned int tag)
+{
+	return cq_host->mrq_slot[tag];
+}
+
 static inline u8 *get_desc(struct cmdq_host *cq_host, u8 tag)
 {
 	return cq_host->desc_base + (tag * cq_host->slot_sz);
@@ -122,43 +128,43 @@
 {
 	struct mmc_host *mmc = cq_host->mmc;
 
-	pr_info(DRV_NAME ": ========== REGISTER DUMP (%s)==========\n",
+	pr_err(DRV_NAME ": ========== REGISTER DUMP (%s)==========\n",
 		mmc_hostname(mmc));
 
-	pr_info(DRV_NAME ": Caps: 0x%08x	  | Version:  0x%08x\n",
+	pr_err(DRV_NAME ": Caps: 0x%08x		  | Version:  0x%08x\n",
 		cmdq_readl(cq_host, CQCAP),
 		cmdq_readl(cq_host, CQVER));
-	pr_info(DRV_NAME ": Queing config: 0x%08x | Queue Ctrl:  0x%08x\n",
+	pr_err(DRV_NAME ": Queuing config: 0x%08x | Queue Ctrl:  0x%08x\n",
 		cmdq_readl(cq_host, CQCFG),
 		cmdq_readl(cq_host, CQCTL));
-	pr_info(DRV_NAME ": Int stat: 0x%08x	  | Int enab:  0x%08x\n",
+	pr_err(DRV_NAME ": Int stat: 0x%08x	  | Int enab:  0x%08x\n",
 		cmdq_readl(cq_host, CQIS),
 		cmdq_readl(cq_host, CQISTE));
-	pr_info(DRV_NAME ": Int sig: 0x%08x	  | Int Coal:  0x%08x\n",
+	pr_err(DRV_NAME ": Int sig: 0x%08x	  | Int Coal:  0x%08x\n",
 		cmdq_readl(cq_host, CQISGE),
 		cmdq_readl(cq_host, CQIC));
-	pr_info(DRV_NAME ": TDL base: 0x%08x	  | TDL up32:  0x%08x\n",
+	pr_err(DRV_NAME ": TDL base: 0x%08x	  | TDL up32:  0x%08x\n",
 		cmdq_readl(cq_host, CQTDLBA),
 		cmdq_readl(cq_host, CQTDLBAU));
-	pr_info(DRV_NAME ": Doorbell: 0x%08x	  | Comp Notif:  0x%08x\n",
+	pr_err(DRV_NAME ": Doorbell: 0x%08x	  | Comp Notif:  0x%08x\n",
 		cmdq_readl(cq_host, CQTDBR),
 		cmdq_readl(cq_host, CQTCN));
-	pr_info(DRV_NAME ": Dev queue: 0x%08x	  | Dev Pend:  0x%08x\n",
+	pr_err(DRV_NAME ": Dev queue: 0x%08x	  | Dev Pend:  0x%08x\n",
 		cmdq_readl(cq_host, CQDQS),
 		cmdq_readl(cq_host, CQDPT));
-	pr_info(DRV_NAME ": Task clr: 0x%08x	  | Send stat 1:  0x%08x\n",
+	pr_err(DRV_NAME ": Task clr: 0x%08x	  | Send stat 1:  0x%08x\n",
 		cmdq_readl(cq_host, CQTCLR),
 		cmdq_readl(cq_host, CQSSC1));
-	pr_info(DRV_NAME ": Send stat 2: 0x%08x	  | DCMD resp:  0x%08x\n",
+	pr_err(DRV_NAME ": Send stat 2: 0x%08x	  | DCMD resp:  0x%08x\n",
 		cmdq_readl(cq_host, CQSSC2),
 		cmdq_readl(cq_host, CQCRDCT));
-	pr_info(DRV_NAME ": Resp err mask: 0x%08x | Task err:  0x%08x\n",
+	pr_err(DRV_NAME ": Resp err mask: 0x%08x  | Task err:  0x%08x\n",
 		cmdq_readl(cq_host, CQRMEM),
 		cmdq_readl(cq_host, CQTERRI));
-	pr_info(DRV_NAME ": Resp idx 0x%08x	  | Resp arg:  0x%08x\n",
+	pr_err(DRV_NAME ": Resp idx: 0x%08x	  | Resp arg:  0x%08x\n",
 		cmdq_readl(cq_host, CQCRI),
 		cmdq_readl(cq_host, CQCRA));
-	pr_info(DRV_NAME ": ===========================================\n");
+	pr_err(DRV_NAME ": ===========================================\n");
 
 	cmdq_dump_debug_ram(cq_host);
 	if (cq_host->ops->dump_vendor_regs)
@@ -296,7 +302,6 @@
 				CQTDLBA);
 		cmdq_writel(cq_host, upper_32_bits(cq_host->desc_dma_base),
 				CQTDLBAU);
-		cmdq_dumpregs(cq_host);
 	}
 
 	/*
@@ -347,6 +352,49 @@
 	cq_host->enabled = false;
 }
 
+static void cmdq_reset(struct mmc_host *mmc, bool soft)
+{
+	struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
+	unsigned int cqcfg;
+	unsigned int tdlba;
+	unsigned int tdlbau;
+	unsigned int rca;
+	int ret;
+
+	cqcfg = cmdq_readl(cq_host, CQCFG);
+	tdlba = cmdq_readl(cq_host, CQTDLBA);
+	tdlbau = cmdq_readl(cq_host, CQTDLBAU);
+	rca = cmdq_readl(cq_host, CQSSC2);
+
+	cmdq_disable(mmc, true);
+
+	if (cq_host->ops->reset) {
+		ret = cq_host->ops->reset(mmc);
+		if (ret) {
+			pr_crit("%s: failed to reset CMDQ controller: %d\n",
+				mmc_hostname(mmc), ret);
+			BUG();
+		}
+	}
+
+	cmdq_writel(cq_host, tdlba, CQTDLBA);
+	cmdq_writel(cq_host, tdlbau, CQTDLBAU);
+
+	if (cq_host->ops->clear_set_irqs)
+		cq_host->ops->clear_set_irqs(mmc, true);
+
+	cmdq_clear_set_irqs(cq_host, 0x0, CQ_INT_ALL);
+
+	/* cq_host would use this rca to address the card */
+	cmdq_writel(cq_host, rca, CQSSC2);
+
+	/* ensure the writes are done before enabling CQE */
+	mb();
+
+	cmdq_writel(cq_host, cqcfg, CQCFG);
+	cq_host->enabled = true;
+}
+
 static void cmdq_prep_task_desc(struct mmc_request *mrq,
 					u64 *data, bool intr, bool qbr)
 {
@@ -539,19 +587,66 @@
 	struct mmc_request *mrq;
 	struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
 
-	mrq = cq_host->mrq_slot[tag];
+	mrq = get_req_by_tag(cq_host, tag);
 	mrq->done(mrq);
 }
 
-irqreturn_t cmdq_irq(struct mmc_host *mmc, u32 intmask)
+irqreturn_t cmdq_irq(struct mmc_host *mmc, int err)
 {
 	u32 status;
 	unsigned long tag = 0, comp_status;
 	struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
+	unsigned long err_info = 0;
+	struct mmc_request *mrq;
 
 	status = cmdq_readl(cq_host, CQIS);
 	cmdq_writel(cq_host, status, CQIS);
 
+	if (!status && !err)
+		return IRQ_NONE;
+
+	if (err || (status & CQIS_RED)) {
+		err_info = cmdq_readl(cq_host, CQTERRI);
+		pr_err("%s: err: %d status: 0x%08x task-err-info (0x%08lx)\n",
+		       mmc_hostname(mmc), err, status, err_info);
+
+		cmdq_dumpregs(cq_host);
+
+		if (err_info & CQ_RMEFV) {
+			tag = GET_CMD_ERR_TAG(err_info);
+			pr_err("%s: CMD err tag: %lu\n", __func__, tag);
+
+			mrq = get_req_by_tag(cq_host, tag);
+			/* CMD44/45/46/47 will not have a valid cmd */
+			if (mrq->cmd)
+				mrq->cmd->error = err;
+			else
+				mrq->data->error = err;
+		} else {
+			tag = GET_DAT_ERR_TAG(err_info);
+			pr_err("%s: Data err tag: %lu\n", __func__, tag);
+			mrq = get_req_by_tag(cq_host, tag);
+			mrq->data->error = err;
+		}
+
+		tag = 0;
+		/*
+		 * CQE detected a response error from device
+		 * In most cases, this would require a reset.
+		 */
+		if (status & CQIS_RED) {
+			mrq->cmdq_req->resp_err = true;
+			pr_err("%s: Response error (0x%08x) from card !!!\n",
+					mmc_hostname(mmc), status);
+		} else {
+			mrq->cmdq_req->resp_idx = cmdq_readl(cq_host, CQCRI);
+			mrq->cmdq_req->resp_arg = cmdq_readl(cq_host, CQCRA);
+		}
+
+		mmc->err_mrq = mrq;
+		cmdq_finish_data(mmc, tag);
+	}
+
 	if (status & CQIS_TCC) {
 		/* read QCTCN and complete the request */
 		comp_status = cmdq_readl(cq_host, CQTCN);
@@ -567,12 +662,6 @@
 		cmdq_writel(cq_host, comp_status, CQTCN);
 	}
 
-	if (status & CQIS_RED) {
-		/* task response has an error */
-		pr_err("%s: RED error %d !!!\n", mmc_hostname(mmc), status);
-		cmdq_dumpregs(cq_host);
-	}
-
 	if (status & CQIS_HAC) {
 		/* halt is completed, wakeup waiting thread */
 		complete(&cq_host->halt_comp);
@@ -632,6 +721,7 @@
 	.request = cmdq_request,
 	.post_req = cmdq_post_req,
 	.halt = cmdq_halt,
+	.reset	= cmdq_reset,
 };
 
 struct cmdq_host *cmdq_pltfm_init(struct platform_device *pdev)
diff --git a/drivers/mmc/host/cmdq_hci.h b/drivers/mmc/host/cmdq_hci.h
index e7f5a15..6786b38 100644
--- a/drivers/mmc/host/cmdq_hci.h
+++ b/drivers/mmc/host/cmdq_hci.h
@@ -92,6 +92,17 @@
 /* task error info */
 #define CQTERRI		0x54
 
+/* CQTERRI bit fields */
+#define CQ_RMECI	0x1F
+#define CQ_RMETI	(0x1F << 8)
+#define CQ_RMEFV	(1 << 15)
+#define CQ_DTECI	(0x3F << 16)
+#define CQ_DTETI	(0x1F << 24)
+#define CQ_DTEFV	(1 << 31)
+
+#define GET_CMD_ERR_TAG(__r__) ((__r__ & CQ_RMETI) >> 8)
+#define GET_DAT_ERR_TAG(__r__) ((__r__ & CQ_DTETI) >> 24)
+
 /* command response index */
 #define CQCRI		0x58
 
@@ -106,6 +117,7 @@
 #define CQ_CMD_DBG_RAM_WA 0x198
 #define CQ_CMD_DBG_RAM_OL 0x19C
 
+
 /* attribute fields */
 #define VALID(x)	((x & 1) << 0)
 #define END(x)		((x & 1) << 1)
@@ -186,6 +198,7 @@
 	void (*write_l)(struct cmdq_host *host, u32 val, int reg);
 	u32 (*read_l)(struct cmdq_host *host, int reg);
 	void (*clear_set_dumpregs)(struct mmc_host *mmc, bool set);
+	int (*reset)(struct mmc_host *mmc);
 };
 
 static inline void cmdq_writel(struct cmdq_host *host, u32 val, int reg)
@@ -204,7 +217,7 @@
 		return readl_relaxed(host->mmio + reg);
 }
 
-extern irqreturn_t cmdq_irq(struct mmc_host *mmc, u32 intmask);
+extern irqreturn_t cmdq_irq(struct mmc_host *mmc, int err);
 extern int cmdq_init(struct cmdq_host *cq_host, struct mmc_host *mmc,
 		     bool dma64);
 extern struct cmdq_host *cmdq_pltfm_init(struct platform_device *pdev);
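
As a worked example of the CQTERRI bit fields added above, the standalone program below decodes a hypothetical register value describing a data-phase error on task 4 while the controller was executing CMD46. It uses only the macros from this patch and is illustrative, not part of it.

/* CQTERRI decode sketch (userspace, illustrative only) */
#include <stdint.h>
#include <stdio.h>

#define CQ_RMETI	(0x1F << 8)	/* cmd error: task ID */
#define CQ_RMEFV	(1 << 15)	/* cmd error fields valid */
#define CQ_DTECI	(0x3F << 16)	/* data error: command index */
#define CQ_DTETI	(0x1F << 24)	/* data error: task ID */
#define CQ_DTEFV	(1U << 31)	/* data error fields valid */

#define GET_CMD_ERR_TAG(r)	(((r) & CQ_RMETI) >> 8)
#define GET_DAT_ERR_TAG(r)	(((r) & CQ_DTETI) >> 24)

int main(void)
{
	/* hypothetical value: data error on task 4 while running CMD46 */
	uint32_t terri = 0x842E0000;

	if (terri & CQ_DTEFV)
		printf("data error: tag %u, CMD%u\n",
		       (unsigned)GET_DAT_ERR_TAG(terri),
		       (unsigned)((terri & CQ_DTECI) >> 16));	/* tag 4, CMD46 */
	else if (terri & CQ_RMEFV)
		printf("cmd error: tag %u\n", (unsigned)GET_CMD_ERR_TAG(terri));
	return 0;
}
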
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 9679fbd..9989c23 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -127,6 +127,7 @@
 struct mmc_async_req;
 struct mmc_cmdq_req;
 
+extern int mmc_cmdq_discard_queue(struct mmc_host *host, u32 tasks);
 extern int mmc_cmdq_halt(struct mmc_host *host, bool enable);
 extern void mmc_cmdq_post_req(struct mmc_host *host, struct mmc_request *mrq,
 			      int err);
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index a75de77d..95b86a0 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -99,6 +99,7 @@
 	void (*post_req)(struct mmc_host *host, struct mmc_request *mrq,
 			 int err);
 	int (*halt)(struct mmc_host *host, bool halt);
+	void (*reset)(struct mmc_host *host, bool soft);
 };
 
 struct mmc_host_ops {
@@ -211,6 +212,11 @@
 #define DAT_TAG	(1 << 5)
 #define FORCED_PRG	(1 << 6)
 	unsigned int		cmdq_req_flags;
+
+	unsigned int		resp_idx;
+	unsigned int		resp_arg;
+	unsigned int		dev_pend_tasks;
+	bool			resp_err;
 	int			tag; /* used for command queuing */
 	u8			ctx_id;
 };
@@ -581,6 +587,7 @@
 	 * controller.
 	 */
 	void *cmdq_private;
+	struct mmc_request	*err_mrq;
 	unsigned long		private[0] ____cacheline_aligned;
 };
 
diff --git a/include/uapi/linux/mmc/mmc.h b/include/uapi/linux/mmc/mmc.h
index f75ae94..093d89e 100644
--- a/include/uapi/linux/mmc/mmc.h
+++ b/include/uapi/linux/mmc/mmc.h
@@ -64,4 +64,9 @@
 #define MMC_APP_CMD              55   /* ac   [31:16] RCA        R1  */
 #define MMC_GEN_CMD              56   /* adtc [0] RD/WR          R1  */
 
+/* class 11 */
+#define MMC_CMDQ_TASK_MGMT       48  /* ac   [31:0] task ID     R1b */
+#define DISCARD_QUEUE		0x1
+#define DISCARD_TASK		0x2
+
 #endif /* UAPI_MMC_MMC_H */