mmc: cmdq_hci: Helper API/info in cmdq for halt
This patch adds the following helper API/info -
1. cmdq_halt_poll to halt the controller using a polling
method. This is mainly to be used in case of an error
from the cmdq_irq context.
2. Adds num_cq_slots & dcmd_cq_slot info to the
mmc_host structure. This information can be useful
to users of the mmc_host structure, for example when
handling multiple error requests.
3. Adds CMDQ_STATE_CQ_DISABLE for the cmdq host.
If, on an error, the halt also fails, the CQE error
handling code will disable CQ; the block layer needs
to know this so that it does not pull any further requests.
Change-Id: I8e9a8d5094db82336917fcca4361ce84316c34ef
Signed-off-by: Ritesh Harjani <riteshh@codeaurora.org>
[subhashj@codeaurora.org: fixed merge conflicts]
Signed-off-by: Subhash Jadavani <subhashj@codeaurora.org>
diff --git a/drivers/mmc/host/cmdq_hci.c b/drivers/mmc/host/cmdq_hci.c
index 3082d67..5069dc7 100644
--- a/drivers/mmc/host/cmdq_hci.c
+++ b/drivers/mmc/host/cmdq_hci.c
@@ -36,6 +36,8 @@
/* 1 sec */
#define HALT_TIMEOUT_MS 1000
+static int cmdq_halt_poll(struct mmc_host *mmc);
+
#ifdef CONFIG_PM_RUNTIME
static int cmdq_runtime_pm_get(struct cmdq_host *host)
{
@@ -115,6 +117,20 @@
}
}
+static void cmdq_set_halt_irq(struct cmdq_host *cq_host, bool enable)
+{
+ u32 ier;
+
+ ier = cmdq_readl(cq_host, CQISTE);
+ if (enable) {
+ cmdq_writel(cq_host, ier | HALT, CQISTE);
+ cmdq_writel(cq_host, ier | HALT, CQISGE);
+ } else {
+ cmdq_writel(cq_host, ier & ~HALT, CQISTE);
+ cmdq_writel(cq_host, ier & ~HALT, CQISGE);
+ }
+}
+
static void cmdq_clear_set_irqs(struct cmdq_host *cq_host, u32 clear, u32 set)
{
u32 ier;
@@ -368,6 +384,7 @@
mb();
cq_host->enabled = true;
+ mmc_host_clr_cq_disable(mmc);
if (cq_host->ops->set_block_size)
cq_host->ops->set_block_size(cq_host->mmc);
@@ -402,6 +419,7 @@
cmdq_runtime_pm_put(cq_host);
cq_host->enabled = false;
+ mmc_host_set_cq_disable(mmc);
}
static void cmdq_reset(struct mmc_host *mmc, bool soft)
@@ -447,6 +465,7 @@
cmdq_writel(cq_host, cqcfg, CQCFG);
cmdq_runtime_pm_put(cq_host);
cq_host->enabled = true;
+ mmc_host_clr_cq_disable(mmc);
}
static void cmdq_prep_task_desc(struct mmc_request *mrq,
@@ -717,6 +736,7 @@
struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
unsigned long err_info = 0;
struct mmc_request *mrq;
+ int ret;
status = cmdq_readl(cq_host, CQIS);
cmdq_writel(cq_host, status, CQIS);
@@ -729,6 +749,17 @@
pr_err("%s: err: %d status: 0x%08x task-err-info (0x%08lx)\n",
mmc_hostname(mmc), err, status, err_info);
+ /*
+ * Need to halt CQE in case of error in interrupt context itself
+ * otherwise CQE may proceed with sending CMD to device even if
+ * CQE/card is in error state.
+ * CMDQ error handling will make sure that it is unhalted after
+ * handling all the errors.
+ */
+ ret = cmdq_halt_poll(mmc);
+ if (ret)
+ pr_err("%s: %s: halt failed ret=%d\n",
+ mmc_hostname(mmc), __func__, ret);
cmdq_dumpregs(cq_host);
if (err_info & CQ_RMEFV) {
@@ -811,6 +842,38 @@
}
EXPORT_SYMBOL(cmdq_irq);
+/* cmdq_halt_poll - Halting CQE using polling method.
+ * @mmc: struct mmc_host
+ * This is used mainly from interrupt context to halt
+ * CQE engine.
+ */
+static int cmdq_halt_poll(struct mmc_host *mmc)
+{
+ struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
+ int retries = 100;
+
+ cmdq_set_halt_irq(cq_host, false);
+ cmdq_writel(cq_host, cmdq_readl(cq_host, CQCTL) | HALT, CQCTL);
+ while (retries) {
+ if (!(cmdq_readl(cq_host, CQCTL) & HALT)) {
+ udelay(5);
+ retries--;
+ continue;
+ } else {
+ if (cq_host->ops->post_cqe_halt)
+ cq_host->ops->post_cqe_halt(mmc);
+ /* halt done: re-enable legacy interrupts */
+ if (cq_host->ops->clear_set_irqs)
+ cq_host->ops->clear_set_irqs(mmc,
+ false);
+ mmc_host_set_halt(mmc);
+ break;
+ }
+ }
+ cmdq_set_halt_irq(cq_host, true);
+ return retries ? 0 : -ETIMEDOUT;
+}
+
/* May sleep */
static int cmdq_halt(struct mmc_host *mmc, bool halt)
{
@@ -953,6 +1016,8 @@
cq_host->dcmd_slot = DCMD_SLOT;
mmc->cmdq_ops = &cmdq_host_ops;
+ mmc->num_cq_slots = NUM_SLOTS;
+ mmc->dcmd_cq_slot = DCMD_SLOT;
cq_host->mrq_slot = kzalloc(sizeof(cq_host->mrq_slot) *
cq_host->num_slots, GFP_KERNEL);
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 525a1c9..94511da 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -275,6 +275,7 @@
#define CMDQ_STATE_ERR 0
#define CMDQ_STATE_DCMD_ACTIVE 1
#define CMDQ_STATE_HALT 2
+#define CMDQ_STATE_CQ_DISABLE 3
wait_queue_head_t queue_empty_wq;
wait_queue_head_t wait;
int active_small_sector_read_reqs;
@@ -601,6 +602,8 @@
enum dev_state dev_status;
bool wakeup_on_idle;
struct mmc_cmdq_context_info cmdq_ctx;
+ int num_cq_slots;
+ int dcmd_cq_slot;
bool cmdq_thist_enabled;
/*
* several cmdq supporting host controllers are extensions
@@ -748,6 +751,21 @@
return test_bit(CMDQ_STATE_HALT, &host->cmdq_ctx.curr_state);
}
+static inline void mmc_host_set_cq_disable(struct mmc_host *host)
+{
+ set_bit(CMDQ_STATE_CQ_DISABLE, &host->cmdq_ctx.curr_state);
+}
+
+static inline void mmc_host_clr_cq_disable(struct mmc_host *host)
+{
+ clear_bit(CMDQ_STATE_CQ_DISABLE, &host->cmdq_ctx.curr_state);
+}
+
+static inline int mmc_host_cq_disable(struct mmc_host *host)
+{
+ return test_bit(CMDQ_STATE_CQ_DISABLE, &host->cmdq_ctx.curr_state);
+}
+
#ifdef CONFIG_MMC_CLKGATE
void mmc_host_clk_hold(struct mmc_host *host);
void mmc_host_clk_release(struct mmc_host *host);