mmc: sdhci-msm: Add new workqueue for pm qos unvoting
The mmc request issuing thread sometimes flushes the pm_qos unvoting
work items. The mmc thread can run in the memory reclaim path, so any
work it flushes must be queued on a workqueue with the WQ_MEM_RECLAIM
flag set.
So create a new workqueue for pm_qos_irq and pm_qos_cpu unvoting and
queue the work items there.
Without this change, the following warning is observed with the call stack shown below:
___________________________________________________________________
Warning: workqueue: PF_MEMALLOC task 304(mmc-cmdqd/0) is flushing
!WQ_MEM_RECLAIM events:sdhci_msm_pm_qos_irq_unvote_work
callstack:
(check_flush_dependency)
(flush_work)
(__cancel_work_timer)
(cancel_delayed_work_sync)
(sdhci_msm_pm_qos_irq_vote)
(cmdq_request)
(mmc_cmdq_start_req)
(mmc_blk_cmdq_issue_rq)
(mmc_cmdq_thread)
___________________________________________________________________
Change-Id: Iae0bca3e6f15f1a5a099e0e693982f64a9ed3f0e
Signed-off-by: Vijay Viswanath <vviswana@codeaurora.org>
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 0bea2cb..831b64d 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -3952,8 +3952,9 @@
return;
if (async) {
- schedule_delayed_work(&msm_host->pm_qos_irq.unvote_work,
- msecs_to_jiffies(QOS_REMOVE_DELAY_MS));
+ queue_delayed_work(msm_host->pm_qos_wq,
+ &msm_host->pm_qos_irq.unvote_work,
+ msecs_to_jiffies(QOS_REMOVE_DELAY_MS));
return;
}
@@ -4030,6 +4031,33 @@
struct sdhci_host *host) { }
#endif
+static bool sdhci_msm_pm_qos_wq_init(struct sdhci_msm_host *msm_host)
+{
+ char *wq = NULL;
+ bool ret = true;
+
+ wq = kasprintf(GFP_KERNEL, "sdhci_msm_pm_qos/%s",
+ dev_name(&msm_host->pdev->dev));
+ if (!wq)
+ return false;
+ /*
+ * Create a workqueue with the WQ_MEM_RECLAIM flag set for the
+ * pm_qos unvote work. Because the mmc thread may run with
+ * PF_MEMALLOC set, the kernel checks the workqueue's
+ * WQ_MEM_RECLAIM flag when work is flushed from that context
+ * (check_flush_dependency); if the flag is not set, a kernel
+ * warning is triggered. create_workqueue() sets
+ * WQ_MEM_RECLAIM.
+ */
+ msm_host->pm_qos_wq = create_workqueue(wq);
+ if (!msm_host->pm_qos_wq) {
+ ret = false;
+ dev_err(&msm_host->pdev->dev,
+ "failed to create pm qos unvote work queue\n");
+ }
+ kfree(wq);
+ return ret;
+}
+
void sdhci_msm_pm_qos_irq_init(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -4054,6 +4082,8 @@
cpumask_copy(&msm_host->pm_qos_irq.req.cpus_affine,
cpumask_of(msm_host->pdata->pm_qos_data.irq_cpu));
+ sdhci_msm_pm_qos_wq_init(msm_host);
+
INIT_DELAYED_WORK(&msm_host->pm_qos_irq.unvote_work,
sdhci_msm_pm_qos_irq_unvote_work);
/* For initialization phase, set the performance latency */
@@ -4228,8 +4258,9 @@
return false;
if (async) {
- schedule_delayed_work(&msm_host->pm_qos[group].unvote_work,
- msecs_to_jiffies(QOS_REMOVE_DELAY_MS));
+ queue_delayed_work(msm_host->pm_qos_wq,
+ &msm_host->pm_qos[group].unvote_work,
+ msecs_to_jiffies(QOS_REMOVE_DELAY_MS));
return true;
}
@@ -5169,6 +5200,9 @@
device_remove_file(&pdev->dev, &msm_host->polling);
device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
pm_runtime_disable(&pdev->dev);
+
+ if (msm_host->pm_qos_wq)
+ destroy_workqueue(msm_host->pm_qos_wq);
sdhci_remove_host(host, dead);
sdhci_pltfm_free(pdev);