clk: msm: Add a 50us delay before polling lock_detect status

Some PLL hardware requires an additional delay after the PLL is brought
out of reset so that the lock detect signal can stabilize before software
polls it. Introduce a new set of PLL clk_ops that waits 50us before
polling the lock_det bit. Also, if the PLL fails to lock, log additional
PLL register state to the kernel log before calling panic().
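
For reference, a platform clock driver opts into the new delay by pointing
its PLL definition at clk_ops_acpu_pll instead of clk_ops_sr2_pll. A minimal
sketch follows (the clock name, parent, register offsets and base pointer are
illustrative placeholders, not part of this patch; the field names follow the
existing struct pll_clk layout in clock-pll.h):

	/* Hypothetical ACPU PLL definition; offsets and parent are placeholders. */
	static struct pll_clk a7sspll = {
		.mode_reg = (void __iomem *)0x00,
		.l_reg = (void __iomem *)0x04,
		.m_reg = (void __iomem *)0x08,
		.n_reg = (void __iomem *)0x0c,
		.config_reg = (void __iomem *)0x10,
		.status_reg = (void __iomem *)0x1c,
		.base = &virt_base,
		.c = {
			.parent = &xo_a_clk_src.c,
			.dbg_name = "a7sspll",
			/* New ops: 50us delay before polling lock_detect. */
			.ops = &clk_ops_acpu_pll,
			CLK_INIT(a7sspll.c),
		},
	};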

Change-Id: I1b04dccc8d3c3e45d4aa7a3c1c60311331e490fa
Signed-off-by: Odelu Kukatla <okukatla@codeaurora.org>
diff --git a/drivers/clk/msm/clock-pll.c b/drivers/clk/msm/clock-pll.c
index 26c04e5..381c8db 100644
--- a/drivers/clk/msm/clock-pll.c
+++ b/drivers/clk/msm/clock-pll.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -217,13 +217,46 @@
 	writel_relaxed(regval, pll_config);
 }
 
+static void pll_wait_for_lock(struct pll_clk *pll)
+{
+	int count;
+	u32 mode;
+	u32 lockmask = pll->masks.lock_mask ?: PLL_LOCKED_BIT;
+	u32 status_reg, user_reg, l_reg, m_reg, n_reg, config_reg;
+
+	/* Wait for pll to lock. */
+	for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
+		if (readl_relaxed(PLL_STATUS_REG(pll)) & lockmask)
+			break;
+		udelay(1);
+	}
+
+	if (!(readl_relaxed(PLL_STATUS_REG(pll)) & lockmask)) {
+		mode = readl_relaxed(PLL_MODE_REG(pll));
+		status_reg = readl_relaxed(PLL_STATUS_REG(pll));
+		user_reg = readl_relaxed(PLL_CONFIG_REG(pll));
+		config_reg = readl_relaxed(PLL_CFG_CTL_REG(pll));
+		l_reg = readl_relaxed(PLL_L_REG(pll));
+		m_reg = readl_relaxed(PLL_M_REG(pll));
+		n_reg = readl_relaxed(PLL_N_REG(pll));
+		pr_err("count = %d\n", (int)count);
+		pr_err("mode register is 0x%x\n", mode);
+		pr_err("status register is 0x%x\n", status_reg);
+		pr_err("user control register is 0x%x\n", user_reg);
+		pr_err("config control register is 0x%x\n", config_reg);
+		pr_err("L value register is 0x%x\n", l_reg);
+		pr_err("M value register is 0x%x\n", m_reg);
+		pr_err("N value control register is 0x%x\n", n_reg);
+		panic("PLL %s didn't lock after enabling it!\n",
+				pll->c.dbg_name);
+	}
+}
+
 static int sr2_pll_clk_enable(struct clk *c)
 {
 	unsigned long flags;
 	struct pll_clk *pll = to_pll_clk(c);
-	int ret = 0, count;
 	u32 mode = readl_relaxed(PLL_MODE_REG(pll));
-	u32 lockmask = pll->masks.lock_mask ?: PLL_LOCKED_BIT;
 
 	spin_lock_irqsave(&pll_reg_lock, flags);
 
@@ -245,15 +278,7 @@
 	mode |= PLL_RESET_N;
 	writel_relaxed(mode, PLL_MODE_REG(pll));
 
-	/* Wait for pll to lock. */
-	for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
-		if (readl_relaxed(PLL_STATUS_REG(pll)) & lockmask)
-			break;
-		udelay(1);
-	}
-
-	if (!(readl_relaxed(PLL_STATUS_REG(pll)) & lockmask))
-		pr_err("PLL %s didn't lock after enabling it!\n", c->dbg_name);
+	pll_wait_for_lock(pll);
 
 	/* Enable PLL output. */
 	mode |= PLL_OUTCTRL;
@@ -263,7 +288,50 @@
 	mb();
 
 	spin_unlock_irqrestore(&pll_reg_lock, flags);
-	return ret;
+	return 0;
+}
+
+static int acpu_pll_clk_enable(struct clk *c)
+{
+	unsigned long flags;
+	struct pll_clk *pll = to_pll_clk(c);
+	u32 mode = readl_relaxed(PLL_MODE_REG(pll));
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+
+	spm_event(pll->spm_ctrl.spm_base, pll->spm_ctrl.offset,
+				pll->spm_ctrl.event_bit, false);
+
+	/* Disable PLL bypass mode. */
+	mode |= PLL_BYPASSNL;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/*
+	 * H/W requires a 5us delay between disabling the bypass and
+	 * de-asserting the reset. Delay 10us just to be safe.
+	 */
+	mb();
+	udelay(10);
+
+	/* De-assert active-low PLL reset. */
+	mode |= PLL_RESET_N;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/* PLL H/W requires a 50us delay before polling lock_detect. */
+	mb();
+	udelay(50);
+
+	pll_wait_for_lock(pll);
+
+	/* Enable PLL output. */
+	mode |= PLL_OUTCTRL;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/* Ensure that the write above goes through before returning. */
+	mb();
+
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+	return 0;
 }
 
 void __variable_rate_pll_init(struct clk *c)
@@ -886,6 +954,15 @@
 	.list_registers = local_pll_clk_list_registers,
 };
 
+const struct clk_ops clk_ops_acpu_pll = {
+	.enable = acpu_pll_clk_enable,
+	.disable = local_pll_clk_disable,
+	.set_rate = local_pll_clk_set_rate,
+	.round_rate = local_pll_clk_round_rate,
+	.handoff = local_pll_clk_handoff,
+	.list_registers = local_pll_clk_list_registers,
+};
+
 const struct clk_ops clk_ops_variable_rate_pll_hwfsm = {
 	.enable = variable_rate_pll_clk_enable_hwfsm,
 	.disable = variable_rate_pll_clk_disable_hwfsm,
diff --git a/include/soc/qcom/clock-pll.h b/include/soc/qcom/clock-pll.h
index 1865e3c..dd7e186 100644
--- a/include/soc/qcom/clock-pll.h
+++ b/include/soc/qcom/clock-pll.h
@@ -1,5 +1,6 @@
 /*
- * Copyright (c) 2012-2015, 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2015, 2017-2018, The Linux Foundation.
+ * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -174,6 +175,7 @@
 
 extern const struct clk_ops clk_ops_local_pll;
 extern const struct clk_ops clk_ops_sr2_pll;
+extern const struct clk_ops clk_ops_acpu_pll;
 extern const struct clk_ops clk_ops_variable_rate_pll;
 extern const struct clk_ops clk_ops_variable_rate_pll_hwfsm;