msm: clock-local: Adopt a more consistent variable naming convention
Name variables of similar types that are used for similar purposes
with a consistent naming scheme. This makes the code easier to read
by eliminating confusion caused by variables with names like 'clk'
being used for 'struct clk' types in some functions, 'struct rcg_clk'
in others, 'struct branch_clk' in others, and so on.
Change-Id: Id53acda2a3872b640999126809ba1f76aaee85db
Signed-off-by: Matt Wagantall <mattw@codeaurora.org>
diff --git a/arch/arm/mach-msm/clock-7x30.c b/arch/arm/mach-msm/clock-7x30.c
index aa94be6..225ea2b 100644
--- a/arch/arm/mach-msm/clock-7x30.c
+++ b/arch/arm/mach-msm/clock-7x30.c
@@ -200,7 +200,7 @@
#define PCOM_XO_TCXO 0
#define PCOM_XO_LPXO 1
-static bool pcom_is_local(struct clk *clk)
+static bool pcom_is_local(struct clk *c)
{
return false;
}
@@ -2441,7 +2441,7 @@
struct measure_sel {
u32 test_vector;
- struct clk *clk;
+ struct clk *c;
};
static struct measure_sel measure_mux[] = {
@@ -2538,17 +2538,17 @@
{ CLK_TEST_LS(0x3F), &usb_hs_clk.c },
};
-static struct measure_sel *find_measure_sel(struct clk *clk)
+static struct measure_sel *find_measure_sel(struct clk *c)
{
int i;
for (i = 0; i < ARRAY_SIZE(measure_mux); i++)
- if (measure_mux[i].clk == clk)
+ if (measure_mux[i].c == c)
return &measure_mux[i];
return NULL;
}
-static int measure_clk_set_parent(struct clk *clk, struct clk *parent)
+static int measure_clk_set_parent(struct clk *c, struct clk *parent)
{
struct measure_sel *p;
unsigned long flags;
@@ -2599,7 +2599,7 @@
/* Perform a hardware rate measurement for a given clock.
FOR DEBUG USE ONLY: Measurements take ~15 ms! */
-static unsigned long measure_clk_get_rate(struct clk *clk)
+static unsigned long measure_clk_get_rate(struct clk *c)
{
unsigned long flags;
u32 regval, prph_web_reg_old;
@@ -2647,12 +2647,12 @@
return ret;
}
#else /* !CONFIG_DEBUG_FS */
-static int measure_clk_set_parent(struct clk *clk, struct clk *parent)
+static int measure_clk_set_parent(struct clk *c, struct clk *parent)
{
return -EINVAL;
}
-static unsigned long measure_clk_get_rate(struct clk *clk)
+static unsigned long measure_clk_get_rate(struct clk *c)
{
return 0;
}
@@ -2670,14 +2670,14 @@
};
/* Implementation for clk_set_flags(). */
-int soc_clk_set_flags(struct clk *clk, unsigned clk_flags)
+int soc_clk_set_flags(struct clk *c, unsigned clk_flags)
{
uint32_t regval, ret = 0;
unsigned long flags;
spin_lock_irqsave(&local_clock_reg_lock, flags);
- if (clk == &vfe_clk.c) {
+ if (c == &vfe_clk.c) {
regval = readl_relaxed(CAM_VFE_NS_REG);
/* Flag values chosen for backward compatibility
* with proc_comm remote clock control. */
@@ -2701,17 +2701,15 @@
return ret;
}
-static int msm7x30_clk_reset(struct clk *clk, enum clk_reset_action action)
+static int msm7x30_clk_reset(struct clk *c, enum clk_reset_action action)
{
/* reset_mask is actually a proc_comm id */
- unsigned id = to_rcg_clk(clk)->b.reset_mask;
- return pc_clk_reset(id, action);
+ return pc_clk_reset(to_rcg_clk(c)->b.reset_mask, action);
}
-static int soc_branch_clk_reset(struct clk *clk, enum clk_reset_action action)
+static int soc_branch_clk_reset(struct clk *c, enum clk_reset_action action)
{
- unsigned id = to_branch_clk(clk)->b.reset_mask;
- return pc_clk_reset(id, action);
+ return pc_clk_reset(to_branch_clk(c)->b.reset_mask, action);
}
/*
diff --git a/arch/arm/mach-msm/clock-8960.c b/arch/arm/mach-msm/clock-8960.c
index 7866fc7..2846940 100644
--- a/arch/arm/mach-msm/clock-8960.c
+++ b/arch/arm/mach-msm/clock-8960.c
@@ -2858,9 +2858,9 @@
struct clk c;
};
-static inline struct pix_rdi_clk *to_pix_rdi_clk(struct clk *clk)
+static inline struct pix_rdi_clk *to_pix_rdi_clk(struct clk *c)
{
- return container_of(clk, struct pix_rdi_clk, c);
+ return container_of(c, struct pix_rdi_clk, c);
}
static int pix_rdi_clk_set_rate(struct clk *c, unsigned long rate)
@@ -2868,7 +2868,7 @@
int ret, i;
u32 reg;
unsigned long flags;
- struct pix_rdi_clk *clk = to_pix_rdi_clk(c);
+ struct pix_rdi_clk *rdi = to_pix_rdi_clk(c);
struct clk **mux_map = pix_rdi_mux_map;
/*
@@ -2889,32 +2889,32 @@
goto err;
}
/* Keep the new source on when switching inputs of an enabled clock */
- if (clk->enabled) {
- clk_disable(mux_map[clk->cur_rate]);
+ if (rdi->enabled) {
+ clk_disable(mux_map[rdi->cur_rate]);
clk_enable(mux_map[rate]);
}
spin_lock_irqsave(&local_clock_reg_lock, flags);
- reg = readl_relaxed(clk->s2_reg);
- reg &= ~clk->s2_mask;
- reg |= rate == 2 ? clk->s2_mask : 0;
- writel_relaxed(reg, clk->s2_reg);
+ reg = readl_relaxed(rdi->s2_reg);
+ reg &= ~rdi->s2_mask;
+ reg |= rate == 2 ? rdi->s2_mask : 0;
+ writel_relaxed(reg, rdi->s2_reg);
/*
* Wait at least 6 cycles of slowest clock
* for the glitch-free MUX to fully switch sources.
*/
mb();
udelay(1);
- reg = readl_relaxed(clk->s_reg);
- reg &= ~clk->s_mask;
- reg |= rate == 1 ? clk->s_mask : 0;
- writel_relaxed(reg, clk->s_reg);
+ reg = readl_relaxed(rdi->s_reg);
+ reg &= ~rdi->s_mask;
+ reg |= rate == 1 ? rdi->s_mask : 0;
+ writel_relaxed(reg, rdi->s_reg);
/*
* Wait at least 6 cycles of slowest clock
* for the glitch-free MUX to fully switch sources.
*/
mb();
udelay(1);
- clk->cur_rate = rate;
+ rdi->cur_rate = rate;
spin_unlock_irqrestore(&local_clock_reg_lock, flags);
err:
for (i--; i >= 0; i--)
@@ -2931,12 +2931,12 @@
static int pix_rdi_clk_enable(struct clk *c)
{
unsigned long flags;
- struct pix_rdi_clk *clk = to_pix_rdi_clk(c);
+ struct pix_rdi_clk *rdi = to_pix_rdi_clk(c);
spin_lock_irqsave(&local_clock_reg_lock, flags);
- __branch_clk_enable_reg(&clk->b, clk->c.dbg_name);
+ __branch_clk_enable_reg(&rdi->b, rdi->c.dbg_name);
spin_unlock_irqrestore(&local_clock_reg_lock, flags);
- clk->enabled = true;
+ rdi->enabled = true;
return 0;
}
@@ -2944,24 +2944,22 @@
static void pix_rdi_clk_disable(struct clk *c)
{
unsigned long flags;
- struct pix_rdi_clk *clk = to_pix_rdi_clk(c);
+ struct pix_rdi_clk *rdi = to_pix_rdi_clk(c);
spin_lock_irqsave(&local_clock_reg_lock, flags);
- __branch_clk_disable_reg(&clk->b, clk->c.dbg_name);
+ __branch_clk_disable_reg(&rdi->b, rdi->c.dbg_name);
spin_unlock_irqrestore(&local_clock_reg_lock, flags);
- clk->enabled = false;
+ rdi->enabled = false;
}
-static int pix_rdi_clk_reset(struct clk *clk, enum clk_reset_action action)
+static int pix_rdi_clk_reset(struct clk *c, enum clk_reset_action action)
{
- return branch_reset(&to_pix_rdi_clk(clk)->b, action);
+ return branch_reset(&to_pix_rdi_clk(c)->b, action);
}
static struct clk *pix_rdi_clk_get_parent(struct clk *c)
{
- struct pix_rdi_clk *clk = to_pix_rdi_clk(c);
-
- return pix_rdi_mux_map[clk->cur_rate];
+ return pix_rdi_mux_map[to_pix_rdi_clk(c)->cur_rate];
}
static int pix_rdi_clk_list_rate(struct clk *c, unsigned n)
@@ -2974,17 +2972,17 @@
static enum handoff pix_rdi_clk_handoff(struct clk *c)
{
u32 reg;
- struct pix_rdi_clk *clk = to_pix_rdi_clk(c);
+ struct pix_rdi_clk *rdi = to_pix_rdi_clk(c);
enum handoff ret;
- ret = branch_handoff(&clk->b, &clk->c);
+ ret = branch_handoff(&rdi->b, &rdi->c);
if (ret == HANDOFF_DISABLED_CLK)
return ret;
- reg = readl_relaxed(clk->s_reg);
- clk->cur_rate = reg & clk->s_mask ? 1 : 0;
- reg = readl_relaxed(clk->s2_reg);
- clk->cur_rate = reg & clk->s2_mask ? 2 : clk->cur_rate;
+ reg = readl_relaxed(rdi->s_reg);
+ rdi->cur_rate = reg & rdi->s_mask ? 1 : 0;
+ reg = readl_relaxed(rdi->s2_reg);
+ rdi->cur_rate = reg & rdi->s2_mask ? 2 : rdi->cur_rate;
return HANDOFF_ENABLED_CLK;
}
@@ -3897,7 +3895,7 @@
},
};
-static int hdmi_pll_clk_enable(struct clk *clk)
+static int hdmi_pll_clk_enable(struct clk *c)
{
int ret;
unsigned long flags;
@@ -3907,7 +3905,7 @@
return ret;
}
-static void hdmi_pll_clk_disable(struct clk *clk)
+static void hdmi_pll_clk_disable(struct clk *c)
{
unsigned long flags;
spin_lock_irqsave(&local_clock_reg_lock, flags);
@@ -3915,12 +3913,12 @@
spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}
-static unsigned long hdmi_pll_clk_get_rate(struct clk *clk)
+static unsigned long hdmi_pll_clk_get_rate(struct clk *c)
{
return hdmi_pll_get_rate();
}
-static struct clk *hdmi_pll_clk_get_parent(struct clk *clk)
+static struct clk *hdmi_pll_clk_get_parent(struct clk *c)
{
return &pxo_clk.c;
}
@@ -3975,12 +3973,12 @@
* Unlike other clocks, the TV rate is adjusted through PLL
* re-programming. It is also routed through an MND divider.
*/
-void set_rate_tv(struct rcg_clk *clk, struct clk_freq_tbl *nf)
+void set_rate_tv(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
unsigned long pll_rate = (unsigned long)nf->extra_freq_data;
if (pll_rate)
hdmi_pll_set_rate(pll_rate);
- set_rate_mnd(clk, nf);
+ set_rate_mnd(rcg, nf);
}
static struct rcg_clk tv_src_clk = {
@@ -4616,7 +4614,7 @@
#ifdef CONFIG_DEBUG_FS
struct measure_sel {
u32 test_vector;
- struct clk *clk;
+ struct clk *c;
};
static DEFINE_CLK_MEASURE(l2_m_clk);
@@ -4840,12 +4838,12 @@
{ TEST_CPUL2(0x5), &krait3_m_clk },
};
-static struct measure_sel *find_measure_sel(struct clk *clk)
+static struct measure_sel *find_measure_sel(struct clk *c)
{
int i;
for (i = 0; i < ARRAY_SIZE(measure_mux); i++)
- if (measure_mux[i].clk == clk)
+ if (measure_mux[i].c == c)
return &measure_mux[i];
return NULL;
}
@@ -4855,7 +4853,7 @@
int ret = 0;
u32 clk_sel;
struct measure_sel *p;
- struct measure_clk *clk = to_measure_clk(c);
+ struct measure_clk *measure = to_measure_clk(c);
unsigned long flags;
if (!parent)
@@ -4871,9 +4869,9 @@
* Program the test vector, measurement period (sample_ticks)
* and scaling multiplier.
*/
- clk->sample_ticks = 0x10000;
+ measure->sample_ticks = 0x10000;
clk_sel = p->test_vector & TEST_CLK_SEL_MASK;
- clk->multiplier = 1;
+ measure->multiplier = 1;
switch (p->test_vector >> TEST_TYPE_SHIFT) {
case TEST_TYPE_PER_LS:
writel_relaxed(0x4030D00|BVAL(7, 0, clk_sel), CLK_TEST_REG);
@@ -4902,8 +4900,8 @@
case TEST_TYPE_CPUL2:
writel_relaxed(0x4030400, CLK_TEST_REG);
writel_relaxed(0x80|BVAL(5, 3, clk_sel), GCC_APCS_CLK_DIAG);
- clk->sample_ticks = 0x4000;
- clk->multiplier = 2;
+ measure->sample_ticks = 0x4000;
+ measure->multiplier = 2;
break;
default:
ret = -EPERM;
@@ -4946,7 +4944,7 @@
unsigned long flags;
u32 pdm_reg_backup, ringosc_reg_backup;
u64 raw_count_short, raw_count_full;
- struct measure_clk *clk = to_measure_clk(c);
+ struct measure_clk *measure = to_measure_clk(c);
unsigned ret;
ret = clk_prepare_enable(&cxo_clk.c);
@@ -4973,7 +4971,7 @@
/* Run a short measurement. (~1 ms) */
raw_count_short = run_measurement(0x1000);
/* Run a full measurement. (~14 ms) */
- raw_count_full = run_measurement(clk->sample_ticks);
+ raw_count_full = run_measurement(measure->sample_ticks);
writel_relaxed(ringosc_reg_backup, RINGOSC_NS_REG);
writel_relaxed(pdm_reg_backup, PDM_CLK_NS_REG);
@@ -4984,8 +4982,8 @@
else {
/* Compute rate in Hz. */
raw_count_full = ((raw_count_full * 10) + 15) * 4800000;
- do_div(raw_count_full, ((clk->sample_ticks * 10) + 35));
- ret = (raw_count_full * clk->multiplier);
+ do_div(raw_count_full, ((measure->sample_ticks * 10) + 35));
+ ret = (raw_count_full * measure->multiplier);
}
/* Route dbg_hs_clk to PLLTEST. 300mV single-ended amplitude. */
@@ -4997,12 +4995,12 @@
return ret;
}
#else /* !CONFIG_DEBUG_FS */
-static int measure_clk_set_parent(struct clk *clk, struct clk *parent)
+static int measure_clk_set_parent(struct clk *c, struct clk *parent)
{
return -EINVAL;
}
-static unsigned long measure_clk_get_rate(struct clk *clk)
+static unsigned long measure_clk_get_rate(struct clk *c)
{
return 0;
}
diff --git a/arch/arm/mach-msm/clock-8x60.c b/arch/arm/mach-msm/clock-8x60.c
index 74d71a2..da7dca7 100644
--- a/arch/arm/mach-msm/clock-8x60.c
+++ b/arch/arm/mach-msm/clock-8x60.c
@@ -341,24 +341,24 @@
},
};
-static int pll4_clk_enable(struct clk *clk)
+static int pll4_clk_enable(struct clk *c)
{
struct msm_rpm_iv_pair iv = { MSM_RPM_ID_PLL_4, 1 };
return msm_rpm_set_noirq(MSM_RPM_CTX_SET_0, &iv, 1);
}
-static void pll4_clk_disable(struct clk *clk)
+static void pll4_clk_disable(struct clk *c)
{
struct msm_rpm_iv_pair iv = { MSM_RPM_ID_PLL_4, 0 };
msm_rpm_set_noirq(MSM_RPM_CTX_SET_0, &iv, 1);
}
-static struct clk *pll4_clk_get_parent(struct clk *clk)
+static struct clk *pll4_clk_get_parent(struct clk *c)
{
return &pxo_clk.c;
}
-static bool pll4_clk_is_local(struct clk *clk)
+static bool pll4_clk_is_local(struct clk *c)
{
return false;
}
@@ -397,7 +397,7 @@
/* Unlike other clocks, the TV rate is adjusted through PLL
* re-programming. It is also routed through an MND divider. */
-static void set_rate_tv(struct rcg_clk *clk, struct clk_freq_tbl *nf)
+static void set_rate_tv(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
struct pll_rate *rate = nf->extra_freq_data;
uint32_t pll_mode, pll_config, misc_cc2;
@@ -426,7 +426,7 @@
writel_relaxed(pll_config, MM_PLL2_CONFIG_REG);
/* Configure MND. */
- set_rate_mnd(clk, nf);
+ set_rate_mnd(rcg, nf);
/* Configure hdmi_ref_clk to be equal to the TV clock rate. */
misc_cc2 = readl_relaxed(MISC_CC2_REG);
@@ -3133,7 +3133,7 @@
#ifdef CONFIG_DEBUG_FS
struct measure_sel {
u32 test_vector;
- struct clk *clk;
+ struct clk *c;
};
static struct measure_sel measure_mux[] = {
@@ -3308,12 +3308,12 @@
{ TEST_SC(0x42), &l2_m_clk },
};
-static struct measure_sel *find_measure_sel(struct clk *clk)
+static struct measure_sel *find_measure_sel(struct clk *c)
{
int i;
for (i = 0; i < ARRAY_SIZE(measure_mux); i++)
- if (measure_mux[i].clk == clk)
+ if (measure_mux[i].c == c)
return &measure_mux[i];
return NULL;
}
@@ -3323,7 +3323,7 @@
int ret = 0;
u32 clk_sel;
struct measure_sel *p;
- struct measure_clk *clk = to_measure_clk(c);
+ struct measure_clk *measure = to_measure_clk(c);
unsigned long flags;
if (!parent)
@@ -3340,9 +3340,9 @@
* and scaling factors (multiplier, divider).
*/
clk_sel = p->test_vector & TEST_CLK_SEL_MASK;
- clk->sample_ticks = 0x10000;
- clk->multiplier = 1;
- clk->divider = 1;
+ measure->sample_ticks = 0x10000;
+ measure->multiplier = 1;
+ measure->divider = 1;
switch (p->test_vector >> TEST_TYPE_SHIFT) {
case TEST_TYPE_PER_LS:
writel_relaxed(0x4030D00|BVAL(7, 0, clk_sel), CLK_TEST_REG);
@@ -3355,7 +3355,7 @@
writel_relaxed(BVAL(6, 1, clk_sel)|BIT(0), DBG_CFG_REG_LS_REG);
break;
case TEST_TYPE_MM_HS2X:
- clk->divider = 2;
+ measure->divider = 2;
case TEST_TYPE_MM_HS:
writel_relaxed(0x402B800, CLK_TEST_REG);
writel_relaxed(BVAL(6, 1, clk_sel)|BIT(0), DBG_CFG_REG_HS_REG);
@@ -3367,8 +3367,8 @@
break;
case TEST_TYPE_SC:
writel_relaxed(0x5020000|BVAL(16, 10, clk_sel), CLK_TEST_REG);
- clk->sample_ticks = 0x4000;
- clk->multiplier = 2;
+ measure->sample_ticks = 0x4000;
+ measure->multiplier = 2;
break;
default:
ret = -EPERM;
@@ -3410,7 +3410,7 @@
unsigned long flags;
u32 pdm_reg_backup, ringosc_reg_backup;
u64 raw_count_short, raw_count_full;
- struct measure_clk *clk = to_measure_clk(c);
+ struct measure_clk *measure = to_measure_clk(c);
unsigned ret;
spin_lock_irqsave(&local_clock_reg_lock, flags);
@@ -3431,7 +3431,7 @@
/* Run a short measurement. (~1 ms) */
raw_count_short = run_measurement(0x1000);
/* Run a full measurement. (~14 ms) */
- raw_count_full = run_measurement(clk->sample_ticks);
+ raw_count_full = run_measurement(measure->sample_ticks);
writel_relaxed(ringosc_reg_backup, RINGOSC_NS_REG);
writel_relaxed(pdm_reg_backup, PDM_CLK_NS_REG);
@@ -3442,9 +3442,9 @@
else {
/* Compute rate in Hz. */
raw_count_full = ((raw_count_full * 10) + 15) * 4800000;
- do_div(raw_count_full,
- (((clk->sample_ticks * 10) + 35) * clk->divider));
- ret = (raw_count_full * clk->multiplier);
+ do_div(raw_count_full, (((measure->sample_ticks * 10) + 35)
+ * measure->divider));
+ ret = (raw_count_full * measure->multiplier);
}
/* Route dbg_hs_clk to PLLTEST. 300mV single-ended amplitude. */
@@ -3454,12 +3454,12 @@
return ret;
}
#else /* !CONFIG_DEBUG_FS */
-static int measure_clk_set_parent(struct clk *clk, struct clk *parent)
+static int measure_clk_set_parent(struct clk *c, struct clk *parent)
{
return -EINVAL;
}
-static unsigned long measure_clk_get_rate(struct clk *clk)
+static unsigned long measure_clk_get_rate(struct clk *c)
{
return 0;
}
diff --git a/arch/arm/mach-msm/clock-9615.c b/arch/arm/mach-msm/clock-9615.c
index a2e0bc9..f7ccb35 100644
--- a/arch/arm/mach-msm/clock-9615.c
+++ b/arch/arm/mach-msm/clock-9615.c
@@ -216,33 +216,33 @@
static DEFINE_SPINLOCK(soft_vote_lock);
-static int pll_acpu_vote_clk_enable(struct clk *clk)
+static int pll_acpu_vote_clk_enable(struct clk *c)
{
int ret = 0;
unsigned long flags;
- struct pll_vote_clk *pll = to_pll_vote_clk(clk);
+ struct pll_vote_clk *pllv = to_pll_vote_clk(c);
spin_lock_irqsave(&soft_vote_lock, flags);
- if (!*pll->soft_vote)
- ret = pll_vote_clk_enable(clk);
+ if (!*pllv->soft_vote)
+ ret = pll_vote_clk_enable(c);
if (ret == 0)
- *pll->soft_vote |= (pll->soft_vote_mask);
+ *pllv->soft_vote |= (pllv->soft_vote_mask);
spin_unlock_irqrestore(&soft_vote_lock, flags);
return ret;
}
-static void pll_acpu_vote_clk_disable(struct clk *clk)
+static void pll_acpu_vote_clk_disable(struct clk *c)
{
unsigned long flags;
- struct pll_vote_clk *pll = to_pll_vote_clk(clk);
+ struct pll_vote_clk *pllv = to_pll_vote_clk(c);
spin_lock_irqsave(&soft_vote_lock, flags);
- *pll->soft_vote &= ~(pll->soft_vote_mask);
- if (!*pll->soft_vote)
- pll_vote_clk_disable(clk);
+ *pllv->soft_vote &= ~(pllv->soft_vote_mask);
+ if (!*pllv->soft_vote)
+ pll_vote_clk_disable(c);
spin_unlock_irqrestore(&soft_vote_lock, flags);
}
@@ -1376,7 +1376,7 @@
#ifdef CONFIG_DEBUG_FS
struct measure_sel {
u32 test_vector;
- struct clk *clk;
+ struct clk *c;
};
static DEFINE_CLK_MEASURE(q6sw_clk);
@@ -1447,12 +1447,12 @@
{ TEST_LPA_HS(0x00), &q6_func_clk },
};
-static struct measure_sel *find_measure_sel(struct clk *clk)
+static struct measure_sel *find_measure_sel(struct clk *c)
{
int i;
for (i = 0; i < ARRAY_SIZE(measure_mux); i++)
- if (measure_mux[i].clk == clk)
+ if (measure_mux[i].c == c)
return &measure_mux[i];
return NULL;
}
@@ -1462,7 +1462,7 @@
int ret = 0;
u32 clk_sel;
struct measure_sel *p;
- struct measure_clk *clk = to_measure_clk(c);
+ struct measure_clk *measure = to_measure_clk(c);
unsigned long flags;
if (!parent)
@@ -1478,9 +1478,9 @@
* Program the test vector, measurement period (sample_ticks)
* and scaling multiplier.
*/
- clk->sample_ticks = 0x10000;
+ measure->sample_ticks = 0x10000;
clk_sel = p->test_vector & TEST_CLK_SEL_MASK;
- clk->multiplier = 1;
+ measure->multiplier = 1;
switch (p->test_vector >> TEST_TYPE_SHIFT) {
case TEST_TYPE_PER_LS:
writel_relaxed(0x4030D00|BVAL(7, 0, clk_sel), CLK_TEST_REG);
@@ -1539,7 +1539,7 @@
unsigned long flags;
u32 pdm_reg_backup, ringosc_reg_backup;
u64 raw_count_short, raw_count_full;
- struct measure_clk *clk = to_measure_clk(c);
+ struct measure_clk *measure = to_measure_clk(c);
unsigned ret;
spin_lock_irqsave(&local_clock_reg_lock, flags);
@@ -1560,7 +1560,7 @@
/* Run a short measurement. (~1 ms) */
raw_count_short = run_measurement(0x1000);
/* Run a full measurement. (~14 ms) */
- raw_count_full = run_measurement(clk->sample_ticks);
+ raw_count_full = run_measurement(measure->sample_ticks);
writel_relaxed(ringosc_reg_backup, RINGOSC_NS_REG);
writel_relaxed(pdm_reg_backup, PDM_CLK_NS_REG);
@@ -1571,8 +1571,8 @@
else {
/* Compute rate in Hz. */
raw_count_full = ((raw_count_full * 10) + 15) * 4800000;
- do_div(raw_count_full, ((clk->sample_ticks * 10) + 35));
- ret = (raw_count_full * clk->multiplier);
+ do_div(raw_count_full, ((measure->sample_ticks * 10) + 35));
+ ret = (raw_count_full * measure->multiplier);
}
/* Route dbg_hs_clk to PLLTEST. 300mV single-ended amplitude. */
@@ -1582,12 +1582,12 @@
return ret;
}
#else /* !CONFIG_DEBUG_FS */
-static int measure_clk_set_parent(struct clk *clk, struct clk *parent)
+static int measure_clk_set_parent(struct clk *c, struct clk *parent)
{
return -EINVAL;
}
-static unsigned long measure_clk_get_rate(struct clk *clk)
+static unsigned long measure_clk_get_rate(struct clk *c)
{
return 0;
}
diff --git a/arch/arm/mach-msm/clock-local.c b/arch/arm/mach-msm/clock-local.c
index b5ae4ab..8222e87 100644
--- a/arch/arm/mach-msm/clock-local.c
+++ b/arch/arm/mach-msm/clock-local.c
@@ -53,32 +53,32 @@
*/
/* For clocks with MND dividers. */
-void set_rate_mnd(struct rcg_clk *clk, struct clk_freq_tbl *nf)
+void set_rate_mnd(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
uint32_t ns_reg_val, ctl_reg_val;
/* Assert MND reset. */
- ns_reg_val = readl_relaxed(clk->ns_reg);
+ ns_reg_val = readl_relaxed(rcg->ns_reg);
ns_reg_val |= BIT(7);
- writel_relaxed(ns_reg_val, clk->ns_reg);
+ writel_relaxed(ns_reg_val, rcg->ns_reg);
/* Program M and D values. */
- writel_relaxed(nf->md_val, clk->md_reg);
+ writel_relaxed(nf->md_val, rcg->md_reg);
/* If the clock has a separate CC register, program it. */
- if (clk->ns_reg != clk->b.ctl_reg) {
- ctl_reg_val = readl_relaxed(clk->b.ctl_reg);
- ctl_reg_val &= ~(clk->ctl_mask);
+ if (rcg->ns_reg != rcg->b.ctl_reg) {
+ ctl_reg_val = readl_relaxed(rcg->b.ctl_reg);
+ ctl_reg_val &= ~(rcg->ctl_mask);
ctl_reg_val |= nf->ctl_val;
- writel_relaxed(ctl_reg_val, clk->b.ctl_reg);
+ writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);
}
/* Deassert MND reset. */
ns_reg_val &= ~BIT(7);
- writel_relaxed(ns_reg_val, clk->ns_reg);
+ writel_relaxed(ns_reg_val, rcg->ns_reg);
}
-void set_rate_nop(struct rcg_clk *clk, struct clk_freq_tbl *nf)
+void set_rate_nop(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
/*
* Nothing to do for fixed-rate or integer-divider clocks. Any settings
@@ -88,31 +88,31 @@
*/
}
-void set_rate_mnd_8(struct rcg_clk *clk, struct clk_freq_tbl *nf)
+void set_rate_mnd_8(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
uint32_t ctl_reg_val;
/* Assert MND reset. */
- ctl_reg_val = readl_relaxed(clk->b.ctl_reg);
+ ctl_reg_val = readl_relaxed(rcg->b.ctl_reg);
ctl_reg_val |= BIT(8);
- writel_relaxed(ctl_reg_val, clk->b.ctl_reg);
+ writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);
/* Program M and D values. */
- writel_relaxed(nf->md_val, clk->md_reg);
+ writel_relaxed(nf->md_val, rcg->md_reg);
/* Program MN counter Enable and Mode. */
- ctl_reg_val &= ~(clk->ctl_mask);
+ ctl_reg_val &= ~(rcg->ctl_mask);
ctl_reg_val |= nf->ctl_val;
- writel_relaxed(ctl_reg_val, clk->b.ctl_reg);
+ writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);
/* Deassert MND reset. */
ctl_reg_val &= ~BIT(8);
- writel_relaxed(ctl_reg_val, clk->b.ctl_reg);
+ writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);
}
-void set_rate_mnd_banked(struct rcg_clk *clk, struct clk_freq_tbl *nf)
+void set_rate_mnd_banked(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
- struct bank_masks *banks = clk->bank_info;
+ struct bank_masks *banks = rcg->bank_info;
const struct bank_mask_info *new_bank_masks;
const struct bank_mask_info *old_bank_masks;
uint32_t ns_reg_val, ctl_reg_val;
@@ -123,10 +123,10 @@
* off, program the active bank since bank switching won't work if
* both banks aren't running.
*/
- ctl_reg_val = readl_relaxed(clk->b.ctl_reg);
+ ctl_reg_val = readl_relaxed(rcg->b.ctl_reg);
bank_sel = !!(ctl_reg_val & banks->bank_sel_mask);
/* If clock isn't running, don't switch banks. */
- bank_sel ^= (!clk->enabled || clk->current_freq->freq_hz == 0);
+ bank_sel ^= (!rcg->enabled || rcg->current_freq->freq_hz == 0);
if (bank_sel == 0) {
new_bank_masks = &banks->bank1_mask;
old_bank_masks = &banks->bank0_mask;
@@ -135,46 +135,46 @@
old_bank_masks = &banks->bank1_mask;
}
- ns_reg_val = readl_relaxed(clk->ns_reg);
+ ns_reg_val = readl_relaxed(rcg->ns_reg);
/* Assert bank MND reset. */
ns_reg_val |= new_bank_masks->rst_mask;
- writel_relaxed(ns_reg_val, clk->ns_reg);
+ writel_relaxed(ns_reg_val, rcg->ns_reg);
/*
* Program NS only if the clock is enabled, since the NS will be set
* as part of the enable procedure and should remain with a low-power
* MUX input selected until then.
*/
- if (clk->enabled) {
+ if (rcg->enabled) {
ns_reg_val &= ~(new_bank_masks->ns_mask);
ns_reg_val |= (nf->ns_val & new_bank_masks->ns_mask);
- writel_relaxed(ns_reg_val, clk->ns_reg);
+ writel_relaxed(ns_reg_val, rcg->ns_reg);
}
writel_relaxed(nf->md_val, new_bank_masks->md_reg);
/* Enable counter only if clock is enabled. */
- if (clk->enabled)
+ if (rcg->enabled)
ctl_reg_val |= new_bank_masks->mnd_en_mask;
else
ctl_reg_val &= ~(new_bank_masks->mnd_en_mask);
ctl_reg_val &= ~(new_bank_masks->mode_mask);
ctl_reg_val |= (nf->ctl_val & new_bank_masks->mode_mask);
- writel_relaxed(ctl_reg_val, clk->b.ctl_reg);
+ writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);
/* Deassert bank MND reset. */
ns_reg_val &= ~(new_bank_masks->rst_mask);
- writel_relaxed(ns_reg_val, clk->ns_reg);
+ writel_relaxed(ns_reg_val, rcg->ns_reg);
/*
* Switch to the new bank if clock is running. If it isn't, then
* no switch is necessary since we programmed the active bank.
*/
- if (clk->enabled && clk->current_freq->freq_hz) {
+ if (rcg->enabled && rcg->current_freq->freq_hz) {
ctl_reg_val ^= banks->bank_sel_mask;
- writel_relaxed(ctl_reg_val, clk->b.ctl_reg);
+ writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);
/*
* Wait at least 6 cycles of slowest bank's clock
* for the glitch-free MUX to fully switch sources.
@@ -184,22 +184,22 @@
/* Disable old bank's MN counter. */
ctl_reg_val &= ~(old_bank_masks->mnd_en_mask);
- writel_relaxed(ctl_reg_val, clk->b.ctl_reg);
+ writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);
/* Program old bank to a low-power source and divider. */
ns_reg_val &= ~(old_bank_masks->ns_mask);
- ns_reg_val |= (clk->freq_tbl->ns_val & old_bank_masks->ns_mask);
- writel_relaxed(ns_reg_val, clk->ns_reg);
+ ns_reg_val |= (rcg->freq_tbl->ns_val & old_bank_masks->ns_mask);
+ writel_relaxed(ns_reg_val, rcg->ns_reg);
}
/* Update the MND_EN and NS masks to match the current bank. */
- clk->mnd_en_mask = new_bank_masks->mnd_en_mask;
- clk->ns_mask = new_bank_masks->ns_mask;
+ rcg->mnd_en_mask = new_bank_masks->mnd_en_mask;
+ rcg->ns_mask = new_bank_masks->ns_mask;
}
-void set_rate_div_banked(struct rcg_clk *clk, struct clk_freq_tbl *nf)
+void set_rate_div_banked(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
- struct bank_masks *banks = clk->bank_info;
+ struct bank_masks *banks = rcg->bank_info;
const struct bank_mask_info *new_bank_masks;
const struct bank_mask_info *old_bank_masks;
uint32_t ns_reg_val, bank_sel;
@@ -209,10 +209,10 @@
* off, program the active bank since bank switching won't work if
* both banks aren't running.
*/
- ns_reg_val = readl_relaxed(clk->ns_reg);
+ ns_reg_val = readl_relaxed(rcg->ns_reg);
bank_sel = !!(ns_reg_val & banks->bank_sel_mask);
/* If clock isn't running, don't switch banks. */
- bank_sel ^= (!clk->enabled || clk->current_freq->freq_hz == 0);
+ bank_sel ^= (!rcg->enabled || rcg->current_freq->freq_hz == 0);
if (bank_sel == 0) {
new_bank_masks = &banks->bank1_mask;
old_bank_masks = &banks->bank0_mask;
@@ -226,19 +226,19 @@
* as part of the enable procedure and should remain with a low-power
* MUX input selected until then.
*/
- if (clk->enabled) {
+ if (rcg->enabled) {
ns_reg_val &= ~(new_bank_masks->ns_mask);
ns_reg_val |= (nf->ns_val & new_bank_masks->ns_mask);
- writel_relaxed(ns_reg_val, clk->ns_reg);
+ writel_relaxed(ns_reg_val, rcg->ns_reg);
}
/*
* Switch to the new bank if clock is running. If it isn't, then
* no switch is necessary since we programmed the active bank.
*/
- if (clk->enabled && clk->current_freq->freq_hz) {
+ if (rcg->enabled && rcg->current_freq->freq_hz) {
ns_reg_val ^= banks->bank_sel_mask;
- writel_relaxed(ns_reg_val, clk->ns_reg);
+ writel_relaxed(ns_reg_val, rcg->ns_reg);
/*
* Wait at least 6 cycles of slowest bank's clock
* for the glitch-free MUX to fully switch sources.
@@ -248,12 +248,12 @@
/* Program old bank to a low-power source and divider. */
ns_reg_val &= ~(old_bank_masks->ns_mask);
- ns_reg_val |= (clk->freq_tbl->ns_val & old_bank_masks->ns_mask);
- writel_relaxed(ns_reg_val, clk->ns_reg);
+ ns_reg_val |= (rcg->freq_tbl->ns_val & old_bank_masks->ns_mask);
+ writel_relaxed(ns_reg_val, rcg->ns_reg);
}
/* Update the NS mask to match the current bank. */
- clk->ns_mask = new_bank_masks->ns_mask;
+ rcg->ns_mask = new_bank_masks->ns_mask;
}
/*
@@ -261,10 +261,10 @@
*/
/* Return non-zero if a clock status registers shows the clock is halted. */
-static int branch_clk_is_halted(const struct branch *clk)
+static int branch_clk_is_halted(const struct branch *b)
{
- int invert = (clk->halt_check == ENABLE);
- int status_bit = readl_relaxed(clk->halt_reg) & BIT(clk->halt_bit);
+ int invert = (b->halt_check == ENABLE);
+ int status_bit = readl_relaxed(b->halt_reg) & BIT(b->halt_bit);
return invert ? !status_bit : status_bit;
}
@@ -276,14 +276,14 @@
return !!(readl_relaxed(b->hwcg_reg) & b->hwcg_mask);
}
-void __branch_clk_enable_reg(const struct branch *clk, const char *name)
+void __branch_clk_enable_reg(const struct branch *b, const char *name)
{
u32 reg_val;
- if (clk->en_mask) {
- reg_val = readl_relaxed(clk->ctl_reg);
- reg_val |= clk->en_mask;
- writel_relaxed(reg_val, clk->ctl_reg);
+ if (b->en_mask) {
+ reg_val = readl_relaxed(b->ctl_reg);
+ reg_val |= b->en_mask;
+ writel_relaxed(reg_val, b->ctl_reg);
}
/*
@@ -295,19 +295,19 @@
mb();
/* Skip checking halt bit if the clock is in hardware gated mode */
- if (branch_in_hwcg_mode(clk))
+ if (branch_in_hwcg_mode(b))
return;
/* Wait for clock to enable before returning. */
- if (clk->halt_check == DELAY)
+ if (b->halt_check == DELAY) {
udelay(HALT_CHECK_DELAY_US);
- else if (clk->halt_check == ENABLE || clk->halt_check == HALT
- || clk->halt_check == ENABLE_VOTED
- || clk->halt_check == HALT_VOTED) {
+ } else if (b->halt_check == ENABLE || b->halt_check == HALT
+ || b->halt_check == ENABLE_VOTED
+ || b->halt_check == HALT_VOTED) {
int count;
/* Wait up to HALT_CHECK_MAX_LOOPS for clock to enable. */
- for (count = HALT_CHECK_MAX_LOOPS; branch_clk_is_halted(clk)
+ for (count = HALT_CHECK_MAX_LOOPS; branch_clk_is_halted(b)
&& count > 0; count--)
udelay(1);
WARN(count == 0, "%s status stuck at 'off'", name);
@@ -315,50 +315,50 @@
}
/* Perform any register operations required to enable the clock. */
-static void __rcg_clk_enable_reg(struct rcg_clk *clk)
+static void __rcg_clk_enable_reg(struct rcg_clk *rcg)
{
u32 reg_val;
- void __iomem *const reg = clk->b.ctl_reg;
+ void __iomem *const reg = rcg->b.ctl_reg;
- WARN(clk->current_freq == &rcg_dummy_freq,
+ WARN(rcg->current_freq == &rcg_dummy_freq,
"Attempting to enable %s before setting its rate. "
- "Set the rate first!\n", clk->c.dbg_name);
+ "Set the rate first!\n", rcg->c.dbg_name);
/*
* Program the NS register, if applicable. NS registers are not
* set in the set_rate path because power can be saved by deferring
* the selection of a clocked source until the clock is enabled.
*/
- if (clk->ns_mask) {
- reg_val = readl_relaxed(clk->ns_reg);
- reg_val &= ~(clk->ns_mask);
- reg_val |= (clk->current_freq->ns_val & clk->ns_mask);
- writel_relaxed(reg_val, clk->ns_reg);
+ if (rcg->ns_mask) {
+ reg_val = readl_relaxed(rcg->ns_reg);
+ reg_val &= ~(rcg->ns_mask);
+ reg_val |= (rcg->current_freq->ns_val & rcg->ns_mask);
+ writel_relaxed(reg_val, rcg->ns_reg);
}
/* Enable MN counter, if applicable. */
reg_val = readl_relaxed(reg);
- if (clk->current_freq->md_val) {
- reg_val |= clk->mnd_en_mask;
+ if (rcg->current_freq->md_val) {
+ reg_val |= rcg->mnd_en_mask;
writel_relaxed(reg_val, reg);
}
/* Enable root. */
- if (clk->root_en_mask) {
- reg_val |= clk->root_en_mask;
+ if (rcg->root_en_mask) {
+ reg_val |= rcg->root_en_mask;
writel_relaxed(reg_val, reg);
}
- __branch_clk_enable_reg(&clk->b, clk->c.dbg_name);
+ __branch_clk_enable_reg(&rcg->b, rcg->c.dbg_name);
}
/* Perform any register operations required to disable the branch. */
-u32 __branch_clk_disable_reg(const struct branch *clk, const char *name)
+u32 __branch_clk_disable_reg(const struct branch *b, const char *name)
{
u32 reg_val;
- reg_val = readl_relaxed(clk->ctl_reg);
- if (clk->en_mask) {
- reg_val &= ~(clk->en_mask);
- writel_relaxed(reg_val, clk->ctl_reg);
+ reg_val = readl_relaxed(b->ctl_reg);
+ if (b->en_mask) {
+ reg_val &= ~(b->en_mask);
+ writel_relaxed(reg_val, b->ctl_reg);
}
/*
@@ -370,18 +370,18 @@
mb();
/* Skip checking halt bit if the clock is in hardware gated mode */
- if (branch_in_hwcg_mode(clk))
+ if (branch_in_hwcg_mode(b))
return reg_val;
/* Wait for clock to disable before continuing. */
- if (clk->halt_check == DELAY || clk->halt_check == ENABLE_VOTED
- || clk->halt_check == HALT_VOTED)
+ if (b->halt_check == DELAY || b->halt_check == ENABLE_VOTED
+ || b->halt_check == HALT_VOTED) {
udelay(HALT_CHECK_DELAY_US);
- else if (clk->halt_check == ENABLE || clk->halt_check == HALT) {
+ } else if (b->halt_check == ENABLE || b->halt_check == HALT) {
int count;
/* Wait up to HALT_CHECK_MAX_LOOPS for clock to disable. */
- for (count = HALT_CHECK_MAX_LOOPS; !branch_clk_is_halted(clk)
+ for (count = HALT_CHECK_MAX_LOOPS; !branch_clk_is_halted(b)
&& count > 0; count--)
udelay(1);
WARN(count == 0, "%s status stuck at 'on'", name);
@@ -391,31 +391,31 @@
}
/* Perform any register operations required to disable the generator. */
-static void __rcg_clk_disable_reg(struct rcg_clk *clk)
+static void __rcg_clk_disable_reg(struct rcg_clk *rcg)
{
- void __iomem *const reg = clk->b.ctl_reg;
+ void __iomem *const reg = rcg->b.ctl_reg;
uint32_t reg_val;
- reg_val = __branch_clk_disable_reg(&clk->b, clk->c.dbg_name);
+ reg_val = __branch_clk_disable_reg(&rcg->b, rcg->c.dbg_name);
/* Disable root. */
- if (clk->root_en_mask) {
- reg_val &= ~(clk->root_en_mask);
+ if (rcg->root_en_mask) {
+ reg_val &= ~(rcg->root_en_mask);
writel_relaxed(reg_val, reg);
}
/* Disable MN counter, if applicable. */
- if (clk->current_freq->md_val) {
- reg_val &= ~(clk->mnd_en_mask);
+ if (rcg->current_freq->md_val) {
+ reg_val &= ~(rcg->mnd_en_mask);
writel_relaxed(reg_val, reg);
}
/*
* Program NS register to low-power value with an un-clocked or
* slowly-clocked source selected.
*/
- if (clk->ns_mask) {
- reg_val = readl_relaxed(clk->ns_reg);
- reg_val &= ~(clk->ns_mask);
- reg_val |= (clk->freq_tbl->ns_val & clk->ns_mask);
- writel_relaxed(reg_val, clk->ns_reg);
+ if (rcg->ns_mask) {
+ reg_val = readl_relaxed(rcg->ns_reg);
+ reg_val &= ~(rcg->ns_mask);
+ reg_val |= (rcg->freq_tbl->ns_val & rcg->ns_mask);
+ writel_relaxed(reg_val, rcg->ns_reg);
}
}
@@ -423,11 +423,11 @@
static int rcg_clk_enable(struct clk *c)
{
unsigned long flags;
- struct rcg_clk *clk = to_rcg_clk(c);
+ struct rcg_clk *rcg = to_rcg_clk(c);
spin_lock_irqsave(&local_clock_reg_lock, flags);
- __rcg_clk_enable_reg(clk);
- clk->enabled = true;
+ __rcg_clk_enable_reg(rcg);
+ rcg->enabled = true;
spin_unlock_irqrestore(&local_clock_reg_lock, flags);
return 0;
@@ -437,11 +437,11 @@
static void rcg_clk_disable(struct clk *c)
{
unsigned long flags;
- struct rcg_clk *clk = to_rcg_clk(c);
+ struct rcg_clk *rcg = to_rcg_clk(c);
spin_lock_irqsave(&local_clock_reg_lock, flags);
- __rcg_clk_disable_reg(clk);
- clk->enabled = false;
+ __rcg_clk_disable_reg(rcg);
+ rcg->enabled = false;
spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}
@@ -452,21 +452,21 @@
/* Set a clock to an exact rate. */
static int rcg_clk_set_rate(struct clk *c, unsigned long rate)
{
- struct rcg_clk *clk = to_rcg_clk(c);
+ struct rcg_clk *rcg = to_rcg_clk(c);
struct clk_freq_tbl *nf, *cf;
struct clk *chld;
int rc = 0;
- for (nf = clk->freq_tbl; nf->freq_hz != FREQ_END
+ for (nf = rcg->freq_tbl; nf->freq_hz != FREQ_END
&& nf->freq_hz != rate; nf++)
;
if (nf->freq_hz == FREQ_END)
return -EINVAL;
- cf = clk->current_freq;
+ cf = rcg->current_freq;
- if (clk->enabled) {
+ if (rcg->enabled) {
/* Enable source clock dependency for the new freq. */
rc = clk_enable(nf->src_clk);
if (rc)
@@ -476,9 +476,9 @@
spin_lock(&local_clock_reg_lock);
/* Disable branch if clock isn't dual-banked with a glitch-free MUX. */
- if (!clk->bank_info) {
+ if (!rcg->bank_info) {
/* Disable all branches to prevent glitches. */
- list_for_each_entry(chld, &clk->c.children, siblings) {
+ list_for_each_entry(chld, &rcg->c.children, siblings) {
struct branch_clk *x = to_branch_clk(chld);
/*
* We don't need to grab the child's lock because
@@ -488,26 +488,26 @@
if (x->enabled)
__branch_clk_disable_reg(&x->b, x->c.dbg_name);
}
- if (clk->enabled)
- __rcg_clk_disable_reg(clk);
+ if (rcg->enabled)
+ __rcg_clk_disable_reg(rcg);
}
/* Perform clock-specific frequency switch operations. */
- BUG_ON(!clk->set_rate);
- clk->set_rate(clk, nf);
+ BUG_ON(!rcg->set_rate);
+ rcg->set_rate(rcg, nf);
/*
* Current freq must be updated before __rcg_clk_enable_reg()
* is called to make sure the MNCNTR_EN bit is set correctly.
*/
- clk->current_freq = nf;
+ rcg->current_freq = nf;
/* Enable any clocks that were disabled. */
- if (!clk->bank_info) {
- if (clk->enabled)
- __rcg_clk_enable_reg(clk);
+ if (!rcg->bank_info) {
+ if (rcg->enabled)
+ __rcg_clk_enable_reg(rcg);
/* Enable only branches that were ON before. */
- list_for_each_entry(chld, &clk->c.children, siblings) {
+ list_for_each_entry(chld, &rcg->c.children, siblings) {
struct branch_clk *x = to_branch_clk(chld);
if (x->enabled)
__branch_clk_enable_reg(&x->b, x->c.dbg_name);
@@ -517,25 +517,25 @@
spin_unlock(&local_clock_reg_lock);
/* Release source requirements of the old freq. */
- if (clk->enabled)
+ if (rcg->enabled)
clk_disable(cf->src_clk);
return rc;
}
/* Check if a clock is currently enabled. */
-static int rcg_clk_is_enabled(struct clk *clk)
+static int rcg_clk_is_enabled(struct clk *c)
{
- return to_rcg_clk(clk)->enabled;
+ return to_rcg_clk(c)->enabled;
}
/* Return a supported rate that's at least the specified rate. */
static long rcg_clk_round_rate(struct clk *c, unsigned long rate)
{
- struct rcg_clk *clk = to_rcg_clk(c);
+ struct rcg_clk *rcg = to_rcg_clk(c);
struct clk_freq_tbl *f;
- for (f = clk->freq_tbl; f->freq_hz != FREQ_END; f++)
+ for (f = rcg->freq_tbl; f->freq_hz != FREQ_END; f++)
if (f->freq_hz >= rate)
return f->freq_hz;
@@ -545,26 +545,26 @@
/* Return the nth supported frequency for a given clock. */
static int rcg_clk_list_rate(struct clk *c, unsigned n)
{
- struct rcg_clk *clk = to_rcg_clk(c);
+ struct rcg_clk *rcg = to_rcg_clk(c);
- if (!clk->freq_tbl || clk->freq_tbl->freq_hz == FREQ_END)
+ if (!rcg->freq_tbl || rcg->freq_tbl->freq_hz == FREQ_END)
return -ENXIO;
- return (clk->freq_tbl + n)->freq_hz;
+ return (rcg->freq_tbl + n)->freq_hz;
}
-static struct clk *rcg_clk_get_parent(struct clk *clk)
+static struct clk *rcg_clk_get_parent(struct clk *c)
{
- return to_rcg_clk(clk)->current_freq->src_clk;
+ return to_rcg_clk(c)->current_freq->src_clk;
}
/* Disable hw clock gating if not set at boot */
-enum handoff branch_handoff(struct branch *clk, struct clk *c)
+enum handoff branch_handoff(struct branch *b, struct clk *c)
{
- if (!branch_in_hwcg_mode(clk)) {
- clk->hwcg_mask = 0;
+ if (!branch_in_hwcg_mode(b)) {
+ b->hwcg_mask = 0;
c->flags &= ~CLKFLAG_HWCG;
- if (readl_relaxed(clk->ctl_reg) & clk->en_mask)
+ if (readl_relaxed(b->ctl_reg) & b->en_mask)
return HANDOFF_ENABLED_CLK;
} else {
c->flags |= CLKFLAG_HWCG;
@@ -574,24 +574,24 @@
static enum handoff branch_clk_handoff(struct clk *c)
{
- struct branch_clk *clk = to_branch_clk(c);
- return branch_handoff(&clk->b, &clk->c);
+ struct branch_clk *br = to_branch_clk(c);
+ return branch_handoff(&br->b, &br->c);
}
static enum handoff rcg_clk_handoff(struct clk *c)
{
- struct rcg_clk *clk = to_rcg_clk(c);
+ struct rcg_clk *rcg = to_rcg_clk(c);
uint32_t ctl_val, ns_val, md_val, ns_mask;
struct clk_freq_tbl *freq;
enum handoff ret;
- ctl_val = readl_relaxed(clk->b.ctl_reg);
- ret = branch_handoff(&clk->b, &clk->c);
+ ctl_val = readl_relaxed(rcg->b.ctl_reg);
+ ret = branch_handoff(&rcg->b, &rcg->c);
if (ret == HANDOFF_DISABLED_CLK)
return HANDOFF_DISABLED_CLK;
- if (clk->bank_info) {
- const struct bank_masks *bank_masks = clk->bank_info;
+ if (rcg->bank_info) {
+ const struct bank_masks *bank_masks = rcg->bank_info;
const struct bank_mask_info *bank_info;
if (!(ctl_val & bank_masks->bank_sel_mask))
bank_info = &bank_masks->bank0_mask;
@@ -602,13 +602,13 @@
md_val = bank_info->md_reg ?
readl_relaxed(bank_info->md_reg) : 0;
} else {
- ns_mask = clk->ns_mask;
- md_val = clk->md_reg ? readl_relaxed(clk->md_reg) : 0;
+ ns_mask = rcg->ns_mask;
+ md_val = rcg->md_reg ? readl_relaxed(rcg->md_reg) : 0;
}
if (!ns_mask)
return HANDOFF_UNKNOWN_RATE;
- ns_val = readl_relaxed(clk->ns_reg) & ns_mask;
- for (freq = clk->freq_tbl; freq->freq_hz != FREQ_END; freq++) {
+ ns_val = readl_relaxed(rcg->ns_reg) & ns_mask;
+ for (freq = rcg->freq_tbl; freq->freq_hz != FREQ_END; freq++) {
if ((freq->ns_val & ns_mask) == ns_val &&
(!freq->md_val || freq->md_val == md_val))
break;
@@ -616,7 +616,7 @@
if (freq->freq_hz == FREQ_END)
return HANDOFF_UNKNOWN_RATE;
- clk->current_freq = freq;
+ rcg->current_freq = freq;
c->rate = freq->freq_hz;
return HANDOFF_ENABLED_CLK;
@@ -632,40 +632,38 @@
},
};
-static int branch_clk_enable(struct clk *clk)
+static int branch_clk_enable(struct clk *c)
{
unsigned long flags;
- struct branch_clk *branch = to_branch_clk(clk);
+ struct branch_clk *br = to_branch_clk(c);
spin_lock_irqsave(&local_clock_reg_lock, flags);
- __branch_clk_enable_reg(&branch->b, branch->c.dbg_name);
- branch->enabled = true;
+ __branch_clk_enable_reg(&br->b, br->c.dbg_name);
+ br->enabled = true;
spin_unlock_irqrestore(&local_clock_reg_lock, flags);
return 0;
}
-static void branch_clk_disable(struct clk *clk)
+static void branch_clk_disable(struct clk *c)
{
unsigned long flags;
- struct branch_clk *branch = to_branch_clk(clk);
+ struct branch_clk *br = to_branch_clk(c);
spin_lock_irqsave(&local_clock_reg_lock, flags);
- __branch_clk_disable_reg(&branch->b, branch->c.dbg_name);
- branch->enabled = false;
+ __branch_clk_disable_reg(&br->b, br->c.dbg_name);
+ br->enabled = false;
spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}
-static struct clk *branch_clk_get_parent(struct clk *clk)
+static struct clk *branch_clk_get_parent(struct clk *c)
{
- struct branch_clk *branch = to_branch_clk(clk);
- return branch->parent;
+ return to_branch_clk(c)->parent;
}
-static int branch_clk_is_enabled(struct clk *clk)
+static int branch_clk_is_enabled(struct clk *c)
{
- struct branch_clk *branch = to_branch_clk(clk);
- return branch->enabled;
+ return to_branch_clk(c)->enabled;
}
static void branch_enable_hwcg(struct branch *b)
@@ -692,16 +690,14 @@
spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}
-static void branch_clk_enable_hwcg(struct clk *clk)
+static void branch_clk_enable_hwcg(struct clk *c)
{
- struct branch_clk *branch = to_branch_clk(clk);
- branch_enable_hwcg(&branch->b);
+ branch_enable_hwcg(&to_branch_clk(c)->b);
}
-static void branch_clk_disable_hwcg(struct clk *clk)
+static void branch_clk_disable_hwcg(struct clk *c)
{
- struct branch_clk *branch = to_branch_clk(clk);
- branch_disable_hwcg(&branch->b);
+ branch_disable_hwcg(&to_branch_clk(c)->b);
}
static int branch_set_flags(struct branch *b, unsigned flags)
@@ -738,26 +734,22 @@
static int branch_clk_in_hwcg_mode(struct clk *c)
{
- struct branch_clk *clk = to_branch_clk(c);
- return branch_in_hwcg_mode(&clk->b);
+ return branch_in_hwcg_mode(&to_branch_clk(c)->b);
}
-static void rcg_clk_enable_hwcg(struct clk *clk)
+static void rcg_clk_enable_hwcg(struct clk *c)
{
- struct rcg_clk *rcg = to_rcg_clk(clk);
- branch_enable_hwcg(&rcg->b);
+ branch_enable_hwcg(&to_rcg_clk(c)->b);
}
-static void rcg_clk_disable_hwcg(struct clk *clk)
+static void rcg_clk_disable_hwcg(struct clk *c)
{
- struct rcg_clk *rcg = to_rcg_clk(clk);
- branch_disable_hwcg(&rcg->b);
+ branch_disable_hwcg(&to_rcg_clk(c)->b);
}
static int rcg_clk_in_hwcg_mode(struct clk *c)
{
- struct rcg_clk *clk = to_rcg_clk(c);
- return branch_in_hwcg_mode(&clk->b);
+ return branch_in_hwcg_mode(&to_rcg_clk(c)->b);
}
static int rcg_clk_set_flags(struct clk *clk, unsigned flags)
@@ -802,9 +794,9 @@
return ret;
}
-static int branch_clk_reset(struct clk *clk, enum clk_reset_action action)
+static int branch_clk_reset(struct clk *c, enum clk_reset_action action)
{
- return branch_reset(&to_branch_clk(clk)->b, action);
+ return branch_reset(&to_branch_clk(c)->b, action);
}
struct clk_ops clk_ops_branch = {
@@ -825,9 +817,9 @@
.reset = branch_clk_reset,
};
-static int rcg_clk_reset(struct clk *clk, enum clk_reset_action action)
+static int rcg_clk_reset(struct clk *c, enum clk_reset_action action)
{
- return branch_reset(&to_rcg_clk(clk)->b, action);
+ return branch_reset(&to_rcg_clk(c)->b, action);
}
struct clk_ops clk_ops_rcg = {
@@ -850,10 +842,10 @@
static int cdiv_clk_enable(struct clk *c)
{
unsigned long flags;
- struct cdiv_clk *clk = to_cdiv_clk(c);
+ struct cdiv_clk *cdiv = to_cdiv_clk(c);
spin_lock_irqsave(&local_clock_reg_lock, flags);
- __branch_clk_enable_reg(&clk->b, clk->c.dbg_name);
+ __branch_clk_enable_reg(&cdiv->b, cdiv->c.dbg_name);
spin_unlock_irqrestore(&local_clock_reg_lock, flags);
return 0;
@@ -862,70 +854,67 @@
static void cdiv_clk_disable(struct clk *c)
{
unsigned long flags;
- struct cdiv_clk *clk = to_cdiv_clk(c);
+ struct cdiv_clk *cdiv = to_cdiv_clk(c);
spin_lock_irqsave(&local_clock_reg_lock, flags);
- __branch_clk_disable_reg(&clk->b, clk->c.dbg_name);
+ __branch_clk_disable_reg(&cdiv->b, cdiv->c.dbg_name);
spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}
static int cdiv_clk_set_rate(struct clk *c, unsigned long rate)
{
- struct cdiv_clk *clk = to_cdiv_clk(c);
+ struct cdiv_clk *cdiv = to_cdiv_clk(c);
u32 reg_val;
- if (rate > clk->max_div)
+ if (rate > cdiv->max_div)
return -EINVAL;
spin_lock(&local_clock_reg_lock);
- reg_val = readl_relaxed(clk->ns_reg);
- reg_val &= ~(clk->ext_mask | (clk->max_div - 1) << clk->div_offset);
+ reg_val = readl_relaxed(cdiv->ns_reg);
+ reg_val &= ~(cdiv->ext_mask | (cdiv->max_div - 1) << cdiv->div_offset);
/* Non-zero rates mean set a divider, zero means use external input */
if (rate)
- reg_val |= (rate - 1) << clk->div_offset;
+ reg_val |= (rate - 1) << cdiv->div_offset;
else
- reg_val |= clk->ext_mask;
- writel_relaxed(reg_val, clk->ns_reg);
+ reg_val |= cdiv->ext_mask;
+ writel_relaxed(reg_val, cdiv->ns_reg);
spin_unlock(&local_clock_reg_lock);
- clk->cur_div = rate;
+ cdiv->cur_div = rate;
return 0;
}
static unsigned long cdiv_clk_get_rate(struct clk *c)
{
- struct cdiv_clk *clk = to_cdiv_clk(c);
- return clk->cur_div;
+ return to_cdiv_clk(c)->cur_div;
}
static long cdiv_clk_round_rate(struct clk *c, unsigned long rate)
{
- struct cdiv_clk *clk = to_cdiv_clk(c);
- return rate > clk->max_div ? -EPERM : rate;
+ return rate > to_cdiv_clk(c)->max_div ? -EPERM : rate;
}
static int cdiv_clk_list_rate(struct clk *c, unsigned n)
{
- struct cdiv_clk *clk = to_cdiv_clk(c);
- return n > clk->max_div ? -ENXIO : n;
+ return n > to_cdiv_clk(c)->max_div ? -ENXIO : n;
}
static enum handoff cdiv_clk_handoff(struct clk *c)
{
- struct cdiv_clk *clk = to_cdiv_clk(c);
+ struct cdiv_clk *cdiv = to_cdiv_clk(c);
enum handoff ret;
u32 reg_val;
- ret = branch_handoff(&clk->b, &clk->c);
+ ret = branch_handoff(&cdiv->b, &cdiv->c);
if (ret == HANDOFF_DISABLED_CLK)
return ret;
- reg_val = readl_relaxed(clk->ns_reg);
- if (reg_val & clk->ext_mask) {
- clk->cur_div = 0;
+ reg_val = readl_relaxed(cdiv->ns_reg);
+ if (reg_val & cdiv->ext_mask) {
+ cdiv->cur_div = 0;
} else {
- reg_val >>= clk->div_offset;
- clk->cur_div = (reg_val & (clk->max_div - 1)) + 1;
+ reg_val >>= cdiv->div_offset;
+ cdiv->cur_div = (reg_val & (cdiv->max_div - 1)) + 1;
}
return HANDOFF_ENABLED_CLK;
@@ -933,20 +922,17 @@
static void cdiv_clk_enable_hwcg(struct clk *c)
{
- struct cdiv_clk *clk = to_cdiv_clk(c);
- branch_enable_hwcg(&clk->b);
+ branch_enable_hwcg(&to_cdiv_clk(c)->b);
}
static void cdiv_clk_disable_hwcg(struct clk *c)
{
- struct cdiv_clk *clk = to_cdiv_clk(c);
- branch_disable_hwcg(&clk->b);
+ branch_disable_hwcg(&to_cdiv_clk(c)->b);
}
static int cdiv_clk_in_hwcg_mode(struct clk *c)
{
- struct cdiv_clk *clk = to_cdiv_clk(c);
- return branch_in_hwcg_mode(&clk->b);
+ return branch_in_hwcg_mode(&to_cdiv_clk(c)->b);
}
struct clk_ops clk_ops_cdiv = {
diff --git a/arch/arm/mach-msm/clock-local.h b/arch/arm/mach-msm/clock-local.h
index ffc7057..c0a7827 100644
--- a/arch/arm/mach-msm/clock-local.h
+++ b/arch/arm/mach-msm/clock-local.h
@@ -156,9 +156,9 @@
extern struct clk_ops clk_ops_reset;
int branch_reset(struct branch *b, enum clk_reset_action action);
-void __branch_clk_enable_reg(const struct branch *clk, const char *name);
-u32 __branch_clk_disable_reg(const struct branch *clk, const char *name);
-enum handoff branch_handoff(struct branch *clk, struct clk *c);
+void __branch_clk_enable_reg(const struct branch *b, const char *name);
+u32 __branch_clk_disable_reg(const struct branch *b, const char *name);
+enum handoff branch_handoff(struct branch *b, struct clk *c);
/*
* Generic clock-definition struct and macros
@@ -183,9 +183,9 @@
struct clk c;
};
-static inline struct rcg_clk *to_rcg_clk(struct clk *clk)
+static inline struct rcg_clk *to_rcg_clk(struct clk *c)
{
- return container_of(clk, struct rcg_clk, c);
+ return container_of(c, struct rcg_clk, c);
}
extern struct clk_ops clk_ops_rcg;
@@ -214,9 +214,9 @@
struct clk c;
};
-static inline struct cdiv_clk *to_cdiv_clk(struct clk *clk)
+static inline struct cdiv_clk *to_cdiv_clk(struct clk *c)
{
- return container_of(clk, struct cdiv_clk, c);
+ return container_of(c, struct cdiv_clk, c);
}
extern struct clk_ops clk_ops_cdiv;
@@ -234,7 +234,7 @@
* @enabled: true if clock is on, false otherwise
* @b: branch
* @parent: clock source
- * @c: clk
+ * @c: clock
*
* An on/off switch with a rate derived from the parent.
*/
@@ -245,9 +245,9 @@
struct clk c;
};
-static inline struct branch_clk *to_branch_clk(struct clk *clk)
+static inline struct branch_clk *to_branch_clk(struct clk *c)
{
- return container_of(clk, struct branch_clk, c);
+ return container_of(c, struct branch_clk, c);
}
/**
@@ -255,7 +255,7 @@
* @sample_ticks: sample period in reference clock ticks
* @multiplier: measurement scale-up factor
* @divider: measurement scale-down factor
- * @c: clk
+ * @c: clock
*/
struct measure_clk {
u64 sample_ticks;
@@ -266,9 +266,9 @@
extern struct clk_ops clk_ops_empty;
-static inline struct measure_clk *to_measure_clk(struct clk *clk)
+static inline struct measure_clk *to_measure_clk(struct clk *c)
{
- return container_of(clk, struct measure_clk, c);
+ return container_of(c, struct measure_clk, c);
}
/*
@@ -280,11 +280,11 @@
/*
* Generic set-rate implementations
*/
-void set_rate_mnd(struct rcg_clk *clk, struct clk_freq_tbl *nf);
-void set_rate_nop(struct rcg_clk *clk, struct clk_freq_tbl *nf);
-void set_rate_mnd_8(struct rcg_clk *clk, struct clk_freq_tbl *nf);
-void set_rate_mnd_banked(struct rcg_clk *clk, struct clk_freq_tbl *nf);
-void set_rate_div_banked(struct rcg_clk *clk, struct clk_freq_tbl *nf);
+void set_rate_mnd(struct rcg_clk *rcg, struct clk_freq_tbl *nf);
+void set_rate_nop(struct rcg_clk *rcg, struct clk_freq_tbl *nf);
+void set_rate_mnd_8(struct rcg_clk *rcg, struct clk_freq_tbl *nf);
+void set_rate_mnd_banked(struct rcg_clk *rcg, struct clk_freq_tbl *nf);
+void set_rate_div_banked(struct rcg_clk *rcg, struct clk_freq_tbl *nf);
#endif /* __ARCH_ARM_MACH_MSM_CLOCK_LOCAL_H */
diff --git a/arch/arm/mach-msm/clock-pll.c b/arch/arm/mach-msm/clock-pll.c
index d839911..49bb063 100644
--- a/arch/arm/mach-msm/clock-pll.c
+++ b/arch/arm/mach-msm/clock-pll.c
@@ -55,16 +55,16 @@
#define ENABLE_WAIT_MAX_LOOPS 200
-int pll_vote_clk_enable(struct clk *clk)
+int pll_vote_clk_enable(struct clk *c)
{
u32 ena, count;
unsigned long flags;
- struct pll_vote_clk *pll = to_pll_vote_clk(clk);
+ struct pll_vote_clk *pllv = to_pll_vote_clk(c);
spin_lock_irqsave(&pll_reg_lock, flags);
- ena = readl_relaxed(PLL_EN_REG(pll));
- ena |= pll->en_mask;
- writel_relaxed(ena, PLL_EN_REG(pll));
+ ena = readl_relaxed(PLL_EN_REG(pllv));
+ ena |= pllv->en_mask;
+ writel_relaxed(ena, PLL_EN_REG(pllv));
spin_unlock_irqrestore(&pll_reg_lock, flags);
/*
@@ -75,45 +75,44 @@
/* Wait for pll to enable. */
for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
- if (readl_relaxed(PLL_STATUS_REG(pll)) & pll->status_mask)
+ if (readl_relaxed(PLL_STATUS_REG(pllv)) & pllv->status_mask)
return 0;
udelay(1);
}
- WARN("PLL %s didn't enable after voting for it!\n", clk->dbg_name);
+ WARN(1, "PLL %s didn't enable after voting for it!\n", c->dbg_name);
return -ETIMEDOUT;
}
-void pll_vote_clk_disable(struct clk *clk)
+void pll_vote_clk_disable(struct clk *c)
{
u32 ena;
unsigned long flags;
- struct pll_vote_clk *pll = to_pll_vote_clk(clk);
+ struct pll_vote_clk *pllv = to_pll_vote_clk(c);
spin_lock_irqsave(&pll_reg_lock, flags);
- ena = readl_relaxed(PLL_EN_REG(pll));
- ena &= ~(pll->en_mask);
- writel_relaxed(ena, PLL_EN_REG(pll));
+ ena = readl_relaxed(PLL_EN_REG(pllv));
+ ena &= ~(pllv->en_mask);
+ writel_relaxed(ena, PLL_EN_REG(pllv));
spin_unlock_irqrestore(&pll_reg_lock, flags);
}
-struct clk *pll_vote_clk_get_parent(struct clk *clk)
+struct clk *pll_vote_clk_get_parent(struct clk *c)
{
- struct pll_vote_clk *pll = to_pll_vote_clk(clk);
- return pll->parent;
+ return to_pll_vote_clk(c)->parent;
}
-int pll_vote_clk_is_enabled(struct clk *clk)
+int pll_vote_clk_is_enabled(struct clk *c)
{
- struct pll_vote_clk *pll = to_pll_vote_clk(clk);
- return !!(readl_relaxed(PLL_STATUS_REG(pll)) & pll->status_mask);
+ struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+ return !!(readl_relaxed(PLL_STATUS_REG(pllv)) & pllv->status_mask);
}
-static enum handoff pll_vote_clk_handoff(struct clk *clk)
+static enum handoff pll_vote_clk_handoff(struct clk *c)
{
- struct pll_vote_clk *pll = to_pll_vote_clk(clk);
- if (readl_relaxed(PLL_EN_REG(pll)) & pll->en_mask)
+ struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+ if (readl_relaxed(PLL_EN_REG(pllv)) & pllv->en_mask)
return HANDOFF_ENABLED_CLK;
return HANDOFF_DISABLED_CLK;
@@ -158,10 +157,10 @@
mb();
}
-static int local_pll_clk_enable(struct clk *clk)
+static int local_pll_clk_enable(struct clk *c)
{
unsigned long flags;
- struct pll_clk *pll = to_pll_clk(clk);
+ struct pll_clk *pll = to_pll_clk(c);
spin_lock_irqsave(&pll_reg_lock, flags);
__pll_clk_enable_reg(PLL_MODE_REG(pll));
@@ -177,10 +176,10 @@
writel_relaxed(mode, mode_reg);
}
-static void local_pll_clk_disable(struct clk *clk)
+static void local_pll_clk_disable(struct clk *c)
{
unsigned long flags;
- struct pll_clk *pll = to_pll_clk(clk);
+ struct pll_clk *pll = to_pll_clk(c);
/*
* Disable the PLL output, disable test mode, enable
@@ -191,9 +190,9 @@
spin_unlock_irqrestore(&pll_reg_lock, flags);
}
-static enum handoff local_pll_clk_handoff(struct clk *clk)
+static enum handoff local_pll_clk_handoff(struct clk *c)
{
- struct pll_clk *pll = to_pll_clk(clk);
+ struct pll_clk *pll = to_pll_clk(c);
u32 mode = readl_relaxed(PLL_MODE_REG(pll));
u32 mask = PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL;
@@ -203,17 +202,16 @@
return HANDOFF_DISABLED_CLK;
}
-static struct clk *local_pll_clk_get_parent(struct clk *clk)
+static struct clk *local_pll_clk_get_parent(struct clk *c)
{
- struct pll_clk *pll = to_pll_clk(clk);
- return pll->parent;
+ return to_pll_clk(c)->parent;
}
-int sr_pll_clk_enable(struct clk *clk)
+int sr_pll_clk_enable(struct clk *c)
{
u32 mode;
unsigned long flags;
- struct pll_clk *pll = to_pll_clk(clk);
+ struct pll_clk *pll = to_pll_clk(c);
spin_lock_irqsave(&pll_reg_lock, flags);
mode = readl_relaxed(PLL_MODE_REG(pll));
@@ -250,10 +248,10 @@
#define PLL_LOCKED_BIT BIT(16)
-int copper_pll_clk_enable(struct clk *clk)
+int copper_pll_clk_enable(struct clk *c)
{
unsigned long flags;
- struct pll_clk *pll = to_pll_clk(clk);
+ struct pll_clk *pll = to_pll_clk(c);
u32 count, mode;
int ret = 0;
@@ -282,7 +280,7 @@
}
if (!(readl_relaxed(PLL_STATUS_REG(pll)) & PLL_LOCKED_BIT)) {
- WARN("PLL %s didn't lock after enabling it!\n", clk->dbg_name);
+ WARN(1, "PLL %s didn't lock after enabling it!\n", c->dbg_name);
ret = -ETIMEDOUT;
goto out;
}
@@ -379,9 +377,9 @@
}
-static int pll_clk_enable(struct clk *clk)
+static int pll_clk_enable(struct clk *c)
{
- struct pll_shared_clk *pll = to_pll_shared_clk(clk);
+ struct pll_shared_clk *pll = to_pll_shared_clk(c);
unsigned int pll_id = pll->id;
remote_spin_lock(&pll_lock);
@@ -396,9 +394,9 @@
return 0;
}
-static void pll_clk_disable(struct clk *clk)
+static void pll_clk_disable(struct clk *c)
{
- struct pll_shared_clk *pll = to_pll_shared_clk(clk);
+ struct pll_shared_clk *pll = to_pll_shared_clk(c);
unsigned int pll_id = pll->id;
remote_spin_lock(&pll_lock);
@@ -413,16 +411,14 @@
remote_spin_unlock(&pll_lock);
}
-static int pll_clk_is_enabled(struct clk *clk)
+static int pll_clk_is_enabled(struct clk *c)
{
- struct pll_shared_clk *pll = to_pll_shared_clk(clk);
-
- return readl_relaxed(PLL_MODE_REG(pll)) & BIT(0);
+ return readl_relaxed(PLL_MODE_REG(to_pll_shared_clk(c))) & BIT(0);
}
-static enum handoff pll_clk_handoff(struct clk *clk)
+static enum handoff pll_clk_handoff(struct clk *c)
{
- struct pll_shared_clk *pll = to_pll_shared_clk(clk);
+ struct pll_shared_clk *pll = to_pll_shared_clk(c);
unsigned int pll_lval;
struct pll_rate *l;
@@ -438,12 +434,12 @@
/* Convert PLL L values to PLL Output rate */
for (l = pll_l_rate; l->rate != 0; l++) {
if (l->lvalue == pll_lval) {
- clk->rate = l->rate;
+ c->rate = l->rate;
break;
}
}
- if (!clk->rate) {
+ if (!c->rate) {
pr_crit("Unknown PLL's L value!\n");
BUG();
}
diff --git a/arch/arm/mach-msm/clock-pll.h b/arch/arm/mach-msm/clock-pll.h
index a8c642f..f24b066 100644
--- a/arch/arm/mach-msm/clock-pll.h
+++ b/arch/arm/mach-msm/clock-pll.h
@@ -34,7 +34,7 @@
* @id: PLL ID
* @mode_reg: enable register
* @parent: clock source
- * @c: clk
+ * @c: clock
*/
struct pll_shared_clk {
unsigned int id;
@@ -45,9 +45,9 @@
extern struct clk_ops clk_ops_pll;
-static inline struct pll_shared_clk *to_pll_shared_clk(struct clk *clk)
+static inline struct pll_shared_clk *to_pll_shared_clk(struct clk *c)
{
- return container_of(clk, struct pll_shared_clk, c);
+ return container_of(c, struct pll_shared_clk, c);
}
/**
@@ -64,7 +64,7 @@
* @status_mask: ANDed with @status_reg to determine if PLL is active.
* @status_reg: status register
* @parent: clock source
- * @c: clk
+ * @c: clock
*/
struct pll_vote_clk {
u32 *soft_vote;
@@ -81,9 +81,9 @@
extern struct clk_ops clk_ops_pll_vote;
-static inline struct pll_vote_clk *to_pll_vote_clk(struct clk *clk)
+static inline struct pll_vote_clk *to_pll_vote_clk(struct clk *c)
{
- return container_of(clk, struct pll_vote_clk, c);
+ return container_of(c, struct pll_vote_clk, c);
}
/**
@@ -105,21 +105,21 @@
extern struct clk_ops clk_ops_local_pll;
-static inline struct pll_clk *to_pll_clk(struct clk *clk)
+static inline struct pll_clk *to_pll_clk(struct clk *c)
{
- return container_of(clk, struct pll_clk, c);
+ return container_of(c, struct pll_clk, c);
}
-int sr_pll_clk_enable(struct clk *clk);
-int copper_pll_clk_enable(struct clk *clk);
+int sr_pll_clk_enable(struct clk *c);
+int copper_pll_clk_enable(struct clk *c);
/*
* PLL vote clock APIs
*/
-int pll_vote_clk_enable(struct clk *clk);
-void pll_vote_clk_disable(struct clk *clk);
-struct clk *pll_vote_clk_get_parent(struct clk *clk);
-int pll_vote_clk_is_enabled(struct clk *clk);
+int pll_vote_clk_enable(struct clk *c);
+void pll_vote_clk_disable(struct clk *c);
+struct clk *pll_vote_clk_get_parent(struct clk *c);
+int pll_vote_clk_is_enabled(struct clk *c);
struct pll_config {
u32 l;