/*
 * Copyright (c) 2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>

#include <mach/clock-generic.h>
#include "clock-local2.h"

#define UPDATE_CHECK_MAX_LOOPS 200

struct cortex_reg_data {
	u32 cmd_offset;
	u32 update_mask;
	u32 poll_mask;
};

#define DIV_REG(x) ((x)->base + (x)->div_offset)
#define SRC_REG(x) ((x)->base + (x)->src_offset)
#define CMD_REG(x) ((x)->base + \
		((struct cortex_reg_data *)(x)->priv)->cmd_offset)

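/*
 * Latch a newly programmed source/divider: set the update bit in the RCG
 * command register and poll until the hardware clears it.
 */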
static int update_config(struct mux_div_clk *md)
{
	u32 regval, count;
	struct cortex_reg_data *r = md->priv;

	/* Update the configuration */
	regval = readl_relaxed(CMD_REG(md));
	regval |= r->update_mask;
	writel_relaxed(regval, CMD_REG(md));

	/* Wait for update to take effect */
	for (count = UPDATE_CHECK_MAX_LOOPS; count > 0; count--) {
		if (!(readl_relaxed(CMD_REG(md)) &
				r->poll_mask))
			return 0;
		udelay(1);
	}

	CLK_WARN(&md->c, true, "didn't update its configuration.");

	return -EINVAL;
}

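/*
 * Read back the current source select and divider. The divider field holds
 * the half-integer encoding (2 * div - 1), so it is decoded here as
 * (field + 1) / 2, clamped to a minimum of 1.
 */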
static void cortex_get_config(struct mux_div_clk *md, u32 *src_sel, u32 *div)
{
	u32 regval;

	regval = readl_relaxed(DIV_REG(md));
	regval &= (md->div_mask << md->div_shift);
	*div = regval >> md->div_shift;
	*div = max((u32)1, (*div + 1) / 2);

	regval = readl_relaxed(SRC_REG(md));
	regval &= (md->src_mask << md->src_shift);
	*src_sel = regval >> md->src_shift;
}

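/*
 * Program a new source select and divider. The divider is written in the
 * half-integer encoding (2 * div - 1); update_config() then latches the
 * new settings.
 */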
static int cortex_set_config(struct mux_div_clk *md, u32 src_sel, u32 div)
{
	u32 regval;

	div = div ? ((2 * div) - 1) : 0;
	regval = readl_relaxed(DIV_REG(md));
	regval &= ~(md->div_mask << md->div_shift);
	regval |= div << md->div_shift;
	writel_relaxed(regval, DIV_REG(md));

	regval = readl_relaxed(SRC_REG(md));
	regval &= ~(md->src_mask << md->src_shift);
	regval |= src_sel << md->src_shift;
	writel_relaxed(regval, SRC_REG(md));

	return update_config(md);
}

static int cortex_enable(struct mux_div_clk *md)
{
	return cortex_set_config(md, md->src_sel, md->data.div);
}

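/*
 * "Disabling" the CPU clock parks the mux on the safe parent and safe
 * divider rather than gating it.
 */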
static void cortex_disable(struct mux_div_clk *md)
{
	u32 src_sel = parent_to_src_sel(md->parents, md->num_parents,
						md->safe_parent);
	cortex_set_config(md, src_sel, md->safe_div);
}

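/* The CPU clock is always running; report it as enabled. */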
static bool cortex_is_enabled(struct mux_div_clk *md)
{
	return true;
}

struct mux_div_ops cortex_mux_div_ops = {
	.set_src_div = cortex_set_config,
	.get_src_div = cortex_get_config,
	.is_enabled = cortex_is_enabled,
	.enable = cortex_enable,
	.disable = cortex_disable,
};

static struct cortex_reg_data a7ssmux_priv = {
	.cmd_offset = 0x0,
	.update_mask = BIT(0),
	.poll_mask = BIT(0),
};

DEFINE_VDD_REGS_INIT(vdd_cpu, 1);

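/*
 * A7 subsystem CPU mux/divider: the divider lives in bits [4:0] and the
 * source select in bits [10:8] of the register at offset 0x4 from the RCG
 * base; the update/command register sits at offset 0x0.
 */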
static struct mux_div_clk a7ssmux = {
	.ops = &cortex_mux_div_ops,
	.safe_freq = 300000000,
	.data = {
		.max_div = 8,
		.min_div = 1,
	},
	.c = {
		.dbg_name = "a7ssmux",
		.ops = &clk_ops_mux_div_clk,
		.vdd_class = &vdd_cpu,
		CLK_INIT(a7ssmux.c),
	},
	.parents = (struct clk_src[8]) {},
	.priv = &a7ssmux_priv,
	.div_offset = 0x4,
	.div_mask = BM(4, 0),
	.div_shift = 0,
	.src_offset = 0x4,
	.src_mask = BM(10, 8) >> 8,
	.src_shift = 8,
};

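/* Expose the CPU clock to the cpufreq and pm-8x60 drivers. */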
static struct clk_lookup clock_tbl_a7[] = {
	CLK_LOOKUP("cpu0_clk", a7ssmux.c, "0.qcom,msm-cpufreq"),
	CLK_LOOKUP("cpu0_clk", a7ssmux.c, "fe805664.qcom,pm-8x60"),
};

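/*
 * Parse a frequency/voltage plan from the DT. The property is a flat array
 * of <frequency-hz voltage-uv> pairs; for example (values below are purely
 * illustrative, not taken from a real board file):
 *
 *	qcom,speed0-bin-v0 =
 *		<  300000000  1050000 >,
 *		<  787200000  1150000 >;
 *
 * Frequencies land in c->fmax[] and voltages in vdd->vdd_uv[].
 */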
static int of_get_fmax_vdd_class(struct platform_device *pdev, struct clk *c,
					char *prop_name)
{
	struct device_node *of = pdev->dev.of_node;
	int prop_len, i;
	struct clk_vdd_class *vdd = c->vdd_class;
	u32 *array;

	if (!of_find_property(of, prop_name, &prop_len)) {
		dev_err(&pdev->dev, "missing %s\n", prop_name);
		return -EINVAL;
	}

	prop_len /= sizeof(u32);
	if (prop_len % 2) {
		dev_err(&pdev->dev, "bad length %d\n", prop_len);
		return -EINVAL;
	}

	prop_len /= 2;
	vdd->level_votes = devm_kzalloc(&pdev->dev, prop_len * sizeof(int),
					GFP_KERNEL);
	if (!vdd->level_votes)
		return -ENOMEM;

	vdd->vdd_uv = devm_kzalloc(&pdev->dev, prop_len * sizeof(int),
					GFP_KERNEL);
	if (!vdd->vdd_uv)
		return -ENOMEM;

	c->fmax = devm_kzalloc(&pdev->dev, prop_len * sizeof(unsigned long),
					GFP_KERNEL);
	if (!c->fmax)
		return -ENOMEM;

	array = devm_kzalloc(&pdev->dev,
			prop_len * sizeof(u32) * 2, GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	of_property_read_u32_array(of, prop_name, array, prop_len * 2);
	for (i = 0; i < prop_len; i++) {
		c->fmax[i] = array[2 * i];
		vdd->vdd_uv[i] = array[2 * i + 1];
	}

	devm_kfree(&pdev->dev, array);
	vdd->num_levels = prop_len;
	vdd->cur_level = prop_len;
	c->num_fmax = prop_len;
	return 0;
}

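/*
 * Read the speed bin and PVS version from the efuse: bin in bits [2:0]
 * (or bits [29:27] when the redundant selector in bits [26:24] equals 1),
 * a valid flag in bit 3 and the PVS version in bits [5:4]. Both outputs
 * default to 0 when the fuse is absent or unreadable.
 */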
static void get_speed_bin(struct platform_device *pdev, int *bin, int *version)
{
	struct resource *res;
	void __iomem *base;
	u32 pte_efuse, redundant_sel, valid;

	*bin = 0;
	*version = 0;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "efuse");
	if (!res) {
		dev_info(&pdev->dev,
			"No speed/PVS binning available. Defaulting to 0!\n");
		return;
	}

	base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!base) {
		dev_warn(&pdev->dev,
			"Unable to read efuse data. Defaulting to 0!\n");
		return;
	}

	pte_efuse = readl_relaxed(base);
	devm_iounmap(&pdev->dev, base);

	redundant_sel = (pte_efuse >> 24) & 0x7;
	*bin = pte_efuse & 0x7;
	valid = (pte_efuse >> 3) & 0x1;
	*version = (pte_efuse >> 4) & 0x3;

	if (redundant_sel == 1)
		*bin = (pte_efuse >> 27) & 0x7;

	if (!valid) {
		dev_info(&pdev->dev, "Speed bin not set. Defaulting to 0!\n");
		*bin = 0;
	} else {
		dev_info(&pdev->dev, "Speed bin: %d\n", *bin);
	}

	dev_info(&pdev->dev, "PVS version: %d\n", *version);

	return;
}

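/*
 * Resolve the parent clocks named "clk-0" ... "clk-7" in clock-names. The
 * numeric suffix doubles as the mux source-select value for that parent.
 * Returns the number of entries in clock-names, or a negative errno.
 */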
static int of_get_clk_src(struct platform_device *pdev, struct clk_src *parents)
{
	struct device_node *of = pdev->dev.of_node;
	int num_parents, i, j, index;
	struct clk *c;
	char clk_name[] = "clk-x";

	num_parents = of_property_count_strings(of, "clock-names");
	if (num_parents <= 0 || num_parents > 8) {
		dev_err(&pdev->dev, "missing clock-names\n");
		return -EINVAL;
	}

	j = 0;
	for (i = 0; i < 8; i++) {
		snprintf(clk_name, ARRAY_SIZE(clk_name), "clk-%d", i);
		index = of_property_match_string(of, "clock-names", clk_name);
		if (IS_ERR_VALUE(index))
			continue;

		parents[j].sel = i;
		parents[j].src = c = devm_clk_get(&pdev->dev, clk_name);
		if (IS_ERR(c)) {
			if (c != ERR_PTR(-EPROBE_DEFER))
				dev_err(&pdev->dev, "clk_get: %s fail\n",
						clk_name);
			return PTR_ERR(c);
		}
		j++;
	}

	return num_parents;
}

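/*
 * Sketch of a device tree node this driver could bind against. The property
 * and resource names ("rcg-base", "efuse", "cpu-vdd", "clk-N",
 * "qcom,speedX-bin-vX") come from the code in this file; the addresses,
 * phandles and table values below are made up for illustration only.
 *
 *	clock-controller@f9011050 {
 *		compatible = "qcom,clock-a7-8226";
 *		reg = <0xf9011050 0x8>, <0xfc4b80b0 0x4>;
 *		reg-names = "rcg-base", "efuse";
 *		cpu-vdd-supply = <&apc_vreg>;
 *		clocks = <&aux_clk_src>, <&cpu_pll>;
 *		clock-names = "clk-0", "clk-1";
 *		qcom,speed0-bin-v0 =
 *			<  300000000  1050000 >,
 *			<  787200000  1150000 >;
 *	};
 */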
static int clock_a7_probe(struct platform_device *pdev)
{
	struct resource *res;
	int speed_bin = 0, version = 0, rc;
	unsigned long rate, aux_rate;
	struct clk *aux_clk, *main_pll;
	char prop_name[] = "qcom,speedX-bin-vX";

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rcg-base");
	if (!res) {
		dev_err(&pdev->dev, "missing rcg-base\n");
		return -EINVAL;
	}
	a7ssmux.base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!a7ssmux.base) {
		dev_err(&pdev->dev, "ioremap failed for rcg-base\n");
		return -ENOMEM;
	}

	vdd_cpu.regulator[0] = devm_regulator_get(&pdev->dev, "cpu-vdd");
	if (IS_ERR(vdd_cpu.regulator[0])) {
		if (PTR_ERR(vdd_cpu.regulator[0]) != -EPROBE_DEFER)
			dev_err(&pdev->dev, "unable to get regulator\n");
		return PTR_ERR(vdd_cpu.regulator[0]);
	}

	a7ssmux.num_parents = of_get_clk_src(pdev, a7ssmux.parents);
	if (IS_ERR_VALUE(a7ssmux.num_parents))
		return a7ssmux.num_parents;

	get_speed_bin(pdev, &speed_bin, &version);

	snprintf(prop_name, ARRAY_SIZE(prop_name),
			"qcom,speed%d-bin-v%d", speed_bin, version);
	rc = of_get_fmax_vdd_class(pdev, &a7ssmux.c, prop_name);
	if (rc) {
		/* Fall back to most conservative PVS table */
		dev_err(&pdev->dev, "Unable to load voltage plan %s!\n",
			prop_name);
		rc = of_get_fmax_vdd_class(pdev, &a7ssmux.c,
						"qcom,speed0-bin-v0");
		if (rc) {
			dev_err(&pdev->dev,
				"Unable to load safe voltage plan\n");
			return rc;
		}
		dev_info(&pdev->dev, "Safe voltage plan loaded.\n");
	}

	rc = msm_clock_register(clock_tbl_a7, ARRAY_SIZE(clock_tbl_a7));
	if (rc) {
		dev_err(&pdev->dev, "msm_clock_register failed\n");
		return rc;
	}

	/* Force a PLL reconfiguration */
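	/*
	 * Run the CPU at the aux clock's rate while the main PLL is forced
	 * to reconfigure at its lowest round_rate() value, then restore the
	 * original CPU rate.
	 */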
	aux_clk = a7ssmux.parents[0].src;
	main_pll = a7ssmux.parents[1].src;

	aux_rate = clk_get_rate(aux_clk);
	rate = clk_get_rate(&a7ssmux.c);
	clk_set_rate(&a7ssmux.c, aux_rate);
	clk_set_rate(main_pll, clk_round_rate(main_pll, 1));
	clk_set_rate(&a7ssmux.c, rate);

	/*
	 * We don't want the CPU clocks to be turned off at late init
	 * if CPUFREQ or HOTPLUG configs are disabled. So, bump up the
	 * refcount of these clocks. Any cpufreq/hotplug manager can assume
	 * that the clocks have already been prepared and enabled by the time
	 * they take over.
	 */
	WARN(clk_prepare_enable(&a7ssmux.c),
		"Unable to turn on CPU clock");
	return 0;
}

static struct of_device_id clock_a7_match_table[] = {
	{.compatible = "qcom,clock-a7-8226"},
	{}
};

static struct platform_driver clock_a7_driver = {
	.driver = {
		.name = "clock-a7",
		.of_match_table = clock_a7_match_table,
		.owner = THIS_MODULE,
	},
};

static int __init clock_a7_init(void)
{
	return platform_driver_probe(&clock_a7_driver, clock_a7_probe);
}
device_initcall(clock_a7_init);