/*
 * Copyright (C) 2012 Freescale Semiconductor, Inc.
 *
 * The OPP code in function cpu0_set_target() is reused from
 * drivers/cpufreq/omap-cpufreq.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/opp.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>

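/*
 * All CPUs share a single clock and (optional) supply regulator, so the
 * resources below live in file-scope variables rather than in per-policy
 * driver data.
 */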
static unsigned int transition_latency;
static unsigned int voltage_tolerance; /* in percentage */

static struct device *cpu_dev;
static struct clk *cpu_clk;
static struct regulator *cpu_reg;
static struct cpufreq_frequency_table *freq_table;

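/* Restrict policy limits to frequencies listed in the OPP-derived table. */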
static int cpu0_verify_speed(struct cpufreq_policy *policy)
{
	return cpufreq_frequency_table_verify(policy, freq_table);
}

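/* Report the current CPU frequency in kHz, as read from the clock tree. */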
static unsigned int cpu0_get_speed(unsigned int cpu)
{
	return clk_get_rate(cpu_clk) / 1000;
}

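/*
 * Switch to the frequency table entry matching @target_freq: look up the
 * required voltage from the OPP library, then scale voltage and clock in
 * the order that keeps the supply within spec (voltage first when going
 * up, clock first when going down).
 */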
static int cpu0_set_target(struct cpufreq_policy *policy,
			   unsigned int target_freq, unsigned int relation)
{
	struct cpufreq_freqs freqs;
	struct opp *opp;
	unsigned long volt = 0, volt_old = 0, tol = 0;
	long freq_Hz, freq_exact;
	unsigned int index;
	int ret;

	ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
					     relation, &index);
	if (ret) {
		pr_err("failed to match target frequency %d: %d\n",
		       target_freq, ret);
		return ret;
	}

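	/*
	 * Ask the clock framework for the closest rate it can actually
	 * provide, and keep that exact value for clk_set_rate() so the
	 * kHz truncation below does not introduce a second rounding step.
	 */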
	freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
	if (freq_Hz < 0)
		freq_Hz = freq_table[index].frequency * 1000;
	freq_exact = freq_Hz;
	freqs.new = freq_Hz / 1000;
	freqs.old = clk_get_rate(cpu_clk) / 1000;

	if (freqs.old == freqs.new)
		return 0;

	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);

	if (!IS_ERR(cpu_reg)) {
		rcu_read_lock();
		opp = opp_find_freq_ceil(cpu_dev, &freq_Hz);
		if (IS_ERR(opp)) {
			rcu_read_unlock();
			pr_err("failed to find OPP for %ld\n", freq_Hz);
			freqs.new = freqs.old;
			ret = PTR_ERR(opp);
			goto post_notify;
		}
		volt = opp_get_voltage(opp);
		rcu_read_unlock();
		tol = volt * voltage_tolerance / 100;
		volt_old = regulator_get_voltage(cpu_reg);
	}

	pr_debug("%u MHz, %ld mV --> %u MHz, %ld mV\n",
		 freqs.old / 1000, volt_old ? volt_old / 1000 : -1,
		 freqs.new / 1000, volt ? volt / 1000 : -1);

	/* scaling up? scale voltage before frequency */
	if (!IS_ERR(cpu_reg) && freqs.new > freqs.old) {
		ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
		if (ret) {
			pr_err("failed to scale voltage up: %d\n", ret);
			freqs.new = freqs.old;
			goto post_notify;
		}
	}

	ret = clk_set_rate(cpu_clk, freq_exact);
	if (ret) {
		pr_err("failed to set clock rate: %d\n", ret);
		if (!IS_ERR(cpu_reg))
			regulator_set_voltage_tol(cpu_reg, volt_old, tol);
		freqs.new = freqs.old;
		goto post_notify;
	}

	/* scaling down? scale voltage after frequency */
	if (!IS_ERR(cpu_reg) && freqs.new < freqs.old) {
		ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
		if (ret) {
			pr_err("failed to scale voltage down: %d\n", ret);
			clk_set_rate(cpu_clk, freqs.old * 1000);
			freqs.new = freqs.old;
		}
	}

post_notify:
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);

	return ret;
}

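/*
 * Per-policy setup: validate the frequency table, advertise the transition
 * latency, and mark every CPU as affected since they all share one clock.
 */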
static int cpu0_cpufreq_init(struct cpufreq_policy *policy)
{
	int ret;

	ret = cpufreq_frequency_table_cpuinfo(policy, freq_table);
	if (ret) {
		pr_err("invalid frequency table: %d\n", ret);
		return ret;
	}

	policy->cpuinfo.transition_latency = transition_latency;
	policy->cur = clk_get_rate(cpu_clk) / 1000;

	/*
	 * The driver only supports the SMP configuration where all processors
	 * share the clock and voltage. Use the cpufreq affected_cpus
	 * interface to have all CPUs scaled together.
	 */
	cpumask_setall(policy->cpus);

	cpufreq_frequency_table_get_attr(freq_table, policy->cpu);

	return 0;
}

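/* Tear down the sysfs frequency table attribute created in ->init(). */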
static int cpu0_cpufreq_exit(struct cpufreq_policy *policy)
{
	cpufreq_frequency_table_put_attr(policy->cpu);

	return 0;
}

static struct freq_attr *cpu0_cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,
};

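/* CPUFREQ_STICKY keeps the driver registered even if all ->init() calls fail. */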
static struct cpufreq_driver cpu0_cpufreq_driver = {
	.flags = CPUFREQ_STICKY,
	.verify = cpu0_verify_speed,
	.target = cpu0_set_target,
	.get = cpu0_get_speed,
	.init = cpu0_cpufreq_init,
	.exit = cpu0_cpufreq_exit,
	.name = "generic_cpu0",
	.attr = cpu0_cpufreq_attr,
};

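/*
 * Bind to the cpu0 device: grab its clock and optional regulator described
 * in the device tree, build the frequency table from the OPP entries, and
 * register the cpufreq driver.
 */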
static int cpu0_cpufreq_probe(struct platform_device *pdev)
{
	struct device_node *np;
	int ret;

	cpu_dev = get_cpu_device(0);
	if (!cpu_dev) {
		pr_err("failed to get cpu0 device\n");
		return -ENODEV;
	}

	np = of_node_get(cpu_dev->of_node);
	if (!np) {
		pr_err("failed to find cpu0 node\n");
		return -ENOENT;
	}

	cpu_reg = devm_regulator_get_optional(cpu_dev, "cpu0");
	if (IS_ERR(cpu_reg)) {
		/*
		 * If the cpu0 regulator supply node is present, but the
		 * regulator is not yet registered, we should try deferring
		 * probe.
		 */
		if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) {
			dev_err(cpu_dev, "cpu0 regulator not ready, retry\n");
			ret = -EPROBE_DEFER;
			goto out_put_node;
		}
		pr_warn("failed to get cpu0 regulator: %ld\n",
			PTR_ERR(cpu_reg));
	}

	cpu_clk = devm_clk_get(cpu_dev, NULL);
	if (IS_ERR(cpu_clk)) {
		ret = PTR_ERR(cpu_clk);
		pr_err("failed to get cpu0 clock: %d\n", ret);
		goto out_put_node;
	}

	ret = of_init_opp_table(cpu_dev);
	if (ret) {
		pr_err("failed to init OPP table: %d\n", ret);
		goto out_put_node;
	}

	ret = opp_init_cpufreq_table(cpu_dev, &freq_table);
	if (ret) {
		pr_err("failed to init cpufreq table: %d\n", ret);
		goto out_put_node;
	}

	of_property_read_u32(np, "voltage-tolerance", &voltage_tolerance);

	if (of_property_read_u32(np, "clock-latency", &transition_latency))
		transition_latency = CPUFREQ_ETERNAL;

	if (!IS_ERR(cpu_reg)) {
		struct opp *opp;
		unsigned long min_uV, max_uV;
		int i;

		/*
		 * OPP is maintained in order of increasing frequency, and
		 * freq_table initialised from OPP is therefore sorted in the
		 * same order.
		 */
		for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
			;
		rcu_read_lock();
		opp = opp_find_freq_exact(cpu_dev,
				freq_table[0].frequency * 1000, true);
		min_uV = opp_get_voltage(opp);
		opp = opp_find_freq_exact(cpu_dev,
				freq_table[i-1].frequency * 1000, true);
		max_uV = opp_get_voltage(opp);
		rcu_read_unlock();
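		/*
		 * regulator_set_voltage_time() reports the worst-case ramp
		 * time between the lowest and highest OPP voltages in
		 * microseconds; add it (converted to nanoseconds) to the
		 * advertised transition latency.
		 */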
		ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
		if (ret > 0)
			transition_latency += ret * 1000;
	}

	ret = cpufreq_register_driver(&cpu0_cpufreq_driver);
	if (ret) {
		pr_err("failed to register driver: %d\n", ret);
		goto out_free_table;
	}

	of_node_put(np);
	return 0;

out_free_table:
	opp_free_cpufreq_table(cpu_dev, &freq_table);
out_put_node:
	of_node_put(np);
	return ret;
}

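/* Undo probe: unregister the cpufreq driver and release the OPP table. */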
static int cpu0_cpufreq_remove(struct platform_device *pdev)
{
	cpufreq_unregister_driver(&cpu0_cpufreq_driver);
	opp_free_cpufreq_table(cpu_dev, &freq_table);

	return 0;
}

static struct platform_driver cpu0_cpufreq_platdrv = {
	.driver = {
		.name = "cpufreq-cpu0",
		.owner = THIS_MODULE,
	},
	.probe = cpu0_cpufreq_probe,
	.remove = cpu0_cpufreq_remove,
};
module_platform_driver(cpu0_cpufreq_platdrv);

MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
MODULE_DESCRIPTION("Generic CPU0 cpufreq driver");
MODULE_LICENSE("GPL");