blob: a7b6ac6e048e9b3a52f684dc1ee086f4cd1c8300 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
Viresh Kumarbb176f72013-06-19 14:19:33 +05306 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 *
Ashok Rajc32b6b82005-10-30 14:59:54 -08008 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
Dave Jones32ee8c32006-02-28 00:43:23 -05009 * Added handling for CPU hotplug
Dave Jones8ff69732006-03-05 03:37:23 -050010 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11 * Fix handling for CPU hotplug -- affected CPUs
Ashok Rajc32b6b82005-10-30 14:59:54 -080012 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070013 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
Linus Torvalds1da177e2005-04-16 15:20:36 -070016 */
17
Viresh Kumardb701152012-10-23 01:29:03 +020018#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
Viresh Kumar5ff0a262013-08-06 22:53:03 +053020#include <linux/cpu.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070021#include <linux/cpufreq.h>
22#include <linux/delay.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070023#include <linux/device.h>
Viresh Kumar5ff0a262013-08-06 22:53:03 +053024#include <linux/init.h>
25#include <linux/kernel_stat.h>
26#include <linux/module.h>
akpm@osdl.org3fc54d32006-01-13 15:54:22 -080027#include <linux/mutex.h>
Viresh Kumar5ff0a262013-08-06 22:53:03 +053028#include <linux/slab.h>
Viresh Kumar2f0aea92014-03-04 11:00:26 +080029#include <linux/suspend.h>
Doug Anderson90de2a42014-12-23 22:09:48 -080030#include <linux/syscore_ops.h>
Viresh Kumar5ff0a262013-08-06 22:53:03 +053031#include <linux/tick.h>
Thomas Renninger6f4f2722010-04-20 13:17:36 +020032#include <trace/events/power.h>
33
/* List of all cpufreq policies known to the core */
static LIST_HEAD(cpufreq_policy_list);

/*
 * A policy is "inactive" when its mask of managed online CPUs is empty
 * (i.e. every CPU it covered has gone offline).
 */
static inline bool policy_is_inactive(struct cpufreq_policy *policy)
{
	return cpumask_empty(policy->cpus);
}
40
41static bool suitable_policy(struct cpufreq_policy *policy, bool active)
42{
43 return active == !policy_is_inactive(policy);
44}
45
/* Finds Next Active/Inactive policy */
static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
					  bool active)
{
	do {
		policy = list_next_entry(policy, policy_list);

		/* No more policies in the list (hit the list head again) */
		if (&policy->policy_list == &cpufreq_policy_list)
			return NULL;
	} while (!suitable_policy(policy, active));

	return policy;
}
60
/*
 * Return the first policy matching the @active filter, or NULL if the
 * list is empty or contains no matching entry.
 */
static struct cpufreq_policy *first_policy(bool active)
{
	struct cpufreq_policy *policy;

	/* No policies in the list */
	if (list_empty(&cpufreq_policy_list))
		return NULL;

	policy = list_first_entry(&cpufreq_policy_list, typeof(*policy),
				  policy_list);

	/* First entry may not match; advance to the next suitable one. */
	if (!suitable_policy(policy, active))
		policy = next_policy(policy, active);

	return policy;
}
77
/* Macros to iterate over CPU policies */
#define for_each_suitable_policy(__policy, __active)	\
	for (__policy = first_policy(__active);		\
	     __policy;					\
	     __policy = next_policy(__policy, __active))

/* Iterate over policies with at least one online CPU */
#define for_each_active_policy(__policy)		\
	for_each_suitable_policy(__policy, true)
/* Iterate over policies whose CPUs are all offline */
#define for_each_inactive_policy(__policy)		\
	for_each_suitable_policy(__policy, false)

/* Iterate over every policy, regardless of active state */
#define for_each_policy(__policy)			\
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)

/* Iterate over governors */
static LIST_HEAD(cpufreq_governor_list);
#define for_each_governor(__governor)				\
	list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
96
/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
/* Per-CPU pointer to the policy managing that CPU */
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
/* Protects cpufreq_driver and cpufreq_cpu_data (see comment above) */
static DEFINE_RWLOCK(cpufreq_driver_lock);
/* Not static: shared with governor code */
DEFINE_MUTEX(cpufreq_governor_lock);

/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +0530110static inline bool has_target(void)
111{
112 return cpufreq_driver->target_index || cpufreq_driver->target;
113}
114
/*
 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
 * sections
 */
static DECLARE_RWSEM(cpufreq_rwsem);

/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
		unsigned int event);
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static void handle_update(struct work_struct *work);
/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

/* Records that the SRCU head above has been initialized */
static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
	/* SRCU heads need runtime init; a pure_initcall runs this early. */
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700145
/* Non-zero once cpufreq has been disabled via disable_cpufreq() */
static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
/* Permanently disable cpufreq (no re-enable path here). */
void disable_cpufreq(void)
{
	off = 1;
}
/* Serializes registration/lookup on cpufreq_governor_list */
static DEFINE_MUTEX(cpufreq_governor_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700156
Viresh Kumar4d5dcc42013-03-27 15:58:58 +0000157bool have_governor_per_policy(void)
158{
Viresh Kumar0b981e72013-10-02 14:13:18 +0530159 return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
Viresh Kumar4d5dcc42013-03-27 15:58:58 +0000160}
Viresh Kumar3f869d62013-05-16 05:09:56 +0000161EXPORT_SYMBOL_GPL(have_governor_per_policy);
Viresh Kumar4d5dcc42013-03-27 15:58:58 +0000162
Viresh Kumar944e9a02013-05-16 05:09:57 +0000163struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
164{
165 if (have_governor_per_policy())
166 return &policy->kobj;
167 else
168 return cpufreq_global_kobject;
169}
170EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
171
Viresh Kumar5a31d592015-07-10 01:43:27 +0200172struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
173{
174 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
175
176 return policy && !policy_is_inactive(policy) ?
177 policy->freq_table : NULL;
178}
179EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
180
/*
 * Compute idle time for @cpu from jiffies-based kcpustat accounting:
 * idle = wall time - sum of all busy categories.  Returns microseconds;
 * optionally stores the wall time (also in usecs) through @wall.
 */
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	/* Everything that is not one of these categories counts as idle. */
	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = cputime_to_usecs(cur_wall_time);

	return cputime_to_usecs(idle_time);
}
202
203u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
204{
205 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
206
207 if (idle_time == -1ULL)
208 return get_cpu_idle_time_jiffy(cpu, wall);
209 else if (!io_busy)
210 idle_time += get_cpu_iowait_time_us(cpu, wall);
211
212 return idle_time;
213}
214EXPORT_SYMBOL_GPL(get_cpu_idle_time);
215
/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It will do following:
 * - validate & show freq table passed
 * - set policies transition latency
 * - policy->cpus with all possible CPUs
 */
int cpufreq_generic_init(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table,
		unsigned int transition_latency)
{
	int ret;

	/* Reject drivers that supply a malformed frequency table. */
	ret = cpufreq_table_validate_and_show(policy, table);
	if (ret) {
		pr_err("%s: invalid frequency table: %d\n", __func__, ret);
		return ret;
	}

	policy->cpuinfo.transition_latency = transition_latency;

	/*
	 * The driver only supports the SMP configuration where all processors
	 * share the clock and voltage.
	 */
	cpumask_setall(policy->cpus);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
246
/* Only for cpufreq core internal use */
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	/* Return the policy only if @cpu is currently covered by it. */
	return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
}
254
255unsigned int cpufreq_generic_get(unsigned int cpu)
256{
257 struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
258
Viresh Kumar652ed952014-01-09 20:38:43 +0530259 if (!policy || IS_ERR(policy->clk)) {
Joe Perchese837f9b2014-03-11 10:03:00 -0700260 pr_err("%s: No %s associated to cpu: %d\n",
261 __func__, policy ? "clk" : "policy", cpu);
Viresh Kumar652ed952014-01-09 20:38:43 +0530262 return 0;
263 }
264
265 return clk_get_rate(policy->clk) / 1000;
266}
267EXPORT_SYMBOL_GPL(cpufreq_generic_get);
268
/**
 * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
 *
 * @cpu: cpu to find policy for.
 *
 * This returns policy for 'cpu', returns NULL if it doesn't exist.
 * It also increments the kobject reference count to mark it busy and so would
 * require a corresponding call to cpufreq_cpu_put() to decrement it back.
 * If corresponding call cpufreq_cpu_put() isn't made, the policy wouldn't be
 * freed as that depends on the kobj count.
 *
 * It also takes a read-lock of 'cpufreq_rwsem' and doesn't put it back if a
 * valid policy is found. This is done to make sure the driver doesn't get
 * unregistered while the policy is being used.
 *
 * Return: A valid policy on success, otherwise NULL on failure.
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = NULL;
	unsigned long flags;

	if (WARN_ON(cpu >= nr_cpu_ids))
		return NULL;

	/* Pin the driver module; bail out if it is being torn down. */
	if (!down_read_trylock(&cpufreq_rwsem))
		return NULL;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver) {
		/* get the CPU */
		policy = cpufreq_cpu_get_raw(cpu);
		if (policy)
			kobject_get(&policy->kobj);
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/* No policy found: release the rwsem taken above. */
	if (!policy)
		up_read(&cpufreq_rwsem);

	return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
315
/**
 * cpufreq_cpu_put: Decrements the usage count of a policy
 *
 * @policy: policy earlier returned by cpufreq_cpu_get().
 *
 * This decrements the kobject reference count incremented earlier by calling
 * cpufreq_cpu_get().
 *
 * It also drops the read-lock of 'cpufreq_rwsem' taken at cpufreq_cpu_get().
 */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
	/* Must pair exactly with a successful cpufreq_cpu_get(). */
	kobject_put(&policy->kobj);
	up_read(&cpufreq_rwsem);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
332
/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
#ifndef CONFIG_SMP
	/* Reference values captured on the first transition (UP only). */
	static unsigned long l_p_j_ref;
	static unsigned int l_p_j_ref_freq;

	/* Drivers with constant loops-per-jiffy need no adjustment. */
	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
			 l_p_j_ref, l_p_j_ref_freq);
	}
	if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
			 loops_per_jiffy, ci->new);
	}
#endif
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700368
/*
 * Notify one stage (PRECHANGE/POSTCHANGE) of a frequency transition for a
 * single CPU through the SRCU transition notifier chain, and keep
 * loops_per_jiffy and policy->cur in sync with the change.
 */
static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		 state, freqs->new);

	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
					 freqs->old, policy->cur);
				/* Trust the core's view over the driver's. */
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %lu - CPU: %lu\n",
			 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		/* Record the new frequency once the change is complete. */
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}
Viresh Kumarbb176f72013-06-19 14:19:33 +0530413
/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	/*
	 * freqs->cpu doubles as the iteration cursor, so each CPU in the
	 * policy receives a notification carrying its own cpu id.
	 */
	for_each_cpu(freqs->cpu, policy->cpus)
		__cpufreq_notify_transition(policy, freqs, state);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700428
Viresh Kumarf7ba3b42013-12-02 11:04:12 +0530429/* Do post notifications when there are chances that transition has failed */
Viresh Kumar236a9802014-03-24 13:35:46 +0530430static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
Viresh Kumarf7ba3b42013-12-02 11:04:12 +0530431 struct cpufreq_freqs *freqs, int transition_failed)
432{
433 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
434 if (!transition_failed)
435 return;
436
437 swap(freqs->old, freqs->new);
438 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
439 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
440}
Viresh Kumarf7ba3b42013-12-02 11:04:12 +0530441
/*
 * Mark the start of a frequency transition on @policy and send the
 * PRECHANGE notification.  Blocks until any transition already in flight
 * on this policy has completed.
 */
void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs)
{

	/*
	 * Catch double invocations of _begin() which lead to self-deadlock.
	 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
	 * doesn't invoke _begin() on their behalf, and hence the chances of
	 * double invocations are very low. Moreover, there are scenarios
	 * where these checks can emit false-positive warnings in these
	 * drivers; so we avoid that by skipping them altogether.
	 */
	WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
				&& current == policy->transition_task);

wait:
	wait_event(policy->transition_wait, !policy->transition_ongoing);

	spin_lock(&policy->transition_lock);

	/* Re-check under the lock: another task may have raced us here. */
	if (unlikely(policy->transition_ongoing)) {
		spin_unlock(&policy->transition_lock);
		goto wait;
	}

	policy->transition_ongoing = true;
	policy->transition_task = current;

	spin_unlock(&policy->transition_lock);

	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
475
/*
 * Mark the end of a frequency transition started with
 * cpufreq_freq_transition_begin() and send the POSTCHANGE notification(s);
 * on failure the notified frequencies are rolled back first.
 */
void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	/* _end() without a matching _begin() is a caller bug. */
	if (unlikely(WARN_ON(!policy->transition_ongoing)))
		return;

	cpufreq_notify_post_transition(policy, freqs, transition_failed);

	policy->transition_ongoing = false;
	policy->transition_task = NULL;

	/* Allow the next waiter in cpufreq_freq_transition_begin() to run. */
	wake_up(&policy->transition_wait);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
490
Linus Torvalds1da177e2005-04-16 15:20:36 -0700491
Linus Torvalds1da177e2005-04-16 15:20:36 -0700492/*********************************************************************
493 * SYSFS INTERFACE *
494 *********************************************************************/
/* sysfs read: report the driver's global boost state as "0" or "1". */
static ssize_t show_boost(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}
500
/* sysfs write: accept "0"/"1" and toggle the global boost state. */
static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	int ret, enable;

	/* Only 0 and 1 are valid inputs. */
	ret = sscanf(buf, "%d", &enable);
	if (ret != 1 || enable < 0 || enable > 1)
		return -EINVAL;

	if (cpufreq_boost_trigger_state(enable)) {
		pr_err("%s: Cannot %s BOOST!\n",
		       __func__, enable ? "enable" : "disable");
		return -EINVAL;
	}

	pr_debug("%s: cpufreq BOOST %s\n",
		 __func__, enable ? "enabled" : "disabled");

	return count;
}
define_one_global_rw(boost);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700522
/*
 * Case-insensitive lookup of a registered governor by name.
 * Returns NULL when no governor with that name is registered.
 */
static struct cpufreq_governor *find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	for_each_governor(t)
		if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}
533
/**
 * cpufreq_parse_governor - parse a governor string
 *
 * For setpolicy drivers only "performance"/"powersave" are accepted and
 * returned via @policy; otherwise the string names a governor, looked up
 * (and module-loaded on demand) and returned via @governor.
 * Returns 0 on success, -EINVAL otherwise.
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (!cpufreq_driver)
		goto out;

	if (cpufreq_driver->setpolicy) {
		if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strncasecmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = find_governor(str_governor);

		if (t == NULL) {
			int ret;

			/*
			 * Not registered yet: drop the mutex while trying to
			 * load "cpufreq_<name>", then re-check.
			 */
			mutex_unlock(&cpufreq_governor_mutex);
			ret = request_module("cpufreq_%s", str_governor);
			mutex_lock(&cpufreq_governor_mutex);

			if (ret == 0)
				t = find_governor(str_governor);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700582
/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

/* Generates a sysfs show handler printing one unsigned policy field. */
#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
Dirk Brandewiec034b022014-10-13 08:37:40 -0700603
/*
 * sysfs read of scaling_cur_freq: setpolicy drivers with a ->get()
 * callback report the hardware frequency, everyone else reports the
 * cached policy->cur.
 */
static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
	ssize_t ret;

	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
		ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
	else
		ret = sprintf(buf, "%u\n", policy->cur);
	return ret;
}

/* Forward declaration: defined later in this file. */
static int cpufreq_set_policy(struct cpufreq_policy *policy,
				struct cpufreq_policy *new_policy);
Thomas Renninger7970e082006-04-13 15:14:04 +0200617
/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
/* Generates a sysfs store handler updating one unsigned policy field. */
#define store_one(file_name, object)			\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	int ret, temp;							\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	/* Remember the request; user_policy is updated only on success. */ \
	temp = new_policy.object;					\
	ret = cpufreq_set_policy(policy, &new_policy);		\
	if (!ret)							\
		policy->user_policy.object = temp;			\
									\
	return ret ? ret : count;					\
}
\
store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700646
/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
					char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy);
	if (!cur_freq)
		/* NOTE(review): no trailing newline here, unlike the other
		 * attributes — confirm whether that is intentional. */
		return sprintf(buf, "<unknown>");
	return sprintf(buf, "%u\n", cur_freq);
}
658
Linus Torvalds1da177e2005-04-16 15:20:36 -0700659/**
660 * show_scaling_governor - show the current policy for the specified CPU
661 */
Dave Jones905d77c2008-03-05 14:28:32 -0500662static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700663{
Dave Jones29464f22009-01-18 01:37:11 -0500664 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700665 return sprintf(buf, "powersave\n");
666 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
667 return sprintf(buf, "performance\n");
668 else if (policy->governor)
viresh kumar4b972f02012-10-23 01:23:43 +0200669 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
Dave Jones29464f22009-01-18 01:37:11 -0500670 policy->governor->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700671 return -EINVAL;
672}
673
/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	int ret;
	char str_governor[16];
	struct cpufreq_policy new_policy;

	/* Start from the current settings so unrelated fields carry over */
	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return ret;

	/* Width-limited scan: str_governor holds at most 15 chars + NUL */
	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	ret = cpufreq_set_policy(policy, &new_policy);

	/*
	 * Resync user_policy with whatever is actually in effect now,
	 * even if cpufreq_set_policy() failed part-way.
	 */
	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret)
		return ret;
	else
		return count;
}
706
/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	/* scnprintf bounds the copy to CPUFREQ_NAME_PLEN */
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}
714
/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	/*
	 * Without a target-style driver only the two static policies
	 * ("performance" and "powersave") are selectable.
	 */
	if (!has_target()) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	for_each_governor(t) {
		/* Stop early if one more name + separator could overflow the page */
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}
Darrick J. Wonge8628dd2008-04-18 13:31:12 -0700739
Lan Tianyuf4fd3792013-06-27 15:08:54 +0800740ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700741{
742 ssize_t i = 0;
743 unsigned int cpu;
744
Rusty Russell835481d2009-01-04 05:18:06 -0800745 for_each_cpu(cpu, mask) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700746 if (i)
747 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
748 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
749 if (i >= (PAGE_SIZE - 5))
Dave Jones29464f22009-01-18 01:37:11 -0500750 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700751 }
752 i += sprintf(&buf[i], "\n");
753 return i;
754}
Lan Tianyuf4fd3792013-06-27 15:08:54 +0800755EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700756
/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	/* related_cpus includes offline/hotplugged siblings as well */
	return cpufreq_show_cpus(policy->related_cpus, buf);
}
765
/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	/* policy->cpus holds only the currently-online members */
	return cpufreq_show_cpus(policy->cpus, buf);
}
773
Venki Pallipadi9e769882007-10-26 10:18:21 -0700774static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
Dave Jones905d77c2008-03-05 14:28:32 -0500775 const char *buf, size_t count)
Venki Pallipadi9e769882007-10-26 10:18:21 -0700776{
777 unsigned int freq = 0;
778 unsigned int ret;
779
CHIKAMA masaki879000f2008-06-05 22:46:33 -0700780 if (!policy->governor || !policy->governor->store_setspeed)
Venki Pallipadi9e769882007-10-26 10:18:21 -0700781 return -EINVAL;
782
783 ret = sscanf(buf, "%u", &freq);
784 if (ret != 1)
785 return -EINVAL;
786
787 policy->governor->store_setspeed(policy, freq);
788
789 return count;
790}
791
/* Show the governor-specific setspeed value, when the governor supports it */
static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700799
Thomas Renningere2f74f32009-11-19 12:31:01 +0100800/**
viresh kumar8bf1ac722012-10-23 01:23:33 +0200801 * show_bios_limit - show the current cpufreq HW/BIOS limitation
Thomas Renningere2f74f32009-11-19 12:31:01 +0100802 */
803static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
804{
805 unsigned int limit;
806 int ret;
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200807 if (cpufreq_driver->bios_limit) {
808 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
Thomas Renningere2f74f32009-11-19 12:31:01 +0100809 if (!ret)
810 return sprintf(buf, "%u\n", limit);
811 }
812 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
813}
814
/*
 * sysfs attribute plumbing: each _ro/_rw macro binds an attribute to its
 * show_<name>()/store_<name>() handler above.  cpuinfo_cur_freq is 0400
 * (root-readable only).
 */
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700829
/* Attributes created in every policy's sysfs directory (via ktype_cpufreq) */
static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};
844
/* Map an embedded kobject/attribute back to its containing structure */
#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700847
/* sysfs ->show dispatcher: routes reads to the matching freq_attr handler */
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret;

	/* trylock: fail instead of sleeping if the driver is going away */
	if (!down_read_trylock(&cpufreq_rwsem))
		return -EINVAL;

	/* Readers share the per-policy rwsem */
	down_read(&policy->rwsem);

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	/* Release in reverse acquisition order */
	up_read(&policy->rwsem);
	up_read(&cpufreq_rwsem);

	return ret;
}
869
/* sysfs ->store dispatcher: routes writes to the matching freq_attr handler */
static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;

	/* Block CPU hotplug so policy->cpu's online state is stable below */
	get_online_cpus();

	if (!cpu_online(policy->cpu))
		goto unlock;

	/* trylock: fail instead of sleeping if the driver is going away */
	if (!down_read_trylock(&cpufreq_rwsem))
		goto unlock;

	/* Writers take the per-policy rwsem exclusively */
	down_write(&policy->rwsem);

	/* Updating inactive policies is invalid, so avoid doing that. */
	if (unlikely(policy_is_inactive(policy))) {
		ret = -EBUSY;
		goto unlock_policy_rwsem;
	}

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

unlock_policy_rwsem:
	up_write(&policy->rwsem);

	up_read(&cpufreq_rwsem);
unlock:
	put_online_cpus();

	return ret;
}
907
/* kobject release: wakes whoever waits on kobj_unregister (policy teardown) */
static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
	.show = show,
	.store = store,
};

/* kobject type backing each per-policy "cpufreq" sysfs directory */
static struct kobj_type ktype_cpufreq = {
	.sysfs_ops = &sysfs_ops,
	.default_attrs = default_attrs,
	.release = cpufreq_sysfs_release,
};
925
/* Global "cpufreq" kobject under the cpu subsystem; added/removed lazily */
struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

/* Usage count driving lazy add/del of the global kobject */
static int cpufreq_global_kobject_usage;
930
/*
 * Take a usage reference on the global kobject, registering it in sysfs on
 * first use.  Must be balanced with cpufreq_put_global_kobject().
 */
int cpufreq_get_global_kobject(void)
{
	if (!cpufreq_global_kobject_usage++)
		return kobject_add(cpufreq_global_kobject,
				&cpu_subsys.dev_root->kobj, "%s", "cpufreq");

	return 0;
}
EXPORT_SYMBOL(cpufreq_get_global_kobject);
940
/* Drop one usage reference; removes the global kobject when it hits zero */
void cpufreq_put_global_kobject(void)
{
	if (!--cpufreq_global_kobject_usage)
		kobject_del(cpufreq_global_kobject);
}
EXPORT_SYMBOL(cpufreq_put_global_kobject);
947
948int cpufreq_sysfs_create_file(const struct attribute *attr)
949{
950 int ret = cpufreq_get_global_kobject();
951
952 if (!ret) {
953 ret = sysfs_create_file(cpufreq_global_kobject, attr);
954 if (ret)
955 cpufreq_put_global_kobject();
956 }
957
958 return ret;
959}
960EXPORT_SYMBOL(cpufreq_sysfs_create_file);
961
/* Remove @attr and drop the usage reference taken at file creation */
void cpufreq_sysfs_remove_file(const struct attribute *attr)
{
	sysfs_remove_file(cpufreq_global_kobject, attr);
	cpufreq_put_global_kobject();
}
EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
968
/* Create the cpuN/cpufreq symlink pointing at @policy's kobject */
static int add_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
{
	struct device *cpu_dev;

	pr_debug("%s: Adding symlink for CPU: %u\n", __func__, cpu);

	/* No policy yet for this CPU: nothing to link against */
	if (!policy)
		return 0;

	cpu_dev = get_cpu_device(cpu);
	if (WARN_ON(!cpu_dev))
		return 0;

	return sysfs_create_link(&cpu_dev->kobj, &policy->kobj, "cpufreq");
}
984
/* Remove the cpuN/cpufreq symlink created by add_cpu_dev_symlink() */
static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
{
	struct device *cpu_dev;

	pr_debug("%s: Removing symlink for CPU: %u\n", __func__, cpu);

	cpu_dev = get_cpu_device(cpu);
	if (WARN_ON(!cpu_dev))
		return;

	sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
}
997
/* Add/remove symlinks for all related CPUs */
static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	/* Some related CPUs might not be present (physically hotplugged) */
	for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) {
		/* kobj_cpu owns the real directory, not a symlink */
		if (j == policy->kobj_cpu)
			continue;

		ret = add_cpu_dev_symlink(policy, j);
		if (ret)
			break;
	}

	return ret;
}
1016
/* Undo cpufreq_add_dev_symlink() for every related CPU */
static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
{
	unsigned int j;

	/* Some related CPUs might not be present (physically hotplugged) */
	for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) {
		/* kobj_cpu owns the real directory, not a symlink */
		if (j == policy->kobj_cpu)
			continue;

		remove_cpu_dev_symlink(policy, j);
	}
}
1029
/* Populate the policy's sysfs directory with driver and core attributes */
static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
				     struct device *dev)
{
	struct freq_attr **drv_attr;
	int ret = 0;

	/* set up files for this cpu device */
	/* Driver-specific attributes: NULL-terminated array, may be absent */
	drv_attr = cpufreq_driver->attr;
	while (drv_attr && *drv_attr) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			return ret;
		drv_attr++;
	}
	/* cpuinfo_cur_freq only exists when the driver can read the hardware */
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			return ret;
	}

	ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
	if (ret)
		return ret;

	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			return ret;
	}

	/* Finally link every related CPU to this directory */
	return cpufreq_add_dev_symlink(policy);
}
1062
Viresh Kumar7f0fa402015-07-08 15:12:16 +05301063static int cpufreq_init_policy(struct cpufreq_policy *policy)
Srivatsa S. Bhate18f1682013-07-30 04:24:23 +05301064{
viresh kumar6e2c89d2014-03-04 11:43:59 +08001065 struct cpufreq_governor *gov = NULL;
Srivatsa S. Bhate18f1682013-07-30 04:24:23 +05301066 struct cpufreq_policy new_policy;
Srivatsa S. Bhate18f1682013-07-30 04:24:23 +05301067
Viresh Kumard5b73cd2013-08-06 22:53:06 +05301068 memcpy(&new_policy, policy, sizeof(*policy));
Jason Barona27a9ab2013-12-19 22:50:50 +00001069
viresh kumar6e2c89d2014-03-04 11:43:59 +08001070 /* Update governor of new_policy to the governor used before hotplug */
Viresh Kumar45732372015-05-12 12:22:34 +05301071 gov = find_governor(policy->last_governor);
viresh kumar6e2c89d2014-03-04 11:43:59 +08001072 if (gov)
1073 pr_debug("Restoring governor %s for cpu %d\n",
1074 policy->governor->name, policy->cpu);
1075 else
1076 gov = CPUFREQ_DEFAULT_GOVERNOR;
1077
1078 new_policy.governor = gov;
1079
Jason Barona27a9ab2013-12-19 22:50:50 +00001080 /* Use the default policy if its valid. */
1081 if (cpufreq_driver->setpolicy)
viresh kumar6e2c89d2014-03-04 11:43:59 +08001082 cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
Dave Jonesecf7e462009-07-08 18:48:47 -04001083
1084 /* set default policy */
Viresh Kumar7f0fa402015-07-08 15:12:16 +05301085 return cpufreq_set_policy(policy, &new_policy);
Dave Jones909a6942009-07-08 18:05:42 -04001086}
1087
/* Attach @cpu to an existing @policy that already manages its siblings */
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
				  unsigned int cpu, struct device *dev)
{
	int ret = 0;

	/* Has this CPU been taken care of already? */
	if (cpumask_test_cpu(cpu, policy->cpus))
		return 0;

	/* The governor must be stopped while policy->cpus is modified */
	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			return ret;
		}
	}

	down_write(&policy->rwsem);
	cpumask_set_cpu(cpu, policy->cpus);
	up_write(&policy->rwsem);

	/* Restart the governor and re-evaluate limits for the widened mask */
	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
		if (!ret)
			ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

		if (ret) {
			pr_err("%s: Failed to start governor\n", __func__);
			return ret;
		}
	}

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001122
/*
 * Fetch the inactive policy previously associated with @cpu and re-arm it:
 * make @cpu the owner and clear the stale governor pointer so that
 * cpufreq_init_policy() selects a fresh one.  Returns NULL if none exists.
 */
static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned long flags;

	read_lock_irqsave(&cpufreq_driver_lock, flags);
	policy = per_cpu(cpufreq_cpu_data, cpu);
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (likely(policy)) {
		/* Policy should be inactive here */
		WARN_ON(!policy_is_inactive(policy));

		down_write(&policy->rwsem);
		policy->cpu = cpu;
		/* Drop the old governor; init picks one from last_governor */
		policy->governor = NULL;
		up_write(&policy->rwsem);
	}

	return policy;
}
1144
/*
 * Allocate and initialize a new policy: cpumasks, locks, work item, and an
 * embedded kobject parented to @dev's sysfs directory.  Returns NULL on
 * any allocation or kobject failure (partial state is unwound via gotos).
 */
static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
{
	struct cpufreq_policy *policy;
	int ret;

	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
	if (!policy)
		return NULL;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	/* Creates the "cpufreq" directory under the owning CPU device */
	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &dev->kobj,
				   "cpufreq");
	if (ret) {
		pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
		goto err_free_rcpumask;
	}

	INIT_LIST_HEAD(&policy->policy_list);
	init_rwsem(&policy->rwsem);
	spin_lock_init(&policy->transition_lock);
	init_waitqueue_head(&policy->transition_wait);
	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	policy->cpu = dev->id;

	/* Set this once on allocation */
	policy->kobj_cpu = dev->id;

	return policy;

err_free_rcpumask:
	free_cpumask_var(policy->related_cpus);
err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);

	return NULL;
}
1190
/*
 * Tear down the policy's sysfs presence: optionally notify listeners, remove
 * the per-CPU symlinks, drop the kobject reference, and sleep until the last
 * reference is gone (signalled by cpufreq_sysfs_release()).
 */
static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify)
{
	struct kobject *kobj;
	struct completion *cmp;

	if (notify)
		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
					     CPUFREQ_REMOVE_POLICY, policy);

	/* Snapshot kobj/cmp pointers under the rwsem before releasing them */
	down_write(&policy->rwsem);
	cpufreq_remove_dev_symlink(policy);
	kobj = &policy->kobj;
	cmp = &policy->kobj_unregister;
	up_write(&policy->rwsem);
	kobject_put(kobj);

	/*
	 * We need to make sure that the underlying kobj is
	 * actually not referenced anymore by anybody before we
	 * proceed with unloading.
	 */
	pr_debug("waiting for dropping of refcount\n");
	wait_for_completion(cmp);
	pr_debug("wait complete\n");
}
1216
/*
 * Unhook @policy from all bookkeeping and free it.  May sleep (waits for the
 * kobject refcount to drop in cpufreq_policy_put_kobj()).
 */
static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
{
	unsigned long flags;
	int cpu;

	/* Remove policy from list */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	list_del(&policy->policy_list);

	/* Clear every per-CPU back-pointer to this policy */
	for_each_cpu(cpu, policy->related_cpus)
		per_cpu(cpufreq_cpu_data, cpu) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpufreq_policy_put_kobj(policy, notify);
	free_cpumask_var(policy->related_cpus);
	free_cpumask_var(policy->cpus);
	kfree(policy);
}
1235
Viresh Kumar23faf0b2015-02-19 17:02:04 +05301236/**
1237 * cpufreq_add_dev - add a CPU device
1238 *
1239 * Adds the cpufreq interface for a CPU device.
1240 *
1241 * The Oracle says: try running cpufreq registration/unregistration concurrently
1242 * with with cpu hotplugging and all hell will break loose. Tried to clean this
1243 * mess up, but more thorough testing is needed. - Mathieu
1244 */
1245static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001246{
Viresh Kumarfcf80582013-01-29 14:39:08 +00001247 unsigned int j, cpu = dev->id;
Viresh Kumar65922462013-02-07 10:56:03 +05301248 int ret = -ENOMEM;
Viresh Kumar7f0c0202015-01-02 12:34:32 +05301249 struct cpufreq_policy *policy;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001250 unsigned long flags;
Viresh Kumar87549142015-06-10 02:13:21 +02001251 bool recover_policy = !sif;
Ashok Rajc32b6b82005-10-30 14:59:54 -08001252
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02001253 pr_debug("adding CPU %u\n", cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001254
Viresh Kumar87549142015-06-10 02:13:21 +02001255 /*
1256 * Only possible if 'cpu' wasn't physically present earlier and we are
1257 * here from subsys_interface add callback. A hotplug notifier will
1258 * follow and we will handle it like logical CPU hotplug then. For now,
1259 * just create the sysfs link.
1260 */
1261 if (cpu_is_offline(cpu))
1262 return add_cpu_dev_symlink(per_cpu(cpufreq_cpu_data, cpu), cpu);
1263
Viresh Kumar6eed9402013-08-06 22:53:11 +05301264 if (!down_read_trylock(&cpufreq_rwsem))
1265 return 0;
1266
Viresh Kumarbb29ae12015-02-19 17:02:06 +05301267 /* Check if this CPU already has a policy to manage it */
Viresh Kumar9104bb22015-05-12 12:22:12 +05301268 policy = per_cpu(cpufreq_cpu_data, cpu);
1269 if (policy && !policy_is_inactive(policy)) {
1270 WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
1271 ret = cpufreq_add_policy_cpu(policy, cpu, dev);
1272 up_read(&cpufreq_rwsem);
1273 return ret;
Viresh Kumarfcf80582013-01-29 14:39:08 +00001274 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001275
Rafael J. Wysocki72368d12013-12-27 01:07:11 +01001276 /*
1277 * Restore the saved policy when doing light-weight init and fall back
1278 * to the full init if that fails.
1279 */
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301280 policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
Rafael J. Wysocki72368d12013-12-27 01:07:11 +01001281 if (!policy) {
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301282 recover_policy = false;
Viresh Kumar2fc33842015-06-08 18:25:29 +05301283 policy = cpufreq_policy_alloc(dev);
Rafael J. Wysocki72368d12013-12-27 01:07:11 +01001284 if (!policy)
Viresh Kumar8101f992015-07-08 15:12:15 +05301285 goto out_release_rwsem;
Rafael J. Wysocki72368d12013-12-27 01:07:11 +01001286 }
Srivatsa S. Bhat0d66b912013-09-12 01:42:59 +05301287
Rusty Russell835481d2009-01-04 05:18:06 -08001288 cpumask_copy(policy->cpus, cpumask_of(cpu));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001289
Linus Torvalds1da177e2005-04-16 15:20:36 -07001290 /* call driver. From then on the cpufreq must be able
1291 * to accept all calls to ->verify and ->setpolicy for this CPU
1292 */
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001293 ret = cpufreq_driver->init(policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001294 if (ret) {
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02001295 pr_debug("initialization failed\n");
Viresh Kumar8101f992015-07-08 15:12:15 +05301296 goto out_free_policy;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001297 }
Viresh Kumar643ae6e2013-01-12 05:14:38 +00001298
Tomeu Vizoso6d4e81e2014-11-24 10:08:03 +01001299 down_write(&policy->rwsem);
1300
Viresh Kumar5a7e56a2014-03-04 11:44:00 +08001301 /* related cpus should atleast have policy->cpus */
1302 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1303
1304 /*
1305 * affected cpus must always be the one, which are online. We aren't
1306 * managing offline cpus here.
1307 */
1308 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1309
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301310 if (!recover_policy) {
Viresh Kumar5a7e56a2014-03-04 11:44:00 +08001311 policy->user_policy.min = policy->min;
1312 policy->user_policy.max = policy->max;
Tomeu Vizoso6d4e81e2014-11-24 10:08:03 +01001313
Viresh Kumar988bed02015-05-08 11:53:45 +05301314 write_lock_irqsave(&cpufreq_driver_lock, flags);
1315 for_each_cpu(j, policy->related_cpus)
1316 per_cpu(cpufreq_cpu_data, j) = policy;
1317 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1318 }
Viresh Kumar652ed952014-01-09 20:38:43 +05301319
Rafael J. Wysocki2ed99e32014-03-12 21:49:33 +01001320 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
Viresh Kumarda60ce92013-10-03 20:28:30 +05301321 policy->cur = cpufreq_driver->get(policy->cpu);
1322 if (!policy->cur) {
1323 pr_err("%s: ->get() failed\n", __func__);
Viresh Kumar8101f992015-07-08 15:12:15 +05301324 goto out_exit_policy;
Viresh Kumarda60ce92013-10-03 20:28:30 +05301325 }
1326 }
1327
Viresh Kumard3916692013-12-03 11:20:46 +05301328 /*
1329 * Sometimes boot loaders set CPU frequency to a value outside of
1330 * frequency table present with cpufreq core. In such cases CPU might be
1331 * unstable if it has to run on that frequency for long duration of time
1332 * and so its better to set it to a frequency which is specified in
1333 * freq-table. This also makes cpufreq stats inconsistent as
1334 * cpufreq-stats would fail to register because current frequency of CPU
1335 * isn't found in freq-table.
1336 *
1337 * Because we don't want this change to effect boot process badly, we go
1338 * for the next freq which is >= policy->cur ('cur' must be set by now,
1339 * otherwise we will end up setting freq to lowest of the table as 'cur'
1340 * is initialized to zero).
1341 *
1342 * We are passing target-freq as "policy->cur - 1" otherwise
1343 * __cpufreq_driver_target() would simply fail, as policy->cur will be
1344 * equal to target-freq.
1345 */
1346 if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1347 && has_target()) {
1348 /* Are we running at unknown frequency ? */
1349 ret = cpufreq_frequency_table_get_index(policy, policy->cur);
1350 if (ret == -EINVAL) {
1351 /* Warn user and fix it */
1352 pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
1353 __func__, policy->cpu, policy->cur);
1354 ret = __cpufreq_driver_target(policy, policy->cur - 1,
1355 CPUFREQ_RELATION_L);
1356
1357 /*
1358 * Reaching here after boot in a few seconds may not
1359 * mean that system will remain stable at "unknown"
1360 * frequency for longer duration. Hence, a BUG_ON().
1361 */
1362 BUG_ON(ret);
1363 pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
1364 __func__, policy->cpu, policy->cur);
1365 }
1366 }
1367
Thomas Renningera1531ac2008-07-29 22:32:58 -07001368 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1369 CPUFREQ_START, policy);
1370
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301371 if (!recover_policy) {
Viresh Kumar308b60e2013-07-31 14:35:14 +02001372 ret = cpufreq_add_dev_interface(policy, dev);
Srivatsa S. Bhata82fab22013-07-30 04:24:49 +05301373 if (ret)
Viresh Kumar8101f992015-07-08 15:12:15 +05301374 goto out_exit_policy;
Viresh Kumarfcd7af92014-01-07 07:10:10 +05301375 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1376 CPUFREQ_CREATE_POLICY, policy);
Dave Jones8ff69732006-03-05 03:37:23 -05001377
Viresh Kumar988bed02015-05-08 11:53:45 +05301378 write_lock_irqsave(&cpufreq_driver_lock, flags);
1379 list_add(&policy->policy_list, &cpufreq_policy_list);
1380 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1381 }
Viresh Kumar9515f4d2013-08-20 12:08:23 +05301382
Viresh Kumar7f0fa402015-07-08 15:12:16 +05301383 ret = cpufreq_init_policy(policy);
1384 if (ret) {
1385 pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
1386 __func__, cpu, ret);
1387 goto out_remove_policy_notify;
1388 }
Srivatsa S. Bhate18f1682013-07-30 04:24:23 +05301389
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301390 if (!recover_policy) {
Viresh Kumar08fd8c1c2013-12-24 07:11:01 +05301391 policy->user_policy.policy = policy->policy;
1392 policy->user_policy.governor = policy->governor;
1393 }
Viresh Kumar4e97b632014-03-04 11:44:01 +08001394 up_write(&policy->rwsem);
Viresh Kumar08fd8c1c2013-12-24 07:11:01 +05301395
Greg Kroah-Hartman038c5b32007-12-17 15:54:39 -04001396 kobject_uevent(&policy->kobj, KOBJ_ADD);
Viresh Kumar7c45cf32014-11-27 06:07:51 +05301397
Viresh Kumar6eed9402013-08-06 22:53:11 +05301398 up_read(&cpufreq_rwsem);
1399
Viresh Kumar7c45cf32014-11-27 06:07:51 +05301400 /* Callback for handling stuff after policy is ready */
1401 if (cpufreq_driver->ready)
1402 cpufreq_driver->ready(policy);
1403
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02001404 pr_debug("initialization complete\n");
Dave Jones87c32272006-03-29 01:48:37 -05001405
Linus Torvalds1da177e2005-04-16 15:20:36 -07001406 return 0;
1407
Viresh Kumar7f0fa402015-07-08 15:12:16 +05301408out_remove_policy_notify:
1409 /* cpufreq_policy_free() will notify based on this */
1410 recover_policy = true;
Viresh Kumar8101f992015-07-08 15:12:15 +05301411out_exit_policy:
Prarit Bhargava7106e022014-09-10 10:12:08 -04001412 up_write(&policy->rwsem);
1413
Viresh Kumarda60ce92013-10-03 20:28:30 +05301414 if (cpufreq_driver->exit)
1415 cpufreq_driver->exit(policy);
Viresh Kumar8101f992015-07-08 15:12:15 +05301416out_free_policy:
Viresh Kumar3654c5c2015-06-08 18:25:30 +05301417 cpufreq_policy_free(policy, recover_policy);
Viresh Kumar8101f992015-07-08 15:12:15 +05301418out_release_rwsem:
Viresh Kumar6eed9402013-08-06 22:53:11 +05301419 up_read(&cpufreq_rwsem);
1420
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421 return ret;
1422}
1423
/*
 * __cpufreq_remove_dev_prepare - first stage of removing a CPU from cpufreq.
 * @dev: CPU device being removed
 * @sif: subsystem interface (unused here; consumed by the _finish stage)
 *
 * Stops the governor, drops the CPU from policy->cpus and, if the policy is
 * still active afterwards, nominates a new owner CPU (if needed) and restarts
 * the governor.  Policies that become inactive here are torn down later by
 * __cpufreq_remove_dev_finish().
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int __cpufreq_remove_dev_prepare(struct device *dev,
					struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	int ret = 0;
	struct cpufreq_policy *policy;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	policy = cpufreq_cpu_get_raw(cpu);
	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	/* The governor must be stopped before policy->cpus is changed. */
	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			return ret;
		}
	}

	down_write(&policy->rwsem);
	cpumask_clear_cpu(cpu, policy->cpus);

	if (policy_is_inactive(policy)) {
		/*
		 * Last CPU of this policy just went away: record the name of
		 * the governor that was in use in policy->last_governor.
		 */
		if (has_target())
			strncpy(policy->last_governor, policy->governor->name,
				CPUFREQ_NAME_LEN);
	} else if (cpu == policy->cpu) {
		/* Nominate new CPU owning the policy */
		policy->cpu = cpumask_any(policy->cpus);
	}
	up_write(&policy->rwsem);

	/* Start governor again for active policy */
	if (!policy_is_inactive(policy)) {
		if (has_target()) {
			ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
			if (!ret)
				ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

			if (ret)
				pr_err("%s: Failed to start governor\n", __func__);
		}
	} else if (cpufreq_driver->stop_cpu) {
		/* Policy went inactive: give the driver a chance to react. */
		cpufreq_driver->stop_cpu(policy);
	}

	return ret;
}
1476
/*
 * __cpufreq_remove_dev_finish - second stage of removing a CPU from cpufreq.
 * @dev: CPU device being removed
 * @sif: subsystem interface; non-NULL means the cpufreq driver itself is
 *	 being removed, in which case the policy is freed as well
 *
 * Only acts on policies left inactive by __cpufreq_remove_dev_prepare():
 * exits the governor, calls the driver's ->exit() and optionally frees the
 * policy.  Returns 0 on success or a negative errno.
 */
static int __cpufreq_remove_dev_finish(struct device *dev,
				       struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	int ret;
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	/* Only proceed for inactive policies */
	if (!policy_is_inactive(policy))
		return 0;

	/* If cpu is last user of policy, free policy */
	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
		if (ret) {
			pr_err("%s: Failed to exit governor\n", __func__);
			return ret;
		}
	}

	/*
	 * Perform the ->exit() even during light-weight tear-down,
	 * since this is a core component, and is essential for the
	 * subsequent light-weight ->init() to succeed.
	 */
	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);

	/* Free the policy only if the driver is getting removed. */
	if (sif)
		cpufreq_policy_free(policy, true);

	return 0;
}
1516
/**
 * cpufreq_remove_dev - remove a CPU device
 * @dev: the CPU device being removed
 * @sif: subsystem interface this callback is registered on
 *
 * Removes the cpufreq interface for a CPU device.  For a CPU that is already
 * offline, only the sysfs link needs removing — unless every CPU sharing the
 * policy is physically gone, in which case the policy itself is freed.
 * Otherwise the regular two-stage prepare/finish teardown is performed.
 */
static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	int ret;

	/*
	 * Only possible if 'cpu' is getting physically removed now. A hotplug
	 * notifier should have already been called and we just need to remove
	 * link or free policy here.
	 */
	if (cpu_is_offline(cpu)) {
		struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
		struct cpumask mask;

		if (!policy)
			return 0;

		/* Compute the set of related CPUs other than this one. */
		cpumask_copy(&mask, policy->related_cpus);
		cpumask_clear_cpu(cpu, &mask);

		/*
		 * Free policy only if all policy->related_cpus are removed
		 * physically.
		 */
		if (cpumask_intersects(&mask, cpu_present_mask)) {
			remove_cpu_dev_symlink(policy, cpu);
			return 0;
		}

		cpufreq_policy_free(policy, true);
		return 0;
	}

	ret = __cpufreq_remove_dev_prepare(dev, sif);

	if (!ret)
		ret = __cpufreq_remove_dev_finish(dev, sif);

	return ret;
}
1562
David Howells65f27f32006-11-22 14:55:48 +00001563static void handle_update(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001564{
David Howells65f27f32006-11-22 14:55:48 +00001565 struct cpufreq_policy *policy =
1566 container_of(work, struct cpufreq_policy, update);
1567 unsigned int cpu = policy->cpu;
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02001568 pr_debug("handle_update for cpu %u called\n", cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001569 cpufreq_update_policy(cpu);
1570}
1571
1572/**
Viresh Kumarbb176f72013-06-19 14:19:33 +05301573 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
1574 * in deep trouble.
Viresh Kumara1e1dc42015-01-02 12:34:28 +05301575 * @policy: policy managing CPUs
Linus Torvalds1da177e2005-04-16 15:20:36 -07001576 * @new_freq: CPU frequency the CPU actually runs at
1577 *
Dave Jones29464f22009-01-18 01:37:11 -05001578 * We adjust to current frequency first, and need to clean up later.
1579 * So either call to cpufreq_update_policy() or schedule handle_update()).
Linus Torvalds1da177e2005-04-16 15:20:36 -07001580 */
Viresh Kumara1e1dc42015-01-02 12:34:28 +05301581static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301582 unsigned int new_freq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001583{
1584 struct cpufreq_freqs freqs;
Viresh Kumarb43a7ff2013-03-24 11:56:43 +05301585
Joe Perchese837f9b2014-03-11 10:03:00 -07001586 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
Viresh Kumara1e1dc42015-01-02 12:34:28 +05301587 policy->cur, new_freq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001588
Viresh Kumara1e1dc42015-01-02 12:34:28 +05301589 freqs.old = policy->cur;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001590 freqs.new = new_freq;
Viresh Kumarb43a7ff2013-03-24 11:56:43 +05301591
Viresh Kumar8fec0512014-03-24 13:35:45 +05301592 cpufreq_freq_transition_begin(policy, &freqs);
1593 cpufreq_freq_transition_end(policy, &freqs, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001594}
1595
Dave Jones32ee8c32006-02-28 00:43:23 -05001596/**
Dhaval Giani4ab70df2006-12-13 14:49:15 +05301597 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001598 * @cpu: CPU number
1599 *
1600 * This is the last known freq, without actually getting it from the driver.
1601 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1602 */
1603unsigned int cpufreq_quick_get(unsigned int cpu)
1604{
Dirk Brandewie9e21ba82013-02-06 09:02:08 -08001605 struct cpufreq_policy *policy;
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301606 unsigned int ret_freq = 0;
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001607
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001608 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1609 return cpufreq_driver->get(cpu);
Dirk Brandewie9e21ba82013-02-06 09:02:08 -08001610
1611 policy = cpufreq_cpu_get(cpu);
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001612 if (policy) {
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301613 ret_freq = policy->cur;
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001614 cpufreq_cpu_put(policy);
1615 }
1616
Dave Jones4d34a672008-02-07 16:33:49 -05001617 return ret_freq;
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001618}
1619EXPORT_SYMBOL(cpufreq_quick_get);
1620
Jesse Barnes3d737102011-06-28 10:59:12 -07001621/**
1622 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1623 * @cpu: CPU number
1624 *
1625 * Just return the max possible frequency for a given CPU.
1626 */
1627unsigned int cpufreq_quick_get_max(unsigned int cpu)
1628{
1629 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1630 unsigned int ret_freq = 0;
1631
1632 if (policy) {
1633 ret_freq = policy->max;
1634 cpufreq_cpu_put(policy);
1635 }
1636
1637 return ret_freq;
1638}
1639EXPORT_SYMBOL(cpufreq_quick_get_max);
1640
/*
 * __cpufreq_get - read the current frequency for @policy from the driver.
 *
 * Callers hold policy->rwsem (see cpufreq_get()).  If the driver-reported
 * frequency disagrees with the cached policy->cur — and the driver does not
 * advertise CPUFREQ_CONST_LOOPS — announce the real frequency via
 * cpufreq_out_of_sync() and schedule handle_update() to reconcile the
 * policy later.  Returns 0 when the driver has no ->get() callback.
 */
static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
{
	unsigned int ret_freq = 0;

	if (!cpufreq_driver->get)
		return ret_freq;

	ret_freq = cpufreq_driver->get(policy->cpu);

	/* Updating inactive policies is invalid, so avoid doing that. */
	if (unlikely(policy_is_inactive(policy)))
		return ret_freq;

	if (ret_freq && policy->cur &&
		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		/*
		 * Verify that the cached value agrees with what the hardware
		 * reports; resync if a discrepancy is found.
		 */
		if (unlikely(ret_freq != policy->cur)) {
			cpufreq_out_of_sync(policy, ret_freq);
			schedule_work(&policy->update);
		}
	}

	return ret_freq;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001666
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001667/**
1668 * cpufreq_get - get the current CPU frequency (in kHz)
1669 * @cpu: CPU number
1670 *
1671 * Get the CPU current (static) CPU frequency
1672 */
1673unsigned int cpufreq_get(unsigned int cpu)
1674{
Aaron Plattner999976e2014-03-04 12:42:15 -08001675 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001676 unsigned int ret_freq = 0;
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001677
Aaron Plattner999976e2014-03-04 12:42:15 -08001678 if (policy) {
1679 down_read(&policy->rwsem);
Viresh Kumard92d50a2015-01-02 12:34:29 +05301680 ret_freq = __cpufreq_get(policy);
Aaron Plattner999976e2014-03-04 12:42:15 -08001681 up_read(&policy->rwsem);
Viresh Kumar26ca8692013-09-20 22:37:31 +05301682
Aaron Plattner999976e2014-03-04 12:42:15 -08001683 cpufreq_cpu_put(policy);
1684 }
Viresh Kumar6eed9402013-08-06 22:53:11 +05301685
Dave Jones4d34a672008-02-07 16:33:49 -05001686 return ret_freq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001687}
1688EXPORT_SYMBOL(cpufreq_get);
1689
/* Hooks cpufreq into the CPU subsystem's device add/remove notifications. */
static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};
1696
Viresh Kumare28867e2014-03-04 11:00:27 +08001697/*
1698 * In case platform wants some specific frequency to be configured
1699 * during suspend..
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001700 */
Viresh Kumare28867e2014-03-04 11:00:27 +08001701int cpufreq_generic_suspend(struct cpufreq_policy *policy)
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001702{
Viresh Kumare28867e2014-03-04 11:00:27 +08001703 int ret;
Dave Jones4bc5d342009-08-04 14:03:25 -04001704
Viresh Kumare28867e2014-03-04 11:00:27 +08001705 if (!policy->suspend_freq) {
1706 pr_err("%s: suspend_freq can't be zero\n", __func__);
1707 return -EINVAL;
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001708 }
1709
Viresh Kumare28867e2014-03-04 11:00:27 +08001710 pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1711 policy->suspend_freq);
1712
1713 ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1714 CPUFREQ_RELATION_H);
1715 if (ret)
1716 pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1717 __func__, policy->suspend_freq, ret);
1718
Dave Jonesc9060492008-02-07 16:32:18 -05001719 return ret;
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001720}
Viresh Kumare28867e2014-03-04 11:00:27 +08001721EXPORT_SYMBOL(cpufreq_generic_suspend);
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001722
/**
 * cpufreq_suspend() - Suspend CPUFreq governors
 *
 * Called during system wide Suspend/Hibernate cycles for suspending governors
 * as some platforms can't change frequency after this point in suspend cycle.
 * Because some of the devices (like: i2c, regulators, etc) they use for
 * changing frequency are suspended quickly after this point.
 */
void cpufreq_suspend(void)
{
	struct cpufreq_policy *policy;

	if (!cpufreq_driver)
		return;

	/* Without governors there is nothing to stop; just set the flag. */
	if (!has_target())
		goto suspend;

	pr_debug("%s: Suspending Governors\n", __func__);

	for_each_active_policy(policy) {
		if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
			pr_err("%s: Failed to stop governor for policy: %p\n",
				__func__, policy);
		else if (cpufreq_driver->suspend
		    && cpufreq_driver->suspend(policy))
			pr_err("%s: Failed to suspend driver: %p\n", __func__,
				policy);
	}

suspend:
	/* Set unconditionally; cleared again in cpufreq_resume(). */
	cpufreq_suspended = true;
}
1756
/**
 * cpufreq_resume() - Resume CPUFreq governors
 *
 * Called during system wide Suspend/Hibernate cycle for resuming governors that
 * are suspended with cpufreq_suspend().
 */
void cpufreq_resume(void)
{
	struct cpufreq_policy *policy;

	if (!cpufreq_driver)
		return;

	/* Clear the flag set by cpufreq_suspend() before restarting anything. */
	cpufreq_suspended = false;

	if (!has_target())
		return;

	pr_debug("%s: Resuming Governors\n", __func__);

	for_each_active_policy(policy) {
		if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
			pr_err("%s: Failed to resume driver: %p\n", __func__,
				policy);
		else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
		    || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
			pr_err("%s: Failed to start governor for policy: %p\n",
				__func__, policy);
	}

	/*
	 * Schedule cpufreq_update_policy() for the first-online CPU, as that
	 * wouldn't be hotplugged-out on suspend. It will verify that the
	 * current freq is in sync with what we believe it to be.
	 */
	policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask));
	if (WARN_ON(!policy))
		return;

	schedule_work(&policy->update);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001798
Borislav Petkov9d950462013-01-20 10:24:28 +00001799/**
1800 * cpufreq_get_current_driver - return current driver's name
1801 *
1802 * Return the name string of the currently loaded cpufreq driver
1803 * or NULL, if none.
1804 */
1805const char *cpufreq_get_current_driver(void)
1806{
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001807 if (cpufreq_driver)
1808 return cpufreq_driver->name;
1809
1810 return NULL;
Borislav Petkov9d950462013-01-20 10:24:28 +00001811}
1812EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001813
Thomas Petazzoni51315cd2014-10-19 11:30:27 +02001814/**
1815 * cpufreq_get_driver_data - return current driver data
1816 *
1817 * Return the private data of the currently loaded cpufreq
1818 * driver, or NULL if no cpufreq driver is loaded.
1819 */
1820void *cpufreq_get_driver_data(void)
1821{
1822 if (cpufreq_driver)
1823 return cpufreq_driver->driver_data;
1824
1825 return NULL;
1826}
1827EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
1828
Linus Torvalds1da177e2005-04-16 15:20:36 -07001829/*********************************************************************
1830 * NOTIFIER LISTS INTERFACE *
1831 *********************************************************************/
1832
1833/**
1834 * cpufreq_register_notifier - register a driver with cpufreq
1835 * @nb: notifier function to register
1836 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1837 *
Dave Jones32ee8c32006-02-28 00:43:23 -05001838 * Add a driver to one of two lists: either a list of drivers that
Linus Torvalds1da177e2005-04-16 15:20:36 -07001839 * are notified about clock rate changes (once before and once after
1840 * the transition), or a list of drivers that are notified about
1841 * changes in cpufreq policy.
1842 *
1843 * This function may sleep, and has the same return conditions as
Alan Sterne041c682006-03-27 01:16:30 -08001844 * blocking_notifier_chain_register.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001845 */
1846int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1847{
1848 int ret;
1849
Dirk Brandewied5aaffa2013-01-17 16:22:21 +00001850 if (cpufreq_disabled())
1851 return -EINVAL;
1852
Cesar Eduardo Barros74212ca2008-02-16 08:41:24 -02001853 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1854
Linus Torvalds1da177e2005-04-16 15:20:36 -07001855 switch (list) {
1856 case CPUFREQ_TRANSITION_NOTIFIER:
Alan Sternb4dfdbb2006-10-04 02:17:06 -07001857 ret = srcu_notifier_chain_register(
Alan Sterne041c682006-03-27 01:16:30 -08001858 &cpufreq_transition_notifier_list, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001859 break;
1860 case CPUFREQ_POLICY_NOTIFIER:
Alan Sterne041c682006-03-27 01:16:30 -08001861 ret = blocking_notifier_chain_register(
1862 &cpufreq_policy_notifier_list, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001863 break;
1864 default:
1865 ret = -EINVAL;
1866 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001867
1868 return ret;
1869}
1870EXPORT_SYMBOL(cpufreq_register_notifier);
1871
Linus Torvalds1da177e2005-04-16 15:20:36 -07001872/**
1873 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1874 * @nb: notifier block to be unregistered
Viresh Kumarbb176f72013-06-19 14:19:33 +05301875 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
Linus Torvalds1da177e2005-04-16 15:20:36 -07001876 *
1877 * Remove a driver from the CPU frequency notifier list.
1878 *
1879 * This function may sleep, and has the same return conditions as
Alan Sterne041c682006-03-27 01:16:30 -08001880 * blocking_notifier_chain_unregister.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001881 */
1882int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1883{
1884 int ret;
1885
Dirk Brandewied5aaffa2013-01-17 16:22:21 +00001886 if (cpufreq_disabled())
1887 return -EINVAL;
1888
Linus Torvalds1da177e2005-04-16 15:20:36 -07001889 switch (list) {
1890 case CPUFREQ_TRANSITION_NOTIFIER:
Alan Sternb4dfdbb2006-10-04 02:17:06 -07001891 ret = srcu_notifier_chain_unregister(
Alan Sterne041c682006-03-27 01:16:30 -08001892 &cpufreq_transition_notifier_list, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001893 break;
1894 case CPUFREQ_POLICY_NOTIFIER:
Alan Sterne041c682006-03-27 01:16:30 -08001895 ret = blocking_notifier_chain_unregister(
1896 &cpufreq_policy_notifier_list, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001897 break;
1898 default:
1899 ret = -EINVAL;
1900 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001901
1902 return ret;
1903}
1904EXPORT_SYMBOL(cpufreq_unregister_notifier);
1905
1906
1907/*********************************************************************
1908 * GOVERNORS *
1909 *********************************************************************/
1910
/*
 * __target_intermediate - switch @policy to the driver's intermediate
 * frequency for @index.
 *
 * Must set freqs->new to the intermediate frequency.  A driver-returned
 * value of 0 means no intermediate step is required and we return success
 * immediately.  The actual switch is wrapped in the transition begin/end
 * notifications.
 */
static int __target_intermediate(struct cpufreq_policy *policy,
				 struct cpufreq_freqs *freqs, int index)
{
	int ret;

	freqs->new = cpufreq_driver->get_intermediate(policy, index);

	/* We don't need to switch to intermediate freq */
	if (!freqs->new)
		return 0;

	pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
		 __func__, policy->cpu, freqs->old, freqs->new);

	cpufreq_freq_transition_begin(policy, freqs);
	ret = cpufreq_driver->target_intermediate(policy, index);
	cpufreq_freq_transition_end(policy, freqs, ret);

	if (ret)
		pr_err("%s: Failed to change to intermediate frequency: %d\n",
		       __func__, ret);

	return ret;
}
1936
/*
 * __target_index - switch @policy to the frequency at @index of @freq_table.
 *
 * Wraps the driver's ->target_index() callback with PRECHANGE/POSTCHANGE
 * transition notifications, unless the driver handles notification itself
 * (CPUFREQ_ASYNC_NOTIFICATION).  Optionally performs the intermediate
 * frequency step first, and on failure after that step announces a
 * transition back to policy->restore_freq.
 */
static int __target_index(struct cpufreq_policy *policy,
			  struct cpufreq_frequency_table *freq_table, int index)
{
	struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
	unsigned int intermediate_freq = 0;
	int retval = -EINVAL;
	bool notify;

	notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
	if (notify) {
		/* Handle switching to intermediate frequency */
		if (cpufreq_driver->get_intermediate) {
			retval = __target_intermediate(policy, &freqs, index);
			if (retval)
				return retval;

			intermediate_freq = freqs.new;
			/* Set old freq to intermediate */
			if (intermediate_freq)
				freqs.old = freqs.new;
		}

		freqs.new = freq_table[index].frequency;
		pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
			 __func__, policy->cpu, freqs.old, freqs.new);

		cpufreq_freq_transition_begin(policy, &freqs);
	}

	retval = cpufreq_driver->target_index(policy, index);
	if (retval)
		pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
		       retval);

	if (notify) {
		cpufreq_freq_transition_end(policy, &freqs, retval);

		/*
		 * Failed after setting to intermediate freq? Driver should have
		 * reverted back to initial frequency and so should we. Check
		 * here for intermediate_freq instead of get_intermediate, in
		 * case we haven't switched to intermediate freq at all.
		 */
		if (unlikely(retval && intermediate_freq)) {
			freqs.old = intermediate_freq;
			freqs.new = policy->restore_freq;
			cpufreq_freq_transition_begin(policy, &freqs);
			cpufreq_freq_transition_end(policy, &freqs, 0);
		}
	}

	return retval;
}
1990
/*
 * __cpufreq_driver_target - request a frequency change for @policy.
 * @policy: policy to change
 * @target_freq: desired frequency in kHz (clamped to policy min/max)
 * @relation: CPUFREQ_RELATION_* hint for picking a table entry
 *
 * Clamps @target_freq into [policy->min, policy->max], short-circuits when
 * the clamped target equals policy->cur, and otherwise forwards the request
 * to the driver through either ->target() or the frequency-table based
 * ->target_index() path.  Callers hold policy->rwsem (see
 * cpufreq_driver_target()).  Returns 0 on success or a negative errno.
 */
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	unsigned int old_target_freq = target_freq;
	int retval = -EINVAL;

	if (cpufreq_disabled())
		return -ENODEV;

	/* Make sure that target_freq is within supported range */
	if (target_freq > policy->max)
		target_freq = policy->max;
	if (target_freq < policy->min)
		target_freq = policy->min;

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
		 policy->cpu, target_freq, relation, old_target_freq);

	/*
	 * This might look like a redundant call as we are checking it again
	 * after finding index. But it is left intentionally for cases where
	 * exactly same freq is called again and so we can save on few function
	 * calls.
	 */
	if (target_freq == policy->cur)
		return 0;

	/* Save last value to restore later on errors */
	policy->restore_freq = policy->cur;

	if (cpufreq_driver->target)
		retval = cpufreq_driver->target(policy, target_freq, relation);
	else if (cpufreq_driver->target_index) {
		struct cpufreq_frequency_table *freq_table;
		int index;

		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (unlikely(!freq_table)) {
			pr_err("%s: Unable to find freq_table\n", __func__);
			goto out;
		}

		retval = cpufreq_frequency_table_target(policy, freq_table,
				target_freq, relation, &index);
		if (unlikely(retval)) {
			pr_err("%s: Unable to find matching freq\n", __func__);
			goto out;
		}

		/* Already running at the selected table frequency. */
		if (freq_table[index].frequency == policy->cur) {
			retval = 0;
			goto out;
		}

		retval = __target_index(policy, freq_table, index);
	}

out:
	return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
2053
Linus Torvalds1da177e2005-04-16 15:20:36 -07002054int cpufreq_driver_target(struct cpufreq_policy *policy,
2055 unsigned int target_freq,
2056 unsigned int relation)
2057{
Julia Lawallf1829e42008-07-25 22:44:53 +02002058 int ret = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002059
viresh kumarad7722d2013-10-18 19:10:15 +05302060 down_write(&policy->rwsem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002061
2062 ret = __cpufreq_driver_target(policy, target_freq, relation);
2063
viresh kumarad7722d2013-10-18 19:10:15 +05302064 up_write(&policy->rwsem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002065
Linus Torvalds1da177e2005-04-16 15:20:36 -07002066 return ret;
2067}
2068EXPORT_SYMBOL_GPL(cpufreq_driver_target);
2069
/*
 * __cpufreq_governor - deliver one governor event to policy->governor.
 *
 * @policy: policy whose governor receives the event
 * @event:  CPUFREQ_GOV_POLICY_INIT / START / STOP / LIMITS / POLICY_EXIT
 *
 * Besides forwarding the event, this function:
 *  - falls back to the performance governor when the current governor's
 *    max_transition_latency cannot be met by the hardware,
 *  - takes a module reference on POLICY_INIT and drops it on a failed INIT
 *    or a successful POLICY_EXIT,
 *  - maintains policy->governor_enabled under cpufreq_governor_lock and
 *    rejects out-of-order START/STOP/LIMITS with -EBUSY,
 *  - rolls governor_enabled back if the governor callback itself fails.
 *
 * Returns 0 on success, -EINVAL/-EBUSY on the checks above, or the
 * governor callback's error code.
 */
static int __cpufreq_governor(struct cpufreq_policy *policy,
					unsigned int event)
{
	int ret;

	/* Only must be defined when default governor is known to have latency
	   restrictions, like e.g. conservative or ondemand.
	   That this is the case is already ensured in Kconfig
	 */
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
	struct cpufreq_governor *gov = &cpufreq_gov_performance;
#else
	struct cpufreq_governor *gov = NULL;
#endif

	/* Don't start any governor operations if we are entering suspend */
	if (cpufreq_suspended)
		return 0;
	/*
	 * Governor might not be initiated here if ACPI _PPC changed
	 * notification happened, so check it.
	 */
	if (!policy->governor)
		return -EINVAL;

	/* Hardware too slow for this governor?  Fall back to performance. */
	if (policy->governor->max_transition_latency &&
	    policy->cpuinfo.transition_latency >
	    policy->governor->max_transition_latency) {
		if (!gov)
			return -EINVAL;
		else {
			pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
				policy->governor->name, gov->name);
			policy->governor = gov;
		}
	}

	/* Pin the governor module for the lifetime of this policy binding */
	if (event == CPUFREQ_GOV_POLICY_INIT)
		if (!try_module_get(policy->governor->owner))
			return -EINVAL;

	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
		 policy->cpu, event);

	/* Reject redundant START, or STOP/LIMITS on a stopped governor */
	mutex_lock(&cpufreq_governor_lock);
	if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
	    || (!policy->governor_enabled
	    && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
		mutex_unlock(&cpufreq_governor_lock);
		return -EBUSY;
	}

	/* Flip the enabled state optimistically, before calling the governor */
	if (event == CPUFREQ_GOV_STOP)
		policy->governor_enabled = false;
	else if (event == CPUFREQ_GOV_START)
		policy->governor_enabled = true;

	mutex_unlock(&cpufreq_governor_lock);

	ret = policy->governor->governor(policy, event);

	if (!ret) {
		if (event == CPUFREQ_GOV_POLICY_INIT)
			policy->governor->initialized++;
		else if (event == CPUFREQ_GOV_POLICY_EXIT)
			policy->governor->initialized--;
	} else {
		/* Restore original values */
		mutex_lock(&cpufreq_governor_lock);
		if (event == CPUFREQ_GOV_STOP)
			policy->governor_enabled = true;
		else if (event == CPUFREQ_GOV_START)
			policy->governor_enabled = false;
		mutex_unlock(&cpufreq_governor_lock);
	}

	/* Drop the module ref on failed INIT or successful EXIT */
	if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
			((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
		module_put(policy->governor->owner);

	return ret;
}
2152
Linus Torvalds1da177e2005-04-16 15:20:36 -07002153int cpufreq_register_governor(struct cpufreq_governor *governor)
2154{
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -07002155 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002156
2157 if (!governor)
2158 return -EINVAL;
2159
Konrad Rzeszutek Wilka7b422c2012-03-13 19:18:39 -04002160 if (cpufreq_disabled())
2161 return -ENODEV;
2162
akpm@osdl.org3fc54d32006-01-13 15:54:22 -08002163 mutex_lock(&cpufreq_governor_mutex);
Dave Jones32ee8c32006-02-28 00:43:23 -05002164
Viresh Kumarb3940582013-02-01 05:42:58 +00002165 governor->initialized = 0;
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -07002166 err = -EBUSY;
Viresh Kumar42f91fa2015-01-02 12:34:26 +05302167 if (!find_governor(governor->name)) {
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -07002168 err = 0;
2169 list_add(&governor->governor_list, &cpufreq_governor_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002170 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002171
Dave Jones32ee8c32006-02-28 00:43:23 -05002172 mutex_unlock(&cpufreq_governor_mutex);
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -07002173 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002174}
2175EXPORT_SYMBOL_GPL(cpufreq_register_governor);
2176
Linus Torvalds1da177e2005-04-16 15:20:36 -07002177void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2178{
Viresh Kumar45732372015-05-12 12:22:34 +05302179 struct cpufreq_policy *policy;
2180 unsigned long flags;
Prarit Bhargava90e41ba2009-11-12 09:18:46 -05002181
Linus Torvalds1da177e2005-04-16 15:20:36 -07002182 if (!governor)
2183 return;
2184
Konrad Rzeszutek Wilka7b422c2012-03-13 19:18:39 -04002185 if (cpufreq_disabled())
2186 return;
2187
Viresh Kumar45732372015-05-12 12:22:34 +05302188 /* clear last_governor for all inactive policies */
2189 read_lock_irqsave(&cpufreq_driver_lock, flags);
2190 for_each_inactive_policy(policy) {
Viresh Kumar18bf3a12015-05-12 12:22:51 +05302191 if (!strcmp(policy->last_governor, governor->name)) {
2192 policy->governor = NULL;
Viresh Kumar45732372015-05-12 12:22:34 +05302193 strcpy(policy->last_governor, "\0");
Viresh Kumar18bf3a12015-05-12 12:22:51 +05302194 }
Prarit Bhargava90e41ba2009-11-12 09:18:46 -05002195 }
Viresh Kumar45732372015-05-12 12:22:34 +05302196 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
Prarit Bhargava90e41ba2009-11-12 09:18:46 -05002197
akpm@osdl.org3fc54d32006-01-13 15:54:22 -08002198 mutex_lock(&cpufreq_governor_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002199 list_del(&governor->governor_list);
akpm@osdl.org3fc54d32006-01-13 15:54:22 -08002200 mutex_unlock(&cpufreq_governor_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002201 return;
2202}
2203EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2204
2205
Linus Torvalds1da177e2005-04-16 15:20:36 -07002206/*********************************************************************
2207 * POLICY INTERFACE *
2208 *********************************************************************/
2209
/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *	is written
 * @cpu: CPU whose policy is requested
 *
 * Reads the current cpufreq policy.
 */
2217int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2218{
2219 struct cpufreq_policy *cpu_policy;
2220 if (!policy)
2221 return -EINVAL;
2222
2223 cpu_policy = cpufreq_cpu_get(cpu);
2224 if (!cpu_policy)
2225 return -EINVAL;
2226
Viresh Kumard5b73cd2013-08-06 22:53:06 +05302227 memcpy(policy, cpu_policy, sizeof(*policy));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002228
2229 cpufreq_cpu_put(cpu_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002230 return 0;
2231}
2232EXPORT_SYMBOL(cpufreq_get_policy);
2233
/*
 * cpufreq_set_policy - apply new min/max/governor settings to a policy.
 *
 * policy : current policy.
 * new_policy: policy to be set.
 *
 * Caller holds policy->rwsem for writing (see cpufreq_update_policy());
 * the governor-switch path below temporarily drops and retakes it around
 * POLICY_EXIT, so the exact statement order here matters.
 *
 * Flow: validate limits -> driver->verify -> let notifiers adjust ->
 * re-verify -> publish limits -> either driver->setpolicy, or stop the
 * old governor and start the new one (restoring the old one on failure).
 */
static int cpufreq_set_policy(struct cpufreq_policy *policy,
				struct cpufreq_policy *new_policy)
{
	struct cpufreq_governor *old_gov;
	int ret;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
		 new_policy->cpu, new_policy->min, new_policy->max);

	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));

	/* New limits must overlap the current ones */
	if (new_policy->min > policy->max || new_policy->max < policy->min)
		return -EINVAL;

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		return ret;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, new_policy);

	/* adjust if necessary - hardware incompatibility*/
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, new_policy);

	/*
	 * verify the cpu speed can be set within this limit, which might be
	 * different to the first one
	 */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		return ret;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, new_policy);

	policy->min = new_policy->min;
	policy->max = new_policy->max;

	pr_debug("new min and max freqs are %u - %u kHz\n",
		 policy->min, policy->max);

	/* setpolicy-style drivers handle everything themselves */
	if (cpufreq_driver->setpolicy) {
		policy->policy = new_policy->policy;
		pr_debug("setting range\n");
		return cpufreq_driver->setpolicy(new_policy);
	}

	/* Same governor?  Just push the new limits. */
	if (new_policy->governor == policy->governor)
		goto out;

	pr_debug("governor switch\n");

	/* save old, working values */
	old_gov = policy->governor;
	/* end old governor */
	if (old_gov) {
		__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		/* rwsem dropped around POLICY_EXIT — presumably to avoid a
		 * lock-order issue with governor teardown; confirm before
		 * changing. */
		up_write(&policy->rwsem);
		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
		down_write(&policy->rwsem);
	}

	/* start new governor */
	policy->governor = new_policy->governor;
	if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
		if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
			goto out;

		/* START failed: tear down the freshly-INITed governor */
		up_write(&policy->rwsem);
		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
		down_write(&policy->rwsem);
	}

	/* new governor failed, so re-start old one */
	pr_debug("starting governor %s failed\n", policy->governor->name);
	if (old_gov) {
		policy->governor = old_gov;
		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
		__cpufreq_governor(policy, CPUFREQ_GOV_START);
	}

	return -EINVAL;

 out:
	pr_debug("governor: change or update limits\n");
	return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
}
2329
2330/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002331 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
2332 * @cpu: CPU which shall be re-evaluated
2333 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002334 * Useful for policy notifiers which have different necessities
Linus Torvalds1da177e2005-04-16 15:20:36 -07002335 * at different times.
2336 */
int cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct cpufreq_policy new_policy;
	int ret;

	if (!policy)
		return -ENODEV;

	down_write(&policy->rwsem);

	pr_debug("updating policy for CPU %u\n", cpu);
	/* Start from the live policy, then overlay the user-requested values */
	memcpy(&new_policy, policy, sizeof(*policy));
	new_policy.min = policy->user_policy.min;
	new_policy.max = policy->user_policy.max;
	new_policy.policy = policy->user_policy.policy;
	new_policy.governor = policy->user_policy.governor;

	/*
	 * BIOS might change freq behind our back
	 * -> ask driver for current freq and notify governors about a change
	 */
	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
		new_policy.cur = cpufreq_driver->get(cpu);
		if (WARN_ON(!new_policy.cur)) {
			/* Driver reported 0 kHz — treat as an I/O error */
			ret = -EIO;
			goto unlock;
		}

		if (!policy->cur) {
			pr_debug("Driver did not initialize current freq\n");
			policy->cur = new_policy.cur;
		} else {
			/* Resync bookkeeping if hardware changed freq behind us */
			if (policy->cur != new_policy.cur && has_target())
				cpufreq_out_of_sync(policy, new_policy.cur);
		}
	}

	ret = cpufreq_set_policy(policy, &new_policy);

unlock:
	up_write(&policy->rwsem);

	/* Balance the cpufreq_cpu_get() at the top */
	cpufreq_cpu_put(policy);
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
2384
Paul Gortmaker27609842013-06-19 13:54:04 -04002385static int cpufreq_cpu_callback(struct notifier_block *nfb,
Ashok Rajc32b6b82005-10-30 14:59:54 -08002386 unsigned long action, void *hcpu)
2387{
2388 unsigned int cpu = (unsigned long)hcpu;
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002389 struct device *dev;
Ashok Rajc32b6b82005-10-30 14:59:54 -08002390
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002391 dev = get_cpu_device(cpu);
2392 if (dev) {
Srivatsa S. Bhat5302c3f2013-07-30 04:25:25 +05302393 switch (action & ~CPU_TASKS_FROZEN) {
Ashok Rajc32b6b82005-10-30 14:59:54 -08002394 case CPU_ONLINE:
Viresh Kumar23faf0b2015-02-19 17:02:04 +05302395 cpufreq_add_dev(dev, NULL);
Ashok Rajc32b6b82005-10-30 14:59:54 -08002396 break;
Srivatsa S. Bhat5302c3f2013-07-30 04:25:25 +05302397
Ashok Rajc32b6b82005-10-30 14:59:54 -08002398 case CPU_DOWN_PREPARE:
Viresh Kumar96bbbe42014-03-10 14:53:35 +05302399 __cpufreq_remove_dev_prepare(dev, NULL);
Srivatsa S. Bhat1aee40a2013-09-07 01:23:27 +05302400 break;
2401
2402 case CPU_POST_DEAD:
Viresh Kumar96bbbe42014-03-10 14:53:35 +05302403 __cpufreq_remove_dev_finish(dev, NULL);
Ashok Rajc32b6b82005-10-30 14:59:54 -08002404 break;
Srivatsa S. Bhat5302c3f2013-07-30 04:25:25 +05302405
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08002406 case CPU_DOWN_FAILED:
Viresh Kumar23faf0b2015-02-19 17:02:04 +05302407 cpufreq_add_dev(dev, NULL);
Ashok Rajc32b6b82005-10-30 14:59:54 -08002408 break;
2409 }
2410 }
2411 return NOTIFY_OK;
2412}
2413
/* Hotplug notifier registered in cpufreq_register_driver() */
static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};
Linus Torvalds1da177e2005-04-16 15:20:36 -07002417
2418/*********************************************************************
Lukasz Majewski6f19efc2013-12-20 15:24:49 +01002419 * BOOST *
2420 *********************************************************************/
/*
 * cpufreq_boost_set_sw - software fallback for drivers without ->set_boost.
 *
 * Re-reads every active policy's frequency table (whose boost entries are
 * gated by the global boost state) and pushes the refreshed limits to the
 * governor.
 *
 * NOTE(review): ret starts at -EINVAL, so if no active policy has a
 * freq_table (or there are no active policies) this reports failure even
 * though nothing went wrong per se — confirm whether callers rely on that.
 */
static int cpufreq_boost_set_sw(int state)
{
	struct cpufreq_frequency_table *freq_table;
	struct cpufreq_policy *policy;
	int ret = -EINVAL;

	for_each_active_policy(policy) {
		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (freq_table) {
			/* Recompute policy->min/max from the (boost-aware) table */
			ret = cpufreq_frequency_table_cpuinfo(policy,
							freq_table);
			if (ret) {
				pr_err("%s: Policy frequency update failed\n",
				       __func__);
				break;
			}
			policy->user_policy.max = policy->max;
			__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
		}
	}

	return ret;
}
2444
/*
 * cpufreq_boost_trigger_state - enable/disable frequency boost globally.
 *
 * Flips cpufreq_driver->boost_enabled under cpufreq_driver_lock first, then
 * asks the driver to apply it; on driver failure the flag is rolled back so
 * state and hardware stay consistent.  Returns 0 if already in the requested
 * state or on success, otherwise the driver's error code.
 */
int cpufreq_boost_trigger_state(int state)
{
	unsigned long flags;
	int ret = 0;

	if (cpufreq_driver->boost_enabled == state)
		return 0;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/* Driver call is made outside the lock */
	ret = cpufreq_driver->set_boost(state);
	if (ret) {
		/* Roll the flag back so it matches the actual hardware state */
		write_lock_irqsave(&cpufreq_driver_lock, flags);
		cpufreq_driver->boost_enabled = !state;
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		pr_err("%s: Cannot %s BOOST\n",
		       __func__, state ? "enable" : "disable");
	}

	return ret;
}
2469
2470int cpufreq_boost_supported(void)
2471{
2472 if (likely(cpufreq_driver))
2473 return cpufreq_driver->boost_supported;
2474
2475 return 0;
2476}
2477EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
2478
/* Snapshot of the global boost state; read without taking cpufreq_driver_lock. */
int cpufreq_boost_enabled(void)
{
	return cpufreq_driver->boost_enabled;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2484
2485/*********************************************************************
Linus Torvalds1da177e2005-04-16 15:20:36 -07002486 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2487 *********************************************************************/
2488
/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 *	submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 *
 */
Linus Torvalds221dee22007-02-26 14:55:48 -08002499int cpufreq_register_driver(struct cpufreq_driver *driver_data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002500{
2501 unsigned long flags;
2502 int ret;
2503
Konrad Rzeszutek Wilka7b422c2012-03-13 19:18:39 -04002504 if (cpufreq_disabled())
2505 return -ENODEV;
2506
Linus Torvalds1da177e2005-04-16 15:20:36 -07002507 if (!driver_data || !driver_data->verify || !driver_data->init ||
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05302508 !(driver_data->setpolicy || driver_data->target_index ||
Rafael J. Wysocki98322352014-03-19 12:48:30 +01002509 driver_data->target) ||
2510 (driver_data->setpolicy && (driver_data->target_index ||
Viresh Kumar1c03a2d2014-06-02 22:49:28 +05302511 driver_data->target)) ||
2512 (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002513 return -EINVAL;
2514
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02002515 pr_debug("trying to register driver %s\n", driver_data->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002516
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00002517 write_lock_irqsave(&cpufreq_driver_lock, flags);
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02002518 if (cpufreq_driver) {
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00002519 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
Yinghai Lu4dea58062013-09-18 21:05:20 -07002520 return -EEXIST;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002521 }
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02002522 cpufreq_driver = driver_data;
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00002523 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002524
Viresh Kumarbc68b7d2015-01-02 12:34:30 +05302525 if (driver_data->setpolicy)
2526 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2527
Lukasz Majewski6f19efc2013-12-20 15:24:49 +01002528 if (cpufreq_boost_supported()) {
2529 /*
2530 * Check if driver provides function to enable boost -
2531 * if not, use cpufreq_boost_set_sw as default
2532 */
2533 if (!cpufreq_driver->set_boost)
2534 cpufreq_driver->set_boost = cpufreq_boost_set_sw;
2535
2536 ret = cpufreq_sysfs_create_file(&boost.attr);
2537 if (ret) {
2538 pr_err("%s: cannot register global BOOST sysfs file\n",
Joe Perchese837f9b2014-03-11 10:03:00 -07002539 __func__);
Lukasz Majewski6f19efc2013-12-20 15:24:49 +01002540 goto err_null_driver;
2541 }
2542 }
2543
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002544 ret = subsys_interface_register(&cpufreq_interface);
Jiri Slaby8f5bc2a2011-03-01 17:41:10 +01002545 if (ret)
Lukasz Majewski6f19efc2013-12-20 15:24:49 +01002546 goto err_boost_unreg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002547
Viresh Kumarce1bcfe2015-01-02 12:34:35 +05302548 if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
2549 list_empty(&cpufreq_policy_list)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002550 /* if all ->init() calls failed, unregister */
Viresh Kumarce1bcfe2015-01-02 12:34:35 +05302551 pr_debug("%s: No CPU initialized for driver %s\n", __func__,
2552 driver_data->name);
2553 goto err_if_unreg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002554 }
2555
Jiri Slaby8f5bc2a2011-03-01 17:41:10 +01002556 register_hotcpu_notifier(&cpufreq_cpu_notifier);
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02002557 pr_debug("driver %s up and running\n", driver_data->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002558
Jiri Slaby8f5bc2a2011-03-01 17:41:10 +01002559 return 0;
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002560err_if_unreg:
2561 subsys_interface_unregister(&cpufreq_interface);
Lukasz Majewski6f19efc2013-12-20 15:24:49 +01002562err_boost_unreg:
2563 if (cpufreq_boost_supported())
2564 cpufreq_sysfs_remove_file(&boost.attr);
Jiri Slaby8f5bc2a2011-03-01 17:41:10 +01002565err_null_driver:
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00002566 write_lock_irqsave(&cpufreq_driver_lock, flags);
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02002567 cpufreq_driver = NULL;
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00002568 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
Dave Jones4d34a672008-02-07 16:33:49 -05002569 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002570}
2571EXPORT_SYMBOL_GPL(cpufreq_register_driver);
2572
Linus Torvalds1da177e2005-04-16 15:20:36 -07002573/**
2574 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2575 *
Viresh Kumarbb176f72013-06-19 14:19:33 +05302576 * Unregister the current CPUFreq driver. Only call this if you have
Linus Torvalds1da177e2005-04-16 15:20:36 -07002577 * the right to do so, i.e. if you have succeeded in initialising before!
2578 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2579 * currently not initialised.
2580 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	/* Only the currently-registered driver may unregister itself */
	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	subsys_interface_unregister(&cpufreq_interface);
	if (cpufreq_boost_supported())
		cpufreq_sysfs_remove_file(&boost.attr);

	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	/* Take cpufreq_rwsem before cpufreq_driver_lock, matching readers */
	down_write(&cpufreq_rwsem);
	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpufreq_driver = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	up_write(&cpufreq_rwsem);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08002607
/*
 * Stop cpufreq at shutdown to make sure it isn't holding any locks
 * or mutexes when secondary CPUs are halted.
 */
static struct syscore_ops cpufreq_syscore_ops = {
	.shutdown = cpufreq_suspend,	/* registered in cpufreq_core_init() */
};
2615
/*
 * Core initialization: create the global cpufreq kobject (sysfs anchor)
 * and hook the shutdown path.  Runs at core_initcall time, before any
 * driver can register.
 */
static int __init cpufreq_core_init(void)
{
	if (cpufreq_disabled())
		return -ENODEV;

	/* Global sysfs directory; later registration depends on it existing */
	cpufreq_global_kobject = kobject_create();
	BUG_ON(!cpufreq_global_kobject);

	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
core_initcall(cpufreq_core_init);