blob: 8b810071ddd2621c68c441a16b7a92856d624382 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
Viresh Kumarbb176f72013-06-19 14:19:33 +05306 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 *
Ashok Rajc32b6b82005-10-30 14:59:54 -08008 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
Dave Jones32ee8c32006-02-28 00:43:23 -05009 * Added handling for CPU hotplug
Dave Jones8ff69732006-03-05 03:37:23 -050010 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11 * Fix handling for CPU hotplug -- affected CPUs
Ashok Rajc32b6b82005-10-30 14:59:54 -080012 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070013 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
Linus Torvalds1da177e2005-04-16 15:20:36 -070016 */
17
Viresh Kumardb701152012-10-23 01:29:03 +020018#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
Viresh Kumar5ff0a262013-08-06 22:53:03 +053020#include <linux/cpu.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070021#include <linux/cpufreq.h>
22#include <linux/delay.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070023#include <linux/device.h>
Viresh Kumar5ff0a262013-08-06 22:53:03 +053024#include <linux/init.h>
25#include <linux/kernel_stat.h>
26#include <linux/module.h>
akpm@osdl.org3fc54d32006-01-13 15:54:22 -080027#include <linux/mutex.h>
Viresh Kumar5ff0a262013-08-06 22:53:03 +053028#include <linux/slab.h>
Viresh Kumar2f0aea92014-03-04 11:00:26 +080029#include <linux/suspend.h>
Doug Anderson90de2a42014-12-23 22:09:48 -080030#include <linux/syscore_ops.h>
Viresh Kumar5ff0a262013-08-06 22:53:03 +053031#include <linux/tick.h>
Thomas Renninger6f4f2722010-04-20 13:17:36 +020032#include <trace/events/power.h>
33
Viresh Kumarb4f06762015-01-27 14:06:08 +053034static LIST_HEAD(cpufreq_policy_list);
Viresh Kumarf9637352015-05-12 12:20:11 +053035
/* A policy is "inactive" when its cpus mask is empty (no CPU is using it). */
static inline bool policy_is_inactive(struct cpufreq_policy *policy)
{
	/* policy->cpus presumably tracks the online CPUs of this policy — empty
	 * means no CPU currently runs under it. */
	return cpumask_empty(policy->cpus);
}
40
41static bool suitable_policy(struct cpufreq_policy *policy, bool active)
42{
43 return active == !policy_is_inactive(policy);
44}
45
/* Finds Next Active/Inactive policy */
static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
					  bool active)
{
	/* Walk forward along cpufreq_policy_list from @policy. */
	do {
		policy = list_next_entry(policy, policy_list);

		/* No more policies in the list */
		if (&policy->policy_list == &cpufreq_policy_list)
			return NULL;
	} while (!suitable_policy(policy, active));

	return policy;
}
60
61static struct cpufreq_policy *first_policy(bool active)
62{
63 struct cpufreq_policy *policy;
64
65 /* No policies in the list */
66 if (list_empty(&cpufreq_policy_list))
67 return NULL;
68
69 policy = list_first_entry(&cpufreq_policy_list, typeof(*policy),
70 policy_list);
71
72 if (!suitable_policy(policy, active))
73 policy = next_policy(policy, active);
74
75 return policy;
76}
77
/* Macros to iterate over CPU policies */

/* Iterate over policies matching __active; terminates when next_policy()
 * returns NULL. */
#define for_each_suitable_policy(__policy, __active)	\
	for (__policy = first_policy(__active);		\
	     __policy;					\
	     __policy = next_policy(__policy, __active))

/* Iterate only over policies that have at least one CPU in policy->cpus. */
#define for_each_active_policy(__policy)		\
	for_each_suitable_policy(__policy, true)
/* Iterate only over policies whose cpus mask is empty. */
#define for_each_inactive_policy(__policy)		\
	for_each_suitable_policy(__policy, false)

/* Iterate over every registered policy, regardless of state. */
#define for_each_policy(__policy)			\
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)

/* Iterate over governors */
static LIST_HEAD(cpufreq_governor_list);
#define for_each_governor(__governor)				\
	list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
96
/*
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200102static struct cpufreq_driver *cpufreq_driver;
Mike Travis7a6aedf2008-03-25 15:06:53 -0700103static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
Viresh Kumarbb176f72013-06-19 14:19:33 +0530104static DEFINE_RWLOCK(cpufreq_driver_lock);
Jane Li6f1e4ef2014-01-03 17:17:41 +0800105DEFINE_MUTEX(cpufreq_governor_lock);
Viresh Kumarbb176f72013-06-19 14:19:33 +0530106
Viresh Kumar2f0aea92014-03-04 11:00:26 +0800107/* Flag to suspend/resume CPUFreq governors */
108static bool cpufreq_suspended;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700109
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +0530110static inline bool has_target(void)
111{
112 return cpufreq_driver->target_index || cpufreq_driver->target;
113}
114
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -0800115/*
Viresh Kumar6eed9402013-08-06 22:53:11 +0530116 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
117 * sections
118 */
119static DECLARE_RWSEM(cpufreq_rwsem);
120
Linus Torvalds1da177e2005-04-16 15:20:36 -0700121/* internal prototypes */
Dave Jones29464f22009-01-18 01:37:11 -0500122static int __cpufreq_governor(struct cpufreq_policy *policy,
123 unsigned int event);
Viresh Kumard92d50a2015-01-02 12:34:29 +0530124static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
David Howells65f27f32006-11-22 14:55:48 +0000125static void handle_update(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700126
127/**
Dave Jones32ee8c32006-02-28 00:43:23 -0500128 * Two notifier lists: the "policy" list is involved in the
129 * validation process for a new CPU frequency policy; the
Linus Torvalds1da177e2005-04-16 15:20:36 -0700130 * "transition" list for kernel code that needs to handle
131 * changes to devices when the CPU clock speed changes.
132 * The mutex locks both lists.
133 */
Alan Sterne041c682006-03-27 01:16:30 -0800134static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
Alan Sternb4dfdbb2006-10-04 02:17:06 -0700135static struct srcu_notifier_head cpufreq_transition_notifier_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700136
/* Set once the SRCU notifier head below has been initialized. */
static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
	/*
	 * SRCU notifier heads require runtime initialization, hence this
	 * pure_initcall so it runs before any registration can occur.
	 */
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700145
/* Non-zero once cpufreq has been disabled; checked via cpufreq_disabled(). */
static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
void disable_cpufreq(void)
{
	/* One-way switch: nothing in this file ever resets it. */
	off = 1;
}
Dave Jones29464f22009-01-18 01:37:11 -0500155static DEFINE_MUTEX(cpufreq_governor_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700156
Viresh Kumar4d5dcc42013-03-27 15:58:58 +0000157bool have_governor_per_policy(void)
158{
Viresh Kumar0b981e72013-10-02 14:13:18 +0530159 return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
Viresh Kumar4d5dcc42013-03-27 15:58:58 +0000160}
Viresh Kumar3f869d62013-05-16 05:09:56 +0000161EXPORT_SYMBOL_GPL(have_governor_per_policy);
Viresh Kumar4d5dcc42013-03-27 15:58:58 +0000162
Viresh Kumar944e9a02013-05-16 05:09:57 +0000163struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
164{
165 if (have_governor_per_policy())
166 return &policy->kobj;
167 else
168 return cpufreq_global_kobject;
169}
170EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
171
/*
 * Compute @cpu's idle time from jiffies-based cpustat accounting:
 * idle = wall - (user + system + irq + softirq + steal + nice).
 * If @wall is non-NULL, the current wall time (usecs) is stored there.
 * Returns the idle time in usecs.
 */
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	/* Sum all the "busy" buckets kcpustat tracks for this CPU. */
	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	/* Whatever wasn't busy counts as idle. */
	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = cputime_to_usecs(cur_wall_time);

	return cputime_to_usecs(idle_time);
}
193
/*
 * Return @cpu's idle time in usecs, preferring the nohz bookkeeping and
 * falling back to jiffies-based accounting when it is unavailable
 * (get_cpu_idle_time_us() returns -1ULL). When @io_busy is zero, iowait
 * time is counted as idle as well.
 */
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

	if (idle_time == -1ULL)
		/* nohz accounting unavailable; use cpustat-based fallback */
		return get_cpu_idle_time_jiffy(cpu, wall);
	else if (!io_busy)
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
206
/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It will do following:
 * - validate & show freq table passed
 * - set policies transition latency
 * - policy->cpus with all possible CPUs
 */
int cpufreq_generic_init(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table,
		unsigned int transition_latency)
{
	int ret;

	ret = cpufreq_table_validate_and_show(policy, table);
	if (ret) {
		pr_err("%s: invalid frequency table: %d\n", __func__, ret);
		return ret;
	}

	policy->cpuinfo.transition_latency = transition_latency;

	/*
	 * The driver only supports the SMP configuration where all processors
	 * share the clock and voltage.
	 */
	cpumask_setall(policy->cpus);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
237
/* Only for cpufreq core internal use */
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	/* Only hand out the policy while @cpu is actually part of it;
	 * no reference is taken — callers wanting one use cpufreq_cpu_get(). */
	return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
}
245
/*
 * Generic ->get() helper for drivers whose current frequency is simply
 * the policy's clk rate. Returns the rate in kHz, or 0 when no policy
 * or clk is associated with @cpu (an error is logged in that case).
 */
unsigned int cpufreq_generic_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);

	if (!policy || IS_ERR(policy->clk)) {
		pr_err("%s: No %s associated to cpu: %d\n",
		       __func__, policy ? "clk" : "policy", cpu);
		return 0;
	}

	/* clk_get_rate() reports Hz; cpufreq works in kHz. */
	return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);
259
/**
 * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
 *
 * @cpu: cpu to find policy for.
 *
 * This returns policy for 'cpu', returns NULL if it doesn't exist.
 * It also increments the kobject reference count to mark it busy and so would
 * require a corresponding call to cpufreq_cpu_put() to decrement it back.
 * If corresponding call cpufreq_cpu_put() isn't made, the policy wouldn't be
 * freed as that depends on the kobj count.
 *
 * It also takes a read-lock of 'cpufreq_rwsem' and doesn't put it back if a
 * valid policy is found. This is done to make sure the driver doesn't get
 * unregistered while the policy is being used.
 *
 * Return: A valid policy on success, otherwise NULL on failure.
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = NULL;
	unsigned long flags;

	if (WARN_ON(cpu >= nr_cpu_ids))
		return NULL;

	/* Fail instead of blocking if the driver is being unregistered. */
	if (!down_read_trylock(&cpufreq_rwsem))
		return NULL;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver) {
		/* get the CPU */
		policy = cpufreq_cpu_get_raw(cpu);
		if (policy)
			kobject_get(&policy->kobj);
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/*
	 * On failure, release the rwsem taken above; on success the
	 * caller drops it via cpufreq_cpu_put().
	 */
	if (!policy)
		up_read(&cpufreq_rwsem);

	return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
306
/**
 * cpufreq_cpu_put: Decrements the usage count of a policy
 *
 * @policy: policy earlier returned by cpufreq_cpu_get().
 *
 * This decrements the kobject reference count incremented earlier by calling
 * cpufreq_cpu_get().
 *
 * It also drops the read-lock of 'cpufreq_rwsem' taken at cpufreq_cpu_get().
 */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
	kobject_put(&policy->kobj);
	up_read(&cpufreq_rwsem);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
323
Linus Torvalds1da177e2005-04-16 15:20:36 -0700324/*********************************************************************
Linus Torvalds1da177e2005-04-16 15:20:36 -0700325 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
326 *********************************************************************/
327
/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
#ifndef CONFIG_SMP
	/* Reference lpj and the frequency it was measured at, captured on
	 * the first transition seen. */
	static unsigned long l_p_j_ref;
	static unsigned int l_p_j_ref_freq;

	/* Drivers with constant loops need no adjustment. */
	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
			 l_p_j_ref, l_p_j_ref_freq);
	}
	/* Rescale lpj from the reference once the new frequency is in effect. */
	if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
			 loops_per_jiffy, ci->new);
	}
#endif
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700359
/*
 * Notify listeners on cpufreq_transition_notifier_list of one PRECHANGE
 * or POSTCHANGE step for a single CPU (freqs->cpu), and keep
 * loops_per_jiffy and policy->cur consistent with the change.
 */
static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		 state, freqs->new);

	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
					 freqs->old, policy->cur);
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %lu - CPU: %lu\n",
			 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		/* Keep the core's idea of the current frequency in sync. */
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}
Viresh Kumarbb176f72013-06-19 14:19:33 +0530404
/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	/* Notify once per CPU covered by the policy; freqs->cpu is reused
	 * as the iteration variable. */
	for_each_cpu(freqs->cpu, policy->cpus)
		__cpufreq_notify_transition(policy, freqs, state);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700419
/* Do post notifications when there are chances that transition has failed */
static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
	if (!transition_failed)
		return;

	/*
	 * The change didn't stick: announce a reverse transition back to
	 * the old frequency so listeners end up with a consistent view.
	 */
	swap(freqs->old, freqs->new);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
}
Viresh Kumarf7ba3b42013-12-02 11:04:12 +0530432
/*
 * cpufreq_freq_transition_begin - start a serialized frequency transition
 *
 * Waits until no other transition is ongoing on @policy, claims the
 * transition slot and sends the PRECHANGE notifications. Must be paired
 * with cpufreq_freq_transition_end().
 */
void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs)
{

	/*
	 * Catch double invocations of _begin() which lead to self-deadlock.
	 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
	 * doesn't invoke _begin() on their behalf, and hence the chances of
	 * double invocations are very low. Moreover, there are scenarios
	 * where these checks can emit false-positive warnings in these
	 * drivers; so we avoid that by skipping them altogether.
	 */
	WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
				&& current == policy->transition_task);

wait:
	wait_event(policy->transition_wait, !policy->transition_ongoing);

	spin_lock(&policy->transition_lock);

	/* Re-check under the lock; another task may have won the race
	 * between wait_event() and here. */
	if (unlikely(policy->transition_ongoing)) {
		spin_unlock(&policy->transition_lock);
		goto wait;
	}

	policy->transition_ongoing = true;
	policy->transition_task = current;

	spin_unlock(&policy->transition_lock);

	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
466
/*
 * cpufreq_freq_transition_end - finish a transition started by
 * cpufreq_freq_transition_begin().
 *
 * Sends the POSTCHANGE notifications (reverting first when
 * @transition_failed), releases the transition slot and wakes waiters.
 */
void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	/* _end() without a matching _begin() is a caller bug. */
	if (unlikely(WARN_ON(!policy->transition_ongoing)))
		return;

	cpufreq_notify_post_transition(policy, freqs, transition_failed);

	policy->transition_ongoing = false;
	policy->transition_task = NULL;

	wake_up(&policy->transition_wait);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
481
Linus Torvalds1da177e2005-04-16 15:20:36 -0700482
Linus Torvalds1da177e2005-04-16 15:20:36 -0700483/*********************************************************************
484 * SYSFS INTERFACE *
485 *********************************************************************/
/* sysfs read: report the global frequency-boost state as 0/1. */
static ssize_t show_boost(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}
491
/* sysfs write: globally enable (1) or disable (0) frequency boost. */
static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	int ret, enable;

	/* Only "0" or "1" are accepted. */
	ret = sscanf(buf, "%d", &enable);
	if (ret != 1 || enable < 0 || enable > 1)
		return -EINVAL;

	if (cpufreq_boost_trigger_state(enable)) {
		pr_err("%s: Cannot %s BOOST!\n",
		       __func__, enable ? "enable" : "disable");
		return -EINVAL;
	}

	pr_debug("%s: cpufreq BOOST %s\n",
		 __func__, enable ? "enabled" : "disabled");

	return count;
}
define_one_global_rw(boost);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700513
Viresh Kumar42f91fa2015-01-02 12:34:26 +0530514static struct cpufreq_governor *find_governor(const char *str_governor)
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700515{
516 struct cpufreq_governor *t;
517
Viresh Kumarf7b27062015-01-27 14:06:09 +0530518 for_each_governor(t)
Rasmus Villemoes7c4f4532014-09-29 15:50:11 +0200519 if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700520 return t;
521
522 return NULL;
523}
524
/**
 * cpufreq_parse_governor - parse a governor string
 *
 * For setpolicy drivers, "performance"/"powersave" are mapped onto
 * *@policy. For other drivers the governor is looked up by name,
 * attempting to autoload module "cpufreq_<name>" when not yet
 * registered. Returns 0 on success, -EINVAL otherwise.
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (!cpufreq_driver)
		goto out;

	if (cpufreq_driver->setpolicy) {
		if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strncasecmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = find_governor(str_governor);

		if (t == NULL) {
			int ret;

			/*
			 * The mutex is dropped across request_module() —
			 * presumably so the loaded module can register
			 * itself, which needs this mutex; confirm against
			 * cpufreq_register_governor().
			 */
			mutex_unlock(&cpufreq_governor_mutex);
			ret = request_module("cpufreq_%s", str_governor);
			mutex_lock(&cpufreq_governor_mutex);

			if (ret == 0)
				t = find_governor(str_governor);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700573
/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

/* Generates a sysfs show routine printing one unsigned policy field. */
#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
Dirk Brandewiec034b022014-10-13 08:37:40 -0700594
Viresh Kumar09347b22015-01-02 12:34:24 +0530595static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
Dirk Brandewiec034b022014-10-13 08:37:40 -0700596{
597 ssize_t ret;
598
599 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
600 ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
601 else
602 ret = sprintf(buf, "%u\n", policy->cur);
603 return ret;
604}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700605
Viresh Kumar037ce832013-10-02 14:13:16 +0530606static int cpufreq_set_policy(struct cpufreq_policy *policy,
Viresh Kumar3a3e9e02013-08-06 22:53:05 +0530607 struct cpufreq_policy *new_policy);
Thomas Renninger7970e082006-04-13 15:14:04 +0200608
/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 *
 * Generates a store routine that parses one unsigned value, applies it
 * through cpufreq_set_policy() and, on success, records it as the
 * user-requested value in policy->user_policy.
 */
#define store_one(file_name, object)			\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	int ret, temp;							\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	temp = new_policy.object;					\
	ret = cpufreq_set_policy(policy, &new_policy);		\
	if (!ret)							\
		policy->user_policy.object = temp;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700637
638/**
639 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
640 */
Dave Jones905d77c2008-03-05 14:28:32 -0500641static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
642 char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700643{
Viresh Kumard92d50a2015-01-02 12:34:29 +0530644 unsigned int cur_freq = __cpufreq_get(policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700645 if (!cur_freq)
646 return sprintf(buf, "<unknown>");
647 return sprintf(buf, "%u\n", cur_freq);
648}
649
/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	/* setpolicy drivers expose the fixed policy; others the governor. */
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
				policy->governor->name);
	return -EINVAL;
}
664
/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	int ret;
	char str_governor[16];
	struct cpufreq_policy new_policy;

	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return ret;

	/* %15s keeps the input within str_governor[16]. */
	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	ret = cpufreq_set_policy(policy, &new_policy);

	/* Record whatever is now in effect as the user-requested setting —
	 * note this happens even when cpufreq_set_policy() failed. */
	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret)
		return ret;
	else
		return count;
}
697
698/**
699 * show_scaling_driver - show the cpufreq driver currently loaded
700 */
Dave Jones905d77c2008-03-05 14:28:32 -0500701static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700702{
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200703 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700704}
705
706/**
707 * show_scaling_available_governors - show the available CPUfreq governors
708 */
Dave Jones905d77c2008-03-05 14:28:32 -0500709static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
710 char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700711{
712 ssize_t i = 0;
713 struct cpufreq_governor *t;
714
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +0530715 if (!has_target()) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700716 i += sprintf(buf, "performance powersave");
717 goto out;
718 }
719
Viresh Kumarf7b27062015-01-27 14:06:09 +0530720 for_each_governor(t) {
Dave Jones29464f22009-01-18 01:37:11 -0500721 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
722 - (CPUFREQ_NAME_LEN + 2)))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700723 goto out;
viresh kumar4b972f02012-10-23 01:23:43 +0200724 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700725 }
Dave Jones7d5e3502006-02-02 17:03:42 -0500726out:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700727 i += sprintf(&buf[i], "\n");
728 return i;
729}
Darrick J. Wonge8628dd2008-04-18 13:31:12 -0700730
Lan Tianyuf4fd3792013-06-27 15:08:54 +0800731ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700732{
733 ssize_t i = 0;
734 unsigned int cpu;
735
Rusty Russell835481d2009-01-04 05:18:06 -0800736 for_each_cpu(cpu, mask) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700737 if (i)
738 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
739 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
740 if (i >= (PAGE_SIZE - 5))
Dave Jones29464f22009-01-18 01:37:11 -0500741 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700742 }
743 i += sprintf(&buf[i], "\n");
744 return i;
745}
Lan Tianyuf4fd3792013-06-27 15:08:54 +0800746EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700747
/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->related_cpus, buf);
}
756
/**
 * show_affected_cpus - show the CPUs affected by each transition
 *
 * Unlike related_cpus, this reflects policy->cpus only.
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->cpus, buf);
}
764
Venki Pallipadi9e769882007-10-26 10:18:21 -0700765static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
Dave Jones905d77c2008-03-05 14:28:32 -0500766 const char *buf, size_t count)
Venki Pallipadi9e769882007-10-26 10:18:21 -0700767{
768 unsigned int freq = 0;
769 unsigned int ret;
770
CHIKAMA masaki879000f2008-06-05 22:46:33 -0700771 if (!policy->governor || !policy->governor->store_setspeed)
Venki Pallipadi9e769882007-10-26 10:18:21 -0700772 return -EINVAL;
773
774 ret = sscanf(buf, "%u", &freq);
775 if (ret != 1)
776 return -EINVAL;
777
778 policy->governor->store_setspeed(policy, freq);
779
780 return count;
781}
782
783static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
784{
CHIKAMA masaki879000f2008-06-05 22:46:33 -0700785 if (!policy->governor || !policy->governor->show_setspeed)
Venki Pallipadi9e769882007-10-26 10:18:21 -0700786 return sprintf(buf, "<unsupported>\n");
787
788 return policy->governor->show_setspeed(policy, buf);
789}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700790
Thomas Renningere2f74f32009-11-19 12:31:01 +0100791/**
viresh kumar8bf1ac722012-10-23 01:23:33 +0200792 * show_bios_limit - show the current cpufreq HW/BIOS limitation
Thomas Renningere2f74f32009-11-19 12:31:01 +0100793 */
794static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
795{
796 unsigned int limit;
797 int ret;
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200798 if (cpufreq_driver->bios_limit) {
799 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
Thomas Renningere2f74f32009-11-19 12:31:01 +0100800 if (!ret)
801 return sprintf(buf, "%u\n", limit);
802 }
803 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
804}
805
/* sysfs attribute objects; the macros expand to struct freq_attr instances. */
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);	/* root-readable only */
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700820
/*
 * Attributes created for every policy kobject.  cpuinfo_cur_freq,
 * scaling_cur_freq and bios_limit are added conditionally in
 * cpufreq_add_dev_interface() instead, depending on driver capabilities.
 */
static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};
835
/* Map an embedded kobject/attribute back to its containing object. */
#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700838
/*
 * sysfs ->show dispatcher for policy attributes.
 *
 * Lock order: cpufreq_rwsem (read, trylock) then policy->rwsem (read).
 * The trylock avoids blocking against a driver unregistration that holds
 * cpufreq_rwsem for writing.
 */
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret;

	if (!down_read_trylock(&cpufreq_rwsem))
		return -EINVAL;

	down_read(&policy->rwsem);

	/* Attributes without a show callback are write-only: -EIO. */
	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	up_read(&policy->rwsem);
	up_read(&cpufreq_rwsem);

	return ret;
}
860
/*
 * sysfs ->store dispatcher for policy attributes.
 *
 * Lock order: get_online_cpus() -> cpufreq_rwsem (read, trylock) ->
 * policy->rwsem (write).  CPU hotplug is held off so policy->cpu cannot
 * go away while the attribute handler runs.
 */
static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;

	get_online_cpus();

	/* Writes against an offlined policy CPU are rejected. */
	if (!cpu_online(policy->cpu))
		goto unlock;

	if (!down_read_trylock(&cpufreq_rwsem))
		goto unlock;

	down_write(&policy->rwsem);

	/* Updating inactive policies is invalid, so avoid doing that. */
	if (unlikely(policy_is_inactive(policy))) {
		ret = -EBUSY;
		goto unlock_policy_rwsem;
	}

	/* Attributes without a store callback are read-only: -EIO. */
	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

unlock_policy_rwsem:
	up_write(&policy->rwsem);

	up_read(&cpufreq_rwsem);
unlock:
	put_online_cpus();

	return ret;
}
898
/*
 * kobject release callback: signal cpufreq_policy_put_kobj(), which waits
 * on kobj_unregister before the policy memory is freed.
 */
static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}
905
/* show()/store() dispatch table for every policy attribute. */
static const struct sysfs_ops sysfs_ops = {
	.show = show,
	.store = store,
};

/* kobject type for the per-policy "cpufreq" sysfs directory. */
static struct kobj_type ktype_cpufreq = {
	.sysfs_ops = &sysfs_ops,
	.default_attrs = default_attrs,
	.release = cpufreq_sysfs_release,
};
916
/* Shared /sys/devices/system/cpu/cpufreq kobject (added on first use). */
struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

/* Reference count for the global kobject; guarded by callers' contexts. */
static int cpufreq_global_kobject_usage;
921
922int cpufreq_get_global_kobject(void)
923{
924 if (!cpufreq_global_kobject_usage++)
925 return kobject_add(cpufreq_global_kobject,
926 &cpu_subsys.dev_root->kobj, "%s", "cpufreq");
927
928 return 0;
929}
930EXPORT_SYMBOL(cpufreq_get_global_kobject);
931
932void cpufreq_put_global_kobject(void)
933{
934 if (!--cpufreq_global_kobject_usage)
935 kobject_del(cpufreq_global_kobject);
936}
937EXPORT_SYMBOL(cpufreq_put_global_kobject);
938
939int cpufreq_sysfs_create_file(const struct attribute *attr)
940{
941 int ret = cpufreq_get_global_kobject();
942
943 if (!ret) {
944 ret = sysfs_create_file(cpufreq_global_kobject, attr);
945 if (ret)
946 cpufreq_put_global_kobject();
947 }
948
949 return ret;
950}
951EXPORT_SYMBOL(cpufreq_sysfs_create_file);
952
/* Remove @attr from the global kobject and drop the matching reference. */
void cpufreq_sysfs_remove_file(const struct attribute *attr)
{
	sysfs_remove_file(cpufreq_global_kobject, attr);
	cpufreq_put_global_kobject();
}
EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
959
/*
 * Create the per-CPU "cpufreq" symlink pointing at the policy kobject.
 * A NULL @policy or a missing CPU device is treated as success (nothing
 * to link).
 */
static int add_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
{
	struct device *cpu_dev;

	pr_debug("%s: Adding symlink for CPU: %u\n", __func__, cpu);

	if (!policy)
		return 0;

	cpu_dev = get_cpu_device(cpu);
	if (WARN_ON(!cpu_dev))
		return 0;

	return sysfs_create_link(&cpu_dev->kobj, &policy->kobj, "cpufreq");
}
975
/* Remove the per-CPU "cpufreq" symlink created by add_cpu_dev_symlink(). */
static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
{
	struct device *cpu_dev;

	pr_debug("%s: Removing symlink for CPU: %u\n", __func__, cpu);

	cpu_dev = get_cpu_device(cpu);
	if (WARN_ON(!cpu_dev))
		return;

	sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
}
988
989/* Add/remove symlinks for all related CPUs */
Viresh Kumar308b60e2013-07-31 14:35:14 +0200990static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
Dave Jones19d6f7e2009-07-08 17:35:39 -0400991{
992 unsigned int j;
993 int ret = 0;
994
Viresh Kumar87549142015-06-10 02:13:21 +0200995 /* Some related CPUs might not be present (physically hotplugged) */
996 for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) {
Saravana Kannan9d16f202015-05-18 10:43:31 +0530997 if (j == policy->kobj_cpu)
Dave Jones19d6f7e2009-07-08 17:35:39 -0400998 continue;
Dave Jones19d6f7e2009-07-08 17:35:39 -0400999
Viresh Kumar87549142015-06-10 02:13:21 +02001000 ret = add_cpu_dev_symlink(policy, j);
Rafael J. Wysocki71c34612013-08-04 01:19:34 +02001001 if (ret)
1002 break;
Dave Jones19d6f7e2009-07-08 17:35:39 -04001003 }
Viresh Kumar87549142015-06-10 02:13:21 +02001004
Dave Jones19d6f7e2009-07-08 17:35:39 -04001005 return ret;
1006}
1007
Viresh Kumar87549142015-06-10 02:13:21 +02001008static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
1009{
1010 unsigned int j;
1011
1012 /* Some related CPUs might not be present (physically hotplugged) */
1013 for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) {
1014 if (j == policy->kobj_cpu)
1015 continue;
1016
1017 remove_cpu_dev_symlink(policy, j);
1018 }
1019}
1020
/*
 * Populate the policy kobject with driver attributes, the conditional
 * core attributes (cpuinfo_cur_freq, scaling_cur_freq, bios_limit) and
 * the per-CPU symlinks.  Returns 0 or the first sysfs error.
 * NOTE(review): @dev is currently unused here — kept for the caller's
 * signature; confirm before removing.
 */
static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
				     struct device *dev)
{
	struct freq_attr **drv_attr;
	int ret = 0;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while (drv_attr && *drv_attr) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			return ret;
		drv_attr++;
	}
	/* cpuinfo_cur_freq only makes sense if the driver can read HW state */
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			return ret;
	}

	ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
	if (ret)
		return ret;

	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			return ret;
	}

	return cpufreq_add_dev_symlink(policy);
}
1053
/*
 * Apply the initial policy: restore the governor used before hotplug if
 * it is still registered, otherwise fall back to the default governor.
 * On failure the driver's ->exit() is invoked to undo ->init().
 */
static void cpufreq_init_policy(struct cpufreq_policy *policy)
{
	struct cpufreq_governor *gov = NULL;
	struct cpufreq_policy new_policy;
	int ret = 0;

	memcpy(&new_policy, policy, sizeof(*policy));

	/* Update governor of new_policy to the governor used before hotplug */
	gov = find_governor(policy->last_governor);
	if (gov)
		pr_debug("Restoring governor %s for cpu %d\n",
				policy->governor->name, policy->cpu);
	else
		gov = CPUFREQ_DEFAULT_GOVERNOR;

	new_policy.governor = gov;

	/* Use the default policy if its valid. */
	if (cpufreq_driver->setpolicy)
		cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);

	/* set default policy */
	ret = cpufreq_set_policy(policy, &new_policy);
	if (ret) {
		pr_debug("setting policy failed\n");
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);
	}
}
1084
/*
 * Attach @cpu to an existing @policy: stop the governor, add the CPU to
 * policy->cpus under the policy rwsem, then restart the governor and
 * refresh its limits.  Returns 0 if the CPU was already managed.
 */
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
			unsigned int cpu, struct device *dev)
{
	int ret = 0;

	/* Has this CPU been taken care of already? */
	if (cpumask_test_cpu(cpu, policy->cpus))
		return 0;

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			return ret;
		}
	}

	down_write(&policy->rwsem);
	cpumask_set_cpu(cpu, policy->cpus);
	up_write(&policy->rwsem);

	if (has_target()) {
		/* START must be followed by LIMITS to re-evaluate the freq */
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
		if (!ret)
			ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

		if (ret) {
			pr_err("%s: Failed to start governor\n", __func__);
			return ret;
		}
	}

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001119
/*
 * Look up the saved (inactive) policy for @cpu, if any, so that a
 * light-weight re-initialization can reuse it instead of allocating.
 */
static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned long flags;

	read_lock_irqsave(&cpufreq_driver_lock, flags);
	policy = per_cpu(cpufreq_cpu_data, cpu);
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (likely(policy)) {
		/* Policy should be inactive here */
		WARN_ON(!policy_is_inactive(policy));
	}

	return policy;
}
1136
/*
 * Allocate and minimally initialize a policy for @dev's CPU: cpumasks,
 * the sysfs kobject (under @dev), locks, completion and update work.
 * Returns NULL on any allocation/registration failure, with everything
 * acquired so far released via the error labels.
 */
static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
{
	struct cpufreq_policy *policy;
	int ret;

	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
	if (!policy)
		return NULL;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &dev->kobj,
				   "cpufreq");
	if (ret) {
		pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
		goto err_free_rcpumask;
	}

	INIT_LIST_HEAD(&policy->policy_list);
	init_rwsem(&policy->rwsem);
	spin_lock_init(&policy->transition_lock);
	init_waitqueue_head(&policy->transition_wait);
	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	policy->cpu = dev->id;

	/* Set this once on allocation */
	policy->kobj_cpu = dev->id;

	return policy;

err_free_rcpumask:
	free_cpumask_var(policy->related_cpus);
err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);

	return NULL;
}
1182
/*
 * Tear down the policy's sysfs presence: optionally notify listeners,
 * remove the per-CPU symlinks, drop the kobject reference, and block
 * until cpufreq_sysfs_release() completes kobj_unregister.
 */
static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify)
{
	struct kobject *kobj;
	struct completion *cmp;

	if (notify)
		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
					     CPUFREQ_REMOVE_POLICY, policy);

	/* Snapshot kobj/cmp under the rwsem before dropping the reference. */
	down_write(&policy->rwsem);
	cpufreq_remove_dev_symlink(policy);
	kobj = &policy->kobj;
	cmp = &policy->kobj_unregister;
	up_write(&policy->rwsem);
	kobject_put(kobj);

	/*
	 * We need to make sure that the underlying kobj is
	 * actually not referenced anymore by anybody before we
	 * proceed with unloading.
	 */
	pr_debug("waiting for dropping of refcount\n");
	wait_for_completion(cmp);
	pr_debug("wait complete\n");
}
1208
/*
 * Fully release a policy: unlink it from cpufreq_policy_list and the
 * per-CPU pointers under the driver lock, tear down its kobject (which
 * waits for the last reference), then free the cpumasks and the policy.
 */
static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
{
	unsigned long flags;
	int cpu;

	/* Remove policy from list */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	list_del(&policy->policy_list);

	for_each_cpu(cpu, policy->related_cpus)
		per_cpu(cpufreq_cpu_data, cpu) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpufreq_policy_put_kobj(policy, notify);
	free_cpumask_var(policy->related_cpus);
	free_cpumask_var(policy->cpus);
	kfree(policy);
}
1227
/*
 * Re-home an existing policy onto @cpu (used when restoring a saved
 * policy on a different CPU).  A no-op — with a warning — if @cpu is
 * already the policy CPU.
 */
static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	if (WARN_ON(cpu == policy->cpu))
		return;

	down_write(&policy->rwsem);
	policy->cpu = cpu;
	up_write(&policy->rwsem);
}
1237
Viresh Kumar23faf0b2015-02-19 17:02:04 +05301238/**
1239 * cpufreq_add_dev - add a CPU device
1240 *
1241 * Adds the cpufreq interface for a CPU device.
1242 *
1243 * The Oracle says: try running cpufreq registration/unregistration concurrently
1244 * with with cpu hotplugging and all hell will break loose. Tried to clean this
1245 * mess up, but more thorough testing is needed. - Mathieu
1246 */
1247static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001248{
Viresh Kumarfcf80582013-01-29 14:39:08 +00001249 unsigned int j, cpu = dev->id;
Viresh Kumar65922462013-02-07 10:56:03 +05301250 int ret = -ENOMEM;
Viresh Kumar7f0c0202015-01-02 12:34:32 +05301251 struct cpufreq_policy *policy;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001252 unsigned long flags;
Viresh Kumar87549142015-06-10 02:13:21 +02001253 bool recover_policy = !sif;
Ashok Rajc32b6b82005-10-30 14:59:54 -08001254
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02001255 pr_debug("adding CPU %u\n", cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001256
Viresh Kumar87549142015-06-10 02:13:21 +02001257 /*
1258 * Only possible if 'cpu' wasn't physically present earlier and we are
1259 * here from subsys_interface add callback. A hotplug notifier will
1260 * follow and we will handle it like logical CPU hotplug then. For now,
1261 * just create the sysfs link.
1262 */
1263 if (cpu_is_offline(cpu))
1264 return add_cpu_dev_symlink(per_cpu(cpufreq_cpu_data, cpu), cpu);
1265
Viresh Kumar6eed9402013-08-06 22:53:11 +05301266 if (!down_read_trylock(&cpufreq_rwsem))
1267 return 0;
1268
Viresh Kumarbb29ae12015-02-19 17:02:06 +05301269 /* Check if this CPU already has a policy to manage it */
Viresh Kumar9104bb22015-05-12 12:22:12 +05301270 policy = per_cpu(cpufreq_cpu_data, cpu);
1271 if (policy && !policy_is_inactive(policy)) {
1272 WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
1273 ret = cpufreq_add_policy_cpu(policy, cpu, dev);
1274 up_read(&cpufreq_rwsem);
1275 return ret;
Viresh Kumarfcf80582013-01-29 14:39:08 +00001276 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001277
Rafael J. Wysocki72368d12013-12-27 01:07:11 +01001278 /*
1279 * Restore the saved policy when doing light-weight init and fall back
1280 * to the full init if that fails.
1281 */
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301282 policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
Rafael J. Wysocki72368d12013-12-27 01:07:11 +01001283 if (!policy) {
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301284 recover_policy = false;
Viresh Kumar2fc33842015-06-08 18:25:29 +05301285 policy = cpufreq_policy_alloc(dev);
Rafael J. Wysocki72368d12013-12-27 01:07:11 +01001286 if (!policy)
1287 goto nomem_out;
1288 }
Srivatsa S. Bhat0d66b912013-09-12 01:42:59 +05301289
1290 /*
1291 * In the resume path, since we restore a saved policy, the assignment
1292 * to policy->cpu is like an update of the existing policy, rather than
1293 * the creation of a brand new one. So we need to perform this update
1294 * by invoking update_policy_cpu().
1295 */
Viresh Kumar87549142015-06-10 02:13:21 +02001296 if (recover_policy && cpu != policy->cpu)
1297 update_policy_cpu(policy, cpu);
Srivatsa S. Bhat0d66b912013-09-12 01:42:59 +05301298
Rusty Russell835481d2009-01-04 05:18:06 -08001299 cpumask_copy(policy->cpus, cpumask_of(cpu));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001300
Linus Torvalds1da177e2005-04-16 15:20:36 -07001301 /* call driver. From then on the cpufreq must be able
1302 * to accept all calls to ->verify and ->setpolicy for this CPU
1303 */
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001304 ret = cpufreq_driver->init(policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001305 if (ret) {
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02001306 pr_debug("initialization failed\n");
Viresh Kumar2eaa3e22013-02-07 10:55:00 +05301307 goto err_set_policy_cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001308 }
Viresh Kumar643ae6e2013-01-12 05:14:38 +00001309
Tomeu Vizoso6d4e81e2014-11-24 10:08:03 +01001310 down_write(&policy->rwsem);
1311
Viresh Kumar5a7e56a2014-03-04 11:44:00 +08001312 /* related cpus should atleast have policy->cpus */
1313 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1314
1315 /*
1316 * affected cpus must always be the one, which are online. We aren't
1317 * managing offline cpus here.
1318 */
1319 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1320
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301321 if (!recover_policy) {
Viresh Kumar5a7e56a2014-03-04 11:44:00 +08001322 policy->user_policy.min = policy->min;
1323 policy->user_policy.max = policy->max;
Tomeu Vizoso6d4e81e2014-11-24 10:08:03 +01001324
Viresh Kumar988bed02015-05-08 11:53:45 +05301325 write_lock_irqsave(&cpufreq_driver_lock, flags);
1326 for_each_cpu(j, policy->related_cpus)
1327 per_cpu(cpufreq_cpu_data, j) = policy;
1328 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1329 }
Viresh Kumar652ed952014-01-09 20:38:43 +05301330
Rafael J. Wysocki2ed99e32014-03-12 21:49:33 +01001331 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
Viresh Kumarda60ce92013-10-03 20:28:30 +05301332 policy->cur = cpufreq_driver->get(policy->cpu);
1333 if (!policy->cur) {
1334 pr_err("%s: ->get() failed\n", __func__);
1335 goto err_get_freq;
1336 }
1337 }
1338
Viresh Kumard3916692013-12-03 11:20:46 +05301339 /*
1340 * Sometimes boot loaders set CPU frequency to a value outside of
1341 * frequency table present with cpufreq core. In such cases CPU might be
1342 * unstable if it has to run on that frequency for long duration of time
1343 * and so its better to set it to a frequency which is specified in
1344 * freq-table. This also makes cpufreq stats inconsistent as
1345 * cpufreq-stats would fail to register because current frequency of CPU
1346 * isn't found in freq-table.
1347 *
1348 * Because we don't want this change to effect boot process badly, we go
1349 * for the next freq which is >= policy->cur ('cur' must be set by now,
1350 * otherwise we will end up setting freq to lowest of the table as 'cur'
1351 * is initialized to zero).
1352 *
1353 * We are passing target-freq as "policy->cur - 1" otherwise
1354 * __cpufreq_driver_target() would simply fail, as policy->cur will be
1355 * equal to target-freq.
1356 */
1357 if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1358 && has_target()) {
1359 /* Are we running at unknown frequency ? */
1360 ret = cpufreq_frequency_table_get_index(policy, policy->cur);
1361 if (ret == -EINVAL) {
1362 /* Warn user and fix it */
1363 pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
1364 __func__, policy->cpu, policy->cur);
1365 ret = __cpufreq_driver_target(policy, policy->cur - 1,
1366 CPUFREQ_RELATION_L);
1367
1368 /*
1369 * Reaching here after boot in a few seconds may not
1370 * mean that system will remain stable at "unknown"
1371 * frequency for longer duration. Hence, a BUG_ON().
1372 */
1373 BUG_ON(ret);
1374 pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
1375 __func__, policy->cpu, policy->cur);
1376 }
1377 }
1378
Thomas Renningera1531ac2008-07-29 22:32:58 -07001379 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1380 CPUFREQ_START, policy);
1381
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301382 if (!recover_policy) {
Viresh Kumar308b60e2013-07-31 14:35:14 +02001383 ret = cpufreq_add_dev_interface(policy, dev);
Srivatsa S. Bhata82fab22013-07-30 04:24:49 +05301384 if (ret)
1385 goto err_out_unregister;
Viresh Kumarfcd7af92014-01-07 07:10:10 +05301386 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1387 CPUFREQ_CREATE_POLICY, policy);
Dave Jones8ff69732006-03-05 03:37:23 -05001388
Viresh Kumar988bed02015-05-08 11:53:45 +05301389 write_lock_irqsave(&cpufreq_driver_lock, flags);
1390 list_add(&policy->policy_list, &cpufreq_policy_list);
1391 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1392 }
Viresh Kumar9515f4d2013-08-20 12:08:23 +05301393
Srivatsa S. Bhate18f1682013-07-30 04:24:23 +05301394 cpufreq_init_policy(policy);
1395
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301396 if (!recover_policy) {
Viresh Kumar08fd8c1c2013-12-24 07:11:01 +05301397 policy->user_policy.policy = policy->policy;
1398 policy->user_policy.governor = policy->governor;
1399 }
Viresh Kumar4e97b632014-03-04 11:44:01 +08001400 up_write(&policy->rwsem);
Viresh Kumar08fd8c1c2013-12-24 07:11:01 +05301401
Greg Kroah-Hartman038c5b32007-12-17 15:54:39 -04001402 kobject_uevent(&policy->kobj, KOBJ_ADD);
Viresh Kumar7c45cf32014-11-27 06:07:51 +05301403
Viresh Kumar6eed9402013-08-06 22:53:11 +05301404 up_read(&cpufreq_rwsem);
1405
Viresh Kumar7c45cf32014-11-27 06:07:51 +05301406 /* Callback for handling stuff after policy is ready */
1407 if (cpufreq_driver->ready)
1408 cpufreq_driver->ready(policy);
1409
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02001410 pr_debug("initialization complete\n");
Dave Jones87c32272006-03-29 01:48:37 -05001411
Linus Torvalds1da177e2005-04-16 15:20:36 -07001412 return 0;
1413
Linus Torvalds1da177e2005-04-16 15:20:36 -07001414err_out_unregister:
Viresh Kumar652ed952014-01-09 20:38:43 +05301415err_get_freq:
Prarit Bhargava7106e022014-09-10 10:12:08 -04001416 up_write(&policy->rwsem);
1417
Viresh Kumarda60ce92013-10-03 20:28:30 +05301418 if (cpufreq_driver->exit)
1419 cpufreq_driver->exit(policy);
Viresh Kumar2eaa3e22013-02-07 10:55:00 +05301420err_set_policy_cpu:
Viresh Kumar3654c5c2015-06-08 18:25:30 +05301421 cpufreq_policy_free(policy, recover_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001422nomem_out:
Viresh Kumar6eed9402013-08-06 22:53:11 +05301423 up_read(&cpufreq_rwsem);
1424
Linus Torvalds1da177e2005-04-16 15:20:36 -07001425 return ret;
1426}
1427
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301428static int __cpufreq_remove_dev_prepare(struct device *dev,
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301429 struct subsys_interface *sif)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001430{
Srivatsa S. Bhatf9ba6802013-07-30 04:24:36 +05301431 unsigned int cpu = dev->id, cpus;
Viresh Kumar1bfb4252014-07-17 10:48:28 +05301432 int ret;
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301433 struct cpufreq_policy *policy;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001434
Viresh Kumarb8eed8a2013-01-14 13:23:03 +00001435 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001436
Viresh Kumar988bed02015-05-08 11:53:45 +05301437 policy = cpufreq_cpu_get_raw(cpu);
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301438 if (!policy) {
Viresh Kumarb8eed8a2013-01-14 13:23:03 +00001439 pr_debug("%s: No cpu_data found\n", __func__);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001440 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001441 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001442
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05301443 if (has_target()) {
Viresh Kumar3de9bde2013-08-06 22:53:13 +05301444 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1445 if (ret) {
1446 pr_err("%s: Failed to stop governor\n", __func__);
1447 return ret;
1448 }
Viresh Kumardb5f2992015-01-02 12:34:25 +05301449 }
Jacob Shin27ecddc2011-04-27 13:32:11 -05001450
Viresh Kumar45732372015-05-12 12:22:34 +05301451 down_write(&policy->rwsem);
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301452 cpus = cpumask_weight(policy->cpus);
Viresh Kumar45732372015-05-12 12:22:34 +05301453
1454 if (has_target() && cpus == 1)
1455 strncpy(policy->last_governor, policy->governor->name,
1456 CPUFREQ_NAME_LEN);
1457 up_write(&policy->rwsem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001458
Viresh Kumar87549142015-06-10 02:13:21 +02001459 if (cpu != policy->cpu)
1460 return 0;
1461
1462 if (cpus > 1)
Viresh Kumar1bfb4252014-07-17 10:48:28 +05301463 /* Nominate new CPU */
Viresh Kumar87549142015-06-10 02:13:21 +02001464 update_policy_cpu(policy, cpumask_any_but(policy->cpus, cpu));
1465 else if (cpufreq_driver->stop_cpu)
Dirk Brandewie367dc4a2014-03-19 08:45:53 -07001466 cpufreq_driver->stop_cpu(policy);
Venki Pallipadiec282972007-03-26 12:03:19 -07001467
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301468 return 0;
1469}
1470
/*
 * __cpufreq_remove_dev_finish - second stage of removing a CPU from a policy.
 *
 * Drops @dev's CPU from policy->cpus.  If other CPUs remain in the policy,
 * the governor is started again (GOV_START + GOV_LIMITS).  Otherwise the
 * governor is exited, the driver's ->exit() callback runs, and - only when
 * invoked from subsys removal (@sif non-NULL) - the policy is freed.
 *
 * Returns 0 on success, -EINVAL when no policy exists for @dev, or a
 * governor start/exit error.
 */
static int __cpufreq_remove_dev_finish(struct device *dev,
				       struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	int ret;
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);

	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	/* Take the CPU out of the policy's active mask under the rwsem. */
	down_write(&policy->rwsem);
	cpumask_clear_cpu(cpu, policy->cpus);
	up_write(&policy->rwsem);

	/* Not the last cpu of policy, start governor again ? */
	if (!policy_is_inactive(policy)) {
		if (!has_target())
			return 0;

		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
		if (!ret)
			ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

		if (ret) {
			pr_err("%s: Failed to start governor\n", __func__);
			return ret;
		}

		return 0;
	}

	/* If cpu is last user of policy, free policy */
	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
		if (ret) {
			pr_err("%s: Failed to exit governor\n", __func__);
			return ret;
		}
	}

	/*
	 * Perform the ->exit() even during light-weight tear-down,
	 * since this is a core component, and is essential for the
	 * subsequent light-weight ->init() to succeed.
	 */
	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);

	/* Free the policy only if the driver is getting removed. */
	if (sif)
		cpufreq_policy_free(policy, true);

	return 0;
}
1527
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301528/**
Viresh Kumar27a862e2013-10-02 14:13:14 +05301529 * cpufreq_remove_dev - remove a CPU device
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301530 *
1531 * Removes the cpufreq interface for a CPU device.
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301532 */
static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	int ret;

	/*
	 * Only possible if 'cpu' is getting physically removed now. A hotplug
	 * notifier should have already been called and we just need to remove
	 * link or free policy here.
	 */
	if (cpu_is_offline(cpu)) {
		struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
		struct cpumask mask;

		/* No policy was ever created for this CPU: nothing to undo. */
		if (!policy)
			return 0;

		cpumask_copy(&mask, policy->related_cpus);
		cpumask_clear_cpu(cpu, &mask);

		/*
		 * Free policy only if all policy->related_cpus are removed
		 * physically.
		 */
		if (cpumask_intersects(&mask, cpu_present_mask)) {
			/* Other related CPUs still present: drop only the link. */
			remove_cpu_dev_symlink(policy, cpu);
			return 0;
		}

		cpufreq_policy_free(policy, true);
		return 0;
	}

	/* Online CPU: run the regular two-stage teardown. */
	ret = __cpufreq_remove_dev_prepare(dev, sif);

	if (!ret)
		ret = __cpufreq_remove_dev_finish(dev, sif);

	return ret;
}
1573
David Howells65f27f32006-11-22 14:55:48 +00001574static void handle_update(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001575{
David Howells65f27f32006-11-22 14:55:48 +00001576 struct cpufreq_policy *policy =
1577 container_of(work, struct cpufreq_policy, update);
1578 unsigned int cpu = policy->cpu;
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02001579 pr_debug("handle_update for cpu %u called\n", cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001580 cpufreq_update_policy(cpu);
1581}
1582
1583/**
Viresh Kumarbb176f72013-06-19 14:19:33 +05301584 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
1585 * in deep trouble.
Viresh Kumara1e1dc42015-01-02 12:34:28 +05301586 * @policy: policy managing CPUs
Linus Torvalds1da177e2005-04-16 15:20:36 -07001587 * @new_freq: CPU frequency the CPU actually runs at
1588 *
Dave Jones29464f22009-01-18 01:37:11 -05001589 * We adjust to current frequency first, and need to clean up later.
1590 * So either call to cpufreq_update_policy() or schedule handle_update()).
Linus Torvalds1da177e2005-04-16 15:20:36 -07001591 */
Viresh Kumara1e1dc42015-01-02 12:34:28 +05301592static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301593 unsigned int new_freq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001594{
1595 struct cpufreq_freqs freqs;
Viresh Kumarb43a7ff2013-03-24 11:56:43 +05301596
Joe Perchese837f9b2014-03-11 10:03:00 -07001597 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
Viresh Kumara1e1dc42015-01-02 12:34:28 +05301598 policy->cur, new_freq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001599
Viresh Kumara1e1dc42015-01-02 12:34:28 +05301600 freqs.old = policy->cur;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001601 freqs.new = new_freq;
Viresh Kumarb43a7ff2013-03-24 11:56:43 +05301602
Viresh Kumar8fec0512014-03-24 13:35:45 +05301603 cpufreq_freq_transition_begin(policy, &freqs);
1604 cpufreq_freq_transition_end(policy, &freqs, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001605}
1606
Dave Jones32ee8c32006-02-28 00:43:23 -05001607/**
Dhaval Giani4ab70df2006-12-13 14:49:15 +05301608 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001609 * @cpu: CPU number
1610 *
1611 * This is the last known freq, without actually getting it from the driver.
1612 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1613 */
1614unsigned int cpufreq_quick_get(unsigned int cpu)
1615{
Dirk Brandewie9e21ba82013-02-06 09:02:08 -08001616 struct cpufreq_policy *policy;
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301617 unsigned int ret_freq = 0;
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001618
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001619 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1620 return cpufreq_driver->get(cpu);
Dirk Brandewie9e21ba82013-02-06 09:02:08 -08001621
1622 policy = cpufreq_cpu_get(cpu);
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001623 if (policy) {
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301624 ret_freq = policy->cur;
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001625 cpufreq_cpu_put(policy);
1626 }
1627
Dave Jones4d34a672008-02-07 16:33:49 -05001628 return ret_freq;
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001629}
1630EXPORT_SYMBOL(cpufreq_quick_get);
1631
Jesse Barnes3d737102011-06-28 10:59:12 -07001632/**
1633 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1634 * @cpu: CPU number
1635 *
1636 * Just return the max possible frequency for a given CPU.
1637 */
1638unsigned int cpufreq_quick_get_max(unsigned int cpu)
1639{
1640 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1641 unsigned int ret_freq = 0;
1642
1643 if (policy) {
1644 ret_freq = policy->max;
1645 cpufreq_cpu_put(policy);
1646 }
1647
1648 return ret_freq;
1649}
1650EXPORT_SYMBOL(cpufreq_quick_get_max);
1651
/*
 * __cpufreq_get - read the current frequency for @policy from the driver.
 *
 * Caller must hold policy->rwsem (see cpufreq_get()).  When the hardware
 * value disagrees with policy->cur and the driver does not declare
 * CPUFREQ_CONST_LOOPS, the core is resynchronized via cpufreq_out_of_sync()
 * and a deferred policy update is scheduled.
 *
 * Returns the frequency reported by the driver, or 0 when the driver has no
 * ->get() callback.
 */
static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
{
	unsigned int ret_freq = 0;

	if (!cpufreq_driver->get)
		return ret_freq;

	ret_freq = cpufreq_driver->get(policy->cpu);

	/* Updating inactive policies is invalid, so avoid doing that. */
	if (unlikely(policy_is_inactive(policy)))
		return ret_freq;

	if (ret_freq && policy->cur &&
		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		/* verify no discrepancy between actual and
					saved value exists */
		if (unlikely(ret_freq != policy->cur)) {
			cpufreq_out_of_sync(policy, ret_freq);
			schedule_work(&policy->update);
		}
	}

	return ret_freq;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001677
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001678/**
1679 * cpufreq_get - get the current CPU frequency (in kHz)
1680 * @cpu: CPU number
1681 *
1682 * Get the CPU current (static) CPU frequency
1683 */
1684unsigned int cpufreq_get(unsigned int cpu)
1685{
Aaron Plattner999976e2014-03-04 12:42:15 -08001686 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001687 unsigned int ret_freq = 0;
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001688
Aaron Plattner999976e2014-03-04 12:42:15 -08001689 if (policy) {
1690 down_read(&policy->rwsem);
Viresh Kumard92d50a2015-01-02 12:34:29 +05301691 ret_freq = __cpufreq_get(policy);
Aaron Plattner999976e2014-03-04 12:42:15 -08001692 up_read(&policy->rwsem);
Viresh Kumar26ca8692013-09-20 22:37:31 +05301693
Aaron Plattner999976e2014-03-04 12:42:15 -08001694 cpufreq_cpu_put(policy);
1695 }
Viresh Kumar6eed9402013-08-06 22:53:11 +05301696
Dave Jones4d34a672008-02-07 16:33:49 -05001697 return ret_freq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001698}
1699EXPORT_SYMBOL(cpufreq_get);
1700
/* Binds cpufreq policy setup/teardown to CPU device add/remove events. */
static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};
1707
Viresh Kumare28867e2014-03-04 11:00:27 +08001708/*
1709 * In case platform wants some specific frequency to be configured
1710 * during suspend..
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001711 */
Viresh Kumare28867e2014-03-04 11:00:27 +08001712int cpufreq_generic_suspend(struct cpufreq_policy *policy)
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001713{
Viresh Kumare28867e2014-03-04 11:00:27 +08001714 int ret;
Dave Jones4bc5d342009-08-04 14:03:25 -04001715
Viresh Kumare28867e2014-03-04 11:00:27 +08001716 if (!policy->suspend_freq) {
1717 pr_err("%s: suspend_freq can't be zero\n", __func__);
1718 return -EINVAL;
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001719 }
1720
Viresh Kumare28867e2014-03-04 11:00:27 +08001721 pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1722 policy->suspend_freq);
1723
1724 ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1725 CPUFREQ_RELATION_H);
1726 if (ret)
1727 pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1728 __func__, policy->suspend_freq, ret);
1729
Dave Jonesc9060492008-02-07 16:32:18 -05001730 return ret;
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001731}
Viresh Kumare28867e2014-03-04 11:00:27 +08001732EXPORT_SYMBOL(cpufreq_generic_suspend);
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001733
1734/**
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001735 * cpufreq_suspend() - Suspend CPUFreq governors
Linus Torvalds1da177e2005-04-16 15:20:36 -07001736 *
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001737 * Called during system wide Suspend/Hibernate cycles for suspending governors
1738 * as some platforms can't change frequency after this point in suspend cycle.
1739 * Because some of the devices (like: i2c, regulators, etc) they use for
1740 * changing frequency are suspended quickly after this point.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001741 */
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001742void cpufreq_suspend(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001743{
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301744 struct cpufreq_policy *policy;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001745
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001746 if (!cpufreq_driver)
Rafael J. Wysockie00e56d2011-03-23 22:16:32 +01001747 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001748
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001749 if (!has_target())
Viresh Kumarb1b12ba2014-09-30 09:33:17 +05301750 goto suspend;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001751
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001752 pr_debug("%s: Suspending Governors\n", __func__);
1753
Viresh Kumarf9637352015-05-12 12:20:11 +05301754 for_each_active_policy(policy) {
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001755 if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
1756 pr_err("%s: Failed to stop governor for policy: %p\n",
1757 __func__, policy);
1758 else if (cpufreq_driver->suspend
1759 && cpufreq_driver->suspend(policy))
1760 pr_err("%s: Failed to suspend driver: %p\n", __func__,
1761 policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001762 }
Viresh Kumarb1b12ba2014-09-30 09:33:17 +05301763
1764suspend:
1765 cpufreq_suspended = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766}
1767
Linus Torvalds1da177e2005-04-16 15:20:36 -07001768/**
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001769 * cpufreq_resume() - Resume CPUFreq governors
Linus Torvalds1da177e2005-04-16 15:20:36 -07001770 *
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001771 * Called during system wide Suspend/Hibernate cycle for resuming governors that
1772 * are suspended with cpufreq_suspend().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001773 */
void cpufreq_resume(void)
{
	struct cpufreq_policy *policy;

	if (!cpufreq_driver)
		return;

	/* Clear the suspended flag before restarting any governors. */
	cpufreq_suspended = false;

	if (!has_target())
		return;

	pr_debug("%s: Resuming Governors\n", __func__);

	for_each_active_policy(policy) {
		/* Resume the driver first, then restart its governor. */
		if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
			pr_err("%s: Failed to resume driver: %p\n", __func__,
				policy);
		else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
		    || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
			pr_err("%s: Failed to start governor for policy: %p\n",
				__func__, policy);
	}

	/*
	 * schedule call cpufreq_update_policy() for first-online CPU, as that
	 * wouldn't be hotplugged-out on suspend. It will verify that the
	 * current freq is in sync with what we believe it to be.
	 */
	policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask));
	if (WARN_ON(!policy))
		return;

	schedule_work(&policy->update);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001809
Borislav Petkov9d950462013-01-20 10:24:28 +00001810/**
1811 * cpufreq_get_current_driver - return current driver's name
1812 *
1813 * Return the name string of the currently loaded cpufreq driver
1814 * or NULL, if none.
1815 */
1816const char *cpufreq_get_current_driver(void)
1817{
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001818 if (cpufreq_driver)
1819 return cpufreq_driver->name;
1820
1821 return NULL;
Borislav Petkov9d950462013-01-20 10:24:28 +00001822}
1823EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001824
Thomas Petazzoni51315cd2014-10-19 11:30:27 +02001825/**
1826 * cpufreq_get_driver_data - return current driver data
1827 *
1828 * Return the private data of the currently loaded cpufreq
1829 * driver, or NULL if no cpufreq driver is loaded.
1830 */
1831void *cpufreq_get_driver_data(void)
1832{
1833 if (cpufreq_driver)
1834 return cpufreq_driver->driver_data;
1835
1836 return NULL;
1837}
1838EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
1839
Linus Torvalds1da177e2005-04-16 15:20:36 -07001840/*********************************************************************
1841 * NOTIFIER LISTS INTERFACE *
1842 *********************************************************************/
1843
1844/**
1845 * cpufreq_register_notifier - register a driver with cpufreq
1846 * @nb: notifier function to register
1847 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1848 *
Dave Jones32ee8c32006-02-28 00:43:23 -05001849 * Add a driver to one of two lists: either a list of drivers that
Linus Torvalds1da177e2005-04-16 15:20:36 -07001850 * are notified about clock rate changes (once before and once after
1851 * the transition), or a list of drivers that are notified about
1852 * changes in cpufreq policy.
1853 *
1854 * This function may sleep, and has the same return conditions as
Alan Sterne041c682006-03-27 01:16:30 -08001855 * blocking_notifier_chain_register.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001856 */
1857int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1858{
1859 int ret;
1860
Dirk Brandewied5aaffa2013-01-17 16:22:21 +00001861 if (cpufreq_disabled())
1862 return -EINVAL;
1863
Cesar Eduardo Barros74212ca2008-02-16 08:41:24 -02001864 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1865
Linus Torvalds1da177e2005-04-16 15:20:36 -07001866 switch (list) {
1867 case CPUFREQ_TRANSITION_NOTIFIER:
Alan Sternb4dfdbb2006-10-04 02:17:06 -07001868 ret = srcu_notifier_chain_register(
Alan Sterne041c682006-03-27 01:16:30 -08001869 &cpufreq_transition_notifier_list, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001870 break;
1871 case CPUFREQ_POLICY_NOTIFIER:
Alan Sterne041c682006-03-27 01:16:30 -08001872 ret = blocking_notifier_chain_register(
1873 &cpufreq_policy_notifier_list, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001874 break;
1875 default:
1876 ret = -EINVAL;
1877 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001878
1879 return ret;
1880}
1881EXPORT_SYMBOL(cpufreq_register_notifier);
1882
Linus Torvalds1da177e2005-04-16 15:20:36 -07001883/**
1884 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1885 * @nb: notifier block to be unregistered
Viresh Kumarbb176f72013-06-19 14:19:33 +05301886 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
Linus Torvalds1da177e2005-04-16 15:20:36 -07001887 *
1888 * Remove a driver from the CPU frequency notifier list.
1889 *
1890 * This function may sleep, and has the same return conditions as
Alan Sterne041c682006-03-27 01:16:30 -08001891 * blocking_notifier_chain_unregister.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001892 */
1893int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1894{
1895 int ret;
1896
Dirk Brandewied5aaffa2013-01-17 16:22:21 +00001897 if (cpufreq_disabled())
1898 return -EINVAL;
1899
Linus Torvalds1da177e2005-04-16 15:20:36 -07001900 switch (list) {
1901 case CPUFREQ_TRANSITION_NOTIFIER:
Alan Sternb4dfdbb2006-10-04 02:17:06 -07001902 ret = srcu_notifier_chain_unregister(
Alan Sterne041c682006-03-27 01:16:30 -08001903 &cpufreq_transition_notifier_list, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001904 break;
1905 case CPUFREQ_POLICY_NOTIFIER:
Alan Sterne041c682006-03-27 01:16:30 -08001906 ret = blocking_notifier_chain_unregister(
1907 &cpufreq_policy_notifier_list, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001908 break;
1909 default:
1910 ret = -EINVAL;
1911 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001912
1913 return ret;
1914}
1915EXPORT_SYMBOL(cpufreq_unregister_notifier);
1916
1917
1918/*********************************************************************
1919 * GOVERNORS *
1920 *********************************************************************/
1921
/* Must set freqs->new to intermediate frequency */
/*
 * __target_intermediate - switch to the driver's intermediate frequency.
 *
 * Queries ->get_intermediate() for the step frequency; a return of 0 means
 * no intermediate switch is needed.  Otherwise emits transition notifiers
 * around ->target_intermediate().  Returns 0 on success or the driver error.
 */
static int __target_intermediate(struct cpufreq_policy *policy,
				 struct cpufreq_freqs *freqs, int index)
{
	int ret;

	freqs->new = cpufreq_driver->get_intermediate(policy, index);

	/* We don't need to switch to intermediate freq */
	if (!freqs->new)
		return 0;

	pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
		 __func__, policy->cpu, freqs->old, freqs->new);

	cpufreq_freq_transition_begin(policy, freqs);
	ret = cpufreq_driver->target_intermediate(policy, index);
	cpufreq_freq_transition_end(policy, freqs, ret);

	if (ret)
		pr_err("%s: Failed to change to intermediate frequency: %d\n",
		       __func__, ret);

	return ret;
}
1947
/*
 * __target_index - switch @policy to freq_table[index] via ->target_index().
 *
 * Unless the driver sets CPUFREQ_ASYNC_NOTIFICATION, transition notifiers
 * are emitted around the change, optionally after first moving to an
 * intermediate frequency (see __target_intermediate()).  If the driver
 * fails after an intermediate switch, a recovery transition back to
 * policy->restore_freq is announced.  Returns 0 or the driver error.
 */
static int __target_index(struct cpufreq_policy *policy,
			  struct cpufreq_frequency_table *freq_table, int index)
{
	struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
	unsigned int intermediate_freq = 0;
	int retval = -EINVAL;
	bool notify;

	notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
	if (notify) {
		/* Handle switching to intermediate frequency */
		if (cpufreq_driver->get_intermediate) {
			retval = __target_intermediate(policy, &freqs, index);
			if (retval)
				return retval;

			intermediate_freq = freqs.new;
			/* Set old freq to intermediate */
			if (intermediate_freq)
				freqs.old = freqs.new;
		}

		freqs.new = freq_table[index].frequency;
		pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
			 __func__, policy->cpu, freqs.old, freqs.new);

		cpufreq_freq_transition_begin(policy, &freqs);
	}

	retval = cpufreq_driver->target_index(policy, index);
	if (retval)
		pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
		       retval);

	if (notify) {
		cpufreq_freq_transition_end(policy, &freqs, retval);

		/*
		 * Failed after setting to intermediate freq? Driver should have
		 * reverted back to initial frequency and so should we. Check
		 * here for intermediate_freq instead of get_intermediate, in
		 * case we haven't switched to intermediate freq at all.
		 */
		if (unlikely(retval && intermediate_freq)) {
			freqs.old = intermediate_freq;
			freqs.new = policy->restore_freq;
			cpufreq_freq_transition_begin(policy, &freqs);
			cpufreq_freq_transition_end(policy, &freqs, 0);
		}
	}

	return retval;
}
2001
/*
 * __cpufreq_driver_target - set a policy's frequency (caller holds the
 * policy rwsem, see cpufreq_driver_target()).
 * @policy: policy to change
 * @target_freq: requested frequency in kHz; clamped to [policy->min, policy->max]
 * @relation: CPUFREQ_RELATION_* selection hint
 *
 * Dispatches to the driver's ->target() or ->target_index() callback.
 * Returns 0 on success (including when the policy is already at the
 * requested frequency), -ENODEV when cpufreq is disabled, or a negative
 * error code otherwise.
 */
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	unsigned int old_target_freq = target_freq;
	int retval = -EINVAL;

	if (cpufreq_disabled())
		return -ENODEV;

	/* Make sure that target_freq is within supported range */
	if (target_freq > policy->max)
		target_freq = policy->max;
	if (target_freq < policy->min)
		target_freq = policy->min;

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
		 policy->cpu, target_freq, relation, old_target_freq);

	/*
	 * This might look like a redundant call as we are checking it again
	 * after finding index. But it is left intentionally for cases where
	 * exactly same freq is called again and so we can save on few function
	 * calls.
	 */
	if (target_freq == policy->cur)
		return 0;

	/* Save last value to restore later on errors */
	policy->restore_freq = policy->cur;

	if (cpufreq_driver->target)
		retval = cpufreq_driver->target(policy, target_freq, relation);
	else if (cpufreq_driver->target_index) {
		struct cpufreq_frequency_table *freq_table;
		int index;

		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (unlikely(!freq_table)) {
			pr_err("%s: Unable to find freq_table\n", __func__);
			goto out;
		}

		/* Map the clamped target frequency to a table index. */
		retval = cpufreq_frequency_table_target(policy, freq_table,
				target_freq, relation, &index);
		if (unlikely(retval)) {
			pr_err("%s: Unable to find matching freq\n", __func__);
			goto out;
		}

		/* Chosen table entry is the current frequency: nothing to do. */
		if (freq_table[index].frequency == policy->cur) {
			retval = 0;
			goto out;
		}

		retval = __target_index(policy, freq_table, index);
	}

out:
	return retval;
}
2063EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
2064
Linus Torvalds1da177e2005-04-16 15:20:36 -07002065int cpufreq_driver_target(struct cpufreq_policy *policy,
2066 unsigned int target_freq,
2067 unsigned int relation)
2068{
Julia Lawallf1829e42008-07-25 22:44:53 +02002069 int ret = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002070
viresh kumarad7722d2013-10-18 19:10:15 +05302071 down_write(&policy->rwsem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002072
2073 ret = __cpufreq_driver_target(policy, target_freq, relation);
2074
viresh kumarad7722d2013-10-18 19:10:15 +05302075 up_write(&policy->rwsem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002076
Linus Torvalds1da177e2005-04-16 15:20:36 -07002077 return ret;
2078}
2079EXPORT_SYMBOL_GPL(cpufreq_driver_target);
2080
/*
 * __cpufreq_governor - deliver a governor event (INIT/START/STOP/EXIT/LIMITS)
 * to @policy's governor, maintaining policy->governor_enabled and the
 * governor module refcount.
 *
 * Returns 0 on success, -EINVAL for missing/unusable governor, -EBUSY when
 * the event is inconsistent with the current enabled state, or the
 * governor callback's error code.
 */
static int __cpufreq_governor(struct cpufreq_policy *policy,
					unsigned int event)
{
	int ret;

	/* Only must be defined when default governor is known to have latency
	   restrictions, like e.g. conservative or ondemand.
	   That this is the case is already ensured in Kconfig
	*/
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
	struct cpufreq_governor *gov = &cpufreq_gov_performance;
#else
	struct cpufreq_governor *gov = NULL;
#endif

	/* Don't start any governor operations if we are entering suspend */
	if (cpufreq_suspended)
		return 0;
	/*
	 * Governor might not be initiated here if ACPI _PPC changed
	 * notification happened, so check it.
	 */
	if (!policy->governor)
		return -EINVAL;

	/* Governor too slow for this hardware: fall back to performance (if built). */
	if (policy->governor->max_transition_latency &&
	    policy->cpuinfo.transition_latency >
	    policy->governor->max_transition_latency) {
		if (!gov)
			return -EINVAL;
		else {
			pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
				policy->governor->name, gov->name);
			policy->governor = gov;
		}
	}

	/* Pin the governor module for the lifetime of the policy binding. */
	if (event == CPUFREQ_GOV_POLICY_INIT)
		if (!try_module_get(policy->governor->owner))
			return -EINVAL;

	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
		 policy->cpu, event);

	/*
	 * Reject events that contradict the current state: starting an
	 * already-started governor, or stopping/limiting a stopped one.
	 */
	mutex_lock(&cpufreq_governor_lock);
	if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
	    || (!policy->governor_enabled
	    && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
		mutex_unlock(&cpufreq_governor_lock);
		return -EBUSY;
	}

	/* Flip the enabled flag *before* calling the governor... */
	if (event == CPUFREQ_GOV_STOP)
		policy->governor_enabled = false;
	else if (event == CPUFREQ_GOV_START)
		policy->governor_enabled = true;

	mutex_unlock(&cpufreq_governor_lock);

	ret = policy->governor->governor(policy, event);

	if (!ret) {
		if (event == CPUFREQ_GOV_POLICY_INIT)
			policy->governor->initialized++;
		else if (event == CPUFREQ_GOV_POLICY_EXIT)
			policy->governor->initialized--;
	} else {
		/* Restore original values */
		/* ...and roll the flag back if the governor call failed. */
		mutex_lock(&cpufreq_governor_lock);
		if (event == CPUFREQ_GOV_STOP)
			policy->governor_enabled = true;
		else if (event == CPUFREQ_GOV_START)
			policy->governor_enabled = false;
		mutex_unlock(&cpufreq_governor_lock);
	}

	/* Drop the module ref on failed INIT or successful EXIT. */
	if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
			((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
		module_put(policy->governor->owner);

	return ret;
}
2163
Linus Torvalds1da177e2005-04-16 15:20:36 -07002164int cpufreq_register_governor(struct cpufreq_governor *governor)
2165{
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -07002166 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002167
2168 if (!governor)
2169 return -EINVAL;
2170
Konrad Rzeszutek Wilka7b422c2012-03-13 19:18:39 -04002171 if (cpufreq_disabled())
2172 return -ENODEV;
2173
akpm@osdl.org3fc54d32006-01-13 15:54:22 -08002174 mutex_lock(&cpufreq_governor_mutex);
Dave Jones32ee8c32006-02-28 00:43:23 -05002175
Viresh Kumarb3940582013-02-01 05:42:58 +00002176 governor->initialized = 0;
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -07002177 err = -EBUSY;
Viresh Kumar42f91fa2015-01-02 12:34:26 +05302178 if (!find_governor(governor->name)) {
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -07002179 err = 0;
2180 list_add(&governor->governor_list, &cpufreq_governor_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002181 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002182
Dave Jones32ee8c32006-02-28 00:43:23 -05002183 mutex_unlock(&cpufreq_governor_mutex);
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -07002184 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002185}
2186EXPORT_SYMBOL_GPL(cpufreq_register_governor);
2187
/*
 * cpufreq_unregister_governor - remove @governor from the global list and
 * scrub references to it from all inactive policies.
 */
void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
	struct cpufreq_policy *policy;
	unsigned long flags;

	if (!governor)
		return;

	if (cpufreq_disabled())
		return;

	/* clear last_governor for all inactive policies */
	/*
	 * NOTE(review): policy fields are written here while holding
	 * cpufreq_driver_lock only for *reading* — presumably safe because
	 * the policies are inactive; confirm against the lock's ownership
	 * rules elsewhere in this file.
	 */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_inactive_policy(policy) {
		if (!strcmp(policy->last_governor, governor->name)) {
			policy->governor = NULL;
			strcpy(policy->last_governor, "\0");	/* empties the string */
		}
	}
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
	return;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2215
2216
Linus Torvalds1da177e2005-04-16 15:20:36 -07002217/*********************************************************************
2218 * POLICY INTERFACE *
2219 *********************************************************************/
2220
2221/**
2222 * cpufreq_get_policy - get the current cpufreq_policy
Dave Jones29464f22009-01-18 01:37:11 -05002223 * @policy: struct cpufreq_policy into which the current cpufreq_policy
2224 * is written
Linus Torvalds1da177e2005-04-16 15:20:36 -07002225 *
2226 * Reads the current cpufreq policy.
2227 */
2228int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2229{
2230 struct cpufreq_policy *cpu_policy;
2231 if (!policy)
2232 return -EINVAL;
2233
2234 cpu_policy = cpufreq_cpu_get(cpu);
2235 if (!cpu_policy)
2236 return -EINVAL;
2237
Viresh Kumard5b73cd2013-08-06 22:53:06 +05302238 memcpy(policy, cpu_policy, sizeof(*policy));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002239
2240 cpufreq_cpu_put(cpu_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002241 return 0;
2242}
2243EXPORT_SYMBOL(cpufreq_get_policy);
2244
/*
 * policy : current policy.
 * new_policy: policy to be set.
 *
 * Validates @new_policy (driver ->verify plus the policy notifier chain),
 * applies its min/max, and either hands it to a ->setpolicy driver or
 * performs a governor switch/limits update.  The caller holds
 * policy->rwsem for writing; it is dropped temporarily around the
 * GOV_POLICY_EXIT calls below.
 */
static int cpufreq_set_policy(struct cpufreq_policy *policy,
				struct cpufreq_policy *new_policy)
{
	struct cpufreq_governor *old_gov;
	int ret;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
		 new_policy->cpu, new_policy->min, new_policy->max);

	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));

	/* New limits must overlap the currently active range. */
	if (new_policy->min > policy->max || new_policy->max < policy->min)
		return -EINVAL;

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		return ret;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, new_policy);

	/* adjust if necessary - hardware incompatibility*/
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, new_policy);

	/*
	 * verify the cpu speed can be set within this limit, which might be
	 * different to the first one
	 */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		return ret;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, new_policy);

	policy->min = new_policy->min;
	policy->max = new_policy->max;

	pr_debug("new min and max freqs are %u - %u kHz\n",
		 policy->min, policy->max);

	/* ->setpolicy drivers manage frequency themselves: no governor involved. */
	if (cpufreq_driver->setpolicy) {
		policy->policy = new_policy->policy;
		pr_debug("setting range\n");
		return cpufreq_driver->setpolicy(new_policy);
	}

	/* Same governor: only the limits changed. */
	if (new_policy->governor == policy->governor)
		goto out;

	pr_debug("governor switch\n");

	/* save old, working values */
	old_gov = policy->governor;
	/* end old governor */
	if (old_gov) {
		__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		/* rwsem dropped for POLICY_EXIT, which may tear down sysfs state */
		up_write(&policy->rwsem);
		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
		down_write(&policy->rwsem);
	}

	/* start new governor */
	policy->governor = new_policy->governor;
	if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
		if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
			goto out;

		/* START failed after a successful INIT: undo the INIT. */
		up_write(&policy->rwsem);
		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
		down_write(&policy->rwsem);
	}

	/* new governor failed, so re-start old one */
	pr_debug("starting governor %s failed\n", policy->governor->name);
	if (old_gov) {
		policy->governor = old_gov;
		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
		__cpufreq_governor(policy, CPUFREQ_GOV_START);
	}

	return -EINVAL;

 out:
	pr_debug("governor: change or update limits\n");
	return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
}
2340
/**
 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
 * @cpu: CPU which shall be re-evaluated
 *
 * Useful for policy notifiers which have different necessities
 * at different times.
 *
 * Rebuilds a policy from the user_policy values, resynchronizes
 * policy->cur with the hardware if the driver reports frequencies, and
 * re-applies the policy via cpufreq_set_policy().  Returns -ENODEV if
 * @cpu has no policy, -EIO if the driver reports a zero frequency, or
 * the cpufreq_set_policy() result.
 */
int cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct cpufreq_policy new_policy;
	int ret;

	if (!policy)
		return -ENODEV;

	down_write(&policy->rwsem);

	pr_debug("updating policy for CPU %u\n", cpu);
	/* Start from the current policy, then overlay the user's settings. */
	memcpy(&new_policy, policy, sizeof(*policy));
	new_policy.min = policy->user_policy.min;
	new_policy.max = policy->user_policy.max;
	new_policy.policy = policy->user_policy.policy;
	new_policy.governor = policy->user_policy.governor;

	/*
	 * BIOS might change freq behind our back
	 * -> ask driver for current freq and notify governors about a change
	 */
	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
		new_policy.cur = cpufreq_driver->get(cpu);
		if (WARN_ON(!new_policy.cur)) {
			/* 0 kHz is never a valid frequency */
			ret = -EIO;
			goto unlock;
		}

		if (!policy->cur) {
			pr_debug("Driver did not initialize current freq\n");
			policy->cur = new_policy.cur;
		} else {
			/* Hardware drifted from our bookkeeping: let governors know. */
			if (policy->cur != new_policy.cur && has_target())
				cpufreq_out_of_sync(policy, new_policy.cur);
		}
	}

	ret = cpufreq_set_policy(policy, &new_policy);

unlock:
	up_write(&policy->rwsem);

	cpufreq_cpu_put(policy);	/* balances cpufreq_cpu_get() above */
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
2395
Paul Gortmaker27609842013-06-19 13:54:04 -04002396static int cpufreq_cpu_callback(struct notifier_block *nfb,
Ashok Rajc32b6b82005-10-30 14:59:54 -08002397 unsigned long action, void *hcpu)
2398{
2399 unsigned int cpu = (unsigned long)hcpu;
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002400 struct device *dev;
Ashok Rajc32b6b82005-10-30 14:59:54 -08002401
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002402 dev = get_cpu_device(cpu);
2403 if (dev) {
Srivatsa S. Bhat5302c3f2013-07-30 04:25:25 +05302404 switch (action & ~CPU_TASKS_FROZEN) {
Ashok Rajc32b6b82005-10-30 14:59:54 -08002405 case CPU_ONLINE:
Viresh Kumar23faf0b2015-02-19 17:02:04 +05302406 cpufreq_add_dev(dev, NULL);
Ashok Rajc32b6b82005-10-30 14:59:54 -08002407 break;
Srivatsa S. Bhat5302c3f2013-07-30 04:25:25 +05302408
Ashok Rajc32b6b82005-10-30 14:59:54 -08002409 case CPU_DOWN_PREPARE:
Viresh Kumar96bbbe42014-03-10 14:53:35 +05302410 __cpufreq_remove_dev_prepare(dev, NULL);
Srivatsa S. Bhat1aee40a2013-09-07 01:23:27 +05302411 break;
2412
2413 case CPU_POST_DEAD:
Viresh Kumar96bbbe42014-03-10 14:53:35 +05302414 __cpufreq_remove_dev_finish(dev, NULL);
Ashok Rajc32b6b82005-10-30 14:59:54 -08002415 break;
Srivatsa S. Bhat5302c3f2013-07-30 04:25:25 +05302416
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08002417 case CPU_DOWN_FAILED:
Viresh Kumar23faf0b2015-02-19 17:02:04 +05302418 cpufreq_add_dev(dev, NULL);
Ashok Rajc32b6b82005-10-30 14:59:54 -08002419 break;
2420 }
2421 }
2422 return NOTIFY_OK;
2423}
2424
/* Hotplug notifier dispatching to cpufreq_cpu_callback() above. */
static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};
Linus Torvalds1da177e2005-04-16 15:20:36 -07002428
2429/*********************************************************************
Lukasz Majewski6f19efc2013-12-20 15:24:49 +01002430 * BOOST *
2431 *********************************************************************/
2432static int cpufreq_boost_set_sw(int state)
2433{
2434 struct cpufreq_frequency_table *freq_table;
2435 struct cpufreq_policy *policy;
2436 int ret = -EINVAL;
2437
Viresh Kumarf9637352015-05-12 12:20:11 +05302438 for_each_active_policy(policy) {
Lukasz Majewski6f19efc2013-12-20 15:24:49 +01002439 freq_table = cpufreq_frequency_get_table(policy->cpu);
2440 if (freq_table) {
2441 ret = cpufreq_frequency_table_cpuinfo(policy,
2442 freq_table);
2443 if (ret) {
2444 pr_err("%s: Policy frequency update failed\n",
2445 __func__);
2446 break;
2447 }
2448 policy->user_policy.max = policy->max;
2449 __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2450 }
2451 }
2452
2453 return ret;
2454}
2455
/*
 * cpufreq_boost_trigger_state - enable (state != 0) or disable boost.
 *
 * Flips cpufreq_driver->boost_enabled under the driver lock before
 * calling the driver's ->set_boost, and rolls the flag back if the
 * driver call fails.  Returns 0 on success or no-op, else the driver's
 * error code.
 */
int cpufreq_boost_trigger_state(int state)
{
	unsigned long flags;
	int ret = 0;

	/* Already in the requested state: nothing to do. */
	if (cpufreq_driver->boost_enabled == state)
		return 0;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_driver->set_boost(state);
	if (ret) {
		/* Driver refused: restore the previous flag value. */
		write_lock_irqsave(&cpufreq_driver_lock, flags);
		cpufreq_driver->boost_enabled = !state;
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		pr_err("%s: Cannot %s BOOST\n",
		       __func__, state ? "enable" : "disable");
	}

	return ret;
}
2480
2481int cpufreq_boost_supported(void)
2482{
2483 if (likely(cpufreq_driver))
2484 return cpufreq_driver->boost_supported;
2485
2486 return 0;
2487}
2488EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
2489
/* Current boost state; note: unlike cpufreq_boost_supported(), this does
 * not NULL-check cpufreq_driver — callers must ensure a driver is set. */
int cpufreq_boost_enabled(void)
{
	return cpufreq_driver->boost_enabled;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2495
2496/*********************************************************************
Linus Torvalds1da177e2005-04-16 15:20:36 -07002497 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2498 *********************************************************************/
2499
/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values#
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 *
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	/*
	 * Sanity-check the callback set: exactly one of ->setpolicy or a
	 * target interface, and get_intermediate/target_intermediate must
	 * be provided together or not at all.
	 */
	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    !(driver_data->setpolicy || driver_data->target_index ||
		    driver_data->target) ||
	     (driver_data->setpolicy && (driver_data->target_index ||
		    driver_data->target)) ||
	     (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	/* Claim the single global driver slot under the driver lock. */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EEXIST;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	if (cpufreq_boost_supported()) {
		/*
		 * Check if driver provides function to enable boost -
		 * if not, use cpufreq_boost_set_sw as default
		 */
		if (!cpufreq_driver->set_boost)
			cpufreq_driver->set_boost = cpufreq_boost_set_sw;

		ret = cpufreq_sysfs_create_file(&boost.attr);
		if (ret) {
			pr_err("%s: cannot register global BOOST sysfs file\n",
			       __func__);
			goto err_null_driver;
		}
	}

	/* This triggers ->init() for each present CPU via the subsys iface. */
	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_boost_unreg;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
	    list_empty(&cpufreq_policy_list)) {
		/* if all ->init() calls failed, unregister */
		pr_debug("%s: No CPU initialized for driver %s\n", __func__,
			 driver_data->name);
		goto err_if_unreg;
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);

	return 0;

/* Unwind in reverse order of the setup above. */
err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
	if (cpufreq_boost_supported())
		cpufreq_sysfs_remove_file(&boost.attr);
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
2583
/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	/* Only the driver that registered may unregister. */
	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	subsys_interface_unregister(&cpufreq_interface);
	if (cpufreq_boost_supported())
		cpufreq_sysfs_remove_file(&boost.attr);

	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	/*
	 * Clear the global driver pointer while holding both cpufreq_rwsem
	 * (blocks new policy users) and the driver lock, in that order.
	 */
	down_write(&cpufreq_rwsem);
	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpufreq_driver = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	up_write(&cpufreq_rwsem);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08002618
/*
 * Stop cpufreq at shutdown to make sure it isn't holding any locks
 * or mutexes when secondary CPUs are halted.
 */
static struct syscore_ops cpufreq_syscore_ops = {
	.shutdown = cpufreq_suspend,	/* reuse the suspend path at shutdown */
};
2626
/*
 * Core initialization: create the global cpufreq kobject (required by the
 * sysfs interface — hence BUG_ON on failure) and hook shutdown handling.
 * Runs at core_initcall time, before drivers register.
 */
static int __init cpufreq_core_init(void)
{
	if (cpufreq_disabled())
		return -ENODEV;

	cpufreq_global_kobject = kobject_create();
	BUG_ON(!cpufreq_global_kobject);

	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
core_initcall(cpufreq_core_init);