blob: 870df9400d3fd5619e76b17435582da627aed798 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
Viresh Kumarbb176f72013-06-19 14:19:33 +05306 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 *
Ashok Rajc32b6b82005-10-30 14:59:54 -08008 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
Dave Jones32ee8c32006-02-28 00:43:23 -05009 * Added handling for CPU hotplug
Dave Jones8ff69732006-03-05 03:37:23 -050010 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11 * Fix handling for CPU hotplug -- affected CPUs
Ashok Rajc32b6b82005-10-30 14:59:54 -080012 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070013 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
Linus Torvalds1da177e2005-04-16 15:20:36 -070016 */
17
Viresh Kumardb701152012-10-23 01:29:03 +020018#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
Viresh Kumar5ff0a262013-08-06 22:53:03 +053020#include <linux/cpu.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070021#include <linux/cpufreq.h>
22#include <linux/delay.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070023#include <linux/device.h>
Viresh Kumar5ff0a262013-08-06 22:53:03 +053024#include <linux/init.h>
25#include <linux/kernel_stat.h>
26#include <linux/module.h>
akpm@osdl.org3fc54d32006-01-13 15:54:22 -080027#include <linux/mutex.h>
Viresh Kumar5ff0a262013-08-06 22:53:03 +053028#include <linux/slab.h>
Viresh Kumar2f0aea92014-03-04 11:00:26 +080029#include <linux/suspend.h>
Doug Anderson90de2a42014-12-23 22:09:48 -080030#include <linux/syscore_ops.h>
Viresh Kumar5ff0a262013-08-06 22:53:03 +053031#include <linux/tick.h>
Thomas Renninger6f4f2722010-04-20 13:17:36 +020032#include <trace/events/power.h>
33
/* List of all cpufreq policies, both active and inactive. */
static LIST_HEAD(cpufreq_policy_list);

/* A policy is inactive when its ->cpus mask is empty (no online CPUs). */
static inline bool policy_is_inactive(struct cpufreq_policy *policy)
{
	return cpumask_empty(policy->cpus);
}
40
/* Returns true when @policy's active/inactive state matches @active. */
static bool suitable_policy(struct cpufreq_policy *policy, bool active)
{
	return active == !policy_is_inactive(policy);
}
45
46/* Finds Next Acive/Inactive policy */
47static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
48 bool active)
49{
50 do {
51 policy = list_next_entry(policy, policy_list);
52
53 /* No more policies in the list */
54 if (&policy->policy_list == &cpufreq_policy_list)
55 return NULL;
56 } while (!suitable_policy(policy, active));
57
58 return policy;
59}
60
61static struct cpufreq_policy *first_policy(bool active)
62{
63 struct cpufreq_policy *policy;
64
65 /* No policies in the list */
66 if (list_empty(&cpufreq_policy_list))
67 return NULL;
68
69 policy = list_first_entry(&cpufreq_policy_list, typeof(*policy),
70 policy_list);
71
72 if (!suitable_policy(policy, active))
73 policy = next_policy(policy, active);
74
75 return policy;
76}
77
/* Macros to iterate over CPU policies */
#define for_each_suitable_policy(__policy, __active)	\
	for (__policy = first_policy(__active);		\
	     __policy;					\
	     __policy = next_policy(__policy, __active))

/* Iterate over policies that currently cover at least one online CPU. */
#define for_each_active_policy(__policy)		\
	for_each_suitable_policy(__policy, true)
/* Iterate over policies whose CPUs have all gone offline. */
#define for_each_inactive_policy(__policy)		\
	for_each_suitable_policy(__policy, false)

/* Iterate over every policy regardless of active state. */
#define for_each_policy(__policy)			\
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)

/* Iterate over registered governors */
static LIST_HEAD(cpufreq_governor_list);
#define for_each_governor(__governor)				\
	list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
96
/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_RWLOCK(cpufreq_driver_lock);
/* Not static: also taken by governor code outside this file. */
DEFINE_MUTEX(cpufreq_governor_lock);

/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700109
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +0530110static inline bool has_target(void)
111{
112 return cpufreq_driver->target_index || cpufreq_driver->target;
113}
114
/*
 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
 * sections
 */
static DECLARE_RWSEM(cpufreq_rwsem);

/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
		unsigned int event);
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static void handle_update(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700126
/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

/* Set once the SRCU head above has been initialized via pure_initcall. */
static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700145
/* Non-zero once disable_cpufreq() has been called; checked via cpufreq_disabled(). */
static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
void disable_cpufreq(void)
{
	off = 1;
}
/* Serializes governor registration and the governor list. */
static DEFINE_MUTEX(cpufreq_governor_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700156
Viresh Kumar4d5dcc42013-03-27 15:58:58 +0000157bool have_governor_per_policy(void)
158{
Viresh Kumar0b981e72013-10-02 14:13:18 +0530159 return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
Viresh Kumar4d5dcc42013-03-27 15:58:58 +0000160}
Viresh Kumar3f869d62013-05-16 05:09:56 +0000161EXPORT_SYMBOL_GPL(have_governor_per_policy);
Viresh Kumar4d5dcc42013-03-27 15:58:58 +0000162
Viresh Kumar944e9a02013-05-16 05:09:57 +0000163struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
164{
165 if (have_governor_per_policy())
166 return &policy->kobj;
167 else
168 return cpufreq_global_kobject;
169}
170EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
171
/*
 * Jiffy-based idle-time fallback for @cpu:
 * idle = wall - (user + system + irq + softirq + steal + nice).
 * When @wall is non-NULL, the wall time (in usecs) is returned through it.
 */
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = cputime_to_usecs(cur_wall_time);

	return cputime_to_usecs(idle_time);
}
193
194u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
195{
196 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
197
198 if (idle_time == -1ULL)
199 return get_cpu_idle_time_jiffy(cpu, wall);
200 else if (!io_busy)
201 idle_time += get_cpu_iowait_time_us(cpu, wall);
202
203 return idle_time;
204}
205EXPORT_SYMBOL_GPL(get_cpu_idle_time);
206
Viresh Kumar70e9e772013-10-03 20:29:07 +0530207/*
208 * This is a generic cpufreq init() routine which can be used by cpufreq
209 * drivers of SMP systems. It will do following:
210 * - validate & show freq table passed
211 * - set policies transition latency
212 * - policy->cpus with all possible CPUs
213 */
214int cpufreq_generic_init(struct cpufreq_policy *policy,
215 struct cpufreq_frequency_table *table,
216 unsigned int transition_latency)
217{
218 int ret;
219
220 ret = cpufreq_table_validate_and_show(policy, table);
221 if (ret) {
222 pr_err("%s: invalid frequency table: %d\n", __func__, ret);
223 return ret;
224 }
225
226 policy->cpuinfo.transition_latency = transition_latency;
227
228 /*
Shailendra Verma58405af2015-05-22 22:48:22 +0530229 * The driver only supports the SMP configuration where all processors
Viresh Kumar70e9e772013-10-03 20:29:07 +0530230 * share the clock and voltage and clock.
231 */
232 cpumask_setall(policy->cpus);
233
234 return 0;
235}
236EXPORT_SYMBOL_GPL(cpufreq_generic_init);
237
Viresh Kumar988bed02015-05-08 11:53:45 +0530238/* Only for cpufreq core internal use */
239struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
Viresh Kumar652ed952014-01-09 20:38:43 +0530240{
241 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
242
Viresh Kumar988bed02015-05-08 11:53:45 +0530243 return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
244}
245
246unsigned int cpufreq_generic_get(unsigned int cpu)
247{
248 struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
249
Viresh Kumar652ed952014-01-09 20:38:43 +0530250 if (!policy || IS_ERR(policy->clk)) {
Joe Perchese837f9b2014-03-11 10:03:00 -0700251 pr_err("%s: No %s associated to cpu: %d\n",
252 __func__, policy ? "clk" : "policy", cpu);
Viresh Kumar652ed952014-01-09 20:38:43 +0530253 return 0;
254 }
255
256 return clk_get_rate(policy->clk) / 1000;
257}
258EXPORT_SYMBOL_GPL(cpufreq_generic_get);
259
/**
 * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
 *
 * @cpu: cpu to find policy for.
 *
 * This returns policy for 'cpu', returns NULL if it doesn't exist.
 * It also increments the kobject reference count to mark it busy and so would
 * require a corresponding call to cpufreq_cpu_put() to decrement it back.
 * If corresponding call cpufreq_cpu_put() isn't made, the policy wouldn't be
 * freed as that depends on the kobj count.
 *
 * It also takes a read-lock of 'cpufreq_rwsem' and doesn't put it back if a
 * valid policy is found. This is done to make sure the driver doesn't get
 * unregistered while the policy is being used.
 *
 * Return: A valid policy on success, otherwise NULL on failure.
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = NULL;
	unsigned long flags;

	if (WARN_ON(cpu >= nr_cpu_ids))
		return NULL;

	/* Fail rather than block while a driver is (un)registering. */
	if (!down_read_trylock(&cpufreq_rwsem))
		return NULL;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver) {
		/* get the CPU */
		policy = cpufreq_cpu_get_raw(cpu);
		if (policy)
			kobject_get(&policy->kobj);
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/* On failure drop the rwsem; on success it is held until _put(). */
	if (!policy)
		up_read(&cpufreq_rwsem);

	return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
306
/**
 * cpufreq_cpu_put: Decrements the usage count of a policy
 *
 * @policy: policy earlier returned by cpufreq_cpu_get().
 *
 * This decrements the kobject reference count incremented earlier by calling
 * cpufreq_cpu_get().
 *
 * It also drops the read-lock of 'cpufreq_rwsem' taken at cpufreq_cpu_get().
 */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
	kobject_put(&policy->kobj);
	up_read(&cpufreq_rwsem);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
323
Linus Torvalds1da177e2005-04-16 15:20:36 -0700324/*********************************************************************
Linus Torvalds1da177e2005-04-16 15:20:36 -0700325 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
326 *********************************************************************/
327
/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
#ifndef CONFIG_SMP
	static unsigned long l_p_j_ref;
	static unsigned int l_p_j_ref_freq;

	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	/* Latch the reference lpj/frequency pair on the first transition. */
	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
			 l_p_j_ref, l_p_j_ref_freq);
	}
	/* Only rescale once the change has actually happened. */
	if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
			 loops_per_jiffy, ci->new);
	}
#endif
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700359
/*
 * Notify one PRECHANGE or POSTCHANGE step of a frequency transition for a
 * single freqs->cpu, and keep loops_per_jiffy / policy->cur in sync.
 * Must be called with interrupts enabled (notifiers may sleep).
 */
static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		 state, freqs->new);

	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
					 freqs->old, policy->cur);
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %lu - CPU: %lu\n",
			 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		/* The transition is done; record the new current frequency. */
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}
Viresh Kumarbb176f72013-06-19 14:19:33 +0530404
/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	/* Notify on behalf of every CPU that belongs to this policy. */
	for_each_cpu(freqs->cpu, policy->cpus)
		__cpufreq_notify_transition(policy, freqs, state);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700419
Viresh Kumarf7ba3b42013-12-02 11:04:12 +0530420/* Do post notifications when there are chances that transition has failed */
Viresh Kumar236a9802014-03-24 13:35:46 +0530421static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
Viresh Kumarf7ba3b42013-12-02 11:04:12 +0530422 struct cpufreq_freqs *freqs, int transition_failed)
423{
424 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
425 if (!transition_failed)
426 return;
427
428 swap(freqs->old, freqs->new);
429 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
430 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
431}
Viresh Kumarf7ba3b42013-12-02 11:04:12 +0530432
/*
 * Marks the start of a frequency transition for @policy: waits until no other
 * transition is in flight, claims ownership, then sends PRECHANGE notifiers.
 * Must be paired with cpufreq_freq_transition_end().
 */
void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs)
{

	/*
	 * Catch double invocations of _begin() which lead to self-deadlock.
	 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
	 * doesn't invoke _begin() on their behalf, and hence the chances of
	 * double invocations are very low. Moreover, there are scenarios
	 * where these checks can emit false-positive warnings in these
	 * drivers; so we avoid that by skipping them altogether.
	 */
	WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
				&& current == policy->transition_task);

wait:
	wait_event(policy->transition_wait, !policy->transition_ongoing);

	spin_lock(&policy->transition_lock);

	if (unlikely(policy->transition_ongoing)) {
		/* Lost the race with another starter; go back to waiting. */
		spin_unlock(&policy->transition_lock);
		goto wait;
	}

	policy->transition_ongoing = true;
	policy->transition_task = current;

	spin_unlock(&policy->transition_lock);

	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
466
/*
 * Completes a transition started with cpufreq_freq_transition_begin():
 * sends the POSTCHANGE (and rollback, on failure) notifications, releases
 * ownership and wakes up any waiting starters.
 */
void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	if (unlikely(WARN_ON(!policy->transition_ongoing)))
		return;

	cpufreq_notify_post_transition(policy, freqs, transition_failed);

	policy->transition_ongoing = false;
	policy->transition_task = NULL;

	wake_up(&policy->transition_wait);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
481
Linus Torvalds1da177e2005-04-16 15:20:36 -0700482
Linus Torvalds1da177e2005-04-16 15:20:36 -0700483/*********************************************************************
484 * SYSFS INTERFACE *
485 *********************************************************************/
/* sysfs: show the global boost state ("1" enabled / "0" disabled). */
static ssize_t show_boost(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}
491
/* sysfs: globally enable/disable frequency boost; accepts "0" or "1". */
static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	int ret, enable;

	ret = sscanf(buf, "%d", &enable);
	if (ret != 1 || enable < 0 || enable > 1)
		return -EINVAL;

	if (cpufreq_boost_trigger_state(enable)) {
		pr_err("%s: Cannot %s BOOST!\n",
		       __func__, enable ? "enable" : "disable");
		return -EINVAL;
	}

	pr_debug("%s: cpufreq BOOST %s\n",
		 __func__, enable ? "enabled" : "disabled");

	return count;
}
define_one_global_rw(boost);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700513
Viresh Kumar42f91fa2015-01-02 12:34:26 +0530514static struct cpufreq_governor *find_governor(const char *str_governor)
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700515{
516 struct cpufreq_governor *t;
517
Viresh Kumarf7b27062015-01-27 14:06:09 +0530518 for_each_governor(t)
Rasmus Villemoes7c4f4532014-09-29 15:50:11 +0200519 if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700520 return t;
521
522 return NULL;
523}
524
/**
 * cpufreq_parse_governor - parse a governor string
 *
 * For setpolicy drivers only "performance"/"powersave" are accepted (via
 * @policy); for target drivers the registered governor is looked up (via
 * @governor), loading its module on demand. Returns 0 on success, -EINVAL
 * on an unknown name or when no driver is registered.
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (!cpufreq_driver)
		goto out;

	if (cpufreq_driver->setpolicy) {
		if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strncasecmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = find_governor(str_governor);

		if (t == NULL) {
			int ret;

			/*
			 * Drop the mutex around request_module() —
			 * presumably the freshly loaded module registers its
			 * governor and needs this mutex itself; verify against
			 * cpufreq_register_governor().
			 */
			mutex_unlock(&cpufreq_governor_mutex);
			ret = request_module("cpufreq_%s", str_governor);
			mutex_lock(&cpufreq_governor_mutex);

			if (ret == 0)
				t = find_governor(str_governor);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700573
/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
Dirk Brandewiec034b022014-10-13 08:37:40 -0700594
Viresh Kumar09347b22015-01-02 12:34:24 +0530595static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
Dirk Brandewiec034b022014-10-13 08:37:40 -0700596{
597 ssize_t ret;
598
599 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
600 ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
601 else
602 ret = sprintf(buf, "%u\n", policy->cur);
603 return ret;
604}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700605
static int cpufreq_set_policy(struct cpufreq_policy *policy,
				struct cpufreq_policy *new_policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 *
 * The user's requested value is recorded in user_policy only when
 * cpufreq_set_policy() accepted it.
 */
#define store_one(file_name, object)			\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	int ret, temp;							\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	temp = new_policy.object;					\
	ret = cpufreq_set_policy(policy, &new_policy);		\
	if (!ret)							\
		policy->user_policy.object = temp;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700637
638/**
639 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
640 */
Dave Jones905d77c2008-03-05 14:28:32 -0500641static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
642 char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700643{
Viresh Kumard92d50a2015-01-02 12:34:29 +0530644 unsigned int cur_freq = __cpufreq_get(policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700645 if (!cur_freq)
646 return sprintf(buf, "<unknown>");
647 return sprintf(buf, "%u\n", cur_freq);
648}
649
Linus Torvalds1da177e2005-04-16 15:20:36 -0700650/**
651 * show_scaling_governor - show the current policy for the specified CPU
652 */
Dave Jones905d77c2008-03-05 14:28:32 -0500653static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700654{
Dave Jones29464f22009-01-18 01:37:11 -0500655 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700656 return sprintf(buf, "powersave\n");
657 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
658 return sprintf(buf, "performance\n");
659 else if (policy->governor)
viresh kumar4b972f02012-10-23 01:23:43 +0200660 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
Dave Jones29464f22009-01-18 01:37:11 -0500661 policy->governor->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700662 return -EINVAL;
663}
664
/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	int ret;
	char str_governor[16];
	struct cpufreq_policy new_policy;

	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return ret;

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	ret = cpufreq_set_policy(policy, &new_policy);

	/* Record whatever policy/governor ended up in effect. */
	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret)
		return ret;
	else
		return count;
}
697
698/**
699 * show_scaling_driver - show the cpufreq driver currently loaded
700 */
Dave Jones905d77c2008-03-05 14:28:32 -0500701static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700702{
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200703 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700704}
705
706/**
707 * show_scaling_available_governors - show the available CPUfreq governors
708 */
Dave Jones905d77c2008-03-05 14:28:32 -0500709static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
710 char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700711{
712 ssize_t i = 0;
713 struct cpufreq_governor *t;
714
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +0530715 if (!has_target()) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700716 i += sprintf(buf, "performance powersave");
717 goto out;
718 }
719
Viresh Kumarf7b27062015-01-27 14:06:09 +0530720 for_each_governor(t) {
Dave Jones29464f22009-01-18 01:37:11 -0500721 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
722 - (CPUFREQ_NAME_LEN + 2)))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700723 goto out;
viresh kumar4b972f02012-10-23 01:23:43 +0200724 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700725 }
Dave Jones7d5e3502006-02-02 17:03:42 -0500726out:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700727 i += sprintf(&buf[i], "\n");
728 return i;
729}
Darrick J. Wonge8628dd2008-04-18 13:31:12 -0700730
Lan Tianyuf4fd3792013-06-27 15:08:54 +0800731ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700732{
733 ssize_t i = 0;
734 unsigned int cpu;
735
Rusty Russell835481d2009-01-04 05:18:06 -0800736 for_each_cpu(cpu, mask) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700737 if (i)
738 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
739 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
740 if (i >= (PAGE_SIZE - 5))
Dave Jones29464f22009-01-18 01:37:11 -0500741 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700742 }
743 i += sprintf(&buf[i], "\n");
744 return i;
745}
Lan Tianyuf4fd3792013-06-27 15:08:54 +0800746EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700747
Darrick J. Wonge8628dd2008-04-18 13:31:12 -0700748/**
749 * show_related_cpus - show the CPUs affected by each transition even if
750 * hw coordination is in use
751 */
752static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
753{
Lan Tianyuf4fd3792013-06-27 15:08:54 +0800754 return cpufreq_show_cpus(policy->related_cpus, buf);
Darrick J. Wonge8628dd2008-04-18 13:31:12 -0700755}
756
757/**
758 * show_affected_cpus - show the CPUs affected by each transition
759 */
760static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
761{
Lan Tianyuf4fd3792013-06-27 15:08:54 +0800762 return cpufreq_show_cpus(policy->cpus, buf);
Darrick J. Wonge8628dd2008-04-18 13:31:12 -0700763}
764
Venki Pallipadi9e769882007-10-26 10:18:21 -0700765static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
Dave Jones905d77c2008-03-05 14:28:32 -0500766 const char *buf, size_t count)
Venki Pallipadi9e769882007-10-26 10:18:21 -0700767{
768 unsigned int freq = 0;
769 unsigned int ret;
770
CHIKAMA masaki879000f2008-06-05 22:46:33 -0700771 if (!policy->governor || !policy->governor->store_setspeed)
Venki Pallipadi9e769882007-10-26 10:18:21 -0700772 return -EINVAL;
773
774 ret = sscanf(buf, "%u", &freq);
775 if (ret != 1)
776 return -EINVAL;
777
778 policy->governor->store_setspeed(policy, freq);
779
780 return count;
781}
782
/* Show the frequency last set via scaling_setspeed, if the governor
 * supports it; "<unsupported>" otherwise. */
static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700790
Thomas Renningere2f74f32009-11-19 12:31:01 +0100791/**
viresh kumar8bf1ac722012-10-23 01:23:33 +0200792 * show_bios_limit - show the current cpufreq HW/BIOS limitation
Thomas Renningere2f74f32009-11-19 12:31:01 +0100793 */
794static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
795{
796 unsigned int limit;
797 int ret;
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200798 if (cpufreq_driver->bios_limit) {
799 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
Thomas Renningere2f74f32009-11-19 12:31:01 +0100800 if (!ret)
801 return sprintf(buf, "%u\n", limit);
802 }
803 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
804}
805
Borislav Petkov6dad2a22010-03-31 21:56:46 +0200806cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
807cpufreq_freq_attr_ro(cpuinfo_min_freq);
808cpufreq_freq_attr_ro(cpuinfo_max_freq);
809cpufreq_freq_attr_ro(cpuinfo_transition_latency);
810cpufreq_freq_attr_ro(scaling_available_governors);
811cpufreq_freq_attr_ro(scaling_driver);
812cpufreq_freq_attr_ro(scaling_cur_freq);
813cpufreq_freq_attr_ro(bios_limit);
814cpufreq_freq_attr_ro(related_cpus);
815cpufreq_freq_attr_ro(affected_cpus);
816cpufreq_freq_attr_rw(scaling_min_freq);
817cpufreq_freq_attr_rw(scaling_max_freq);
818cpufreq_freq_attr_rw(scaling_governor);
819cpufreq_freq_attr_rw(scaling_setspeed);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700820
Dave Jones905d77c2008-03-05 14:28:32 -0500821static struct attribute *default_attrs[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700822 &cpuinfo_min_freq.attr,
823 &cpuinfo_max_freq.attr,
Thomas Renningered129782009-02-04 01:17:41 +0100824 &cpuinfo_transition_latency.attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700825 &scaling_min_freq.attr,
826 &scaling_max_freq.attr,
827 &affected_cpus.attr,
Darrick J. Wonge8628dd2008-04-18 13:31:12 -0700828 &related_cpus.attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700829 &scaling_governor.attr,
830 &scaling_driver.attr,
831 &scaling_available_governors.attr,
Venki Pallipadi9e769882007-10-26 10:18:21 -0700832 &scaling_setspeed.attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700833 NULL
834};
835
Dave Jones29464f22009-01-18 01:37:11 -0500836#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
837#define to_attr(a) container_of(a, struct freq_attr, attr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700838
Dave Jones29464f22009-01-18 01:37:11 -0500839static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700840{
Dave Jones905d77c2008-03-05 14:28:32 -0500841 struct cpufreq_policy *policy = to_policy(kobj);
842 struct freq_attr *fattr = to_attr(attr);
Viresh Kumar1b750e32013-10-02 14:13:09 +0530843 ssize_t ret;
Viresh Kumar6eed9402013-08-06 22:53:11 +0530844
845 if (!down_read_trylock(&cpufreq_rwsem))
Viresh Kumar1b750e32013-10-02 14:13:09 +0530846 return -EINVAL;
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -0800847
viresh kumarad7722d2013-10-18 19:10:15 +0530848 down_read(&policy->rwsem);
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -0800849
Gautham R Shenoye08f5f52006-10-26 16:20:58 +0530850 if (fattr->show)
851 ret = fattr->show(policy, buf);
852 else
853 ret = -EIO;
854
viresh kumarad7722d2013-10-18 19:10:15 +0530855 up_read(&policy->rwsem);
Viresh Kumar6eed9402013-08-06 22:53:11 +0530856 up_read(&cpufreq_rwsem);
Viresh Kumar1b750e32013-10-02 14:13:09 +0530857
Linus Torvalds1da177e2005-04-16 15:20:36 -0700858 return ret;
859}
860
Dave Jones905d77c2008-03-05 14:28:32 -0500861static ssize_t store(struct kobject *kobj, struct attribute *attr,
862 const char *buf, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700863{
Dave Jones905d77c2008-03-05 14:28:32 -0500864 struct cpufreq_policy *policy = to_policy(kobj);
865 struct freq_attr *fattr = to_attr(attr);
Dave Jonesa07530b2008-03-05 14:22:25 -0500866 ssize_t ret = -EINVAL;
Viresh Kumar6eed9402013-08-06 22:53:11 +0530867
Srivatsa S. Bhat4f750c92013-09-07 01:23:43 +0530868 get_online_cpus();
869
870 if (!cpu_online(policy->cpu))
871 goto unlock;
872
Viresh Kumar6eed9402013-08-06 22:53:11 +0530873 if (!down_read_trylock(&cpufreq_rwsem))
Srivatsa S. Bhat4f750c92013-09-07 01:23:43 +0530874 goto unlock;
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -0800875
viresh kumarad7722d2013-10-18 19:10:15 +0530876 down_write(&policy->rwsem);
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -0800877
Gautham R Shenoye08f5f52006-10-26 16:20:58 +0530878 if (fattr->store)
879 ret = fattr->store(policy, buf, count);
880 else
881 ret = -EIO;
882
viresh kumarad7722d2013-10-18 19:10:15 +0530883 up_write(&policy->rwsem);
Viresh Kumar6eed9402013-08-06 22:53:11 +0530884
Viresh Kumar6eed9402013-08-06 22:53:11 +0530885 up_read(&cpufreq_rwsem);
Srivatsa S. Bhat4f750c92013-09-07 01:23:43 +0530886unlock:
887 put_online_cpus();
888
Linus Torvalds1da177e2005-04-16 15:20:36 -0700889 return ret;
890}
891
Dave Jones905d77c2008-03-05 14:28:32 -0500892static void cpufreq_sysfs_release(struct kobject *kobj)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700893{
Dave Jones905d77c2008-03-05 14:28:32 -0500894 struct cpufreq_policy *policy = to_policy(kobj);
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +0200895 pr_debug("last reference is dropped\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700896 complete(&policy->kobj_unregister);
897}
898
Emese Revfy52cf25d2010-01-19 02:58:23 +0100899static const struct sysfs_ops sysfs_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700900 .show = show,
901 .store = store,
902};
903
904static struct kobj_type ktype_cpufreq = {
905 .sysfs_ops = &sysfs_ops,
906 .default_attrs = default_attrs,
907 .release = cpufreq_sysfs_release,
908};
909
Viresh Kumar2361be22013-05-17 16:09:09 +0530910struct kobject *cpufreq_global_kobject;
911EXPORT_SYMBOL(cpufreq_global_kobject);
912
913static int cpufreq_global_kobject_usage;
914
915int cpufreq_get_global_kobject(void)
916{
917 if (!cpufreq_global_kobject_usage++)
918 return kobject_add(cpufreq_global_kobject,
919 &cpu_subsys.dev_root->kobj, "%s", "cpufreq");
920
921 return 0;
922}
923EXPORT_SYMBOL(cpufreq_get_global_kobject);
924
925void cpufreq_put_global_kobject(void)
926{
927 if (!--cpufreq_global_kobject_usage)
928 kobject_del(cpufreq_global_kobject);
929}
930EXPORT_SYMBOL(cpufreq_put_global_kobject);
931
932int cpufreq_sysfs_create_file(const struct attribute *attr)
933{
934 int ret = cpufreq_get_global_kobject();
935
936 if (!ret) {
937 ret = sysfs_create_file(cpufreq_global_kobject, attr);
938 if (ret)
939 cpufreq_put_global_kobject();
940 }
941
942 return ret;
943}
944EXPORT_SYMBOL(cpufreq_sysfs_create_file);
945
/* Counterpart of cpufreq_sysfs_create_file(): remove the file and drop
 * the reference taken at creation time. */
void cpufreq_sysfs_remove_file(const struct attribute *attr)
{
	sysfs_remove_file(cpufreq_global_kobject, attr);
	cpufreq_put_global_kobject();
}
EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
952
Dave Jones19d6f7e2009-07-08 17:35:39 -0400953/* symlink affected CPUs */
Viresh Kumar308b60e2013-07-31 14:35:14 +0200954static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
Dave Jones19d6f7e2009-07-08 17:35:39 -0400955{
956 unsigned int j;
957 int ret = 0;
958
959 for_each_cpu(j, policy->cpus) {
Kay Sievers8a25a2f2011-12-21 14:29:42 -0800960 struct device *cpu_dev;
Dave Jones19d6f7e2009-07-08 17:35:39 -0400961
Viresh Kumar308b60e2013-07-31 14:35:14 +0200962 if (j == policy->cpu)
Dave Jones19d6f7e2009-07-08 17:35:39 -0400963 continue;
Dave Jones19d6f7e2009-07-08 17:35:39 -0400964
Viresh Kumare8fdde12013-07-31 14:31:33 +0200965 pr_debug("Adding link for CPU: %u\n", j);
Kay Sievers8a25a2f2011-12-21 14:29:42 -0800966 cpu_dev = get_cpu_device(j);
967 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
Dave Jones19d6f7e2009-07-08 17:35:39 -0400968 "cpufreq");
Rafael J. Wysocki71c34612013-08-04 01:19:34 +0200969 if (ret)
970 break;
Dave Jones19d6f7e2009-07-08 17:35:39 -0400971 }
972 return ret;
973}
974
Viresh Kumar308b60e2013-07-31 14:35:14 +0200975static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
Kay Sievers8a25a2f2011-12-21 14:29:42 -0800976 struct device *dev)
Dave Jones909a6942009-07-08 18:05:42 -0400977{
978 struct freq_attr **drv_attr;
Dave Jones909a6942009-07-08 18:05:42 -0400979 int ret = 0;
Dave Jones909a6942009-07-08 18:05:42 -0400980
Dave Jones909a6942009-07-08 18:05:42 -0400981 /* set up files for this cpu device */
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200982 drv_attr = cpufreq_driver->attr;
Viresh Kumarf13f1182015-01-02 12:34:23 +0530983 while (drv_attr && *drv_attr) {
Dave Jones909a6942009-07-08 18:05:42 -0400984 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
985 if (ret)
Tomeu Vizoso6d4e81e2014-11-24 10:08:03 +0100986 return ret;
Dave Jones909a6942009-07-08 18:05:42 -0400987 drv_attr++;
988 }
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200989 if (cpufreq_driver->get) {
Dave Jones909a6942009-07-08 18:05:42 -0400990 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
991 if (ret)
Tomeu Vizoso6d4e81e2014-11-24 10:08:03 +0100992 return ret;
Dave Jones909a6942009-07-08 18:05:42 -0400993 }
Dirk Brandewiec034b022014-10-13 08:37:40 -0700994
995 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
996 if (ret)
Tomeu Vizoso6d4e81e2014-11-24 10:08:03 +0100997 return ret;
Dirk Brandewiec034b022014-10-13 08:37:40 -0700998
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200999 if (cpufreq_driver->bios_limit) {
Thomas Renningere2f74f32009-11-19 12:31:01 +01001000 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
1001 if (ret)
Tomeu Vizoso6d4e81e2014-11-24 10:08:03 +01001002 return ret;
Thomas Renningere2f74f32009-11-19 12:31:01 +01001003 }
Dave Jones909a6942009-07-08 18:05:42 -04001004
Tomeu Vizoso6d4e81e2014-11-24 10:08:03 +01001005 return cpufreq_add_dev_symlink(policy);
Srivatsa S. Bhate18f1682013-07-30 04:24:23 +05301006}
1007
/*
 * Pick an initial governor for a (possibly restored) policy and apply it.
 * Prefers the governor that was active before hotplug (last_governor),
 * falling back to CPUFREQ_DEFAULT_GOVERNOR. On failure the driver's
 * ->exit() is called to undo ->init().
 */
static void cpufreq_init_policy(struct cpufreq_policy *policy)
{
	struct cpufreq_governor *gov = NULL;
	struct cpufreq_policy new_policy;
	int ret = 0;

	memcpy(&new_policy, policy, sizeof(*policy));

	/* Update governor of new_policy to the governor used before hotplug */
	gov = find_governor(policy->last_governor);
	if (gov)
		/*
		 * NOTE(review): this prints policy->governor->name, not
		 * gov->name — looks like it should report the governor
		 * being restored; confirm before changing.
		 */
		pr_debug("Restoring governor %s for cpu %d\n",
				policy->governor->name, policy->cpu);
	else
		gov = CPUFREQ_DEFAULT_GOVERNOR;

	new_policy.governor = gov;

	/* Use the default policy if its valid. */
	if (cpufreq_driver->setpolicy)
		cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);

	/* set default policy */
	ret = cpufreq_set_policy(policy, &new_policy);
	if (ret) {
		pr_debug("setting policy failed\n");
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);
	}
}
1038
Viresh Kumard8d3b472013-08-04 01:20:07 +02001039static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
Viresh Kumar42f921a2013-12-20 21:26:02 +05301040 unsigned int cpu, struct device *dev)
Viresh Kumarfcf80582013-01-29 14:39:08 +00001041{
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05301042 int ret = 0;
Viresh Kumarfcf80582013-01-29 14:39:08 +00001043
Viresh Kumarbb29ae12015-02-19 17:02:06 +05301044 /* Has this CPU been taken care of already? */
1045 if (cpumask_test_cpu(cpu, policy->cpus))
1046 return 0;
1047
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05301048 if (has_target()) {
Viresh Kumar3de9bde2013-08-06 22:53:13 +05301049 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1050 if (ret) {
1051 pr_err("%s: Failed to stop governor\n", __func__);
1052 return ret;
1053 }
1054 }
Viresh Kumarfcf80582013-01-29 14:39:08 +00001055
viresh kumarad7722d2013-10-18 19:10:15 +05301056 down_write(&policy->rwsem);
Viresh Kumarfcf80582013-01-29 14:39:08 +00001057 cpumask_set_cpu(cpu, policy->cpus);
viresh kumarad7722d2013-10-18 19:10:15 +05301058 up_write(&policy->rwsem);
Viresh Kumar2eaa3e22013-02-07 10:55:00 +05301059
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05301060 if (has_target()) {
Stratos Karafotise5c87b72014-03-19 23:29:17 +02001061 ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
1062 if (!ret)
1063 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1064
1065 if (ret) {
Viresh Kumar3de9bde2013-08-06 22:53:13 +05301066 pr_err("%s: Failed to start governor\n", __func__);
1067 return ret;
1068 }
Viresh Kumar820c6ca2013-04-22 00:48:03 +02001069 }
Viresh Kumarfcf80582013-01-29 14:39:08 +00001070
Viresh Kumar42f921a2013-12-20 21:26:02 +05301071 return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
Viresh Kumarfcf80582013-01-29 14:39:08 +00001072}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001073
Srivatsa S. Bhat84148092013-07-30 04:25:10 +05301074static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
1075{
1076 struct cpufreq_policy *policy;
1077 unsigned long flags;
1078
Lan Tianyu44871c92013-09-11 15:05:05 +08001079 read_lock_irqsave(&cpufreq_driver_lock, flags);
Viresh Kumar3914d372015-05-08 11:53:46 +05301080 policy = per_cpu(cpufreq_cpu_data, cpu);
Lan Tianyu44871c92013-09-11 15:05:05 +08001081 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
Srivatsa S. Bhat84148092013-07-30 04:25:10 +05301082
Viresh Kumar3914d372015-05-08 11:53:46 +05301083 if (likely(policy)) {
1084 /* Policy should be inactive here */
1085 WARN_ON(!policy_is_inactive(policy));
Viresh Kumar3914d372015-05-08 11:53:46 +05301086 }
viresh kumar6e2c89d2014-03-04 11:43:59 +08001087
Srivatsa S. Bhat84148092013-07-30 04:25:10 +05301088 return policy;
1089}
1090
Srivatsa S. Bhate9698cc2013-07-30 04:24:11 +05301091static struct cpufreq_policy *cpufreq_policy_alloc(void)
1092{
1093 struct cpufreq_policy *policy;
1094
1095 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
1096 if (!policy)
1097 return NULL;
1098
1099 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
1100 goto err_free_policy;
1101
1102 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1103 goto err_free_cpumask;
1104
Lukasz Majewskic88a1f82013-08-06 22:53:08 +05301105 INIT_LIST_HEAD(&policy->policy_list);
viresh kumarad7722d2013-10-18 19:10:15 +05301106 init_rwsem(&policy->rwsem);
Srivatsa S. Bhat12478cf2014-03-24 13:35:44 +05301107 spin_lock_init(&policy->transition_lock);
1108 init_waitqueue_head(&policy->transition_wait);
Viresh Kumar818c5712015-01-02 12:34:38 +05301109 init_completion(&policy->kobj_unregister);
1110 INIT_WORK(&policy->update, handle_update);
viresh kumarad7722d2013-10-18 19:10:15 +05301111
Srivatsa S. Bhate9698cc2013-07-30 04:24:11 +05301112 return policy;
1113
1114err_free_cpumask:
1115 free_cpumask_var(policy->cpus);
1116err_free_policy:
1117 kfree(policy);
1118
1119 return NULL;
1120}
1121
Viresh Kumar42f921a2013-12-20 21:26:02 +05301122static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
1123{
1124 struct kobject *kobj;
1125 struct completion *cmp;
1126
Viresh Kumarfcd7af92014-01-07 07:10:10 +05301127 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1128 CPUFREQ_REMOVE_POLICY, policy);
1129
Viresh Kumar42f921a2013-12-20 21:26:02 +05301130 down_read(&policy->rwsem);
1131 kobj = &policy->kobj;
1132 cmp = &policy->kobj_unregister;
1133 up_read(&policy->rwsem);
1134 kobject_put(kobj);
1135
1136 /*
1137 * We need to make sure that the underlying kobj is
1138 * actually not referenced anymore by anybody before we
1139 * proceed with unloading.
1140 */
1141 pr_debug("waiting for dropping of refcount\n");
1142 wait_for_completion(cmp);
1143 pr_debug("wait complete\n");
1144}
1145
Srivatsa S. Bhate9698cc2013-07-30 04:24:11 +05301146static void cpufreq_policy_free(struct cpufreq_policy *policy)
1147{
Viresh Kumar988bed02015-05-08 11:53:45 +05301148 unsigned long flags;
1149 int cpu;
1150
1151 /* Remove policy from list */
1152 write_lock_irqsave(&cpufreq_driver_lock, flags);
1153 list_del(&policy->policy_list);
1154
1155 for_each_cpu(cpu, policy->related_cpus)
1156 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1157 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1158
Srivatsa S. Bhate9698cc2013-07-30 04:24:11 +05301159 free_cpumask_var(policy->related_cpus);
1160 free_cpumask_var(policy->cpus);
1161 kfree(policy);
1162}
1163
Viresh Kumar1bfb4252014-07-17 10:48:28 +05301164static int update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu,
1165 struct device *cpu_dev)
Srivatsa S. Bhat0d66b912013-09-12 01:42:59 +05301166{
Viresh Kumar1bfb4252014-07-17 10:48:28 +05301167 int ret;
1168
Srivatsa S. Bhat99ec8992013-09-12 17:29:09 +05301169 if (WARN_ON(cpu == policy->cpu))
Viresh Kumar1bfb4252014-07-17 10:48:28 +05301170 return 0;
1171
1172 /* Move kobject to the new policy->cpu */
1173 ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
1174 if (ret) {
1175 pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
1176 return ret;
1177 }
Srivatsa S. Bhatcb38ed52013-09-12 01:43:42 +05301178
viresh kumarad7722d2013-10-18 19:10:15 +05301179 down_write(&policy->rwsem);
Srivatsa S. Bhat0d66b912013-09-12 01:42:59 +05301180 policy->cpu = cpu;
viresh kumarad7722d2013-10-18 19:10:15 +05301181 up_write(&policy->rwsem);
Viresh Kumar8efd5762013-09-17 10:22:11 +05301182
Viresh Kumar1bfb4252014-07-17 10:48:28 +05301183 return 0;
Srivatsa S. Bhat0d66b912013-09-12 01:42:59 +05301184}
1185
Viresh Kumar23faf0b2015-02-19 17:02:04 +05301186/**
1187 * cpufreq_add_dev - add a CPU device
1188 *
1189 * Adds the cpufreq interface for a CPU device.
1190 *
1191 * The Oracle says: try running cpufreq registration/unregistration concurrently
1192 * with with cpu hotplugging and all hell will break loose. Tried to clean this
1193 * mess up, but more thorough testing is needed. - Mathieu
1194 */
1195static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001196{
Viresh Kumarfcf80582013-01-29 14:39:08 +00001197 unsigned int j, cpu = dev->id;
Viresh Kumar65922462013-02-07 10:56:03 +05301198 int ret = -ENOMEM;
Viresh Kumar7f0c0202015-01-02 12:34:32 +05301199 struct cpufreq_policy *policy;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001200 unsigned long flags;
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301201 bool recover_policy = cpufreq_suspended;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001202
Ashok Rajc32b6b82005-10-30 14:59:54 -08001203 if (cpu_is_offline(cpu))
1204 return 0;
1205
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02001206 pr_debug("adding CPU %u\n", cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001207
Viresh Kumar6eed9402013-08-06 22:53:11 +05301208 if (!down_read_trylock(&cpufreq_rwsem))
1209 return 0;
1210
Viresh Kumarbb29ae12015-02-19 17:02:06 +05301211 /* Check if this CPU already has a policy to manage it */
Viresh Kumar9104bb22015-05-12 12:22:12 +05301212 policy = per_cpu(cpufreq_cpu_data, cpu);
1213 if (policy && !policy_is_inactive(policy)) {
1214 WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
1215 ret = cpufreq_add_policy_cpu(policy, cpu, dev);
1216 up_read(&cpufreq_rwsem);
1217 return ret;
Viresh Kumarfcf80582013-01-29 14:39:08 +00001218 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001219
Rafael J. Wysocki72368d12013-12-27 01:07:11 +01001220 /*
1221 * Restore the saved policy when doing light-weight init and fall back
1222 * to the full init if that fails.
1223 */
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301224 policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
Rafael J. Wysocki72368d12013-12-27 01:07:11 +01001225 if (!policy) {
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301226 recover_policy = false;
Srivatsa S. Bhat84148092013-07-30 04:25:10 +05301227 policy = cpufreq_policy_alloc();
Rafael J. Wysocki72368d12013-12-27 01:07:11 +01001228 if (!policy)
1229 goto nomem_out;
1230 }
Srivatsa S. Bhat0d66b912013-09-12 01:42:59 +05301231
1232 /*
1233 * In the resume path, since we restore a saved policy, the assignment
1234 * to policy->cpu is like an update of the existing policy, rather than
1235 * the creation of a brand new one. So we need to perform this update
1236 * by invoking update_policy_cpu().
1237 */
Viresh Kumar1bfb4252014-07-17 10:48:28 +05301238 if (recover_policy && cpu != policy->cpu)
1239 WARN_ON(update_policy_cpu(policy, cpu, dev));
1240 else
Srivatsa S. Bhat0d66b912013-09-12 01:42:59 +05301241 policy->cpu = cpu;
1242
Rusty Russell835481d2009-01-04 05:18:06 -08001243 cpumask_copy(policy->cpus, cpumask_of(cpu));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001244
Linus Torvalds1da177e2005-04-16 15:20:36 -07001245 /* call driver. From then on the cpufreq must be able
1246 * to accept all calls to ->verify and ->setpolicy for this CPU
1247 */
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001248 ret = cpufreq_driver->init(policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001249 if (ret) {
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02001250 pr_debug("initialization failed\n");
Viresh Kumar2eaa3e22013-02-07 10:55:00 +05301251 goto err_set_policy_cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001252 }
Viresh Kumar643ae6e2013-01-12 05:14:38 +00001253
Tomeu Vizoso6d4e81e2014-11-24 10:08:03 +01001254 down_write(&policy->rwsem);
1255
Viresh Kumar5a7e56a2014-03-04 11:44:00 +08001256 /* related cpus should atleast have policy->cpus */
1257 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1258
1259 /*
1260 * affected cpus must always be the one, which are online. We aren't
1261 * managing offline cpus here.
1262 */
1263 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1264
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301265 if (!recover_policy) {
Viresh Kumar5a7e56a2014-03-04 11:44:00 +08001266 policy->user_policy.min = policy->min;
1267 policy->user_policy.max = policy->max;
Tomeu Vizoso6d4e81e2014-11-24 10:08:03 +01001268
1269 /* prepare interface data */
1270 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
1271 &dev->kobj, "cpufreq");
1272 if (ret) {
1273 pr_err("%s: failed to init policy->kobj: %d\n",
1274 __func__, ret);
1275 goto err_init_policy_kobj;
1276 }
Viresh Kumar5a7e56a2014-03-04 11:44:00 +08001277
Viresh Kumar988bed02015-05-08 11:53:45 +05301278 write_lock_irqsave(&cpufreq_driver_lock, flags);
1279 for_each_cpu(j, policy->related_cpus)
1280 per_cpu(cpufreq_cpu_data, j) = policy;
1281 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1282 }
Viresh Kumar652ed952014-01-09 20:38:43 +05301283
Rafael J. Wysocki2ed99e32014-03-12 21:49:33 +01001284 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
Viresh Kumarda60ce92013-10-03 20:28:30 +05301285 policy->cur = cpufreq_driver->get(policy->cpu);
1286 if (!policy->cur) {
1287 pr_err("%s: ->get() failed\n", __func__);
1288 goto err_get_freq;
1289 }
1290 }
1291
Viresh Kumard3916692013-12-03 11:20:46 +05301292 /*
1293 * Sometimes boot loaders set CPU frequency to a value outside of
1294 * frequency table present with cpufreq core. In such cases CPU might be
1295 * unstable if it has to run on that frequency for long duration of time
1296 * and so its better to set it to a frequency which is specified in
1297 * freq-table. This also makes cpufreq stats inconsistent as
1298 * cpufreq-stats would fail to register because current frequency of CPU
1299 * isn't found in freq-table.
1300 *
1301 * Because we don't want this change to effect boot process badly, we go
1302 * for the next freq which is >= policy->cur ('cur' must be set by now,
1303 * otherwise we will end up setting freq to lowest of the table as 'cur'
1304 * is initialized to zero).
1305 *
1306 * We are passing target-freq as "policy->cur - 1" otherwise
1307 * __cpufreq_driver_target() would simply fail, as policy->cur will be
1308 * equal to target-freq.
1309 */
1310 if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1311 && has_target()) {
1312 /* Are we running at unknown frequency ? */
1313 ret = cpufreq_frequency_table_get_index(policy, policy->cur);
1314 if (ret == -EINVAL) {
1315 /* Warn user and fix it */
1316 pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
1317 __func__, policy->cpu, policy->cur);
1318 ret = __cpufreq_driver_target(policy, policy->cur - 1,
1319 CPUFREQ_RELATION_L);
1320
1321 /*
1322 * Reaching here after boot in a few seconds may not
1323 * mean that system will remain stable at "unknown"
1324 * frequency for longer duration. Hence, a BUG_ON().
1325 */
1326 BUG_ON(ret);
1327 pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
1328 __func__, policy->cpu, policy->cur);
1329 }
1330 }
1331
Thomas Renningera1531ac2008-07-29 22:32:58 -07001332 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1333 CPUFREQ_START, policy);
1334
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301335 if (!recover_policy) {
Viresh Kumar308b60e2013-07-31 14:35:14 +02001336 ret = cpufreq_add_dev_interface(policy, dev);
Srivatsa S. Bhata82fab22013-07-30 04:24:49 +05301337 if (ret)
1338 goto err_out_unregister;
Viresh Kumarfcd7af92014-01-07 07:10:10 +05301339 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1340 CPUFREQ_CREATE_POLICY, policy);
Dave Jones8ff69732006-03-05 03:37:23 -05001341
Viresh Kumar988bed02015-05-08 11:53:45 +05301342 write_lock_irqsave(&cpufreq_driver_lock, flags);
1343 list_add(&policy->policy_list, &cpufreq_policy_list);
1344 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1345 }
Viresh Kumar9515f4d2013-08-20 12:08:23 +05301346
Srivatsa S. Bhate18f1682013-07-30 04:24:23 +05301347 cpufreq_init_policy(policy);
1348
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301349 if (!recover_policy) {
Viresh Kumar08fd8c1c2013-12-24 07:11:01 +05301350 policy->user_policy.policy = policy->policy;
1351 policy->user_policy.governor = policy->governor;
1352 }
Viresh Kumar4e97b632014-03-04 11:44:01 +08001353 up_write(&policy->rwsem);
Viresh Kumar08fd8c1c2013-12-24 07:11:01 +05301354
Greg Kroah-Hartman038c5b32007-12-17 15:54:39 -04001355 kobject_uevent(&policy->kobj, KOBJ_ADD);
Viresh Kumar7c45cf32014-11-27 06:07:51 +05301356
Viresh Kumar6eed9402013-08-06 22:53:11 +05301357 up_read(&cpufreq_rwsem);
1358
Viresh Kumar7c45cf32014-11-27 06:07:51 +05301359 /* Callback for handling stuff after policy is ready */
1360 if (cpufreq_driver->ready)
1361 cpufreq_driver->ready(policy);
1362
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02001363 pr_debug("initialization complete\n");
Dave Jones87c32272006-03-29 01:48:37 -05001364
Linus Torvalds1da177e2005-04-16 15:20:36 -07001365 return 0;
1366
Linus Torvalds1da177e2005-04-16 15:20:36 -07001367err_out_unregister:
Viresh Kumar652ed952014-01-09 20:38:43 +05301368err_get_freq:
Tomeu Vizoso6d4e81e2014-11-24 10:08:03 +01001369 if (!recover_policy) {
1370 kobject_put(&policy->kobj);
1371 wait_for_completion(&policy->kobj_unregister);
1372 }
1373err_init_policy_kobj:
Prarit Bhargava7106e022014-09-10 10:12:08 -04001374 up_write(&policy->rwsem);
1375
Viresh Kumarda60ce92013-10-03 20:28:30 +05301376 if (cpufreq_driver->exit)
1377 cpufreq_driver->exit(policy);
Viresh Kumar2eaa3e22013-02-07 10:55:00 +05301378err_set_policy_cpu:
Viresh Kumar3914d372015-05-08 11:53:46 +05301379 if (recover_policy)
Viresh Kumar42f921a2013-12-20 21:26:02 +05301380 cpufreq_policy_put_kobj(policy);
Srivatsa S. Bhate9698cc2013-07-30 04:24:11 +05301381 cpufreq_policy_free(policy);
Viresh Kumar42f921a2013-12-20 21:26:02 +05301382
Linus Torvalds1da177e2005-04-16 15:20:36 -07001383nomem_out:
Viresh Kumar6eed9402013-08-06 22:53:11 +05301384 up_read(&cpufreq_rwsem);
1385
Linus Torvalds1da177e2005-04-16 15:20:36 -07001386 return ret;
1387}
1388
/*
 * __cpufreq_remove_dev_prepare - first half of CPU removal.
 *
 * Stops the governor (if any), records the governor name when this is the
 * last CPU in the policy so it can be restored later, and either drops this
 * CPU's sysfs symlink or migrates the policy kobject to another CPU in the
 * policy.  Must be followed by __cpufreq_remove_dev_finish().
 *
 * Returns 0 on success, -EINVAL if no policy exists for @dev, or an error
 * from governor stop / policy-CPU migration.
 */
static int __cpufreq_remove_dev_prepare(struct device *dev,
                                        struct subsys_interface *sif)
{
        unsigned int cpu = dev->id, cpus;
        int ret;
        struct cpufreq_policy *policy;

        pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

        policy = cpufreq_cpu_get_raw(cpu);
        if (!policy) {
                pr_debug("%s: No cpu_data found\n", __func__);
                return -EINVAL;
        }

        /* Quiesce the governor before manipulating policy->cpus. */
        if (has_target()) {
                ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
                if (ret) {
                        pr_err("%s: Failed to stop governor\n", __func__);
                        return ret;
                }
        }

        down_write(&policy->rwsem);
        cpus = cpumask_weight(policy->cpus);

        /*
         * Last CPU of the policy: remember the governor name so a recreated
         * policy can pick it back up.
         * NOTE(review): strncpy() does not NUL-terminate when the source
         * fills the buffer; governor names are presumably shorter than
         * CPUFREQ_NAME_LEN -- confirm.
         */
        if (has_target() && cpus == 1)
                strncpy(policy->last_governor, policy->governor->name,
                        CPUFREQ_NAME_LEN);
        up_write(&policy->rwsem);

        if (cpu != policy->cpu) {
                /* Not the policy owner: only the symlink needs to go. */
                sysfs_remove_link(&dev->kobj, "cpufreq");
        } else if (cpus > 1) {
                /* Nominate new CPU */
                int new_cpu = cpumask_any_but(policy->cpus, cpu);
                struct device *cpu_dev = get_cpu_device(new_cpu);

                sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
                ret = update_policy_cpu(policy, new_cpu, cpu_dev);
                if (ret) {
                        /* Migration failed: try to restore the symlink we removed. */
                        if (sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
                                              "cpufreq"))
                                pr_err("%s: Failed to restore kobj link to cpu:%d\n",
                                       __func__, cpu_dev->id);
                        return ret;
                }

                if (!cpufreq_suspended)
                        pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
                                 __func__, new_cpu, cpu);
        } else if (cpufreq_driver->stop_cpu) {
                /* Last CPU going away: give the driver a chance to quiesce it. */
                cpufreq_driver->stop_cpu(policy);
        }

        return 0;
}
1446
/*
 * __cpufreq_remove_dev_finish - second half of CPU removal.
 *
 * Clears the CPU from policy->cpus.  If that makes the policy inactive, the
 * governor is exited, the kobject and the policy itself are released (except
 * during suspend, when the policy is kept for resume) and the driver's
 * ->exit() is called.  Otherwise the governor is restarted on the remaining
 * CPUs.
 *
 * Returns 0 on success, -EINVAL if no policy exists for @dev, or a governor
 * exit/start error.
 */
static int __cpufreq_remove_dev_finish(struct device *dev,
                                       struct subsys_interface *sif)
{
        unsigned int cpu = dev->id;
        int ret;
        struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);

        if (!policy) {
                pr_debug("%s: No cpu_data found\n", __func__);
                return -EINVAL;
        }

        down_write(&policy->rwsem);
        cpumask_clear_cpu(cpu, policy->cpus);
        up_write(&policy->rwsem);

        /* If cpu is last user of policy, free policy */
        if (policy_is_inactive(policy)) {
                if (has_target()) {
                        ret = __cpufreq_governor(policy,
                                        CPUFREQ_GOV_POLICY_EXIT);
                        if (ret) {
                                pr_err("%s: Failed to exit governor\n",
                                       __func__);
                                return ret;
                        }
                }

                /* Keep the kobject across suspend; it is reused on resume. */
                if (!cpufreq_suspended)
                        cpufreq_policy_put_kobj(policy);

                /*
                 * Perform the ->exit() even during light-weight tear-down,
                 * since this is a core component, and is essential for the
                 * subsequent light-weight ->init() to succeed.
                 */
                if (cpufreq_driver->exit)
                        cpufreq_driver->exit(policy);

                if (!cpufreq_suspended)
                        cpufreq_policy_free(policy);
        } else if (has_target()) {
                /* Policy still has CPUs: restart the governor on them. */
                ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
                if (!ret)
                        ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

                if (ret) {
                        pr_err("%s: Failed to start governor\n", __func__);
                        return ret;
                }
        }

        return 0;
}
1501
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301502/**
Viresh Kumar27a862e2013-10-02 14:13:14 +05301503 * cpufreq_remove_dev - remove a CPU device
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301504 *
1505 * Removes the cpufreq interface for a CPU device.
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301506 */
Kay Sievers8a25a2f2011-12-21 14:29:42 -08001507static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001508{
Kay Sievers8a25a2f2011-12-21 14:29:42 -08001509 unsigned int cpu = dev->id;
Viresh Kumar27a862e2013-10-02 14:13:14 +05301510 int ret;
Venki Pallipadiec282972007-03-26 12:03:19 -07001511
1512 if (cpu_is_offline(cpu))
1513 return 0;
1514
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301515 ret = __cpufreq_remove_dev_prepare(dev, sif);
Viresh Kumar27a862e2013-10-02 14:13:14 +05301516
1517 if (!ret)
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301518 ret = __cpufreq_remove_dev_finish(dev, sif);
Viresh Kumar27a862e2013-10-02 14:13:14 +05301519
1520 return ret;
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001521}
1522
David Howells65f27f32006-11-22 14:55:48 +00001523static void handle_update(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001524{
David Howells65f27f32006-11-22 14:55:48 +00001525 struct cpufreq_policy *policy =
1526 container_of(work, struct cpufreq_policy, update);
1527 unsigned int cpu = policy->cpu;
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02001528 pr_debug("handle_update for cpu %u called\n", cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001529 cpufreq_update_policy(cpu);
1530}
1531
1532/**
Viresh Kumarbb176f72013-06-19 14:19:33 +05301533 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
1534 * in deep trouble.
Viresh Kumara1e1dc42015-01-02 12:34:28 +05301535 * @policy: policy managing CPUs
Linus Torvalds1da177e2005-04-16 15:20:36 -07001536 * @new_freq: CPU frequency the CPU actually runs at
1537 *
Dave Jones29464f22009-01-18 01:37:11 -05001538 * We adjust to current frequency first, and need to clean up later.
1539 * So either call to cpufreq_update_policy() or schedule handle_update()).
Linus Torvalds1da177e2005-04-16 15:20:36 -07001540 */
Viresh Kumara1e1dc42015-01-02 12:34:28 +05301541static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301542 unsigned int new_freq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001543{
1544 struct cpufreq_freqs freqs;
Viresh Kumarb43a7ff2013-03-24 11:56:43 +05301545
Joe Perchese837f9b2014-03-11 10:03:00 -07001546 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
Viresh Kumara1e1dc42015-01-02 12:34:28 +05301547 policy->cur, new_freq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001548
Viresh Kumara1e1dc42015-01-02 12:34:28 +05301549 freqs.old = policy->cur;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001550 freqs.new = new_freq;
Viresh Kumarb43a7ff2013-03-24 11:56:43 +05301551
Viresh Kumar8fec0512014-03-24 13:35:45 +05301552 cpufreq_freq_transition_begin(policy, &freqs);
1553 cpufreq_freq_transition_end(policy, &freqs, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001554}
1555
Dave Jones32ee8c32006-02-28 00:43:23 -05001556/**
Dhaval Giani4ab70df2006-12-13 14:49:15 +05301557 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001558 * @cpu: CPU number
1559 *
1560 * This is the last known freq, without actually getting it from the driver.
1561 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1562 */
1563unsigned int cpufreq_quick_get(unsigned int cpu)
1564{
Dirk Brandewie9e21ba82013-02-06 09:02:08 -08001565 struct cpufreq_policy *policy;
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301566 unsigned int ret_freq = 0;
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001567
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001568 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1569 return cpufreq_driver->get(cpu);
Dirk Brandewie9e21ba82013-02-06 09:02:08 -08001570
1571 policy = cpufreq_cpu_get(cpu);
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001572 if (policy) {
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301573 ret_freq = policy->cur;
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001574 cpufreq_cpu_put(policy);
1575 }
1576
Dave Jones4d34a672008-02-07 16:33:49 -05001577 return ret_freq;
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001578}
1579EXPORT_SYMBOL(cpufreq_quick_get);
1580
Jesse Barnes3d737102011-06-28 10:59:12 -07001581/**
1582 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1583 * @cpu: CPU number
1584 *
1585 * Just return the max possible frequency for a given CPU.
1586 */
1587unsigned int cpufreq_quick_get_max(unsigned int cpu)
1588{
1589 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1590 unsigned int ret_freq = 0;
1591
1592 if (policy) {
1593 ret_freq = policy->max;
1594 cpufreq_cpu_put(policy);
1595 }
1596
1597 return ret_freq;
1598}
1599EXPORT_SYMBOL(cpufreq_quick_get_max);
1600
Viresh Kumard92d50a2015-01-02 12:34:29 +05301601static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001602{
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301603 unsigned int ret_freq = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001604
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001605 if (!cpufreq_driver->get)
Dave Jones4d34a672008-02-07 16:33:49 -05001606 return ret_freq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001607
Viresh Kumard92d50a2015-01-02 12:34:29 +05301608 ret_freq = cpufreq_driver->get(policy->cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001609
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301610 if (ret_freq && policy->cur &&
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001611 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301612 /* verify no discrepancy between actual and
1613 saved value exists */
1614 if (unlikely(ret_freq != policy->cur)) {
Viresh Kumara1e1dc42015-01-02 12:34:28 +05301615 cpufreq_out_of_sync(policy, ret_freq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001616 schedule_work(&policy->update);
1617 }
1618 }
1619
Dave Jones4d34a672008-02-07 16:33:49 -05001620 return ret_freq;
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001621}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001622
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001623/**
1624 * cpufreq_get - get the current CPU frequency (in kHz)
1625 * @cpu: CPU number
1626 *
1627 * Get the CPU current (static) CPU frequency
1628 */
1629unsigned int cpufreq_get(unsigned int cpu)
1630{
Aaron Plattner999976e2014-03-04 12:42:15 -08001631 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001632 unsigned int ret_freq = 0;
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001633
Aaron Plattner999976e2014-03-04 12:42:15 -08001634 if (policy) {
1635 down_read(&policy->rwsem);
Viresh Kumard92d50a2015-01-02 12:34:29 +05301636 ret_freq = __cpufreq_get(policy);
Aaron Plattner999976e2014-03-04 12:42:15 -08001637 up_read(&policy->rwsem);
Viresh Kumar26ca8692013-09-20 22:37:31 +05301638
Aaron Plattner999976e2014-03-04 12:42:15 -08001639 cpufreq_cpu_put(policy);
1640 }
Viresh Kumar6eed9402013-08-06 22:53:11 +05301641
Dave Jones4d34a672008-02-07 16:33:49 -05001642 return ret_freq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001643}
1644EXPORT_SYMBOL(cpufreq_get);
1645
/* Hooks cpufreq device add/remove into the CPU subsystem device layer. */
static struct subsys_interface cpufreq_interface = {
        .name           = "cpufreq",
        .subsys         = &cpu_subsys,
        .add_dev        = cpufreq_add_dev,
        .remove_dev     = cpufreq_remove_dev,
};
1652
/*
 * cpufreq_generic_suspend - generic ->suspend() callback for cpufreq drivers.
 * @policy: policy to switch to its suspend frequency
 *
 * For platforms that want a specific frequency to be configured during
 * suspend: forces the policy to policy->suspend_freq using
 * CPUFREQ_RELATION_H.
 *
 * Returns 0 on success, -EINVAL if no suspend frequency was set, or the
 * error from __cpufreq_driver_target().
 */
int cpufreq_generic_suspend(struct cpufreq_policy *policy)
{
        int ret;

        /* A zero suspend_freq means the driver never configured one. */
        if (!policy->suspend_freq) {
                pr_err("%s: suspend_freq can't be zero\n", __func__);
                return -EINVAL;
        }

        pr_debug("%s: Setting suspend-freq: %u\n", __func__,
                 policy->suspend_freq);

        ret = __cpufreq_driver_target(policy, policy->suspend_freq,
                        CPUFREQ_RELATION_H);
        if (ret)
                pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
                       __func__, policy->suspend_freq, ret);

        return ret;
}
EXPORT_SYMBOL(cpufreq_generic_suspend);
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001678
/**
 * cpufreq_suspend() - Suspend CPUFreq governors
 *
 * Called during system wide Suspend/Hibernate cycles for suspending governors
 * as some platforms can't change frequency after this point in the suspend
 * cycle, because some of the devices (like: i2c, regulators, etc) they use
 * for changing frequency are suspended quickly after this point.
 */
void cpufreq_suspend(void)
{
        struct cpufreq_policy *policy;

        if (!cpufreq_driver)
                return;

        /* Without a governor there is nothing to stop; just set the flag. */
        if (!has_target())
                goto suspend;

        pr_debug("%s: Suspending Governors\n", __func__);

        for_each_active_policy(policy) {
                /* Stop the governor first, then let the driver suspend. */
                if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
                        pr_err("%s: Failed to stop governor for policy: %p\n",
                                __func__, policy);
                else if (cpufreq_driver->suspend
                    && cpufreq_driver->suspend(policy))
                        pr_err("%s: Failed to suspend driver: %p\n", __func__,
                                policy);
        }

suspend:
        /* From here on, governor operations are short-circuited. */
        cpufreq_suspended = true;
}
1712
/**
 * cpufreq_resume() - Resume CPUFreq governors
 *
 * Called during system wide Suspend/Hibernate cycle for resuming governors
 * that were suspended with cpufreq_suspend().
 */
void cpufreq_resume(void)
{
        struct cpufreq_policy *policy;

        if (!cpufreq_driver)
                return;

        /* Re-enable governor operations before restarting them. */
        cpufreq_suspended = false;

        if (!has_target())
                return;

        pr_debug("%s: Resuming Governors\n", __func__);

        for_each_active_policy(policy) {
                /* Resume the driver first, then restart the governor. */
                if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
                        pr_err("%s: Failed to resume driver: %p\n", __func__,
                                policy);
                else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
                    || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
                        pr_err("%s: Failed to start governor for policy: %p\n",
                                __func__, policy);
        }

        /*
         * schedule call cpufreq_update_policy() for first-online CPU, as that
         * wouldn't be hotplugged-out on suspend. It will verify that the
         * current freq is in sync with what we believe it to be.
         */
        policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask));
        if (WARN_ON(!policy))
                return;

        schedule_work(&policy->update);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001754
Borislav Petkov9d950462013-01-20 10:24:28 +00001755/**
1756 * cpufreq_get_current_driver - return current driver's name
1757 *
1758 * Return the name string of the currently loaded cpufreq driver
1759 * or NULL, if none.
1760 */
1761const char *cpufreq_get_current_driver(void)
1762{
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001763 if (cpufreq_driver)
1764 return cpufreq_driver->name;
1765
1766 return NULL;
Borislav Petkov9d950462013-01-20 10:24:28 +00001767}
1768EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001769
Thomas Petazzoni51315cd2014-10-19 11:30:27 +02001770/**
1771 * cpufreq_get_driver_data - return current driver data
1772 *
1773 * Return the private data of the currently loaded cpufreq
1774 * driver, or NULL if no cpufreq driver is loaded.
1775 */
1776void *cpufreq_get_driver_data(void)
1777{
1778 if (cpufreq_driver)
1779 return cpufreq_driver->driver_data;
1780
1781 return NULL;
1782}
1783EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
1784
Linus Torvalds1da177e2005-04-16 15:20:36 -07001785/*********************************************************************
1786 * NOTIFIER LISTS INTERFACE *
1787 *********************************************************************/
1788
1789/**
1790 * cpufreq_register_notifier - register a driver with cpufreq
1791 * @nb: notifier function to register
1792 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1793 *
Dave Jones32ee8c32006-02-28 00:43:23 -05001794 * Add a driver to one of two lists: either a list of drivers that
Linus Torvalds1da177e2005-04-16 15:20:36 -07001795 * are notified about clock rate changes (once before and once after
1796 * the transition), or a list of drivers that are notified about
1797 * changes in cpufreq policy.
1798 *
1799 * This function may sleep, and has the same return conditions as
Alan Sterne041c682006-03-27 01:16:30 -08001800 * blocking_notifier_chain_register.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001801 */
1802int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1803{
1804 int ret;
1805
Dirk Brandewied5aaffa2013-01-17 16:22:21 +00001806 if (cpufreq_disabled())
1807 return -EINVAL;
1808
Cesar Eduardo Barros74212ca2008-02-16 08:41:24 -02001809 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1810
Linus Torvalds1da177e2005-04-16 15:20:36 -07001811 switch (list) {
1812 case CPUFREQ_TRANSITION_NOTIFIER:
Alan Sternb4dfdbb2006-10-04 02:17:06 -07001813 ret = srcu_notifier_chain_register(
Alan Sterne041c682006-03-27 01:16:30 -08001814 &cpufreq_transition_notifier_list, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815 break;
1816 case CPUFREQ_POLICY_NOTIFIER:
Alan Sterne041c682006-03-27 01:16:30 -08001817 ret = blocking_notifier_chain_register(
1818 &cpufreq_policy_notifier_list, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001819 break;
1820 default:
1821 ret = -EINVAL;
1822 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001823
1824 return ret;
1825}
1826EXPORT_SYMBOL(cpufreq_register_notifier);
1827
Linus Torvalds1da177e2005-04-16 15:20:36 -07001828/**
1829 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1830 * @nb: notifier block to be unregistered
Viresh Kumarbb176f72013-06-19 14:19:33 +05301831 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
Linus Torvalds1da177e2005-04-16 15:20:36 -07001832 *
1833 * Remove a driver from the CPU frequency notifier list.
1834 *
1835 * This function may sleep, and has the same return conditions as
Alan Sterne041c682006-03-27 01:16:30 -08001836 * blocking_notifier_chain_unregister.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001837 */
1838int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1839{
1840 int ret;
1841
Dirk Brandewied5aaffa2013-01-17 16:22:21 +00001842 if (cpufreq_disabled())
1843 return -EINVAL;
1844
Linus Torvalds1da177e2005-04-16 15:20:36 -07001845 switch (list) {
1846 case CPUFREQ_TRANSITION_NOTIFIER:
Alan Sternb4dfdbb2006-10-04 02:17:06 -07001847 ret = srcu_notifier_chain_unregister(
Alan Sterne041c682006-03-27 01:16:30 -08001848 &cpufreq_transition_notifier_list, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001849 break;
1850 case CPUFREQ_POLICY_NOTIFIER:
Alan Sterne041c682006-03-27 01:16:30 -08001851 ret = blocking_notifier_chain_unregister(
1852 &cpufreq_policy_notifier_list, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001853 break;
1854 default:
1855 ret = -EINVAL;
1856 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001857
1858 return ret;
1859}
1860EXPORT_SYMBOL(cpufreq_unregister_notifier);
1861
1862
1863/*********************************************************************
1864 * GOVERNORS *
1865 *********************************************************************/
1866
/*
 * __target_intermediate - switch to the driver's intermediate frequency.
 * @policy: policy being changed
 * @freqs: transition descriptor; freqs->old must already be set
 * @index: target frequency-table index
 *
 * Must set freqs->new to the intermediate frequency.  Returns 0 when no
 * intermediate step is needed (driver returned 0) or on success; otherwise
 * the error from ->target_intermediate().  On success, the caller is
 * expected to continue the transition using freqs->new as the old freq.
 */
static int __target_intermediate(struct cpufreq_policy *policy,
                                 struct cpufreq_freqs *freqs, int index)
{
        int ret;

        freqs->new = cpufreq_driver->get_intermediate(policy, index);

        /* We don't need to switch to intermediate freq */
        if (!freqs->new)
                return 0;

        pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
                 __func__, policy->cpu, freqs->old, freqs->new);

        /* Notify, switch, then complete the notification with the result. */
        cpufreq_freq_transition_begin(policy, freqs);
        ret = cpufreq_driver->target_intermediate(policy, index);
        cpufreq_freq_transition_end(policy, freqs, ret);

        if (ret)
                pr_err("%s: Failed to change to intermediate frequency: %d\n",
                       __func__, ret);

        return ret;
}
1892
/*
 * __target_index - switch a policy to the frequency at @index.
 * @policy: policy being changed
 * @freq_table: the policy's frequency table
 * @index: index of the target frequency in @freq_table
 *
 * Wraps the driver's ->target_index() with transition notifications
 * (skipped for CPUFREQ_ASYNC_NOTIFICATION drivers, which notify on their
 * own) and an optional intermediate-frequency step.  On failure after the
 * intermediate switch, announces the rollback to policy->restore_freq.
 */
static int __target_index(struct cpufreq_policy *policy,
                          struct cpufreq_frequency_table *freq_table, int index)
{
        struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
        unsigned int intermediate_freq = 0;
        int retval = -EINVAL;
        bool notify;

        /* Async drivers send their own notifications; don't double-notify. */
        notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
        if (notify) {
                /* Handle switching to intermediate frequency */
                if (cpufreq_driver->get_intermediate) {
                        retval = __target_intermediate(policy, &freqs, index);
                        if (retval)
                                return retval;

                        intermediate_freq = freqs.new;
                        /* Set old freq to intermediate */
                        if (intermediate_freq)
                                freqs.old = freqs.new;
                }

                freqs.new = freq_table[index].frequency;
                pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
                         __func__, policy->cpu, freqs.old, freqs.new);

                cpufreq_freq_transition_begin(policy, &freqs);
        }

        retval = cpufreq_driver->target_index(policy, index);
        if (retval)
                pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
                       retval);

        if (notify) {
                cpufreq_freq_transition_end(policy, &freqs, retval);

                /*
                 * Failed after setting to intermediate freq? Driver should have
                 * reverted back to initial frequency and so should we. Check
                 * here for intermediate_freq instead of get_intermediate, in
                 * case we haven't switched to intermediate freq at all.
                 */
                if (unlikely(retval && intermediate_freq)) {
                        freqs.old = intermediate_freq;
                        freqs.new = policy->restore_freq;
                        cpufreq_freq_transition_begin(policy, &freqs);
                        cpufreq_freq_transition_end(policy, &freqs, 0);
                }
        }

        return retval;
}
1946
/*
 * __cpufreq_driver_target - change a policy's frequency (lock already held).
 * @policy: policy to change
 * @target_freq: desired frequency in kHz (clamped to policy->min/max)
 * @relation: CPUFREQ_RELATION_* selection rule
 *
 * Caller must hold policy->rwsem.  Dispatches to the driver's ->target()
 * or, for table-based drivers, resolves @target_freq to a table index and
 * calls __target_index().  Returns 0 on success or if the policy is
 * already at the requested frequency; negative error otherwise.
 */
int __cpufreq_driver_target(struct cpufreq_policy *policy,
                            unsigned int target_freq,
                            unsigned int relation)
{
        unsigned int old_target_freq = target_freq;
        int retval = -EINVAL;

        if (cpufreq_disabled())
                return -ENODEV;

        /* Make sure that target_freq is within supported range */
        if (target_freq > policy->max)
                target_freq = policy->max;
        if (target_freq < policy->min)
                target_freq = policy->min;

        pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
                 policy->cpu, target_freq, relation, old_target_freq);

        /*
         * This might look like a redundant call as we are checking it again
         * after finding index. But it is left intentionally for cases where
         * exactly same freq is called again and so we can save on few function
         * calls.
         */
        if (target_freq == policy->cur)
                return 0;

        /* Save last value to restore later on errors */
        policy->restore_freq = policy->cur;

        if (cpufreq_driver->target)
                retval = cpufreq_driver->target(policy, target_freq, relation);
        else if (cpufreq_driver->target_index) {
                struct cpufreq_frequency_table *freq_table;
                int index;

                freq_table = cpufreq_frequency_get_table(policy->cpu);
                if (unlikely(!freq_table)) {
                        pr_err("%s: Unable to find freq_table\n", __func__);
                        goto out;
                }

                retval = cpufreq_frequency_table_target(policy, freq_table,
                                target_freq, relation, &index);
                if (unlikely(retval)) {
                        pr_err("%s: Unable to find matching freq\n", __func__);
                        goto out;
                }

                /* Already at the resolved frequency: nothing to do. */
                if (freq_table[index].frequency == policy->cur) {
                        retval = 0;
                        goto out;
                }

                retval = __target_index(policy, freq_table, index);
        }

out:
        return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
2009
Linus Torvalds1da177e2005-04-16 15:20:36 -07002010int cpufreq_driver_target(struct cpufreq_policy *policy,
2011 unsigned int target_freq,
2012 unsigned int relation)
2013{
Julia Lawallf1829e42008-07-25 22:44:53 +02002014 int ret = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002015
viresh kumarad7722d2013-10-18 19:10:15 +05302016 down_write(&policy->rwsem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002017
2018 ret = __cpufreq_driver_target(policy, target_freq, relation);
2019
viresh kumarad7722d2013-10-18 19:10:15 +05302020 up_write(&policy->rwsem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002021
Linus Torvalds1da177e2005-04-16 15:20:36 -07002022 return ret;
2023}
2024EXPORT_SYMBOL_GPL(cpufreq_driver_target);
2025
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05302026static int __cpufreq_governor(struct cpufreq_policy *policy,
2027 unsigned int event)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002028{
Dave Jonescc993ca2005-07-28 09:43:56 -07002029 int ret;
Thomas Renninger6afde102007-10-02 13:28:13 -07002030
2031 /* Only must be defined when default governor is known to have latency
2032 restrictions, like e.g. conservative or ondemand.
2033 That this is the case is already ensured in Kconfig
2034 */
2035#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
2036 struct cpufreq_governor *gov = &cpufreq_gov_performance;
2037#else
2038 struct cpufreq_governor *gov = NULL;
2039#endif
Thomas Renninger1c256242007-10-02 13:28:12 -07002040
Viresh Kumar2f0aea92014-03-04 11:00:26 +08002041 /* Don't start any governor operations if we are entering suspend */
2042 if (cpufreq_suspended)
2043 return 0;
Ethan Zhaocb577202014-12-18 15:28:19 +09002044 /*
2045 * Governor might not be initiated here if ACPI _PPC changed
2046 * notification happened, so check it.
2047 */
2048 if (!policy->governor)
2049 return -EINVAL;
Viresh Kumar2f0aea92014-03-04 11:00:26 +08002050
Thomas Renninger1c256242007-10-02 13:28:12 -07002051 if (policy->governor->max_transition_latency &&
2052 policy->cpuinfo.transition_latency >
2053 policy->governor->max_transition_latency) {
Thomas Renninger6afde102007-10-02 13:28:13 -07002054 if (!gov)
2055 return -EINVAL;
2056 else {
Joe Perchese837f9b2014-03-11 10:03:00 -07002057 pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
2058 policy->governor->name, gov->name);
Thomas Renninger6afde102007-10-02 13:28:13 -07002059 policy->governor = gov;
2060 }
Thomas Renninger1c256242007-10-02 13:28:12 -07002061 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002062
Viresh Kumarfe492f32013-08-06 22:53:10 +05302063 if (event == CPUFREQ_GOV_POLICY_INIT)
2064 if (!try_module_get(policy->governor->owner))
2065 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002066
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02002067 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
Joe Perchese837f9b2014-03-11 10:03:00 -07002068 policy->cpu, event);
Xiaoguang Chen95731eb2013-06-19 15:00:07 +08002069
2070 mutex_lock(&cpufreq_governor_lock);
Srivatsa S. Bhat56d07db2013-09-07 01:23:55 +05302071 if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
Viresh Kumarf73d3932013-08-31 17:53:40 +05302072 || (!policy->governor_enabled
2073 && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
Xiaoguang Chen95731eb2013-06-19 15:00:07 +08002074 mutex_unlock(&cpufreq_governor_lock);
2075 return -EBUSY;
2076 }
2077
2078 if (event == CPUFREQ_GOV_STOP)
2079 policy->governor_enabled = false;
2080 else if (event == CPUFREQ_GOV_START)
2081 policy->governor_enabled = true;
2082
2083 mutex_unlock(&cpufreq_governor_lock);
2084
Linus Torvalds1da177e2005-04-16 15:20:36 -07002085 ret = policy->governor->governor(policy, event);
2086
Viresh Kumar4d5dcc42013-03-27 15:58:58 +00002087 if (!ret) {
2088 if (event == CPUFREQ_GOV_POLICY_INIT)
2089 policy->governor->initialized++;
2090 else if (event == CPUFREQ_GOV_POLICY_EXIT)
2091 policy->governor->initialized--;
Xiaoguang Chen95731eb2013-06-19 15:00:07 +08002092 } else {
2093 /* Restore original values */
2094 mutex_lock(&cpufreq_governor_lock);
2095 if (event == CPUFREQ_GOV_STOP)
2096 policy->governor_enabled = true;
2097 else if (event == CPUFREQ_GOV_START)
2098 policy->governor_enabled = false;
2099 mutex_unlock(&cpufreq_governor_lock);
Viresh Kumar4d5dcc42013-03-27 15:58:58 +00002100 }
Viresh Kumarb3940582013-02-01 05:42:58 +00002101
Viresh Kumarfe492f32013-08-06 22:53:10 +05302102 if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
2103 ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002104 module_put(policy->governor->owner);
2105
2106 return ret;
2107}
2108
Linus Torvalds1da177e2005-04-16 15:20:36 -07002109int cpufreq_register_governor(struct cpufreq_governor *governor)
2110{
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -07002111 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112
2113 if (!governor)
2114 return -EINVAL;
2115
Konrad Rzeszutek Wilka7b422c2012-03-13 19:18:39 -04002116 if (cpufreq_disabled())
2117 return -ENODEV;
2118
akpm@osdl.org3fc54d32006-01-13 15:54:22 -08002119 mutex_lock(&cpufreq_governor_mutex);
Dave Jones32ee8c32006-02-28 00:43:23 -05002120
Viresh Kumarb3940582013-02-01 05:42:58 +00002121 governor->initialized = 0;
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -07002122 err = -EBUSY;
Viresh Kumar42f91fa2015-01-02 12:34:26 +05302123 if (!find_governor(governor->name)) {
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -07002124 err = 0;
2125 list_add(&governor->governor_list, &cpufreq_governor_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002126 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002127
Dave Jones32ee8c32006-02-28 00:43:23 -05002128 mutex_unlock(&cpufreq_governor_mutex);
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -07002129 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130}
2131EXPORT_SYMBOL_GPL(cpufreq_register_governor);
2132
Linus Torvalds1da177e2005-04-16 15:20:36 -07002133void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2134{
Viresh Kumar45732372015-05-12 12:22:34 +05302135 struct cpufreq_policy *policy;
2136 unsigned long flags;
Prarit Bhargava90e41ba2009-11-12 09:18:46 -05002137
Linus Torvalds1da177e2005-04-16 15:20:36 -07002138 if (!governor)
2139 return;
2140
Konrad Rzeszutek Wilka7b422c2012-03-13 19:18:39 -04002141 if (cpufreq_disabled())
2142 return;
2143
Viresh Kumar45732372015-05-12 12:22:34 +05302144 /* clear last_governor for all inactive policies */
2145 read_lock_irqsave(&cpufreq_driver_lock, flags);
2146 for_each_inactive_policy(policy) {
Viresh Kumar18bf3a12015-05-12 12:22:51 +05302147 if (!strcmp(policy->last_governor, governor->name)) {
2148 policy->governor = NULL;
Viresh Kumar45732372015-05-12 12:22:34 +05302149 strcpy(policy->last_governor, "\0");
Viresh Kumar18bf3a12015-05-12 12:22:51 +05302150 }
Prarit Bhargava90e41ba2009-11-12 09:18:46 -05002151 }
Viresh Kumar45732372015-05-12 12:22:34 +05302152 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
Prarit Bhargava90e41ba2009-11-12 09:18:46 -05002153
akpm@osdl.org3fc54d32006-01-13 15:54:22 -08002154 mutex_lock(&cpufreq_governor_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155 list_del(&governor->governor_list);
akpm@osdl.org3fc54d32006-01-13 15:54:22 -08002156 mutex_unlock(&cpufreq_governor_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002157 return;
2158}
2159EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2160
2161
Linus Torvalds1da177e2005-04-16 15:20:36 -07002162/*********************************************************************
2163 * POLICY INTERFACE *
2164 *********************************************************************/
2165
2166/**
2167 * cpufreq_get_policy - get the current cpufreq_policy
Dave Jones29464f22009-01-18 01:37:11 -05002168 * @policy: struct cpufreq_policy into which the current cpufreq_policy
2169 * is written
Linus Torvalds1da177e2005-04-16 15:20:36 -07002170 *
2171 * Reads the current cpufreq policy.
2172 */
2173int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2174{
2175 struct cpufreq_policy *cpu_policy;
2176 if (!policy)
2177 return -EINVAL;
2178
2179 cpu_policy = cpufreq_cpu_get(cpu);
2180 if (!cpu_policy)
2181 return -EINVAL;
2182
Viresh Kumard5b73cd2013-08-06 22:53:06 +05302183 memcpy(policy, cpu_policy, sizeof(*policy));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002184
2185 cpufreq_cpu_put(cpu_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002186 return 0;
2187}
2188EXPORT_SYMBOL(cpufreq_get_policy);
2189
Arjan van de Ven153d7f32006-07-26 15:40:07 +02002190/*
Viresh Kumar037ce832013-10-02 14:13:16 +05302191 * policy : current policy.
2192 * new_policy: policy to be set.
Arjan van de Ven153d7f32006-07-26 15:40:07 +02002193 */
Viresh Kumar037ce832013-10-02 14:13:16 +05302194static int cpufreq_set_policy(struct cpufreq_policy *policy,
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302195 struct cpufreq_policy *new_policy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002196{
Rafael J. Wysockid9a789c2014-02-17 22:56:35 +01002197 struct cpufreq_governor *old_gov;
2198 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002199
Joe Perchese837f9b2014-03-11 10:03:00 -07002200 pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2201 new_policy->cpu, new_policy->min, new_policy->max);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002202
Viresh Kumard5b73cd2013-08-06 22:53:06 +05302203 memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002204
Rafael J. Wysockid9a789c2014-02-17 22:56:35 +01002205 if (new_policy->min > policy->max || new_policy->max < policy->min)
2206 return -EINVAL;
Mattia Dongili9c9a43e2006-07-05 23:12:20 +02002207
Linus Torvalds1da177e2005-04-16 15:20:36 -07002208 /* verify the cpu speed can be set within this limit */
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302209 ret = cpufreq_driver->verify(new_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002210 if (ret)
Rafael J. Wysockid9a789c2014-02-17 22:56:35 +01002211 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002212
Linus Torvalds1da177e2005-04-16 15:20:36 -07002213 /* adjust if necessary - all reasons */
Alan Sterne041c682006-03-27 01:16:30 -08002214 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302215 CPUFREQ_ADJUST, new_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002216
2217 /* adjust if necessary - hardware incompatibility*/
Alan Sterne041c682006-03-27 01:16:30 -08002218 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302219 CPUFREQ_INCOMPATIBLE, new_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002220
Viresh Kumarbb176f72013-06-19 14:19:33 +05302221 /*
2222 * verify the cpu speed can be set within this limit, which might be
2223 * different to the first one
2224 */
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302225 ret = cpufreq_driver->verify(new_policy);
Alan Sterne041c682006-03-27 01:16:30 -08002226 if (ret)
Rafael J. Wysockid9a789c2014-02-17 22:56:35 +01002227 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002228
2229 /* notification of the new policy */
Alan Sterne041c682006-03-27 01:16:30 -08002230 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302231 CPUFREQ_NOTIFY, new_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002232
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302233 policy->min = new_policy->min;
2234 policy->max = new_policy->max;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002235
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02002236 pr_debug("new min and max freqs are %u - %u kHz\n",
Joe Perchese837f9b2014-03-11 10:03:00 -07002237 policy->min, policy->max);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002238
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02002239 if (cpufreq_driver->setpolicy) {
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302240 policy->policy = new_policy->policy;
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02002241 pr_debug("setting range\n");
Rafael J. Wysockid9a789c2014-02-17 22:56:35 +01002242 return cpufreq_driver->setpolicy(new_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002243 }
2244
Rafael J. Wysockid9a789c2014-02-17 22:56:35 +01002245 if (new_policy->governor == policy->governor)
2246 goto out;
2247
2248 pr_debug("governor switch\n");
2249
2250 /* save old, working values */
2251 old_gov = policy->governor;
2252 /* end old governor */
2253 if (old_gov) {
2254 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
2255 up_write(&policy->rwsem);
Stratos Karafotise5c87b72014-03-19 23:29:17 +02002256 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
Rafael J. Wysockid9a789c2014-02-17 22:56:35 +01002257 down_write(&policy->rwsem);
2258 }
2259
2260 /* start new governor */
2261 policy->governor = new_policy->governor;
2262 if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
2263 if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
2264 goto out;
2265
2266 up_write(&policy->rwsem);
2267 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2268 down_write(&policy->rwsem);
2269 }
2270
2271 /* new governor failed, so re-start old one */
2272 pr_debug("starting governor %s failed\n", policy->governor->name);
2273 if (old_gov) {
2274 policy->governor = old_gov;
2275 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
2276 __cpufreq_governor(policy, CPUFREQ_GOV_START);
2277 }
2278
2279 return -EINVAL;
2280
2281 out:
2282 pr_debug("governor: change or update limits\n");
2283 return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002284}
2285
2286/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002287 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
2288 * @cpu: CPU which shall be re-evaluated
2289 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002290 * Useful for policy notifiers which have different necessities
Linus Torvalds1da177e2005-04-16 15:20:36 -07002291 * at different times.
2292 */
2293int cpufreq_update_policy(unsigned int cpu)
2294{
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302295 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
2296 struct cpufreq_policy new_policy;
Julia Lawallf1829e42008-07-25 22:44:53 +02002297 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002298
Aaron Plattnerfefa8ff2014-06-18 11:27:32 -07002299 if (!policy)
2300 return -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002301
viresh kumarad7722d2013-10-18 19:10:15 +05302302 down_write(&policy->rwsem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02002304 pr_debug("updating policy for CPU %u\n", cpu);
Viresh Kumard5b73cd2013-08-06 22:53:06 +05302305 memcpy(&new_policy, policy, sizeof(*policy));
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302306 new_policy.min = policy->user_policy.min;
2307 new_policy.max = policy->user_policy.max;
2308 new_policy.policy = policy->user_policy.policy;
2309 new_policy.governor = policy->user_policy.governor;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002310
Viresh Kumarbb176f72013-06-19 14:19:33 +05302311 /*
2312 * BIOS might change freq behind our back
2313 * -> ask driver for current freq and notify governors about a change
2314 */
Rafael J. Wysocki2ed99e32014-03-12 21:49:33 +01002315 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302316 new_policy.cur = cpufreq_driver->get(cpu);
Viresh Kumarbd0fa9b2014-02-25 14:29:44 +05302317 if (WARN_ON(!new_policy.cur)) {
2318 ret = -EIO;
Aaron Plattnerfefa8ff2014-06-18 11:27:32 -07002319 goto unlock;
Viresh Kumarbd0fa9b2014-02-25 14:29:44 +05302320 }
2321
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302322 if (!policy->cur) {
Joe Perchese837f9b2014-03-11 10:03:00 -07002323 pr_debug("Driver did not initialize current freq\n");
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302324 policy->cur = new_policy.cur;
Thomas Renningera85f7bd2006-02-01 11:36:04 +01002325 } else {
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05302326 if (policy->cur != new_policy.cur && has_target())
Viresh Kumara1e1dc42015-01-02 12:34:28 +05302327 cpufreq_out_of_sync(policy, new_policy.cur);
Thomas Renningera85f7bd2006-02-01 11:36:04 +01002328 }
Thomas Renninger0961dd02006-01-26 18:46:33 +01002329 }
2330
Viresh Kumar037ce832013-10-02 14:13:16 +05302331 ret = cpufreq_set_policy(policy, &new_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002332
Aaron Plattnerfefa8ff2014-06-18 11:27:32 -07002333unlock:
viresh kumarad7722d2013-10-18 19:10:15 +05302334 up_write(&policy->rwsem);
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08002335
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302336 cpufreq_cpu_put(policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002337 return ret;
2338}
2339EXPORT_SYMBOL(cpufreq_update_policy);
2340
Paul Gortmaker27609842013-06-19 13:54:04 -04002341static int cpufreq_cpu_callback(struct notifier_block *nfb,
Ashok Rajc32b6b82005-10-30 14:59:54 -08002342 unsigned long action, void *hcpu)
2343{
2344 unsigned int cpu = (unsigned long)hcpu;
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002345 struct device *dev;
Ashok Rajc32b6b82005-10-30 14:59:54 -08002346
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002347 dev = get_cpu_device(cpu);
2348 if (dev) {
Srivatsa S. Bhat5302c3f2013-07-30 04:25:25 +05302349 switch (action & ~CPU_TASKS_FROZEN) {
Ashok Rajc32b6b82005-10-30 14:59:54 -08002350 case CPU_ONLINE:
Viresh Kumar23faf0b2015-02-19 17:02:04 +05302351 cpufreq_add_dev(dev, NULL);
Ashok Rajc32b6b82005-10-30 14:59:54 -08002352 break;
Srivatsa S. Bhat5302c3f2013-07-30 04:25:25 +05302353
Ashok Rajc32b6b82005-10-30 14:59:54 -08002354 case CPU_DOWN_PREPARE:
Viresh Kumar96bbbe42014-03-10 14:53:35 +05302355 __cpufreq_remove_dev_prepare(dev, NULL);
Srivatsa S. Bhat1aee40a2013-09-07 01:23:27 +05302356 break;
2357
2358 case CPU_POST_DEAD:
Viresh Kumar96bbbe42014-03-10 14:53:35 +05302359 __cpufreq_remove_dev_finish(dev, NULL);
Ashok Rajc32b6b82005-10-30 14:59:54 -08002360 break;
Srivatsa S. Bhat5302c3f2013-07-30 04:25:25 +05302361
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08002362 case CPU_DOWN_FAILED:
Viresh Kumar23faf0b2015-02-19 17:02:04 +05302363 cpufreq_add_dev(dev, NULL);
Ashok Rajc32b6b82005-10-30 14:59:54 -08002364 break;
2365 }
2366 }
2367 return NOTIFY_OK;
2368}
2369
Neal Buckendahl9c36f742010-06-22 22:02:44 -05002370static struct notifier_block __refdata cpufreq_cpu_notifier = {
Viresh Kumarbb176f72013-06-19 14:19:33 +05302371 .notifier_call = cpufreq_cpu_callback,
Ashok Rajc32b6b82005-10-30 14:59:54 -08002372};
Linus Torvalds1da177e2005-04-16 15:20:36 -07002373
2374/*********************************************************************
Lukasz Majewski6f19efc2013-12-20 15:24:49 +01002375 * BOOST *
2376 *********************************************************************/
/*
 * cpufreq_boost_set_sw - software default for the driver ->set_boost hook
 * @state: requested boost state; unused here — this function only
 *	re-derives each policy's limits from its frequency table, which
 *	presumably reflects the already-updated global boost flag (set by
 *	cpufreq_boost_trigger_state() before this is called) — TODO confirm.
 *
 * Re-evaluates the frequency limits of every active policy from its
 * frequency table and notifies the governor of the new limits.
 *
 * Returns 0 when the last processed policy updated successfully, the
 * error from cpufreq_frequency_table_cpuinfo() on the first failure, or
 * -EINVAL if no policy had a frequency table (or no active policy exists).
 */
static int cpufreq_boost_set_sw(int state)
{
	struct cpufreq_frequency_table *freq_table;
	struct cpufreq_policy *policy;
	int ret = -EINVAL;

	for_each_active_policy(policy) {
		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (freq_table) {
			/* Recompute policy min/max from the table. */
			ret = cpufreq_frequency_table_cpuinfo(policy,
							freq_table);
			if (ret) {
				pr_err("%s: Policy frequency update failed\n",
				       __func__);
				break;
			}
			/* Track the (possibly boosted) new maximum. */
			policy->user_policy.max = policy->max;
			__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
		}
	}

	return ret;
}
2400
/*
 * cpufreq_boost_trigger_state - switch boost frequencies on or off
 * @state: new boost state (nonzero = enable)
 *
 * Updates cpufreq_driver->boost_enabled under the driver lock, then asks
 * the driver to apply the change; the flag is rolled back if the driver
 * fails.  Returns 0 when already in the requested state or on success,
 * otherwise the driver's error code.
 */
int cpufreq_boost_trigger_state(int state)
{
	unsigned long flags;
	int ret = 0;

	if (cpufreq_driver->boost_enabled == state)
		return 0;

	/*
	 * Flip the flag before invoking the driver — presumably so
	 * ->set_boost implementations observe the new state while applying
	 * it (TODO confirm against driver expectations).
	 */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_driver->set_boost(state);
	if (ret) {
		/* Driver rejected the change: restore the previous state. */
		write_lock_irqsave(&cpufreq_driver_lock, flags);
		cpufreq_driver->boost_enabled = !state;
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		pr_err("%s: Cannot %s BOOST\n",
		       __func__, state ? "enable" : "disable");
	}

	return ret;
}
2425
2426int cpufreq_boost_supported(void)
2427{
2428 if (likely(cpufreq_driver))
2429 return cpufreq_driver->boost_supported;
2430
2431 return 0;
2432}
2433EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
2434
/*
 * Current boost state of the registered driver.
 * NOTE(review): dereferences cpufreq_driver without a NULL check — only
 * safe to call while a driver is registered.
 */
int cpufreq_boost_enabled(void)
{
	return cpufreq_driver->boost_enabled;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2440
2441/*********************************************************************
Linus Torvalds1da177e2005-04-16 15:20:36 -07002442 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2443 *********************************************************************/
2444
2445/**
2446 * cpufreq_register_driver - register a CPU Frequency driver
2447 * @driver_data: A struct cpufreq_driver containing the values#
2448 * submitted by the CPU Frequency driver.
2449 *
Viresh Kumarbb176f72013-06-19 14:19:33 +05302450 * Registers a CPU Frequency driver to this core code. This code
Linus Torvalds1da177e2005-04-16 15:20:36 -07002451 * returns zero on success, -EBUSY when another driver got here first
Dave Jones32ee8c32006-02-28 00:43:23 -05002452 * (and isn't unregistered in the meantime).
Linus Torvalds1da177e2005-04-16 15:20:36 -07002453 *
2454 */
Linus Torvalds221dee22007-02-26 14:55:48 -08002455int cpufreq_register_driver(struct cpufreq_driver *driver_data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002456{
2457 unsigned long flags;
2458 int ret;
2459
Konrad Rzeszutek Wilka7b422c2012-03-13 19:18:39 -04002460 if (cpufreq_disabled())
2461 return -ENODEV;
2462
Linus Torvalds1da177e2005-04-16 15:20:36 -07002463 if (!driver_data || !driver_data->verify || !driver_data->init ||
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05302464 !(driver_data->setpolicy || driver_data->target_index ||
Rafael J. Wysocki98322352014-03-19 12:48:30 +01002465 driver_data->target) ||
2466 (driver_data->setpolicy && (driver_data->target_index ||
Viresh Kumar1c03a2d2014-06-02 22:49:28 +05302467 driver_data->target)) ||
2468 (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002469 return -EINVAL;
2470
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02002471 pr_debug("trying to register driver %s\n", driver_data->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002472
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00002473 write_lock_irqsave(&cpufreq_driver_lock, flags);
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02002474 if (cpufreq_driver) {
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00002475 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
Yinghai Lu4dea58062013-09-18 21:05:20 -07002476 return -EEXIST;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002477 }
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02002478 cpufreq_driver = driver_data;
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00002479 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002480
Viresh Kumarbc68b7d2015-01-02 12:34:30 +05302481 if (driver_data->setpolicy)
2482 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2483
Lukasz Majewski6f19efc2013-12-20 15:24:49 +01002484 if (cpufreq_boost_supported()) {
2485 /*
2486 * Check if driver provides function to enable boost -
2487 * if not, use cpufreq_boost_set_sw as default
2488 */
2489 if (!cpufreq_driver->set_boost)
2490 cpufreq_driver->set_boost = cpufreq_boost_set_sw;
2491
2492 ret = cpufreq_sysfs_create_file(&boost.attr);
2493 if (ret) {
2494 pr_err("%s: cannot register global BOOST sysfs file\n",
Joe Perchese837f9b2014-03-11 10:03:00 -07002495 __func__);
Lukasz Majewski6f19efc2013-12-20 15:24:49 +01002496 goto err_null_driver;
2497 }
2498 }
2499
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002500 ret = subsys_interface_register(&cpufreq_interface);
Jiri Slaby8f5bc2a2011-03-01 17:41:10 +01002501 if (ret)
Lukasz Majewski6f19efc2013-12-20 15:24:49 +01002502 goto err_boost_unreg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002503
Viresh Kumarce1bcfe2015-01-02 12:34:35 +05302504 if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
2505 list_empty(&cpufreq_policy_list)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002506 /* if all ->init() calls failed, unregister */
Viresh Kumarce1bcfe2015-01-02 12:34:35 +05302507 pr_debug("%s: No CPU initialized for driver %s\n", __func__,
2508 driver_data->name);
2509 goto err_if_unreg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002510 }
2511
Jiri Slaby8f5bc2a2011-03-01 17:41:10 +01002512 register_hotcpu_notifier(&cpufreq_cpu_notifier);
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02002513 pr_debug("driver %s up and running\n", driver_data->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002514
Jiri Slaby8f5bc2a2011-03-01 17:41:10 +01002515 return 0;
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002516err_if_unreg:
2517 subsys_interface_unregister(&cpufreq_interface);
Lukasz Majewski6f19efc2013-12-20 15:24:49 +01002518err_boost_unreg:
2519 if (cpufreq_boost_supported())
2520 cpufreq_sysfs_remove_file(&boost.attr);
Jiri Slaby8f5bc2a2011-03-01 17:41:10 +01002521err_null_driver:
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00002522 write_lock_irqsave(&cpufreq_driver_lock, flags);
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02002523 cpufreq_driver = NULL;
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00002524 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
Dave Jones4d34a672008-02-07 16:33:49 -05002525 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002526}
2527EXPORT_SYMBOL_GPL(cpufreq_register_driver);
2528
Linus Torvalds1da177e2005-04-16 15:20:36 -07002529/**
2530 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2531 *
Viresh Kumarbb176f72013-06-19 14:19:33 +05302532 * Unregister the current CPUFreq driver. Only call this if you have
Linus Torvalds1da177e2005-04-16 15:20:36 -07002533 * the right to do so, i.e. if you have succeeded in initialising before!
2534 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2535 * currently not initialised.
2536 */
Linus Torvalds221dee22007-02-26 14:55:48 -08002537int cpufreq_unregister_driver(struct cpufreq_driver *driver)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002538{
2539 unsigned long flags;
2540
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02002541 if (!cpufreq_driver || (driver != cpufreq_driver))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002542 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002543
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02002544 pr_debug("unregistering driver %s\n", driver->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002545
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002546 subsys_interface_unregister(&cpufreq_interface);
Lukasz Majewski6f19efc2013-12-20 15:24:49 +01002547 if (cpufreq_boost_supported())
2548 cpufreq_sysfs_remove_file(&boost.attr);
2549
Chandra Seetharaman65edc682006-06-27 02:54:08 -07002550 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002551
Viresh Kumar6eed9402013-08-06 22:53:11 +05302552 down_write(&cpufreq_rwsem);
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00002553 write_lock_irqsave(&cpufreq_driver_lock, flags);
Viresh Kumar6eed9402013-08-06 22:53:11 +05302554
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02002555 cpufreq_driver = NULL;
Viresh Kumar6eed9402013-08-06 22:53:11 +05302556
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00002557 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
Viresh Kumar6eed9402013-08-06 22:53:11 +05302558 up_write(&cpufreq_rwsem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002559
2560 return 0;
2561}
2562EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08002563
Doug Anderson90de2a42014-12-23 22:09:48 -08002564/*
2565 * Stop cpufreq at shutdown to make sure it isn't holding any locks
2566 * or mutexes when secondary CPUs are halted.
2567 */
2568static struct syscore_ops cpufreq_syscore_ops = {
2569 .shutdown = cpufreq_suspend,
2570};
2571
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08002572static int __init cpufreq_core_init(void)
2573{
Konrad Rzeszutek Wilka7b422c2012-03-13 19:18:39 -04002574 if (cpufreq_disabled())
2575 return -ENODEV;
2576
Viresh Kumar2361be22013-05-17 16:09:09 +05302577 cpufreq_global_kobject = kobject_create();
Thomas Renninger8aa84ad2009-07-24 15:25:05 +02002578 BUG_ON(!cpufreq_global_kobject);
2579
Doug Anderson90de2a42014-12-23 22:09:48 -08002580 register_syscore_ops(&cpufreq_syscore_ops);
2581
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08002582 return 0;
2583}
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08002584core_initcall(cpufreq_core_init);