/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *            (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *	Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *	Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <trace/events/power.h>

static LIST_HEAD(cpufreq_policy_list);

static inline bool policy_is_inactive(struct cpufreq_policy *policy)
{
	return cpumask_empty(policy->cpus);
}

static bool suitable_policy(struct cpufreq_policy *policy, bool active)
{
	return active == !policy_is_inactive(policy);
}

/* Finds the next active/inactive policy */
static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
					  bool active)
{
	do {
		policy = list_next_entry(policy, policy_list);

		/* No more policies in the list */
		if (&policy->policy_list == &cpufreq_policy_list)
			return NULL;
	} while (!suitable_policy(policy, active));

	return policy;
}

static struct cpufreq_policy *first_policy(bool active)
{
	struct cpufreq_policy *policy;

	/* No policies in the list */
	if (list_empty(&cpufreq_policy_list))
		return NULL;

	policy = list_first_entry(&cpufreq_policy_list, typeof(*policy),
				  policy_list);

	if (!suitable_policy(policy, active))
		policy = next_policy(policy, active);

	return policy;
}

/* Macros to iterate over CPU policies */
#define for_each_suitable_policy(__policy, __active)	\
	for (__policy = first_policy(__active);		\
	     __policy;					\
	     __policy = next_policy(__policy, __active))

#define for_each_active_policy(__policy)		\
	for_each_suitable_policy(__policy, true)
#define for_each_inactive_policy(__policy)		\
	for_each_suitable_policy(__policy, false)

#define for_each_policy(__policy)			\
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)

/* Iterate over governors */
static LIST_HEAD(cpufreq_governor_list);
#define for_each_governor(__governor)				\
	list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)

/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its rwlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_RWLOCK(cpufreq_driver_lock);
DEFINE_MUTEX(cpufreq_governor_lock);

/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;

static inline bool has_target(void)
{
	return cpufreq_driver->target_index || cpufreq_driver->target;
}

/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
		unsigned int event);
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static void handle_update(struct work_struct *work);

/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);

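/*
 * Illustrative sketch (not part of the original file): clients subscribe to
 * the transition list through cpufreq_register_notifier(), e.g.
 *
 *	static int my_freq_notifier(struct notifier_block *nb,
 *				    unsigned long action, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (action == CPUFREQ_POSTCHANGE)
 *			pr_debug("CPU%u now at %u kHz\n", freqs->cpu, freqs->new);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_freq_notifier,
 *	};
 *
 *	cpufreq_register_notifier(&my_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */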
static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
void disable_cpufreq(void)
{
	off = 1;
}
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
	return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
	if (have_governor_per_policy())
		return &policy->kobj;
	else
		return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);

struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	return policy && !policy_is_inactive(policy) ?
		policy->freq_table : NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);

static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = cputime_to_usecs(cur_wall_time);

	return cputime_to_usecs(idle_time);
}

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);
	else if (!io_busy)
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);

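/*
 * Illustrative sketch (not part of the original file): governors typically
 * derive a load estimate from two get_cpu_idle_time() samples, e.g.
 *
 *	u64 wall, idle, load;
 *
 *	idle = get_cpu_idle_time(cpu, &wall, 0);
 *	// ... some time later, with prev_idle/prev_wall saved from the
 *	// previous sample:
 *	load = 100 * (wall - prev_wall - (idle - prev_idle)) /
 *			(wall - prev_wall);
 *
 * The io_busy argument selects whether iowait time is counted as idle
 * (io_busy == 0) or as busy time (io_busy != 0).
 */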
/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It will do the following:
 * - validate & show the freq table passed in
 * - set the policy's transition latency
 * - fill policy->cpus with all possible CPUs
 */
int cpufreq_generic_init(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table,
		unsigned int transition_latency)
{
	int ret;

	ret = cpufreq_table_validate_and_show(policy, table);
	if (ret) {
		pr_err("%s: invalid frequency table: %d\n", __func__, ret);
		return ret;
	}

	policy->cpuinfo.transition_latency = transition_latency;

	/*
	 * The driver only supports the SMP configuration where all processors
	 * share the clock and voltage.
	 */
	cpumask_setall(policy->cpus);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);

/* Only for cpufreq core internal use */
static struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
}

unsigned int cpufreq_generic_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);

	if (!policy || IS_ERR(policy->clk)) {
		pr_err("%s: No %s associated to cpu: %d\n",
		       __func__, policy ? "clk" : "policy", cpu);
		return 0;
	}

	return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);

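/*
 * Illustrative sketch (not part of the original file): a minimal clock-based
 * driver can lean on the generic helpers above. The table, latency value,
 * clock name and callbacks below are made-up placeholders.
 *
 *	static struct cpufreq_frequency_table my_freq_table[] = {
 *		{ .frequency = 500000 },
 *		{ .frequency = 1000000 },
 *		{ .frequency = CPUFREQ_TABLE_END },
 *	};
 *
 *	static int my_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		policy->clk = clk_get(NULL, "cpu");	// driver-specific clock
 *		return cpufreq_generic_init(policy, my_freq_table, 300000);
 *	}
 *
 *	static struct cpufreq_driver my_cpufreq_driver = {
 *		.name	= "my-cpufreq",
 *		.init	= my_cpufreq_init,
 *		.get	= cpufreq_generic_get,
 *		.verify	= cpufreq_generic_frequency_table_verify,
 *	};
 */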
/**
 * cpufreq_cpu_get: returns the policy for a cpu and marks it busy.
 *
 * @cpu: cpu to find the policy for.
 *
 * This returns the policy for 'cpu', or NULL if it doesn't exist.
 * It also increments the kobject reference count to mark the policy busy,
 * which requires a corresponding call to cpufreq_cpu_put() to decrement it
 * again. If that cpufreq_cpu_put() call isn't made, the policy will never
 * be freed, as freeing depends on the kobject count dropping to zero.
 *
 * Return: A valid policy on success, otherwise NULL on failure.
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = NULL;
	unsigned long flags;

	if (WARN_ON(cpu >= nr_cpu_ids))
		return NULL;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver) {
		/* get the CPU */
		policy = cpufreq_cpu_get_raw(cpu);
		if (policy)
			kobject_get(&policy->kobj);
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

/**
 * cpufreq_cpu_put: Decrements the usage count of a policy
 *
 * @policy: policy earlier returned by cpufreq_cpu_get().
 *
 * This decrements the kobject reference count incremented earlier by calling
 * cpufreq_cpu_get().
 */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
	kobject_put(&policy->kobj);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);

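/*
 * Illustrative sketch (not part of the original file): the usual pattern is
 * to bracket any use of the returned policy with a get/put pair, e.g.
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *	if (policy) {
 *		pr_debug("CPU%u: %u..%u kHz\n", cpu, policy->min, policy->max);
 *		cpufreq_cpu_put(policy);
 *	}
 *
 * Holding the reference keeps the policy (and its kobject) from being freed
 * while it is being inspected.
 */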
/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
#ifndef CONFIG_SMP
	static unsigned long l_p_j_ref;
	static unsigned int l_p_j_ref_freq;

	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
			 l_p_j_ref, l_p_j_ref_freq);
	}
	if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
			 loops_per_jiffy, ci->new);
	}
#endif
}

static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		 state, freqs->new);

	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
					 freqs->old, policy->cur);
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %lu - CPU: %lu\n",
			 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}

/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	for_each_cpu(freqs->cpu, policy->cpus)
		__cpufreq_notify_transition(policy, freqs, state);
}

/* Do post notifications when there are chances that transition has failed */
static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
	if (!transition_failed)
		return;

	swap(freqs->old, freqs->new);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
}

void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs)
{

	/*
	 * Catch double invocations of _begin() which lead to self-deadlock.
	 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
	 * doesn't invoke _begin() on their behalf, and hence the chances of
	 * double invocations are very low. Moreover, there are scenarios
	 * where these checks can emit false-positive warnings in these
	 * drivers; so we avoid that by skipping them altogether.
	 */
	WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
				&& current == policy->transition_task);

wait:
	wait_event(policy->transition_wait, !policy->transition_ongoing);

	spin_lock(&policy->transition_lock);

	if (unlikely(policy->transition_ongoing)) {
		spin_unlock(&policy->transition_lock);
		goto wait;
	}

	policy->transition_ongoing = true;
	policy->transition_task = current;

	spin_unlock(&policy->transition_lock);

	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);

void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	if (unlikely(WARN_ON(!policy->transition_ongoing)))
		return;

	cpufreq_notify_post_transition(policy, freqs, transition_failed);

	policy->transition_ongoing = false;
	policy->transition_task = NULL;

	wake_up(&policy->transition_wait);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);

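/*
 * Illustrative sketch (not part of the original file): a driver whose
 * ->target_index() callback performs the hardware change itself would
 * bracket it with the helpers above, e.g.
 *
 *	static int my_target_index(struct cpufreq_policy *policy,
 *				   unsigned int index)
 *	{
 *		struct cpufreq_freqs freqs = {
 *			.old = policy->cur,
 *			.new = policy->freq_table[index].frequency,
 *		};
 *		int ret;
 *
 *		cpufreq_freq_transition_begin(policy, &freqs);
 *		ret = my_set_hw_frequency(freqs.new);	// made-up HW hook
 *		cpufreq_freq_transition_end(policy, &freqs, ret);
 *
 *		return ret;
 *	}
 *
 * (The core actually does this pairing on the driver's behalf unless
 * CPUFREQ_ASYNC_NOTIFICATION is set; the sketch only shows the protocol.)
 */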

/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	int ret, enable;

	ret = sscanf(buf, "%d", &enable);
	if (ret != 1 || enable < 0 || enable > 1)
		return -EINVAL;

	if (cpufreq_boost_trigger_state(enable)) {
		pr_err("%s: Cannot %s BOOST!\n",
		       __func__, enable ? "enable" : "disable");
		return -EINVAL;
	}

	pr_debug("%s: cpufreq BOOST %s\n",
		 __func__, enable ? "enabled" : "disabled");

	return count;
}
define_one_global_rw(boost);

static struct cpufreq_governor *find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	for_each_governor(t)
		if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}

/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (cpufreq_driver->setpolicy) {
		if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strncasecmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = find_governor(str_governor);

		if (t == NULL) {
			int ret;

			mutex_unlock(&cpufreq_governor_mutex);
			ret = request_module("cpufreq_%s", str_governor);
			mutex_lock(&cpufreq_governor_mutex);

			if (ret == 0)
				t = find_governor(str_governor);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
	return err;
}

/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);

static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
	ssize_t ret;

	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
		ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
	else
		ret = sprintf(buf, "%u\n", policy->cur);
	return ret;
}

static int cpufreq_set_policy(struct cpufreq_policy *policy,
				struct cpufreq_policy *new_policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)					\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	int ret, temp;							\
	struct cpufreq_policy new_policy;				\
									\
	memcpy(&new_policy, policy, sizeof(*policy));			\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	temp = new_policy.object;					\
	ret = cpufreq_set_policy(policy, &new_policy);			\
	if (!ret)							\
		policy->user_policy.object = temp;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);

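/*
 * Illustrative sketch (not part of the original file): for example,
 * store_one(scaling_max_freq, max) expands to a store_scaling_max_freq()
 * helper that roughly does
 *
 *	memcpy(&new_policy, policy, sizeof(*policy));
 *	sscanf(buf, "%u", &new_policy.max);
 *	temp = new_policy.max;
 *	ret = cpufreq_set_policy(policy, &new_policy);
 *	if (!ret)
 *		policy->user_policy.max = temp;
 *
 * i.e. every sysfs write is funnelled through cpufreq_set_policy(), and the
 * user's requested value is only recorded in user_policy when the policy
 * update succeeds.
 */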
/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
					char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy);
	if (!cur_freq)
		return sprintf(buf, "<unknown>");
	return sprintf(buf, "%u\n", cur_freq);
}

/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
				policy->governor->name);
	return -EINVAL;
}

/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	int ret;
	char str_governor[16];
	struct cpufreq_policy new_policy;

	memcpy(&new_policy, policy, sizeof(*policy));

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	ret = cpufreq_set_policy(policy, &new_policy);
	return ret ? ret : count;
}

/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!has_target()) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	for_each_governor(t) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}

ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);

/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->cpus, buf);
}

static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}

/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
	unsigned int limit;
	int ret;
	if (cpufreq_driver->bios_limit) {
		ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
		if (!ret)
			return sprintf(buf, "%u\n", limit);
	}
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}

cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

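/*
 * Illustrative sketch (not part of the original file): the attribute macros
 * above come from include/linux/cpufreq.h and roughly expand to
 *
 *	static struct freq_attr scaling_max_freq =
 *		__ATTR(scaling_max_freq, 0644, show_scaling_max_freq,
 *		       store_scaling_max_freq);
 *
 * i.e. each one binds a sysfs file name to the show_*()/store_*() pair
 * defined earlier in this file.
 */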
static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret;

	down_read(&policy->rwsem);

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	up_read(&policy->rwsem);

	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;

	get_online_cpus();

	if (!cpu_online(policy->cpu))
		goto unlock;

	down_write(&policy->rwsem);

	/* Updating inactive policies is invalid, so avoid doing that. */
	if (unlikely(policy_is_inactive(policy))) {
		ret = -EBUSY;
		goto unlock_policy_rwsem;
	}

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

unlock_policy_rwsem:
	up_write(&policy->rwsem);
unlock:
	put_online_cpus();

	return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
	.show = show,
	.store = store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops = &sysfs_ops,
	.default_attrs = default_attrs,
	.release = cpufreq_sysfs_release,
};

struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

static int cpufreq_global_kobject_usage;

int cpufreq_get_global_kobject(void)
{
	if (!cpufreq_global_kobject_usage++)
		return kobject_add(cpufreq_global_kobject,
				&cpu_subsys.dev_root->kobj, "%s", "cpufreq");

	return 0;
}
EXPORT_SYMBOL(cpufreq_get_global_kobject);

void cpufreq_put_global_kobject(void)
{
	if (!--cpufreq_global_kobject_usage)
		kobject_del(cpufreq_global_kobject);
}
EXPORT_SYMBOL(cpufreq_put_global_kobject);

int cpufreq_sysfs_create_file(const struct attribute *attr)
{
	int ret = cpufreq_get_global_kobject();

	if (!ret) {
		ret = sysfs_create_file(cpufreq_global_kobject, attr);
		if (ret)
			cpufreq_put_global_kobject();
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_sysfs_create_file);

void cpufreq_sysfs_remove_file(const struct attribute *attr)
{
	sysfs_remove_file(cpufreq_global_kobject, attr);
	cpufreq_put_global_kobject();
}
EXPORT_SYMBOL(cpufreq_sysfs_remove_file);

static int add_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
{
	struct device *cpu_dev;

	pr_debug("%s: Adding symlink for CPU: %u\n", __func__, cpu);

	if (!policy)
		return 0;

	cpu_dev = get_cpu_device(cpu);
	if (WARN_ON(!cpu_dev))
		return 0;

	return sysfs_create_link(&cpu_dev->kobj, &policy->kobj, "cpufreq");
}

static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
{
	struct device *cpu_dev;

	pr_debug("%s: Removing symlink for CPU: %u\n", __func__, cpu);

	cpu_dev = get_cpu_device(cpu);
	if (WARN_ON(!cpu_dev))
		return;

	sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
}

/* Add/remove symlinks for all related CPUs */
static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	/* Some related CPUs might not be present (physically hotplugged) */
	for_each_cpu(j, policy->real_cpus) {
		if (j == policy->kobj_cpu)
			continue;

		ret = add_cpu_dev_symlink(policy, j);
		if (ret)
			break;
	}

	return ret;
}

static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
{
	unsigned int j;

	/* Some related CPUs might not be present (physically hotplugged) */
	for_each_cpu(j, policy->real_cpus) {
		if (j == policy->kobj_cpu)
			continue;

		remove_cpu_dev_symlink(policy, j);
	}
}

static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
{
	struct freq_attr **drv_attr;
	int ret = 0;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while (drv_attr && *drv_attr) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			return ret;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			return ret;
	}

	ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
	if (ret)
		return ret;

	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			return ret;
	}

	return cpufreq_add_dev_symlink(policy);
}

static int cpufreq_init_policy(struct cpufreq_policy *policy)
{
	struct cpufreq_governor *gov = NULL;
	struct cpufreq_policy new_policy;

	memcpy(&new_policy, policy, sizeof(*policy));

	/* Update governor of new_policy to the governor used before hotplug */
	gov = find_governor(policy->last_governor);
	if (gov)
		pr_debug("Restoring governor %s for cpu %d\n",
				policy->governor->name, policy->cpu);
	else
		gov = CPUFREQ_DEFAULT_GOVERNOR;

	new_policy.governor = gov;

	/* Use the default policy if it's valid. */
	if (cpufreq_driver->setpolicy)
		cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);

	/* set default policy */
	return cpufreq_set_policy(policy, &new_policy);
}

static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	int ret = 0;

	/* Has this CPU been taken care of already? */
	if (cpumask_test_cpu(cpu, policy->cpus))
		return 0;

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			return ret;
		}
	}

	down_write(&policy->rwsem);
	cpumask_set_cpu(cpu, policy->cpus);
	up_write(&policy->rwsem);

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
		if (!ret)
			ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

		if (ret) {
			pr_err("%s: Failed to start governor\n", __func__);
			return ret;
		}
	}

	return 0;
}

static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);
	struct cpufreq_policy *policy;
	int ret;

	if (WARN_ON(!dev))
		return NULL;

	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
	if (!policy)
		return NULL;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
		goto err_free_rcpumask;

	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &dev->kobj,
				   "cpufreq");
	if (ret) {
		pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
		goto err_free_real_cpus;
	}

	INIT_LIST_HEAD(&policy->policy_list);
	init_rwsem(&policy->rwsem);
	spin_lock_init(&policy->transition_lock);
	init_waitqueue_head(&policy->transition_wait);
	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	policy->cpu = cpu;

	/* Set this once on allocation */
	policy->kobj_cpu = cpu;

	return policy;

err_free_real_cpus:
	free_cpumask_var(policy->real_cpus);
err_free_rcpumask:
	free_cpumask_var(policy->related_cpus);
err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);

	return NULL;
}

static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify)
{
	struct kobject *kobj;
	struct completion *cmp;

	if (notify)
		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
					     CPUFREQ_REMOVE_POLICY, policy);

	down_write(&policy->rwsem);
	cpufreq_remove_dev_symlink(policy);
	kobj = &policy->kobj;
	cmp = &policy->kobj_unregister;
	up_write(&policy->rwsem);
	kobject_put(kobj);

	/*
	 * We need to make sure that the underlying kobj is
	 * actually not referenced anymore by anybody before we
	 * proceed with unloading.
	 */
	pr_debug("waiting for dropping of refcount\n");
	wait_for_completion(cmp);
	pr_debug("wait complete\n");
}

static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
{
	unsigned long flags;
	int cpu;

	/* Remove policy from list */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	list_del(&policy->policy_list);

	for_each_cpu(cpu, policy->related_cpus)
		per_cpu(cpufreq_cpu_data, cpu) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpufreq_policy_put_kobj(policy, notify);
	free_cpumask_var(policy->real_cpus);
	free_cpumask_var(policy->related_cpus);
	free_cpumask_var(policy->cpus);
	kfree(policy);
}
1178
Rafael J. Wysocki0b275352015-07-29 03:03:44 +02001179static int cpufreq_online(unsigned int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001180{
Viresh Kumar7f0c0202015-01-02 12:34:32 +05301181 struct cpufreq_policy *policy;
Rafael J. Wysocki194d99c2015-07-29 03:08:57 +02001182 bool new_policy;
Rafael J. Wysocki0b275352015-07-29 03:03:44 +02001183 unsigned long flags;
1184 unsigned int j;
1185 int ret;
Ashok Rajc32b6b82005-10-30 14:59:54 -08001186
Rafael J. Wysocki0b275352015-07-29 03:03:44 +02001187 pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
Viresh Kumar87549142015-06-10 02:13:21 +02001188
Viresh Kumarbb29ae12015-02-19 17:02:06 +05301189 /* Check if this CPU already has a policy to manage it */
Viresh Kumar9104bb22015-05-12 12:22:12 +05301190 policy = per_cpu(cpufreq_cpu_data, cpu);
Rafael J. Wysocki11ce7072015-07-27 23:11:21 +02001191 if (policy) {
Viresh Kumar9104bb22015-05-12 12:22:12 +05301192 WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
Rafael J. Wysocki11ce7072015-07-27 23:11:21 +02001193 if (!policy_is_inactive(policy))
Rafael J. Wysockid9612a42015-07-27 23:11:37 +02001194 return cpufreq_add_policy_cpu(policy, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001195
Rafael J. Wysocki11ce7072015-07-27 23:11:21 +02001196 /* This is the only online CPU for the policy. Start over. */
Rafael J. Wysocki194d99c2015-07-29 03:08:57 +02001197 new_policy = false;
Rafael J. Wysocki11ce7072015-07-27 23:11:21 +02001198 down_write(&policy->rwsem);
1199 policy->cpu = cpu;
1200 policy->governor = NULL;
1201 up_write(&policy->rwsem);
1202 } else {
Rafael J. Wysocki194d99c2015-07-29 03:08:57 +02001203 new_policy = true;
Rafael J. Wysockia34e63b2015-07-27 23:11:50 +02001204 policy = cpufreq_policy_alloc(cpu);
Rafael J. Wysocki72368d12013-12-27 01:07:11 +01001205 if (!policy)
Rafael J. Wysockid4d854d2015-07-27 23:11:30 +02001206 return -ENOMEM;
Rafael J. Wysocki72368d12013-12-27 01:07:11 +01001207 }
Srivatsa S. Bhat0d66b912013-09-12 01:42:59 +05301208
Rusty Russell835481d2009-01-04 05:18:06 -08001209 cpumask_copy(policy->cpus, cpumask_of(cpu));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001210
Linus Torvalds1da177e2005-04-16 15:20:36 -07001211 /* Call the driver. From then on the cpufreq driver must be able
1212 * to accept all calls to ->verify and ->setpolicy for this CPU.
1213 */
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001214 ret = cpufreq_driver->init(policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001215 if (ret) {
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02001216 pr_debug("initialization failed\n");
Viresh Kumar8101f992015-07-08 15:12:15 +05301217 goto out_free_policy;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001218 }
Viresh Kumar643ae6e2013-01-12 05:14:38 +00001219
Tomeu Vizoso6d4e81e2014-11-24 10:08:03 +01001220 down_write(&policy->rwsem);
1221
Rafael J. Wysocki194d99c2015-07-29 03:08:57 +02001222 if (new_policy) {
Rafael J. Wysocki4d1f3a52015-07-27 23:11:44 +02001223 /* related_cpus should at least include policy->cpus. */
1224 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1225 /* Remember CPUs present at the policy creation time. */
Rafael J. Wysocki559ed402015-07-26 02:07:47 +02001226 cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask);
Rafael J. Wysocki4d1f3a52015-07-27 23:11:44 +02001227 }
Rafael J. Wysocki559ed402015-07-26 02:07:47 +02001228
Viresh Kumar5a7e56a2014-03-04 11:44:00 +08001229 /*
1230 * affected cpus must always be the ones that are online. We aren't
1231 * managing offline cpus here.
1232 */
1233 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1234
Rafael J. Wysocki194d99c2015-07-29 03:08:57 +02001235 if (new_policy) {
Viresh Kumar5a7e56a2014-03-04 11:44:00 +08001236 policy->user_policy.min = policy->min;
1237 policy->user_policy.max = policy->max;
Tomeu Vizoso6d4e81e2014-11-24 10:08:03 +01001238
Viresh Kumar988bed02015-05-08 11:53:45 +05301239 write_lock_irqsave(&cpufreq_driver_lock, flags);
1240 for_each_cpu(j, policy->related_cpus)
1241 per_cpu(cpufreq_cpu_data, j) = policy;
1242 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1243 }
Viresh Kumar652ed952014-01-09 20:38:43 +05301244
Rafael J. Wysocki2ed99e32014-03-12 21:49:33 +01001245 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
Viresh Kumarda60ce92013-10-03 20:28:30 +05301246 policy->cur = cpufreq_driver->get(policy->cpu);
1247 if (!policy->cur) {
1248 pr_err("%s: ->get() failed\n", __func__);
Viresh Kumar8101f992015-07-08 15:12:15 +05301249 goto out_exit_policy;
Viresh Kumarda60ce92013-10-03 20:28:30 +05301250 }
1251 }
1252
Viresh Kumard3916692013-12-03 11:20:46 +05301253 /*
1254 * Sometimes boot loaders set the CPU frequency to a value outside of
1255 * the frequency table known to the cpufreq core. In such cases the CPU
1256 * might be unstable if it has to run at that frequency for a long
1257 * time, so it's better to set it to a frequency which is specified in
1258 * the freq-table. This also keeps cpufreq stats consistent, as
1259 * cpufreq-stats would otherwise fail to register because the current
1260 * frequency of the CPU isn't found in the freq-table.
1261 *
1262 * Because we don't want this change to affect the boot process badly,
1263 * we go for the next freq which is >= policy->cur ('cur' must be set
1264 * by now, otherwise we will end up setting the freq to the lowest entry
1265 * of the table, as 'cur' is initialized to zero).
1266 *
1267 * We pass the target freq as "policy->cur - 1", otherwise
1268 * __cpufreq_driver_target() would simply return without changing
1269 * anything, as policy->cur would be equal to the target freq.
1270 */
1271 if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1272 && has_target()) {
1273 /* Are we running at an unknown frequency? */
1274 ret = cpufreq_frequency_table_get_index(policy, policy->cur);
1275 if (ret == -EINVAL) {
1276 /* Warn user and fix it */
1277 pr_warn("%s: CPU%d: Running at unlisted freq: %u kHz\n",
1278 __func__, policy->cpu, policy->cur);
1279 ret = __cpufreq_driver_target(policy, policy->cur - 1,
1280 CPUFREQ_RELATION_L);
1281
1282 /*
1283 * Reaching here within a few seconds after boot does not
1284 * mean that the system will remain stable at the "unknown"
1285 * frequency for a longer duration. Hence, a BUG_ON().
1286 */
1287 BUG_ON(ret);
1288 pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u kHz\n",
1289 __func__, policy->cpu, policy->cur);
1290 }
1291 }
1292
Thomas Renningera1531ac2008-07-29 22:32:58 -07001293 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1294 CPUFREQ_START, policy);
1295
Rafael J. Wysocki194d99c2015-07-29 03:08:57 +02001296 if (new_policy) {
Rafael J. Wysockid9612a42015-07-27 23:11:37 +02001297 ret = cpufreq_add_dev_interface(policy);
Srivatsa S. Bhata82fab22013-07-30 04:24:49 +05301298 if (ret)
Viresh Kumar8101f992015-07-08 15:12:15 +05301299 goto out_exit_policy;
Viresh Kumarfcd7af92014-01-07 07:10:10 +05301300 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1301 CPUFREQ_CREATE_POLICY, policy);
Dave Jones8ff69732006-03-05 03:37:23 -05001302
Viresh Kumar988bed02015-05-08 11:53:45 +05301303 write_lock_irqsave(&cpufreq_driver_lock, flags);
1304 list_add(&policy->policy_list, &cpufreq_policy_list);
1305 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1306 }
Viresh Kumar9515f4d2013-08-20 12:08:23 +05301307
Viresh Kumar7f0fa402015-07-08 15:12:16 +05301308 ret = cpufreq_init_policy(policy);
1309 if (ret) {
1310 pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
1311 __func__, cpu, ret);
Rafael J. Wysocki194d99c2015-07-29 03:08:57 +02001312 /* cpufreq_policy_free() will notify based on this */
1313 new_policy = false;
1314 goto out_exit_policy;
Viresh Kumar7f0fa402015-07-08 15:12:16 +05301315 }
Srivatsa S. Bhate18f1682013-07-30 04:24:23 +05301316
Viresh Kumar4e97b632014-03-04 11:44:01 +08001317 up_write(&policy->rwsem);
Viresh Kumar08fd8c1c2013-12-24 07:11:01 +05301318
Greg Kroah-Hartman038c5b32007-12-17 15:54:39 -04001319 kobject_uevent(&policy->kobj, KOBJ_ADD);
Viresh Kumar7c45cf32014-11-27 06:07:51 +05301320
Viresh Kumar7c45cf32014-11-27 06:07:51 +05301321 /* Callback for handling stuff after policy is ready */
1322 if (cpufreq_driver->ready)
1323 cpufreq_driver->ready(policy);
1324
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02001325 pr_debug("initialization complete\n");
Dave Jones87c32272006-03-29 01:48:37 -05001326
Linus Torvalds1da177e2005-04-16 15:20:36 -07001327 return 0;
1328
Viresh Kumar8101f992015-07-08 15:12:15 +05301329out_exit_policy:
Prarit Bhargava7106e022014-09-10 10:12:08 -04001330 up_write(&policy->rwsem);
1331
Viresh Kumarda60ce92013-10-03 20:28:30 +05301332 if (cpufreq_driver->exit)
1333 cpufreq_driver->exit(policy);
Viresh Kumar8101f992015-07-08 15:12:15 +05301334out_free_policy:
Rafael J. Wysocki194d99c2015-07-29 03:08:57 +02001335 cpufreq_policy_free(policy, !new_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001336 return ret;
1337}
1338
Rafael J. Wysocki0b275352015-07-29 03:03:44 +02001339/**
1340 * cpufreq_add_dev - the cpufreq interface for a CPU device.
1341 * @dev: CPU device.
1342 * @sif: Subsystem interface structure pointer (not used)
1343 */
1344static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1345{
1346 unsigned cpu = dev->id;
1347 int ret;
1348
1349 dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);
1350
1351 if (cpu_online(cpu)) {
1352 ret = cpufreq_online(cpu);
1353 } else {
1354 /*
1355 * A hotplug notifier will follow and we will handle it as CPU
1356 * online then. For now, just create the sysfs link, unless
1357 * there is no policy or the link is already present.
1358 */
1359 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1360
1361 ret = policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
1362 ? add_cpu_dev_symlink(policy, cpu) : 0;
1363 }
1364
1365 return ret;
1366}
1367
Rafael J. Wysocki15c0b4d2015-07-27 23:11:09 +02001368static void cpufreq_offline_prepare(unsigned int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001369{
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301370 struct cpufreq_policy *policy;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001371
Viresh Kumarb8eed8a2013-01-14 13:23:03 +00001372 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001373
Viresh Kumar988bed02015-05-08 11:53:45 +05301374 policy = cpufreq_cpu_get_raw(cpu);
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301375 if (!policy) {
Viresh Kumarb8eed8a2013-01-14 13:23:03 +00001376 pr_debug("%s: No cpu_data found\n", __func__);
Rafael J. Wysocki15c0b4d2015-07-27 23:11:09 +02001377 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001378 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001379
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05301380 if (has_target()) {
Rafael J. Wysocki15c0b4d2015-07-27 23:11:09 +02001381 int ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
Rafael J. Wysocki559ed402015-07-26 02:07:47 +02001382 if (ret)
Viresh Kumar3de9bde2013-08-06 22:53:13 +05301383 pr_err("%s: Failed to stop governor\n", __func__);
Viresh Kumardb5f2992015-01-02 12:34:25 +05301384 }
Jacob Shin27ecddc2011-04-27 13:32:11 -05001385
Viresh Kumar45732372015-05-12 12:22:34 +05301386 down_write(&policy->rwsem);
Viresh Kumar9591bec2015-06-10 02:20:23 +02001387 cpumask_clear_cpu(cpu, policy->cpus);
Viresh Kumar45732372015-05-12 12:22:34 +05301388
Viresh Kumar9591bec2015-06-10 02:20:23 +02001389 if (policy_is_inactive(policy)) {
1390 if (has_target())
1391 strncpy(policy->last_governor, policy->governor->name,
1392 CPUFREQ_NAME_LEN);
1393 } else if (cpu == policy->cpu) {
1394 /* Nominate new CPU */
1395 policy->cpu = cpumask_any(policy->cpus);
1396 }
Viresh Kumar45732372015-05-12 12:22:34 +05301397 up_write(&policy->rwsem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001398
Viresh Kumar9591bec2015-06-10 02:20:23 +02001399 /* Start governor again for active policy */
1400 if (!policy_is_inactive(policy)) {
1401 if (has_target()) {
Rafael J. Wysocki15c0b4d2015-07-27 23:11:09 +02001402 int ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
Viresh Kumar9591bec2015-06-10 02:20:23 +02001403 if (!ret)
1404 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
Viresh Kumar87549142015-06-10 02:13:21 +02001405
Viresh Kumar9591bec2015-06-10 02:20:23 +02001406 if (ret)
1407 pr_err("%s: Failed to start governor\n", __func__);
1408 }
1409 } else if (cpufreq_driver->stop_cpu) {
Dirk Brandewie367dc4a2014-03-19 08:45:53 -07001410 cpufreq_driver->stop_cpu(policy);
Viresh Kumar9591bec2015-06-10 02:20:23 +02001411 }
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301412}
1413
Rafael J. Wysocki15c0b4d2015-07-27 23:11:09 +02001414static void cpufreq_offline_finish(unsigned int cpu)
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301415{
Viresh Kumar9591bec2015-06-10 02:20:23 +02001416 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301417
1418 if (!policy) {
1419 pr_debug("%s: No cpu_data found\n", __func__);
Rafael J. Wysocki15c0b4d2015-07-27 23:11:09 +02001420 return;
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301421 }
1422
Viresh Kumar9591bec2015-06-10 02:20:23 +02001423 /* Only proceed for inactive policies */
1424 if (!policy_is_inactive(policy))
Rafael J. Wysocki15c0b4d2015-07-27 23:11:09 +02001425 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001426
Viresh Kumar87549142015-06-10 02:13:21 +02001427 /* If cpu is last user of policy, free policy */
1428 if (has_target()) {
Rafael J. Wysocki15c0b4d2015-07-27 23:11:09 +02001429 int ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
Rafael J. Wysocki559ed402015-07-26 02:07:47 +02001430 if (ret)
Viresh Kumar87549142015-06-10 02:13:21 +02001431 pr_err("%s: Failed to exit governor\n", __func__);
Viresh Kumar87549142015-06-10 02:13:21 +02001432 }
1433
Viresh Kumar87549142015-06-10 02:13:21 +02001434 /*
1435 * Perform the ->exit() even during light-weight tear-down,
1436 * since this is a core component, and is essential for the
1437 * subsequent light-weight ->init() to succeed.
1438 */
1439 if (cpufreq_driver->exit)
1440 cpufreq_driver->exit(policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001441}
1442
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301443/**
Viresh Kumar27a862e2013-10-02 14:13:14 +05301444 * cpufreq_remove_dev - remove a CPU device
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301445 *
1446 * Removes the cpufreq interface for a CPU device.
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301447 */
Kay Sievers8a25a2f2011-12-21 14:29:42 -08001448static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001449{
Kay Sievers8a25a2f2011-12-21 14:29:42 -08001450 unsigned int cpu = dev->id;
Rafael J. Wysocki559ed402015-07-26 02:07:47 +02001451 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
Venki Pallipadiec282972007-03-26 12:03:19 -07001452
Rafael J. Wysocki559ed402015-07-26 02:07:47 +02001453 if (!policy)
1454 return 0;
Viresh Kumar87549142015-06-10 02:13:21 +02001455
Rafael J. Wysocki559ed402015-07-26 02:07:47 +02001456 if (cpu_online(cpu)) {
Rafael J. Wysocki15c0b4d2015-07-27 23:11:09 +02001457 cpufreq_offline_prepare(cpu);
1458 cpufreq_offline_finish(cpu);
Rafael J. Wysocki559ed402015-07-26 02:07:47 +02001459 }
Viresh Kumar87549142015-06-10 02:13:21 +02001460
Rafael J. Wysocki559ed402015-07-26 02:07:47 +02001461 cpumask_clear_cpu(cpu, policy->real_cpus);
Viresh Kumar87549142015-06-10 02:13:21 +02001462
Rafael J. Wysocki559ed402015-07-26 02:07:47 +02001463 if (cpumask_empty(policy->real_cpus)) {
Viresh Kumar3654c5c2015-06-08 18:25:30 +05301464 cpufreq_policy_free(policy, true);
Venki Pallipadiec282972007-03-26 12:03:19 -07001465 return 0;
Viresh Kumar87549142015-06-10 02:13:21 +02001466 }
Venki Pallipadiec282972007-03-26 12:03:19 -07001467
Rafael J. Wysocki559ed402015-07-26 02:07:47 +02001468 if (cpu != policy->kobj_cpu) {
1469 remove_cpu_dev_symlink(policy, cpu);
1470 } else {
1471 /*
1472 * The CPU owning the policy object is going away. Move it to
1473 * another suitable CPU.
1474 */
1475 unsigned int new_cpu = cpumask_first(policy->real_cpus);
1476 struct device *new_dev = get_cpu_device(new_cpu);
Viresh Kumar27a862e2013-10-02 14:13:14 +05301477
Rafael J. Wysocki559ed402015-07-26 02:07:47 +02001478 dev_dbg(dev, "%s: Moving policy object to CPU%u\n", __func__, new_cpu);
Viresh Kumar27a862e2013-10-02 14:13:14 +05301479
Rafael J. Wysocki559ed402015-07-26 02:07:47 +02001480 sysfs_remove_link(&new_dev->kobj, "cpufreq");
1481 policy->kobj_cpu = new_cpu;
1482 WARN_ON(kobject_move(&policy->kobj, &new_dev->kobj));
1483 }
1484
1485 return 0;
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001486}
1487
David Howells65f27f32006-11-22 14:55:48 +00001488static void handle_update(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001489{
David Howells65f27f32006-11-22 14:55:48 +00001490 struct cpufreq_policy *policy =
1491 container_of(work, struct cpufreq_policy, update);
1492 unsigned int cpu = policy->cpu;
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02001493 pr_debug("handle_update for cpu %u called\n", cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001494 cpufreq_update_policy(cpu);
1495}
1496
1497/**
Viresh Kumarbb176f72013-06-19 14:19:33 +05301498 * cpufreq_out_of_sync - If the actual and saved CPU frequencies differ, we're
1499 * in deep trouble.
Viresh Kumara1e1dc42015-01-02 12:34:28 +05301500 * @policy: policy managing CPUs
Linus Torvalds1da177e2005-04-16 15:20:36 -07001501 * @new_freq: CPU frequency the CPU actually runs at
1502 *
Dave Jones29464f22009-01-18 01:37:11 -05001503 * We adjust to the current frequency first, and clean up later by either
1504 * calling cpufreq_update_policy() or scheduling handle_update().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001505 */
Viresh Kumara1e1dc42015-01-02 12:34:28 +05301506static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301507 unsigned int new_freq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001508{
1509 struct cpufreq_freqs freqs;
Viresh Kumarb43a7ff2013-03-24 11:56:43 +05301510
Joe Perchese837f9b2014-03-11 10:03:00 -07001511 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
Viresh Kumara1e1dc42015-01-02 12:34:28 +05301512 policy->cur, new_freq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001513
Viresh Kumara1e1dc42015-01-02 12:34:28 +05301514 freqs.old = policy->cur;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001515 freqs.new = new_freq;
Viresh Kumarb43a7ff2013-03-24 11:56:43 +05301516
Viresh Kumar8fec0512014-03-24 13:35:45 +05301517 cpufreq_freq_transition_begin(policy, &freqs);
1518 cpufreq_freq_transition_end(policy, &freqs, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001519}
1520
Dave Jones32ee8c32006-02-28 00:43:23 -05001521/**
Dhaval Giani4ab70df2006-12-13 14:49:15 +05301522 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001523 * @cpu: CPU number
1524 *
1525 * This is the last known freq, without actually getting it from the driver.
1526 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1527 */
1528unsigned int cpufreq_quick_get(unsigned int cpu)
1529{
Dirk Brandewie9e21ba82013-02-06 09:02:08 -08001530 struct cpufreq_policy *policy;
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301531 unsigned int ret_freq = 0;
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001532
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001533 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1534 return cpufreq_driver->get(cpu);
Dirk Brandewie9e21ba82013-02-06 09:02:08 -08001535
1536 policy = cpufreq_cpu_get(cpu);
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001537 if (policy) {
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301538 ret_freq = policy->cur;
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001539 cpufreq_cpu_put(policy);
1540 }
1541
Dave Jones4d34a672008-02-07 16:33:49 -05001542 return ret_freq;
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001543}
1544EXPORT_SYMBOL(cpufreq_quick_get);
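/*
 * Usage sketch (illustrative only, not taken from a real caller): code that
 * only needs the last known frequency, e.g. for reporting, can do:
 *
 *	unsigned int khz = cpufreq_quick_get(cpu);
 *
 *	if (khz)
 *		pr_info("cpu%u last known freq: %u kHz\n", cpu, khz);
 *
 * A return value of 0 means the frequency could not be determined (for
 * example, no policy exists for that CPU).
 */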
1545
Jesse Barnes3d737102011-06-28 10:59:12 -07001546/**
1547 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1548 * @cpu: CPU number
1549 *
1550 * Just return the max possible frequency for a given CPU.
1551 */
1552unsigned int cpufreq_quick_get_max(unsigned int cpu)
1553{
1554 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1555 unsigned int ret_freq = 0;
1556
1557 if (policy) {
1558 ret_freq = policy->max;
1559 cpufreq_cpu_put(policy);
1560 }
1561
1562 return ret_freq;
1563}
1564EXPORT_SYMBOL(cpufreq_quick_get_max);
1565
Viresh Kumard92d50a2015-01-02 12:34:29 +05301566static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001567{
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301568 unsigned int ret_freq = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001569
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001570 if (!cpufreq_driver->get)
Dave Jones4d34a672008-02-07 16:33:49 -05001571 return ret_freq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001572
Viresh Kumard92d50a2015-01-02 12:34:29 +05301573 ret_freq = cpufreq_driver->get(policy->cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001574
Viresh Kumar11e584c2015-06-10 02:11:45 +02001575 /* Updating inactive policies is invalid, so avoid doing that. */
1576 if (unlikely(policy_is_inactive(policy)))
1577 return ret_freq;
1578
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301579 if (ret_freq && policy->cur &&
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001580 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301581 /* verify no discrepancy between actual and
1582 saved value exists */
1583 if (unlikely(ret_freq != policy->cur)) {
Viresh Kumara1e1dc42015-01-02 12:34:28 +05301584 cpufreq_out_of_sync(policy, ret_freq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001585 schedule_work(&policy->update);
1586 }
1587 }
1588
Dave Jones4d34a672008-02-07 16:33:49 -05001589 return ret_freq;
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001590}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001591
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001592/**
1593 * cpufreq_get - get the current CPU frequency (in kHz)
1594 * @cpu: CPU number
1595 *
1596 * Get the current (static) frequency of the CPU.
1597 */
1598unsigned int cpufreq_get(unsigned int cpu)
1599{
Aaron Plattner999976e2014-03-04 12:42:15 -08001600 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001601 unsigned int ret_freq = 0;
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001602
Aaron Plattner999976e2014-03-04 12:42:15 -08001603 if (policy) {
1604 down_read(&policy->rwsem);
Viresh Kumard92d50a2015-01-02 12:34:29 +05301605 ret_freq = __cpufreq_get(policy);
Aaron Plattner999976e2014-03-04 12:42:15 -08001606 up_read(&policy->rwsem);
Viresh Kumar26ca8692013-09-20 22:37:31 +05301607
Aaron Plattner999976e2014-03-04 12:42:15 -08001608 cpufreq_cpu_put(policy);
1609 }
Viresh Kumar6eed9402013-08-06 22:53:11 +05301610
Dave Jones4d34a672008-02-07 16:33:49 -05001611 return ret_freq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001612}
1613EXPORT_SYMBOL(cpufreq_get);
1614
Kay Sievers8a25a2f2011-12-21 14:29:42 -08001615static struct subsys_interface cpufreq_interface = {
1616 .name = "cpufreq",
1617 .subsys = &cpu_subsys,
1618 .add_dev = cpufreq_add_dev,
1619 .remove_dev = cpufreq_remove_dev,
Rafael J. Wysockie00e56d2011-03-23 22:16:32 +01001620};
1621
Viresh Kumare28867e2014-03-04 11:00:27 +08001622/*
1623 * In case the platform wants some specific frequency to be configured
1624 * during suspend.
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001625 */
Viresh Kumare28867e2014-03-04 11:00:27 +08001626int cpufreq_generic_suspend(struct cpufreq_policy *policy)
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001627{
Viresh Kumare28867e2014-03-04 11:00:27 +08001628 int ret;
Dave Jones4bc5d342009-08-04 14:03:25 -04001629
Viresh Kumare28867e2014-03-04 11:00:27 +08001630 if (!policy->suspend_freq) {
Bartlomiej Zolnierkiewicz201f3712015-09-08 18:41:02 +02001631 pr_debug("%s: suspend_freq not defined\n", __func__);
1632 return 0;
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001633 }
1634
Viresh Kumare28867e2014-03-04 11:00:27 +08001635 pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1636 policy->suspend_freq);
1637
1638 ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1639 CPUFREQ_RELATION_H);
1640 if (ret)
1641 pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1642 __func__, policy->suspend_freq, ret);
1643
Dave Jonesc9060492008-02-07 16:32:18 -05001644 return ret;
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001645}
Viresh Kumare28867e2014-03-04 11:00:27 +08001646EXPORT_SYMBOL(cpufreq_generic_suspend);
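/*
 * Usage sketch (illustrative, with made-up "foo" names): a platform driver
 * typically sets policy->suspend_freq from its ->init() callback and points
 * its ->suspend hook at cpufreq_generic_suspend():
 *
 *	static int foo_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		...
 *		policy->suspend_freq = FOO_SAFE_SUSPEND_FREQ;
 *		return 0;
 *	}
 *
 *	static struct cpufreq_driver foo_cpufreq_driver = {
 *		...
 *		.suspend	= cpufreq_generic_suspend,
 *	};
 */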
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001647
1648/**
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001649 * cpufreq_suspend() - Suspend CPUFreq governors
Linus Torvalds1da177e2005-04-16 15:20:36 -07001650 *
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001651 * Called during system wide Suspend/Hibernate cycles for suspending governors
1652 * as some platforms can't change frequency after this point in the suspend
1653 * cycle, because some of the devices they use for changing frequency (like
1654 * i2c, regulators, etc.) are suspended shortly after this point.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001655 */
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001656void cpufreq_suspend(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001657{
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301658 struct cpufreq_policy *policy;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001659
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001660 if (!cpufreq_driver)
Rafael J. Wysockie00e56d2011-03-23 22:16:32 +01001661 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001662
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001663 if (!has_target())
Viresh Kumarb1b12ba2014-09-30 09:33:17 +05301664 goto suspend;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001665
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001666 pr_debug("%s: Suspending Governors\n", __func__);
1667
Viresh Kumarf9637352015-05-12 12:20:11 +05301668 for_each_active_policy(policy) {
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001669 if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
1670 pr_err("%s: Failed to stop governor for policy: %p\n",
1671 __func__, policy);
1672 else if (cpufreq_driver->suspend
1673 && cpufreq_driver->suspend(policy))
1674 pr_err("%s: Failed to suspend driver: %p\n", __func__,
1675 policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001676 }
Viresh Kumarb1b12ba2014-09-30 09:33:17 +05301677
1678suspend:
1679 cpufreq_suspended = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001680}
1681
Linus Torvalds1da177e2005-04-16 15:20:36 -07001682/**
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001683 * cpufreq_resume() - Resume CPUFreq governors
Linus Torvalds1da177e2005-04-16 15:20:36 -07001684 *
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001685 * Called during system wide Suspend/Hibernate cycle for resuming governors that
1686 * are suspended with cpufreq_suspend().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001687 */
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001688void cpufreq_resume(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001689{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690 struct cpufreq_policy *policy;
1691
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001692 if (!cpufreq_driver)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001693 return;
1694
Lan Tianyu8e304442014-09-18 15:03:07 +08001695 cpufreq_suspended = false;
1696
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001697 if (!has_target())
1698 return;
1699
1700 pr_debug("%s: Resuming Governors\n", __func__);
1701
Viresh Kumarf9637352015-05-12 12:20:11 +05301702 for_each_active_policy(policy) {
Viresh Kumar0c5aa402014-03-24 12:30:29 +05301703 if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
1704 pr_err("%s: Failed to resume driver: %p\n", __func__,
1705 policy);
1706 else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001707 || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
1708 pr_err("%s: Failed to start governor for policy: %p\n",
1709 __func__, policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001710 }
Viresh Kumarc75de0a2015-04-02 10:21:33 +05301711
1712 /*
1713 * Schedule a call to cpufreq_update_policy() for the first online CPU, as
1714 * that one won't be hotplugged out on suspend. It will verify that the
1715 * current freq is in sync with what we believe it to be.
1716 */
1717 policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask));
1718 if (WARN_ON(!policy))
1719 return;
1720
1721 schedule_work(&policy->update);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001722}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001723
Borislav Petkov9d950462013-01-20 10:24:28 +00001724/**
1725 * cpufreq_get_current_driver - return current driver's name
1726 *
1727 * Return the name string of the currently loaded cpufreq driver
1728 * or NULL, if none.
1729 */
1730const char *cpufreq_get_current_driver(void)
1731{
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001732 if (cpufreq_driver)
1733 return cpufreq_driver->name;
1734
1735 return NULL;
Borislav Petkov9d950462013-01-20 10:24:28 +00001736}
1737EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001738
Thomas Petazzoni51315cd2014-10-19 11:30:27 +02001739/**
1740 * cpufreq_get_driver_data - return current driver data
1741 *
1742 * Return the private data of the currently loaded cpufreq
1743 * driver, or NULL if no cpufreq driver is loaded.
1744 */
1745void *cpufreq_get_driver_data(void)
1746{
1747 if (cpufreq_driver)
1748 return cpufreq_driver->driver_data;
1749
1750 return NULL;
1751}
1752EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
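/*
 * Usage sketch (illustrative, with made-up "foo" names): a driver can stash
 * private platform data in its struct cpufreq_driver and fetch it later from
 * code that has no direct pointer to the driver:
 *
 *	static struct cpufreq_driver foo_cpufreq_driver = {
 *		...
 *		.driver_data	= &foo_platform_info,
 *	};
 *
 *	struct foo_platform_info *info = cpufreq_get_driver_data();
 *
 *	if (!info)
 *		return -ENODEV;
 */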
1753
Linus Torvalds1da177e2005-04-16 15:20:36 -07001754/*********************************************************************
1755 * NOTIFIER LISTS INTERFACE *
1756 *********************************************************************/
1757
1758/**
1759 * cpufreq_register_notifier - register a driver with cpufreq
1760 * @nb: notifier function to register
1761 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1762 *
Dave Jones32ee8c32006-02-28 00:43:23 -05001763 * Add a driver to one of two lists: either a list of drivers that
Linus Torvalds1da177e2005-04-16 15:20:36 -07001764 * are notified about clock rate changes (once before and once after
1765 * the transition), or a list of drivers that are notified about
1766 * changes in cpufreq policy.
1767 *
1768 * This function may sleep, and has the same return conditions as
Alan Sterne041c682006-03-27 01:16:30 -08001769 * blocking_notifier_chain_register.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001770 */
1771int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1772{
1773 int ret;
1774
Dirk Brandewied5aaffa2013-01-17 16:22:21 +00001775 if (cpufreq_disabled())
1776 return -EINVAL;
1777
Cesar Eduardo Barros74212ca2008-02-16 08:41:24 -02001778 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1779
Linus Torvalds1da177e2005-04-16 15:20:36 -07001780 switch (list) {
1781 case CPUFREQ_TRANSITION_NOTIFIER:
Alan Sternb4dfdbb2006-10-04 02:17:06 -07001782 ret = srcu_notifier_chain_register(
Alan Sterne041c682006-03-27 01:16:30 -08001783 &cpufreq_transition_notifier_list, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001784 break;
1785 case CPUFREQ_POLICY_NOTIFIER:
Alan Sterne041c682006-03-27 01:16:30 -08001786 ret = blocking_notifier_chain_register(
1787 &cpufreq_policy_notifier_list, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788 break;
1789 default:
1790 ret = -EINVAL;
1791 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001792
1793 return ret;
1794}
1795EXPORT_SYMBOL(cpufreq_register_notifier);
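/*
 * Usage sketch (illustrative, with made-up "foo" names): registering a
 * transition notifier. The callback is invoked with CPUFREQ_PRECHANGE and
 * CPUFREQ_POSTCHANGE and a struct cpufreq_freqs pointer describing the
 * change:
 *
 *	static int foo_freq_notify(struct notifier_block *nb,
 *				   unsigned long val, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (val == CPUFREQ_POSTCHANGE)
 *			pr_debug("cpu%u: %u -> %u kHz\n", freqs->cpu,
 *				 freqs->old, freqs->new);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_freq_nb = {
 *		.notifier_call	= foo_freq_notify,
 *	};
 *
 *	cpufreq_register_notifier(&foo_freq_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */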
1796
Linus Torvalds1da177e2005-04-16 15:20:36 -07001797/**
1798 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1799 * @nb: notifier block to be unregistered
Viresh Kumarbb176f72013-06-19 14:19:33 +05301800 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
Linus Torvalds1da177e2005-04-16 15:20:36 -07001801 *
1802 * Remove a driver from the CPU frequency notifier list.
1803 *
1804 * This function may sleep, and has the same return conditions as
Alan Sterne041c682006-03-27 01:16:30 -08001805 * blocking_notifier_chain_unregister.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001806 */
1807int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1808{
1809 int ret;
1810
Dirk Brandewied5aaffa2013-01-17 16:22:21 +00001811 if (cpufreq_disabled())
1812 return -EINVAL;
1813
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814 switch (list) {
1815 case CPUFREQ_TRANSITION_NOTIFIER:
Alan Sternb4dfdbb2006-10-04 02:17:06 -07001816 ret = srcu_notifier_chain_unregister(
Alan Sterne041c682006-03-27 01:16:30 -08001817 &cpufreq_transition_notifier_list, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001818 break;
1819 case CPUFREQ_POLICY_NOTIFIER:
Alan Sterne041c682006-03-27 01:16:30 -08001820 ret = blocking_notifier_chain_unregister(
1821 &cpufreq_policy_notifier_list, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001822 break;
1823 default:
1824 ret = -EINVAL;
1825 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001826
1827 return ret;
1828}
1829EXPORT_SYMBOL(cpufreq_unregister_notifier);
1830
1831
1832/*********************************************************************
1833 * GOVERNORS *
1834 *********************************************************************/
1835
Viresh Kumar1c03a2d2014-06-02 22:49:28 +05301836/* Must set freqs->new to intermediate frequency */
1837static int __target_intermediate(struct cpufreq_policy *policy,
1838 struct cpufreq_freqs *freqs, int index)
1839{
1840 int ret;
1841
1842 freqs->new = cpufreq_driver->get_intermediate(policy, index);
1843
1844 /* We don't need to switch to intermediate freq */
1845 if (!freqs->new)
1846 return 0;
1847
1848 pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
1849 __func__, policy->cpu, freqs->old, freqs->new);
1850
1851 cpufreq_freq_transition_begin(policy, freqs);
1852 ret = cpufreq_driver->target_intermediate(policy, index);
1853 cpufreq_freq_transition_end(policy, freqs, ret);
1854
1855 if (ret)
1856 pr_err("%s: Failed to change to intermediate frequency: %d\n",
1857 __func__, ret);
1858
1859 return ret;
1860}
1861
Viresh Kumar8d657752014-05-21 14:29:29 +05301862static int __target_index(struct cpufreq_policy *policy,
1863 struct cpufreq_frequency_table *freq_table, int index)
1864{
Viresh Kumar1c03a2d2014-06-02 22:49:28 +05301865 struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
1866 unsigned int intermediate_freq = 0;
Viresh Kumar8d657752014-05-21 14:29:29 +05301867 int retval = -EINVAL;
1868 bool notify;
1869
1870 notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
Viresh Kumar8d657752014-05-21 14:29:29 +05301871 if (notify) {
Viresh Kumar1c03a2d2014-06-02 22:49:28 +05301872 /* Handle switching to intermediate frequency */
1873 if (cpufreq_driver->get_intermediate) {
1874 retval = __target_intermediate(policy, &freqs, index);
1875 if (retval)
1876 return retval;
Viresh Kumar8d657752014-05-21 14:29:29 +05301877
Viresh Kumar1c03a2d2014-06-02 22:49:28 +05301878 intermediate_freq = freqs.new;
1879 /* Set old freq to intermediate */
1880 if (intermediate_freq)
1881 freqs.old = freqs.new;
1882 }
1883
1884 freqs.new = freq_table[index].frequency;
Viresh Kumar8d657752014-05-21 14:29:29 +05301885 pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
1886 __func__, policy->cpu, freqs.old, freqs.new);
1887
1888 cpufreq_freq_transition_begin(policy, &freqs);
1889 }
1890
1891 retval = cpufreq_driver->target_index(policy, index);
1892 if (retval)
1893 pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
1894 retval);
1895
Viresh Kumar1c03a2d2014-06-02 22:49:28 +05301896 if (notify) {
Viresh Kumar8d657752014-05-21 14:29:29 +05301897 cpufreq_freq_transition_end(policy, &freqs, retval);
1898
Viresh Kumar1c03a2d2014-06-02 22:49:28 +05301899 /*
1900 * Failed after setting to intermediate freq? Driver should have
1901 * reverted back to initial frequency and so should we. Check
1902 * here for intermediate_freq instead of get_intermediate, in
Shailendra Verma58405af2015-05-22 22:48:22 +05301903 * case we haven't switched to intermediate freq at all.
Viresh Kumar1c03a2d2014-06-02 22:49:28 +05301904 */
1905 if (unlikely(retval && intermediate_freq)) {
1906 freqs.old = intermediate_freq;
1907 freqs.new = policy->restore_freq;
1908 cpufreq_freq_transition_begin(policy, &freqs);
1909 cpufreq_freq_transition_end(policy, &freqs, 0);
1910 }
1911 }
1912
Viresh Kumar8d657752014-05-21 14:29:29 +05301913 return retval;
1914}
1915
Linus Torvalds1da177e2005-04-16 15:20:36 -07001916int __cpufreq_driver_target(struct cpufreq_policy *policy,
1917 unsigned int target_freq,
1918 unsigned int relation)
1919{
Viresh Kumar72499242012-10-31 01:28:21 +01001920 unsigned int old_target_freq = target_freq;
Viresh Kumar8d657752014-05-21 14:29:29 +05301921 int retval = -EINVAL;
Ashok Rajc32b6b82005-10-30 14:59:54 -08001922
Konrad Rzeszutek Wilka7b422c2012-03-13 19:18:39 -04001923 if (cpufreq_disabled())
1924 return -ENODEV;
1925
Viresh Kumar72499242012-10-31 01:28:21 +01001926 /* Make sure that target_freq is within supported range */
1927 if (target_freq > policy->max)
1928 target_freq = policy->max;
1929 if (target_freq < policy->min)
1930 target_freq = policy->min;
1931
1932 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
Joe Perchese837f9b2014-03-11 10:03:00 -07001933 policy->cpu, target_freq, relation, old_target_freq);
Viresh Kumar5a1c0222012-10-31 01:28:15 +01001934
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05301935 /*
1936 * This might look like a redundant call as we are checking it again
1937 * after finding the index. But it is left intentionally for cases where
1938 * exactly the same freq is requested again, so that we can save a few
1939 * function calls.
1940 */
Viresh Kumar5a1c0222012-10-31 01:28:15 +01001941 if (target_freq == policy->cur)
1942 return 0;
1943
Viresh Kumar1c03a2d2014-06-02 22:49:28 +05301944 /* Save last value to restore later on errors */
1945 policy->restore_freq = policy->cur;
1946
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001947 if (cpufreq_driver->target)
1948 retval = cpufreq_driver->target(policy, target_freq, relation);
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05301949 else if (cpufreq_driver->target_index) {
1950 struct cpufreq_frequency_table *freq_table;
1951 int index;
Ashok Raj90d45d12005-11-08 21:34:24 -08001952
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05301953 freq_table = cpufreq_frequency_get_table(policy->cpu);
1954 if (unlikely(!freq_table)) {
1955 pr_err("%s: Unable to find freq_table\n", __func__);
1956 goto out;
1957 }
1958
1959 retval = cpufreq_frequency_table_target(policy, freq_table,
1960 target_freq, relation, &index);
1961 if (unlikely(retval)) {
1962 pr_err("%s: Unable to find matching freq\n", __func__);
1963 goto out;
1964 }
1965
Viresh Kumard4019f02013-08-14 19:38:24 +05301966 if (freq_table[index].frequency == policy->cur) {
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05301967 retval = 0;
Viresh Kumard4019f02013-08-14 19:38:24 +05301968 goto out;
1969 }
1970
Viresh Kumar8d657752014-05-21 14:29:29 +05301971 retval = __target_index(policy, freq_table, index);
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05301972 }
1973
1974out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001975 return retval;
1976}
1977EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1978
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979int cpufreq_driver_target(struct cpufreq_policy *policy,
1980 unsigned int target_freq,
1981 unsigned int relation)
1982{
Julia Lawallf1829e42008-07-25 22:44:53 +02001983 int ret = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001984
viresh kumarad7722d2013-10-18 19:10:15 +05301985 down_write(&policy->rwsem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986
1987 ret = __cpufreq_driver_target(policy, target_freq, relation);
1988
viresh kumarad7722d2013-10-18 19:10:15 +05301989 up_write(&policy->rwsem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990
Linus Torvalds1da177e2005-04-16 15:20:36 -07001991 return ret;
1992}
1993EXPORT_SYMBOL_GPL(cpufreq_driver_target);
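/*
 * Usage sketch (illustrative): cpufreq_driver_target() takes policy->rwsem
 * around __cpufreq_driver_target(), so it is the variant for callers that do
 * not already hold the lock:
 *
 *	ret = cpufreq_driver_target(policy, 800000, CPUFREQ_RELATION_L);
 *
 * The frequency is in kHz (800000 here is just an example value).
 * CPUFREQ_RELATION_L selects the lowest supported frequency at or above the
 * request, CPUFREQ_RELATION_H the highest one at or below it.
 */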
1994
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301995static int __cpufreq_governor(struct cpufreq_policy *policy,
1996 unsigned int event)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001997{
Dave Jonescc993ca2005-07-28 09:43:56 -07001998 int ret;
Thomas Renninger6afde102007-10-02 13:28:13 -07001999
2000 /* 'gov' only needs to be defined when the default governor is known to
2001 have latency restrictions, e.g. conservative or ondemand.
2002 That this is the case is already ensured in Kconfig.
2003 */
2004#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
2005 struct cpufreq_governor *gov = &cpufreq_gov_performance;
2006#else
2007 struct cpufreq_governor *gov = NULL;
2008#endif
Thomas Renninger1c256242007-10-02 13:28:12 -07002009
Viresh Kumar2f0aea92014-03-04 11:00:26 +08002010 /* Don't start any governor operations if we are entering suspend */
2011 if (cpufreq_suspended)
2012 return 0;
Ethan Zhaocb577202014-12-18 15:28:19 +09002013 /*
2014 * The governor might not be initialized here if an ACPI _PPC change
2015 * notification happened, so check it.
2016 */
2017 if (!policy->governor)
2018 return -EINVAL;
Viresh Kumar2f0aea92014-03-04 11:00:26 +08002019
Thomas Renninger1c256242007-10-02 13:28:12 -07002020 if (policy->governor->max_transition_latency &&
2021 policy->cpuinfo.transition_latency >
2022 policy->governor->max_transition_latency) {
Thomas Renninger6afde102007-10-02 13:28:13 -07002023 if (!gov)
2024 return -EINVAL;
2025 else {
Joe Perchese837f9b2014-03-11 10:03:00 -07002026 pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
2027 policy->governor->name, gov->name);
Thomas Renninger6afde102007-10-02 13:28:13 -07002028 policy->governor = gov;
2029 }
Thomas Renninger1c256242007-10-02 13:28:12 -07002030 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002031
Viresh Kumarfe492f32013-08-06 22:53:10 +05302032 if (event == CPUFREQ_GOV_POLICY_INIT)
2033 if (!try_module_get(policy->governor->owner))
2034 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002035
Viresh Kumar63431f72015-07-27 17:58:06 +05302036 pr_debug("%s: for CPU %u, event %u\n", __func__, policy->cpu, event);
Xiaoguang Chen95731eb2013-06-19 15:00:07 +08002037
2038 mutex_lock(&cpufreq_governor_lock);
Srivatsa S. Bhat56d07db2013-09-07 01:23:55 +05302039 if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
Viresh Kumarf73d3932013-08-31 17:53:40 +05302040 || (!policy->governor_enabled
2041 && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
Xiaoguang Chen95731eb2013-06-19 15:00:07 +08002042 mutex_unlock(&cpufreq_governor_lock);
2043 return -EBUSY;
2044 }
2045
2046 if (event == CPUFREQ_GOV_STOP)
2047 policy->governor_enabled = false;
2048 else if (event == CPUFREQ_GOV_START)
2049 policy->governor_enabled = true;
2050
2051 mutex_unlock(&cpufreq_governor_lock);
2052
Linus Torvalds1da177e2005-04-16 15:20:36 -07002053 ret = policy->governor->governor(policy, event);
2054
Viresh Kumar4d5dcc42013-03-27 15:58:58 +00002055 if (!ret) {
2056 if (event == CPUFREQ_GOV_POLICY_INIT)
2057 policy->governor->initialized++;
2058 else if (event == CPUFREQ_GOV_POLICY_EXIT)
2059 policy->governor->initialized--;
Xiaoguang Chen95731eb2013-06-19 15:00:07 +08002060 } else {
2061 /* Restore original values */
2062 mutex_lock(&cpufreq_governor_lock);
2063 if (event == CPUFREQ_GOV_STOP)
2064 policy->governor_enabled = true;
2065 else if (event == CPUFREQ_GOV_START)
2066 policy->governor_enabled = false;
2067 mutex_unlock(&cpufreq_governor_lock);
Viresh Kumar4d5dcc42013-03-27 15:58:58 +00002068 }
Viresh Kumarb3940582013-02-01 05:42:58 +00002069
Viresh Kumarfe492f32013-08-06 22:53:10 +05302070 if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
2071 ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002072 module_put(policy->governor->owner);
2073
2074 return ret;
2075}
2076
Linus Torvalds1da177e2005-04-16 15:20:36 -07002077int cpufreq_register_governor(struct cpufreq_governor *governor)
2078{
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -07002079 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002080
2081 if (!governor)
2082 return -EINVAL;
2083
Konrad Rzeszutek Wilka7b422c2012-03-13 19:18:39 -04002084 if (cpufreq_disabled())
2085 return -ENODEV;
2086
akpm@osdl.org3fc54d32006-01-13 15:54:22 -08002087 mutex_lock(&cpufreq_governor_mutex);
Dave Jones32ee8c32006-02-28 00:43:23 -05002088
Viresh Kumarb3940582013-02-01 05:42:58 +00002089 governor->initialized = 0;
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -07002090 err = -EBUSY;
Viresh Kumar42f91fa2015-01-02 12:34:26 +05302091 if (!find_governor(governor->name)) {
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -07002092 err = 0;
2093 list_add(&governor->governor_list, &cpufreq_governor_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002095
Dave Jones32ee8c32006-02-28 00:43:23 -05002096 mutex_unlock(&cpufreq_governor_mutex);
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -07002097 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002098}
2099EXPORT_SYMBOL_GPL(cpufreq_register_governor);
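/*
 * Usage sketch (illustrative, with made-up "foo" names): a minimal governor
 * whose ->governor() callback handles the events dispatched by
 * __cpufreq_governor() above:
 *
 *	static int foo_governor(struct cpufreq_policy *policy,
 *				unsigned int event)
 *	{
 *		switch (event) {
 *		case CPUFREQ_GOV_START:
 *		case CPUFREQ_GOV_LIMITS:
 *			return __cpufreq_driver_target(policy, policy->max,
 *						       CPUFREQ_RELATION_H);
 *		default:
 *			return 0;
 *		}
 *	}
 *
 *	static struct cpufreq_governor foo_gov = {
 *		.name		= "foo",
 *		.governor	= foo_governor,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	cpufreq_register_governor(&foo_gov);
 */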
2100
Linus Torvalds1da177e2005-04-16 15:20:36 -07002101void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2102{
Viresh Kumar45732372015-05-12 12:22:34 +05302103 struct cpufreq_policy *policy;
2104 unsigned long flags;
Prarit Bhargava90e41ba2009-11-12 09:18:46 -05002105
Linus Torvalds1da177e2005-04-16 15:20:36 -07002106 if (!governor)
2107 return;
2108
Konrad Rzeszutek Wilka7b422c2012-03-13 19:18:39 -04002109 if (cpufreq_disabled())
2110 return;
2111
Viresh Kumar45732372015-05-12 12:22:34 +05302112 /* clear last_governor for all inactive policies */
2113 read_lock_irqsave(&cpufreq_driver_lock, flags);
2114 for_each_inactive_policy(policy) {
Viresh Kumar18bf3a12015-05-12 12:22:51 +05302115 if (!strcmp(policy->last_governor, governor->name)) {
2116 policy->governor = NULL;
Viresh Kumar45732372015-05-12 12:22:34 +05302117 strcpy(policy->last_governor, "\0");
Viresh Kumar18bf3a12015-05-12 12:22:51 +05302118 }
Prarit Bhargava90e41ba2009-11-12 09:18:46 -05002119 }
Viresh Kumar45732372015-05-12 12:22:34 +05302120 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
Prarit Bhargava90e41ba2009-11-12 09:18:46 -05002121
akpm@osdl.org3fc54d32006-01-13 15:54:22 -08002122 mutex_lock(&cpufreq_governor_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002123 list_del(&governor->governor_list);
akpm@osdl.org3fc54d32006-01-13 15:54:22 -08002124 mutex_unlock(&cpufreq_governor_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002125 return;
2126}
2127EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2128
2129
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130/*********************************************************************
2131 * POLICY INTERFACE *
2132 *********************************************************************/
2133
2134/**
2135 * cpufreq_get_policy - get the current cpufreq_policy
Dave Jones29464f22009-01-18 01:37:11 -05002136 * @policy: struct cpufreq_policy into which the current cpufreq_policy
2137 * is written
Linus Torvalds1da177e2005-04-16 15:20:36 -07002138 *
2139 * Reads the current cpufreq policy.
2140 */
2141int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2142{
2143 struct cpufreq_policy *cpu_policy;
2144 if (!policy)
2145 return -EINVAL;
2146
2147 cpu_policy = cpufreq_cpu_get(cpu);
2148 if (!cpu_policy)
2149 return -EINVAL;
2150
Viresh Kumard5b73cd2013-08-06 22:53:06 +05302151 memcpy(policy, cpu_policy, sizeof(*policy));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002152
2153 cpufreq_cpu_put(cpu_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002154 return 0;
2155}
2156EXPORT_SYMBOL(cpufreq_get_policy);
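/*
 * Usage sketch (illustrative): callers receive a snapshot copy on the stack
 * rather than a reference, so no put is needed afterwards:
 *
 *	struct cpufreq_policy pol;
 *
 *	if (!cpufreq_get_policy(&pol, cpu))
 *		pr_debug("cpu%u limits: %u - %u kHz\n", cpu, pol.min, pol.max);
 */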
2157
Arjan van de Ven153d7f32006-07-26 15:40:07 +02002158/*
Viresh Kumar037ce832013-10-02 14:13:16 +05302159 * policy : current policy.
2160 * new_policy: policy to be set.
Arjan van de Ven153d7f32006-07-26 15:40:07 +02002161 */
Viresh Kumar037ce832013-10-02 14:13:16 +05302162static int cpufreq_set_policy(struct cpufreq_policy *policy,
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302163 struct cpufreq_policy *new_policy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002164{
Rafael J. Wysockid9a789c2014-02-17 22:56:35 +01002165 struct cpufreq_governor *old_gov;
2166 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002167
Joe Perchese837f9b2014-03-11 10:03:00 -07002168 pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2169 new_policy->cpu, new_policy->min, new_policy->max);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002170
Viresh Kumard5b73cd2013-08-06 22:53:06 +05302171 memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002172
Pan Xinhuifba95732015-07-30 18:10:40 +08002173 /*
2174 * This check works well when we store new min/max freq attributes,
2175 * because new_policy is a copy of policy with one field updated.
2176 */
2177 if (new_policy->min > new_policy->max)
Rafael J. Wysockid9a789c2014-02-17 22:56:35 +01002178 return -EINVAL;
Mattia Dongili9c9a43e2006-07-05 23:12:20 +02002179
Linus Torvalds1da177e2005-04-16 15:20:36 -07002180 /* verify the cpu speed can be set within this limit */
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302181 ret = cpufreq_driver->verify(new_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002182 if (ret)
Rafael J. Wysockid9a789c2014-02-17 22:56:35 +01002183 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002184
Linus Torvalds1da177e2005-04-16 15:20:36 -07002185 /* adjust if necessary - all reasons */
Alan Sterne041c682006-03-27 01:16:30 -08002186 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302187 CPUFREQ_ADJUST, new_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002188
Viresh Kumarbb176f72013-06-19 14:19:33 +05302189 /*
2190 * verify the cpu speed can be set within this limit, which might be
2191 * different to the first one
2192 */
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302193 ret = cpufreq_driver->verify(new_policy);
Alan Sterne041c682006-03-27 01:16:30 -08002194 if (ret)
Rafael J. Wysockid9a789c2014-02-17 22:56:35 +01002195 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002196
2197 /* notification of the new policy */
Alan Sterne041c682006-03-27 01:16:30 -08002198 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302199 CPUFREQ_NOTIFY, new_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002200
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302201 policy->min = new_policy->min;
2202 policy->max = new_policy->max;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002203
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02002204 pr_debug("new min and max freqs are %u - %u kHz\n",
Joe Perchese837f9b2014-03-11 10:03:00 -07002205 policy->min, policy->max);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002206
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02002207 if (cpufreq_driver->setpolicy) {
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302208 policy->policy = new_policy->policy;
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02002209 pr_debug("setting range\n");
Rafael J. Wysockid9a789c2014-02-17 22:56:35 +01002210 return cpufreq_driver->setpolicy(new_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002211 }
2212
Rafael J. Wysockid9a789c2014-02-17 22:56:35 +01002213 if (new_policy->governor == policy->governor)
2214 goto out;
2215
2216 pr_debug("governor switch\n");
2217
2218 /* save old, working values */
2219 old_gov = policy->governor;
2220 /* end old governor */
2221 if (old_gov) {
Viresh Kumar4bc384a2015-07-18 11:31:03 +05302222 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
2223 if (ret) {
2224 /* This can happen due to race with other operations */
2225 pr_debug("%s: Failed to Stop Governor: %s (%d)\n",
2226 __func__, old_gov->name, ret);
2227 return ret;
2228 }
2229
Rafael J. Wysockid9a789c2014-02-17 22:56:35 +01002230 up_write(&policy->rwsem);
Viresh Kumar4bc384a2015-07-18 11:31:03 +05302231 ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
Rafael J. Wysockid9a789c2014-02-17 22:56:35 +01002232 down_write(&policy->rwsem);
Viresh Kumar4bc384a2015-07-18 11:31:03 +05302233
2234 if (ret) {
2235 pr_err("%s: Failed to Exit Governor: %s (%d)\n",
2236 __func__, old_gov->name, ret);
2237 return ret;
2238 }
Rafael J. Wysockid9a789c2014-02-17 22:56:35 +01002239 }
2240
2241 /* start new governor */
2242 policy->governor = new_policy->governor;
Viresh Kumar4bc384a2015-07-18 11:31:03 +05302243 ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
2244 if (!ret) {
2245 ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
2246 if (!ret)
Rafael J. Wysockid9a789c2014-02-17 22:56:35 +01002247 goto out;
2248
2249 up_write(&policy->rwsem);
2250 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2251 down_write(&policy->rwsem);
2252 }
2253
2254 /* new governor failed, so re-start old one */
2255 pr_debug("starting governor %s failed\n", policy->governor->name);
2256 if (old_gov) {
2257 policy->governor = old_gov;
Viresh Kumar4bc384a2015-07-18 11:31:03 +05302258 if (__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT))
2259 policy->governor = NULL;
2260 else
2261 __cpufreq_governor(policy, CPUFREQ_GOV_START);
Rafael J. Wysockid9a789c2014-02-17 22:56:35 +01002262 }
2263
Viresh Kumar4bc384a2015-07-18 11:31:03 +05302264 return ret;
Rafael J. Wysockid9a789c2014-02-17 22:56:35 +01002265
2266 out:
2267 pr_debug("governor: change or update limits\n");
2268 return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002269}
2270
2271/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002272 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
2273 * @cpu: CPU which shall be re-evaluated
2274 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002275 * Useful for policy notifiers which have different requirements
Linus Torvalds1da177e2005-04-16 15:20:36 -07002276 * at different times.
2277 */
2278int cpufreq_update_policy(unsigned int cpu)
2279{
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302280 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
2281 struct cpufreq_policy new_policy;
Julia Lawallf1829e42008-07-25 22:44:53 +02002282 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002283
Aaron Plattnerfefa8ff2014-06-18 11:27:32 -07002284 if (!policy)
2285 return -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002286
viresh kumarad7722d2013-10-18 19:10:15 +05302287 down_write(&policy->rwsem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002288
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02002289 pr_debug("updating policy for CPU %u\n", cpu);
Viresh Kumard5b73cd2013-08-06 22:53:06 +05302290 memcpy(&new_policy, policy, sizeof(*policy));
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302291 new_policy.min = policy->user_policy.min;
2292 new_policy.max = policy->user_policy.max;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002293
Viresh Kumarbb176f72013-06-19 14:19:33 +05302294 /*
2295 * BIOS might change freq behind our back
2296 * -> ask driver for current freq and notify governors about a change
2297 */
Rafael J. Wysocki2ed99e32014-03-12 21:49:33 +01002298 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302299 new_policy.cur = cpufreq_driver->get(cpu);
Viresh Kumarbd0fa9b2014-02-25 14:29:44 +05302300 if (WARN_ON(!new_policy.cur)) {
2301 ret = -EIO;
Aaron Plattnerfefa8ff2014-06-18 11:27:32 -07002302 goto unlock;
Viresh Kumarbd0fa9b2014-02-25 14:29:44 +05302303 }
2304
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302305 if (!policy->cur) {
Joe Perchese837f9b2014-03-11 10:03:00 -07002306 pr_debug("Driver did not initialize current freq\n");
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302307 policy->cur = new_policy.cur;
Thomas Renningera85f7bd2006-02-01 11:36:04 +01002308 } else {
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05302309 if (policy->cur != new_policy.cur && has_target())
Viresh Kumara1e1dc42015-01-02 12:34:28 +05302310 cpufreq_out_of_sync(policy, new_policy.cur);
Thomas Renningera85f7bd2006-02-01 11:36:04 +01002311 }
Thomas Renninger0961dd02006-01-26 18:46:33 +01002312 }
2313
Viresh Kumar037ce832013-10-02 14:13:16 +05302314 ret = cpufreq_set_policy(policy, &new_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002315
Aaron Plattnerfefa8ff2014-06-18 11:27:32 -07002316unlock:
viresh kumarad7722d2013-10-18 19:10:15 +05302317 up_write(&policy->rwsem);
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08002318
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302319 cpufreq_cpu_put(policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002320 return ret;
2321}
2322EXPORT_SYMBOL(cpufreq_update_policy);
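/*
 * Usage sketch (illustrative): platform code that learns of a new
 * firmware-imposed limit (an ACPI _PPC change, for instance) typically clamps
 * new_policy->max from a CPUFREQ_POLICY_NOTIFIER at CPUFREQ_ADJUST time and
 * then triggers a re-evaluation with:
 *
 *	cpufreq_update_policy(cpu);
 */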
2323
Paul Gortmaker27609842013-06-19 13:54:04 -04002324static int cpufreq_cpu_callback(struct notifier_block *nfb,
Ashok Rajc32b6b82005-10-30 14:59:54 -08002325 unsigned long action, void *hcpu)
2326{
2327 unsigned int cpu = (unsigned long)hcpu;
Ashok Rajc32b6b82005-10-30 14:59:54 -08002328
Rafael J. Wysocki0b275352015-07-29 03:03:44 +02002329 switch (action & ~CPU_TASKS_FROZEN) {
2330 case CPU_ONLINE:
2331 cpufreq_online(cpu);
2332 break;
Srivatsa S. Bhat5302c3f2013-07-30 04:25:25 +05302333
Rafael J. Wysocki0b275352015-07-29 03:03:44 +02002334 case CPU_DOWN_PREPARE:
2335 cpufreq_offline_prepare(cpu);
2336 break;
Srivatsa S. Bhat1aee40a2013-09-07 01:23:27 +05302337
Rafael J. Wysocki0b275352015-07-29 03:03:44 +02002338 case CPU_POST_DEAD:
2339 cpufreq_offline_finish(cpu);
2340 break;
Srivatsa S. Bhat5302c3f2013-07-30 04:25:25 +05302341
Rafael J. Wysocki0b275352015-07-29 03:03:44 +02002342 case CPU_DOWN_FAILED:
2343 cpufreq_online(cpu);
2344 break;
Ashok Rajc32b6b82005-10-30 14:59:54 -08002345 }
2346 return NOTIFY_OK;
2347}
2348
Neal Buckendahl9c36f742010-06-22 22:02:44 -05002349static struct notifier_block __refdata cpufreq_cpu_notifier = {
Viresh Kumarbb176f72013-06-19 14:19:33 +05302350 .notifier_call = cpufreq_cpu_callback,
Ashok Rajc32b6b82005-10-30 14:59:54 -08002351};
Linus Torvalds1da177e2005-04-16 15:20:36 -07002352
2353/*********************************************************************
Lukasz Majewski6f19efc2013-12-20 15:24:49 +01002354 * BOOST *
2355 *********************************************************************/
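/*
 * Software fallback for ->set_boost(): refresh every active policy's limits
 * from its frequency table so boost frequencies become available (or
 * unavailable), then let the governor re-evaluate the new limits.
 */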
2356static int cpufreq_boost_set_sw(int state)
2357{
2358 struct cpufreq_frequency_table *freq_table;
2359 struct cpufreq_policy *policy;
2360 int ret = -EINVAL;
2361
Viresh Kumarf9637352015-05-12 12:20:11 +05302362 for_each_active_policy(policy) {
Lukasz Majewski6f19efc2013-12-20 15:24:49 +01002363 freq_table = cpufreq_frequency_get_table(policy->cpu);
2364 if (freq_table) {
2365 ret = cpufreq_frequency_table_cpuinfo(policy,
2366 freq_table);
2367 if (ret) {
2368 pr_err("%s: Policy frequency update failed\n",
2369 __func__);
2370 break;
2371 }
2372 policy->user_policy.max = policy->max;
2373 __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2374 }
2375 }
2376
2377 return ret;
2378}
2379
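/*
 * Flip the driver-wide boost_enabled state and ask the driver to apply it;
 * roll the state back if the driver reports a failure.
 */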
2380int cpufreq_boost_trigger_state(int state)
2381{
2382 unsigned long flags;
2383 int ret = 0;
2384
2385 if (cpufreq_driver->boost_enabled == state)
2386 return 0;
2387
2388 write_lock_irqsave(&cpufreq_driver_lock, flags);
2389 cpufreq_driver->boost_enabled = state;
2390 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2391
2392 ret = cpufreq_driver->set_boost(state);
2393 if (ret) {
2394 write_lock_irqsave(&cpufreq_driver_lock, flags);
2395 cpufreq_driver->boost_enabled = !state;
2396 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2397
Joe Perchese837f9b2014-03-11 10:03:00 -07002398 pr_err("%s: Cannot %s BOOST\n",
2399 __func__, state ? "enable" : "disable");
Lukasz Majewski6f19efc2013-12-20 15:24:49 +01002400 }
2401
2402 return ret;
2403}
2404
2405int cpufreq_boost_supported(void)
2406{
2407 if (likely(cpufreq_driver))
2408 return cpufreq_driver->boost_supported;
2409
2410 return 0;
2411}
2412EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
2413
Viresh Kumar44139ed2015-07-29 16:23:09 +05302414static int create_boost_sysfs_file(void)
2415{
2416 int ret;
2417
2418 if (!cpufreq_boost_supported())
2419 return 0;
2420
2421 /*
2422	 * If the driver does not provide a function to enable boost,
2423	 * use cpufreq_boost_set_sw() as the default.
2424 */
2425 if (!cpufreq_driver->set_boost)
2426 cpufreq_driver->set_boost = cpufreq_boost_set_sw;
2427
2428 ret = cpufreq_sysfs_create_file(&boost.attr);
2429 if (ret)
2430 pr_err("%s: cannot register global BOOST sysfs file\n",
2431 __func__);
2432
2433 return ret;
2434}
2435
2436static void remove_boost_sysfs_file(void)
2437{
2438 if (cpufreq_boost_supported())
2439 cpufreq_sysfs_remove_file(&boost.attr);
2440}
2441
2442int cpufreq_enable_boost_support(void)
2443{
2444 if (!cpufreq_driver)
2445 return -EINVAL;
2446
2447 if (cpufreq_boost_supported())
2448 return 0;
2449
2450 cpufreq_driver->boost_supported = true;
2451
2452 /* This will get removed on driver unregister */
2453 return create_boost_sysfs_file();
2454}
2455EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
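/*
 * Illustrative sketch (not part of this file): a driver that discovers
 * boost (turbo) frequencies while initialising a policy can expose the
 * global boost control like this.  example_has_boost_freqs() and the rest
 * of the ->init() body are hypothetical placeholders.
 */
#if 0
static int example_boost_aware_init(struct cpufreq_policy *policy)
{
	int ret;

	if (example_has_boost_freqs()) {
		/* Creates the global "boost" sysfs file on first use. */
		ret = cpufreq_enable_boost_support();
		if (ret)
			return ret;
	}

	/* ... normal policy initialisation continues here ... */
	return 0;
}
#endif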
2456
Lukasz Majewski6f19efc2013-12-20 15:24:49 +01002457int cpufreq_boost_enabled(void)
2458{
2459 return cpufreq_driver->boost_enabled;
2460}
2461EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2462
2463/*********************************************************************
Linus Torvalds1da177e2005-04-16 15:20:36 -07002464 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2465 *********************************************************************/
2466
2467/**
2468 * cpufreq_register_driver - register a CPU Frequency driver
2469 * @driver_data: A struct cpufreq_driver containing the values
2470 * submitted by the CPU Frequency driver.
2471 *
Viresh Kumarbb176f72013-06-19 14:19:33 +05302472 * Registers a CPU Frequency driver with this core code. Returns zero on
Linus Torvalds1da177e2005-04-16 15:20:36 -07002473 * success, -EEXIST when another driver got here first
Dave Jones32ee8c32006-02-28 00:43:23 -05002474 * (and isn't unregistered in the meantime).
Linus Torvalds1da177e2005-04-16 15:20:36 -07002475 *
2476 */
Linus Torvalds221dee22007-02-26 14:55:48 -08002477int cpufreq_register_driver(struct cpufreq_driver *driver_data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002478{
2479 unsigned long flags;
2480 int ret;
2481
Konrad Rzeszutek Wilka7b422c2012-03-13 19:18:39 -04002482 if (cpufreq_disabled())
2483 return -ENODEV;
2484
Linus Torvalds1da177e2005-04-16 15:20:36 -07002485 if (!driver_data || !driver_data->verify || !driver_data->init ||
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05302486 !(driver_data->setpolicy || driver_data->target_index ||
Rafael J. Wysocki98322352014-03-19 12:48:30 +01002487 driver_data->target) ||
2488 (driver_data->setpolicy && (driver_data->target_index ||
Viresh Kumar1c03a2d2014-06-02 22:49:28 +05302489 driver_data->target)) ||
2490 (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002491 return -EINVAL;
2492
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02002493 pr_debug("trying to register driver %s\n", driver_data->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002494
Rafael J. Wysockifdd320d2015-07-30 01:45:07 +02002495 /* Protect against concurrent CPU online/offline. */
2496 get_online_cpus();
2497
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00002498 write_lock_irqsave(&cpufreq_driver_lock, flags);
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02002499 if (cpufreq_driver) {
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00002500 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
Rafael J. Wysockifdd320d2015-07-30 01:45:07 +02002501 ret = -EEXIST;
2502 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002503 }
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02002504 cpufreq_driver = driver_data;
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00002505 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002506
Viresh Kumarbc68b7d2015-01-02 12:34:30 +05302507 if (driver_data->setpolicy)
2508 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2509
Viresh Kumar44139ed2015-07-29 16:23:09 +05302510 ret = create_boost_sysfs_file();
2511 if (ret)
2512 goto err_null_driver;
Lukasz Majewski6f19efc2013-12-20 15:24:49 +01002513
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002514 ret = subsys_interface_register(&cpufreq_interface);
Jiri Slaby8f5bc2a2011-03-01 17:41:10 +01002515 if (ret)
Lukasz Majewski6f19efc2013-12-20 15:24:49 +01002516 goto err_boost_unreg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002517
Viresh Kumarce1bcfe2015-01-02 12:34:35 +05302518 if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
2519 list_empty(&cpufreq_policy_list)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002520 /* if all ->init() calls failed, unregister */
Viresh Kumarce1bcfe2015-01-02 12:34:35 +05302521 pr_debug("%s: No CPU initialized for driver %s\n", __func__,
2522 driver_data->name);
2523 goto err_if_unreg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002524 }
2525
Jiri Slaby8f5bc2a2011-03-01 17:41:10 +01002526 register_hotcpu_notifier(&cpufreq_cpu_notifier);
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02002527 pr_debug("driver %s up and running\n", driver_data->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002528
Rafael J. Wysockifdd320d2015-07-30 01:45:07 +02002529out:
2530 put_online_cpus();
2531 return ret;
2532
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002533err_if_unreg:
2534 subsys_interface_unregister(&cpufreq_interface);
Lukasz Majewski6f19efc2013-12-20 15:24:49 +01002535err_boost_unreg:
Viresh Kumar44139ed2015-07-29 16:23:09 +05302536 remove_boost_sysfs_file();
Jiri Slaby8f5bc2a2011-03-01 17:41:10 +01002537err_null_driver:
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00002538 write_lock_irqsave(&cpufreq_driver_lock, flags);
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02002539 cpufreq_driver = NULL;
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00002540 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
Rafael J. Wysockifdd320d2015-07-30 01:45:07 +02002541 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002542}
2543EXPORT_SYMBOL_GPL(cpufreq_register_driver);
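/*
 * Illustrative sketch (not part of this file): the minimum a platform
 * driver typically wires up before calling cpufreq_register_driver().
 * The "example" name, frequency table, latency value and target callback
 * body are hypothetical placeholders.
 */
#if 0
static struct cpufreq_frequency_table example_freq_table[] = {
	{ .frequency = 500000 },		/* kHz */
	{ .frequency = 1000000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

static int example_cpufreq_init(struct cpufreq_policy *policy)
{
	/* 100 us transition latency (in ns), made up for the sketch. */
	return cpufreq_generic_init(policy, example_freq_table, 100000);
}

static int example_cpufreq_target(struct cpufreq_policy *policy,
				  unsigned int index)
{
	/* Program the hardware to example_freq_table[index].frequency here. */
	return 0;
}

static struct cpufreq_driver example_cpufreq_driver = {
	.name		= "example",
	.init		= example_cpufreq_init,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= example_cpufreq_target,
};

static int __init example_cpufreq_module_init(void)
{
	/* Fails with -EEXIST if another cpufreq driver is already registered. */
	return cpufreq_register_driver(&example_cpufreq_driver);
}
module_init(example_cpufreq_module_init);
#endif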
2544
Linus Torvalds1da177e2005-04-16 15:20:36 -07002545/**
2546 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2547 *
Viresh Kumarbb176f72013-06-19 14:19:33 +05302548 * Unregister the current CPUFreq driver. Only call this from the driver
Linus Torvalds1da177e2005-04-16 15:20:36 -07002549 * that registered it, i.e. only after a successful registration.
2550 * Returns zero on success, and -EINVAL if @driver is not the currently
2551 * registered driver.
2552 */
Linus Torvalds221dee22007-02-26 14:55:48 -08002553int cpufreq_unregister_driver(struct cpufreq_driver *driver)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002554{
2555 unsigned long flags;
2556
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02002557 if (!cpufreq_driver || (driver != cpufreq_driver))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002558 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002559
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02002560 pr_debug("unregistering driver %s\n", driver->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002561
Sebastian Andrzej Siewior454d3a22015-07-22 17:59:11 +02002562 /* Protect against concurrent cpu hotplug */
2563 get_online_cpus();
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002564 subsys_interface_unregister(&cpufreq_interface);
Viresh Kumar44139ed2015-07-29 16:23:09 +05302565 remove_boost_sysfs_file();
Chandra Seetharaman65edc682006-06-27 02:54:08 -07002566 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002567
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00002568 write_lock_irqsave(&cpufreq_driver_lock, flags);
Viresh Kumar6eed9402013-08-06 22:53:11 +05302569
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02002570 cpufreq_driver = NULL;
Viresh Kumar6eed9402013-08-06 22:53:11 +05302571
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00002572 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
Sebastian Andrzej Siewior454d3a22015-07-22 17:59:11 +02002573 put_online_cpus();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002574
2575 return 0;
2576}
2577EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
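/*
 * Illustrative sketch (not part of this file): the matching teardown for
 * the hypothetical driver registered in the sketch above.
 */
#if 0
static void __exit example_cpufreq_module_exit(void)
{
	cpufreq_unregister_driver(&example_cpufreq_driver);
}
module_exit(example_cpufreq_module_exit);
#endif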
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08002578
Doug Anderson90de2a42014-12-23 22:09:48 -08002579/*
2580 * Stop cpufreq at shutdown to make sure it isn't holding any locks
2581 * or mutexes when secondary CPUs are halted.
2582 */
2583static struct syscore_ops cpufreq_syscore_ops = {
2584 .shutdown = cpufreq_suspend,
2585};
2586
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08002587static int __init cpufreq_core_init(void)
2588{
Konrad Rzeszutek Wilka7b422c2012-03-13 19:18:39 -04002589 if (cpufreq_disabled())
2590 return -ENODEV;
2591
Viresh Kumar2361be22013-05-17 16:09:09 +05302592 cpufreq_global_kobject = kobject_create();
Thomas Renninger8aa84ad2009-07-24 15:25:05 +02002593 BUG_ON(!cpufreq_global_kobject);
2594
Doug Anderson90de2a42014-12-23 22:09:48 -08002595 register_syscore_ops(&cpufreq_syscore_ops);
2596
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08002597 return 0;
2598}
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08002599core_initcall(cpufreq_core_init);