blob: 497935a93614ca47a2b002fce1e691a4a0cb4100 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
Viresh Kumarbb176f72013-06-19 14:19:33 +05306 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 *
Ashok Rajc32b6b82005-10-30 14:59:54 -08008 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
Dave Jones32ee8c32006-02-28 00:43:23 -05009 * Added handling for CPU hotplug
Dave Jones8ff69732006-03-05 03:37:23 -050010 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11 * Fix handling for CPU hotplug -- affected CPUs
Ashok Rajc32b6b82005-10-30 14:59:54 -080012 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070013 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
Linus Torvalds1da177e2005-04-16 15:20:36 -070016 */
17
Viresh Kumardb701152012-10-23 01:29:03 +020018#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
Viresh Kumar5ff0a262013-08-06 22:53:03 +053020#include <linux/cpu.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070021#include <linux/cpufreq.h>
22#include <linux/delay.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070023#include <linux/device.h>
Viresh Kumar5ff0a262013-08-06 22:53:03 +053024#include <linux/init.h>
25#include <linux/kernel_stat.h>
26#include <linux/module.h>
akpm@osdl.org3fc54d32006-01-13 15:54:22 -080027#include <linux/mutex.h>
Viresh Kumar5ff0a262013-08-06 22:53:03 +053028#include <linux/slab.h>
Viresh Kumar2f0aea92014-03-04 11:00:26 +080029#include <linux/suspend.h>
Doug Anderson90de2a42014-12-23 22:09:48 -080030#include <linux/syscore_ops.h>
Viresh Kumar5ff0a262013-08-06 22:53:03 +053031#include <linux/tick.h>
Thomas Renninger6f4f2722010-04-20 13:17:36 +020032#include <trace/events/power.h>
33
Viresh Kumarb4f06762015-01-27 14:06:08 +053034/* Macros to iterate over lists */
35/* Iterate over online CPUs policies */
36static LIST_HEAD(cpufreq_policy_list);
37#define for_each_policy(__policy) \
38 list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)
39
Viresh Kumarf7b27062015-01-27 14:06:09 +053040/* Iterate over governors */
41static LIST_HEAD(cpufreq_governor_list);
42#define for_each_governor(__governor) \
43 list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
44
Linus Torvalds1da177e2005-04-16 15:20:36 -070045/**
Dave Jonescd878472006-08-11 17:59:28 -040046 * The "cpufreq driver" - the arch- or hardware-dependent low
Linus Torvalds1da177e2005-04-16 15:20:36 -070047 * level driver of CPUFreq support, and its spinlock. This lock
48 * also protects the cpufreq_cpu_data array.
49 */
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +020050static struct cpufreq_driver *cpufreq_driver;
Mike Travis7a6aedf2008-03-25 15:06:53 -070051static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
Srivatsa S. Bhat84148092013-07-30 04:25:10 +053052static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
Viresh Kumarbb176f72013-06-19 14:19:33 +053053static DEFINE_RWLOCK(cpufreq_driver_lock);
Jane Li6f1e4ef2014-01-03 17:17:41 +080054DEFINE_MUTEX(cpufreq_governor_lock);
Viresh Kumarbb176f72013-06-19 14:19:33 +053055
Thomas Renninger084f3492007-07-09 11:35:28 -070056/* This one keeps track of the previously set governor of a removed CPU */
Dmitry Monakhove77b89f2009-10-05 00:38:55 +040057static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
Linus Torvalds1da177e2005-04-16 15:20:36 -070058
Viresh Kumar2f0aea92014-03-04 11:00:26 +080059/* Flag to suspend/resume CPUFreq governors */
60static bool cpufreq_suspended;
Linus Torvalds1da177e2005-04-16 15:20:36 -070061
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +053062static inline bool has_target(void)
63{
64 return cpufreq_driver->target_index || cpufreq_driver->target;
65}
66
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -080067/*
Viresh Kumar6eed9402013-08-06 22:53:11 +053068 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
69 * sections
70 */
71static DECLARE_RWSEM(cpufreq_rwsem);
72
Linus Torvalds1da177e2005-04-16 15:20:36 -070073/* internal prototypes */
Dave Jones29464f22009-01-18 01:37:11 -050074static int __cpufreq_governor(struct cpufreq_policy *policy,
75 unsigned int event);
Viresh Kumard92d50a2015-01-02 12:34:29 +053076static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
David Howells65f27f32006-11-22 14:55:48 +000077static void handle_update(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070078
79/**
Dave Jones32ee8c32006-02-28 00:43:23 -050080 * Two notifier lists: the "policy" list is involved in the
81 * validation process for a new CPU frequency policy; the
Linus Torvalds1da177e2005-04-16 15:20:36 -070082 * "transition" list for kernel code that needs to handle
83 * changes to devices when the CPU clock speed changes.
84 * The mutex locks both lists.
85 */
Alan Sterne041c682006-03-27 01:16:30 -080086static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
Alan Sternb4dfdbb2006-10-04 02:17:06 -070087static struct srcu_notifier_head cpufreq_transition_notifier_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -070088
Cesar Eduardo Barros74212ca2008-02-16 08:41:24 -020089static bool init_cpufreq_transition_notifier_list_called;
/*
 * Initialize the SRCU head for the transition notifier chain and remember
 * that it has been done; cpufreq_register_notifier() can then refuse to run
 * before this initcall (see init_cpufreq_transition_notifier_list_called).
 */
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
/* pure_initcall: must run before any notifier registration. */
pure_initcall(init_cpufreq_transition_notifier_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -070097
Konrad Rzeszutek Wilka7b422c2012-03-13 19:18:39 -040098static int off __read_mostly;
/* Non-zero once disable_cpufreq() has been called. */
static int cpufreq_disabled(void)
{
	return off;
}
/*
 * Permanently disable the cpufreq subsystem: cpufreq_disabled() returns
 * non-zero from this point on.  There is no way to re-enable it.
 */
void disable_cpufreq(void)
{
	off = 1;
}
Dave Jones29464f22009-01-18 01:37:11 -0500107static DEFINE_MUTEX(cpufreq_governor_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700108
Viresh Kumar4d5dcc42013-03-27 15:58:58 +0000109bool have_governor_per_policy(void)
110{
Viresh Kumar0b981e72013-10-02 14:13:18 +0530111 return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
Viresh Kumar4d5dcc42013-03-27 15:58:58 +0000112}
Viresh Kumar3f869d62013-05-16 05:09:56 +0000113EXPORT_SYMBOL_GPL(have_governor_per_policy);
Viresh Kumar4d5dcc42013-03-27 15:58:58 +0000114
Viresh Kumar944e9a02013-05-16 05:09:57 +0000115struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
116{
117 if (have_governor_per_policy())
118 return &policy->kobj;
119 else
120 return cpufreq_global_kobject;
121}
122EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
123
Viresh Kumar72a4ce32013-05-17 11:26:32 +0000124static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
125{
126 u64 idle_time;
127 u64 cur_wall_time;
128 u64 busy_time;
129
130 cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
131
132 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
133 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
134 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
135 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
136 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
137 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
138
139 idle_time = cur_wall_time - busy_time;
140 if (wall)
141 *wall = cputime_to_usecs(cur_wall_time);
142
143 return cputime_to_usecs(idle_time);
144}
145
146u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
147{
148 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
149
150 if (idle_time == -1ULL)
151 return get_cpu_idle_time_jiffy(cpu, wall);
152 else if (!io_busy)
153 idle_time += get_cpu_iowait_time_us(cpu, wall);
154
155 return idle_time;
156}
157EXPORT_SYMBOL_GPL(get_cpu_idle_time);
158
Viresh Kumar70e9e772013-10-03 20:29:07 +0530159/*
160 * This is a generic cpufreq init() routine which can be used by cpufreq
161 * drivers of SMP systems. It will do following:
162 * - validate & show freq table passed
163 * - set policies transition latency
164 * - policy->cpus with all possible CPUs
165 */
int cpufreq_generic_init(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table,
		unsigned int transition_latency)
{
	int ret;

	/* Validate the table and expose it through the policy's sysfs dir. */
	ret = cpufreq_table_validate_and_show(policy, table);
	if (ret) {
		pr_err("%s: invalid frequency table: %d\n", __func__, ret);
		return ret;
	}

	policy->cpuinfo.transition_latency = transition_latency;

	/*
	 * The driver only supports the SMP configuration where all
	 * processors share the clock and voltage.
	 */
	cpumask_setall(policy->cpus);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
189
Viresh Kumar652ed952014-01-09 20:38:43 +0530190unsigned int cpufreq_generic_get(unsigned int cpu)
191{
192 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
193
194 if (!policy || IS_ERR(policy->clk)) {
Joe Perchese837f9b2014-03-11 10:03:00 -0700195 pr_err("%s: No %s associated to cpu: %d\n",
196 __func__, policy ? "clk" : "policy", cpu);
Viresh Kumar652ed952014-01-09 20:38:43 +0530197 return 0;
198 }
199
200 return clk_get_rate(policy->clk) / 1000;
201}
202EXPORT_SYMBOL_GPL(cpufreq_generic_get);
203
/* Only for cpufreq core internal use */
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
	/* Raw per-CPU lookup: no reference counting, no locking. */
	return per_cpu(cpufreq_cpu_data, cpu);
}
209
/**
 * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
 *
 * @cpu: cpu to find policy for.
 *
 * This returns policy for 'cpu', returns NULL if it doesn't exist.
 * It also increments the kobject reference count to mark it busy and so would
 * require a corresponding call to cpufreq_cpu_put() to decrement it back.
 * If corresponding call cpufreq_cpu_put() isn't made, the policy wouldn't be
 * freed as that depends on the kobj count.
 *
 * It also takes a read-lock of 'cpufreq_rwsem' and doesn't put it back if a
 * valid policy is found. This is done to make sure the driver doesn't get
 * unregistered while the policy is being used.
 *
 * Return: A valid policy on success, otherwise NULL on failure.
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = NULL;
	unsigned long flags;

	if (WARN_ON(cpu >= nr_cpu_ids))
		return NULL;

	/* Pin the driver module; released below on failure, or by _put(). */
	if (!down_read_trylock(&cpufreq_rwsem))
		return NULL;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver) {
		/* get the CPU */
		policy = per_cpu(cpufreq_cpu_data, cpu);
		if (policy)
			kobject_get(&policy->kobj);
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/* No valid policy: the rwsem must not be left held. */
	if (!policy)
		up_read(&cpufreq_rwsem);

	return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
256
/**
 * cpufreq_cpu_put: Decrements the usage count of a policy
 *
 * @policy: policy earlier returned by cpufreq_cpu_get().
 *
 * This decrements the kobject reference count incremented earlier by calling
 * cpufreq_cpu_get().
 *
 * It also drops the read-lock of 'cpufreq_rwsem' taken at cpufreq_cpu_get().
 */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
	kobject_put(&policy->kobj);
	up_read(&cpufreq_rwsem);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
273
Linus Torvalds1da177e2005-04-16 15:20:36 -0700274/*********************************************************************
Linus Torvalds1da177e2005-04-16 15:20:36 -0700275 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
276 *********************************************************************/
277
278/**
279 * adjust_jiffies - adjust the system "loops_per_jiffy"
280 *
281 * This function alters the system "loops_per_jiffy" for the clock
282 * speed change. Note that loops_per_jiffy cannot be updated on SMP
Dave Jones32ee8c32006-02-28 00:43:23 -0500283 * systems as each CPU might be scaled differently. So, use the arch
Linus Torvalds1da177e2005-04-16 15:20:36 -0700284 * per-CPU loops_per_jiffy value wherever possible.
285 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
#ifndef CONFIG_SMP
	/* Reference values captured on the first transition seen. */
	static unsigned long l_p_j_ref;
	static unsigned int l_p_j_ref_freq;

	/* Nothing to do if the delay loop is frequency-independent. */
	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
			 l_p_j_ref, l_p_j_ref_freq);
	}
	/* Rescale only after the change actually took effect. */
	if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
			 loops_per_jiffy, ci->new);
	}
#endif
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700309
/*
 * Notify one PRECHANGE or POSTCHANGE event for a single value of freqs->cpu:
 * fix up stale frequency bookkeeping, call the transition notifier chain and
 * adjust loops_per_jiffy.  Must be called with interrupts enabled.
 */
static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		 state, freqs->new);

	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
					 freqs->old, policy->cur);
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %lu - CPU: %lu\n",
			 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		/* Record the new frequency once the change has happened. */
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}
Viresh Kumarbb176f72013-06-19 14:19:33 +0530354
/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	/* Notify once per CPU covered by the policy. */
	for_each_cpu(freqs->cpu, policy->cpus)
		__cpufreq_notify_transition(policy, freqs, state);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700369
Viresh Kumarf7ba3b42013-12-02 11:04:12 +0530370/* Do post notifications when there are chances that transition has failed */
Viresh Kumar236a9802014-03-24 13:35:46 +0530371static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
Viresh Kumarf7ba3b42013-12-02 11:04:12 +0530372 struct cpufreq_freqs *freqs, int transition_failed)
373{
374 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
375 if (!transition_failed)
376 return;
377
378 swap(freqs->old, freqs->new);
379 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
380 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
381}
Viresh Kumarf7ba3b42013-12-02 11:04:12 +0530382
void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs)
{

	/*
	 * Catch double invocations of _begin() which lead to self-deadlock.
	 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
	 * doesn't invoke _begin() on their behalf, and hence the chances of
	 * double invocations are very low. Moreover, there are scenarios
	 * where these checks can emit false-positive warnings in these
	 * drivers; so we avoid that by skipping them altogether.
	 */
	WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
				&& current == policy->transition_task);

wait:
	/* Sleep until no transition is in flight... */
	wait_event(policy->transition_wait, !policy->transition_ongoing);

	spin_lock(&policy->transition_lock);

	/* ...and re-check under the lock in case another waiter won the race. */
	if (unlikely(policy->transition_ongoing)) {
		spin_unlock(&policy->transition_lock);
		goto wait;
	}

	policy->transition_ongoing = true;
	policy->transition_task = current;

	spin_unlock(&policy->transition_lock);

	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
416
/*
 * Finish a transition started with cpufreq_freq_transition_begin(): send the
 * POSTCHANGE notifications (reverting on failure), clear the ongoing markers
 * and wake up anyone blocked in _begin().
 */
void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	if (unlikely(WARN_ON(!policy->transition_ongoing)))
		return;

	cpufreq_notify_post_transition(policy, freqs, transition_failed);

	policy->transition_ongoing = false;
	policy->transition_task = NULL;

	wake_up(&policy->transition_wait);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
431
Linus Torvalds1da177e2005-04-16 15:20:36 -0700432
Linus Torvalds1da177e2005-04-16 15:20:36 -0700433/*********************************************************************
434 * SYSFS INTERFACE *
435 *********************************************************************/
/* sysfs show handler for the global "boost" attribute. */
static ssize_t show_boost(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}
441
442static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
443 const char *buf, size_t count)
444{
445 int ret, enable;
446
447 ret = sscanf(buf, "%d", &enable);
448 if (ret != 1 || enable < 0 || enable > 1)
449 return -EINVAL;
450
451 if (cpufreq_boost_trigger_state(enable)) {
Joe Perchese837f9b2014-03-11 10:03:00 -0700452 pr_err("%s: Cannot %s BOOST!\n",
453 __func__, enable ? "enable" : "disable");
Lukasz Majewski6f19efc2013-12-20 15:24:49 +0100454 return -EINVAL;
455 }
456
Joe Perchese837f9b2014-03-11 10:03:00 -0700457 pr_debug("%s: cpufreq BOOST %s\n",
458 __func__, enable ? "enabled" : "disabled");
Lukasz Majewski6f19efc2013-12-20 15:24:49 +0100459
460 return count;
461}
462define_one_global_rw(boost);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700463
Viresh Kumar42f91fa2015-01-02 12:34:26 +0530464static struct cpufreq_governor *find_governor(const char *str_governor)
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700465{
466 struct cpufreq_governor *t;
467
Viresh Kumarf7b27062015-01-27 14:06:09 +0530468 for_each_governor(t)
Rasmus Villemoes7c4f4532014-09-29 15:50:11 +0200469 if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700470 return t;
471
472 return NULL;
473}
474
Linus Torvalds1da177e2005-04-16 15:20:36 -0700475/**
476 * cpufreq_parse_governor - parse a governor string
477 */
Dave Jones905d77c2008-03-05 14:28:32 -0500478static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700479 struct cpufreq_governor **governor)
480{
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700481 int err = -EINVAL;
482
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200483 if (!cpufreq_driver)
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700484 goto out;
485
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200486 if (cpufreq_driver->setpolicy) {
Rasmus Villemoes7c4f4532014-09-29 15:50:11 +0200487 if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700488 *policy = CPUFREQ_POLICY_PERFORMANCE;
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700489 err = 0;
Rasmus Villemoes7c4f4532014-09-29 15:50:11 +0200490 } else if (!strncasecmp(str_governor, "powersave",
Gautham R Shenoye08f5f52006-10-26 16:20:58 +0530491 CPUFREQ_NAME_LEN)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700492 *policy = CPUFREQ_POLICY_POWERSAVE;
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700493 err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700494 }
Viresh Kumar2e1cc3a2015-01-02 12:34:27 +0530495 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700496 struct cpufreq_governor *t;
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700497
akpm@osdl.org3fc54d32006-01-13 15:54:22 -0800498 mutex_lock(&cpufreq_governor_mutex);
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700499
Viresh Kumar42f91fa2015-01-02 12:34:26 +0530500 t = find_governor(str_governor);
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700501
Jeremy Fitzhardingeea714972006-07-06 12:32:01 -0700502 if (t == NULL) {
Kees Cook1a8e1462011-05-04 08:38:56 -0700503 int ret;
Jeremy Fitzhardingeea714972006-07-06 12:32:01 -0700504
Kees Cook1a8e1462011-05-04 08:38:56 -0700505 mutex_unlock(&cpufreq_governor_mutex);
506 ret = request_module("cpufreq_%s", str_governor);
507 mutex_lock(&cpufreq_governor_mutex);
Jeremy Fitzhardingeea714972006-07-06 12:32:01 -0700508
Kees Cook1a8e1462011-05-04 08:38:56 -0700509 if (ret == 0)
Viresh Kumar42f91fa2015-01-02 12:34:26 +0530510 t = find_governor(str_governor);
Jeremy Fitzhardingeea714972006-07-06 12:32:01 -0700511 }
512
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700513 if (t != NULL) {
514 *governor = t;
515 err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700516 }
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700517
akpm@osdl.org3fc54d32006-01-13 15:54:22 -0800518 mutex_unlock(&cpufreq_governor_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700519 }
Dave Jones29464f22009-01-18 01:37:11 -0500520out:
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700521 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700522}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700523
Linus Torvalds1da177e2005-04-16 15:20:36 -0700524/**
Gautham R Shenoye08f5f52006-10-26 16:20:58 +0530525 * cpufreq_per_cpu_attr_read() / show_##file_name() -
526 * print out cpufreq information
Linus Torvalds1da177e2005-04-16 15:20:36 -0700527 *
528 * Write out information from cpufreq_driver->policy[cpu]; object must be
529 * "unsigned int".
530 */
531
/* Generate a sysfs show handler that prints one policy field as "%u\n". */
#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
Dirk Brandewiec034b022014-10-13 08:37:40 -0700544
Viresh Kumar09347b22015-01-02 12:34:24 +0530545static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
Dirk Brandewiec034b022014-10-13 08:37:40 -0700546{
547 ssize_t ret;
548
549 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
550 ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
551 else
552 ret = sprintf(buf, "%u\n", policy->cur);
553 return ret;
554}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700555
Viresh Kumar037ce832013-10-02 14:13:16 +0530556static int cpufreq_set_policy(struct cpufreq_policy *policy,
Viresh Kumar3a3e9e02013-08-06 22:53:05 +0530557 struct cpufreq_policy *new_policy);
Thomas Renninger7970e082006-04-13 15:14:04 +0200558
Linus Torvalds1da177e2005-04-16 15:20:36 -0700559/**
560 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
561 */
/*
 * Generate a sysfs store handler that parses "%u" into one policy field,
 * applies it via cpufreq_set_policy() and, only on success, records the
 * value as the user's preference in policy->user_policy.
 */
#define store_one(file_name, object)			\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	int ret, temp;							\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	temp = new_policy.object;					\
	ret = cpufreq_set_policy(policy, &new_policy);		\
	if (!ret)							\
		policy->user_policy.object = temp;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700587
588/**
589 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
590 */
Dave Jones905d77c2008-03-05 14:28:32 -0500591static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
592 char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700593{
Viresh Kumard92d50a2015-01-02 12:34:29 +0530594 unsigned int cur_freq = __cpufreq_get(policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700595 if (!cur_freq)
596 return sprintf(buf, "<unknown>");
597 return sprintf(buf, "%u\n", cur_freq);
598}
599
Linus Torvalds1da177e2005-04-16 15:20:36 -0700600/**
601 * show_scaling_governor - show the current policy for the specified CPU
602 */
Dave Jones905d77c2008-03-05 14:28:32 -0500603static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700604{
Dave Jones29464f22009-01-18 01:37:11 -0500605 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700606 return sprintf(buf, "powersave\n");
607 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
608 return sprintf(buf, "performance\n");
609 else if (policy->governor)
viresh kumar4b972f02012-10-23 01:23:43 +0200610 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
Dave Jones29464f22009-01-18 01:37:11 -0500611 policy->governor->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700612 return -EINVAL;
613}
614
Linus Torvalds1da177e2005-04-16 15:20:36 -0700615/**
616 * store_scaling_governor - store policy for the specified CPU
617 */
Dave Jones905d77c2008-03-05 14:28:32 -0500618static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
619 const char *buf, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700620{
Srivatsa S. Bhat5136fa52013-09-07 01:24:06 +0530621 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700622 char str_governor[16];
623 struct cpufreq_policy new_policy;
624
625 ret = cpufreq_get_policy(&new_policy, policy->cpu);
626 if (ret)
627 return ret;
628
Dave Jones29464f22009-01-18 01:37:11 -0500629 ret = sscanf(buf, "%15s", str_governor);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700630 if (ret != 1)
631 return -EINVAL;
632
Gautham R Shenoye08f5f52006-10-26 16:20:58 +0530633 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
634 &new_policy.governor))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700635 return -EINVAL;
636
Viresh Kumar037ce832013-10-02 14:13:16 +0530637 ret = cpufreq_set_policy(policy, &new_policy);
Thomas Renninger7970e082006-04-13 15:14:04 +0200638
639 policy->user_policy.policy = policy->policy;
640 policy->user_policy.governor = policy->governor;
Thomas Renninger7970e082006-04-13 15:14:04 +0200641
Gautham R Shenoye08f5f52006-10-26 16:20:58 +0530642 if (ret)
643 return ret;
644 else
645 return count;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700646}
647
/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	/* scnprintf bounds the copy to CPUFREQ_NAME_PLEN and NUL-terminates */
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}
655
656/**
657 * show_scaling_available_governors - show the available CPUfreq governors
658 */
Dave Jones905d77c2008-03-05 14:28:32 -0500659static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
660 char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700661{
662 ssize_t i = 0;
663 struct cpufreq_governor *t;
664
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +0530665 if (!has_target()) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700666 i += sprintf(buf, "performance powersave");
667 goto out;
668 }
669
Viresh Kumarf7b27062015-01-27 14:06:09 +0530670 for_each_governor(t) {
Dave Jones29464f22009-01-18 01:37:11 -0500671 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
672 - (CPUFREQ_NAME_LEN + 2)))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700673 goto out;
viresh kumar4b972f02012-10-23 01:23:43 +0200674 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700675 }
Dave Jones7d5e3502006-02-02 17:03:42 -0500676out:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700677 i += sprintf(&buf[i], "\n");
678 return i;
679}
Darrick J. Wonge8628dd2008-04-18 13:31:12 -0700680
Lan Tianyuf4fd3792013-06-27 15:08:54 +0800681ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700682{
683 ssize_t i = 0;
684 unsigned int cpu;
685
Rusty Russell835481d2009-01-04 05:18:06 -0800686 for_each_cpu(cpu, mask) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700687 if (i)
688 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
689 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
690 if (i >= (PAGE_SIZE - 5))
Dave Jones29464f22009-01-18 01:37:11 -0500691 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700692 }
693 i += sprintf(&buf[i], "\n");
694 return i;
695}
Lan Tianyuf4fd3792013-06-27 15:08:54 +0800696EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700697
/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 *
 * Formats policy->related_cpus via cpufreq_show_cpus().
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->related_cpus, buf);
}
706
/**
 * show_affected_cpus - show the CPUs affected by each transition
 *
 * Formats policy->cpus (the online CPUs managed by this policy) via
 * cpufreq_show_cpus().
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->cpus, buf);
}
714
Venki Pallipadi9e769882007-10-26 10:18:21 -0700715static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
Dave Jones905d77c2008-03-05 14:28:32 -0500716 const char *buf, size_t count)
Venki Pallipadi9e769882007-10-26 10:18:21 -0700717{
718 unsigned int freq = 0;
719 unsigned int ret;
720
CHIKAMA masaki879000f2008-06-05 22:46:33 -0700721 if (!policy->governor || !policy->governor->store_setspeed)
Venki Pallipadi9e769882007-10-26 10:18:21 -0700722 return -EINVAL;
723
724 ret = sscanf(buf, "%u", &freq);
725 if (ret != 1)
726 return -EINVAL;
727
728 policy->governor->store_setspeed(policy, freq);
729
730 return count;
731}
732
/*
 * Report the governor's set speed via its show_setspeed() hook, or the
 * literal "<unsupported>" when the current governor has no such hook.
 */
static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700740
Thomas Renningere2f74f32009-11-19 12:31:01 +0100741/**
viresh kumar8bf1ac722012-10-23 01:23:33 +0200742 * show_bios_limit - show the current cpufreq HW/BIOS limitation
Thomas Renningere2f74f32009-11-19 12:31:01 +0100743 */
744static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
745{
746 unsigned int limit;
747 int ret;
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200748 if (cpufreq_driver->bios_limit) {
749 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
Thomas Renningere2f74f32009-11-19 12:31:01 +0100750 if (!ret)
751 return sprintf(buf, "%u\n", limit);
752 }
753 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
754}
755
/*
 * Per-policy sysfs attribute objects. The cpufreq_freq_attr_* macros bind
 * each attribute name to its show_<name>/store_<name> handlers above;
 * cpuinfo_cur_freq is root-readable only (0400).
 */
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700770
/* Attributes created by default in every policy's kobject directory */
static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL	/* sentinel */
};

/* Recover the policy / freq_attr from the embedded kobject / attribute */
#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700788
/*
 * sysfs ->show() dispatcher for all per-policy attributes.
 *
 * Pins the driver with a read-trylock on cpufreq_rwsem (failing with -EINVAL
 * rather than blocking — presumably the writer side is the driver
 * (un)registration path; confirm), then read-locks the policy while the
 * attribute's ->show() handler runs.
 */
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret;

	if (!down_read_trylock(&cpufreq_rwsem))
		return -EINVAL;

	down_read(&policy->rwsem);

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;	/* attribute exists but has no show handler */

	up_read(&policy->rwsem);
	up_read(&cpufreq_rwsem);

	return ret;
}
810
/*
 * sysfs ->store() dispatcher for all per-policy attributes.
 *
 * Lock order: block CPU hotplug first (get_online_cpus()) so policy->cpu
 * cannot go away, then pin the driver via cpufreq_rwsem (trylock, so we fail
 * instead of blocking), and finally write-lock the policy while the
 * attribute's ->store() handler runs.
 */
static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;

	get_online_cpus();

	/* The policy CPU may have been hotplugged out before we got here */
	if (!cpu_online(policy->cpu))
		goto unlock;

	if (!down_read_trylock(&cpufreq_rwsem))
		goto unlock;

	down_write(&policy->rwsem);

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;	/* attribute exists but has no store handler */

	up_write(&policy->rwsem);

	up_read(&cpufreq_rwsem);
unlock:
	put_online_cpus();

	return ret;
}
841
/*
 * kobject release callback: runs once the last reference to the policy
 * kobject is dropped, and wakes whoever waits on kobj_unregister (see
 * cpufreq_policy_put_kobj()/cpufreq_add_dev()).
 */
static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}
848
/* Route all policy-kobject sysfs I/O through the show()/store() dispatchers */
static const struct sysfs_ops sysfs_ops = {
	.show = show,
	.store = store,
};

/* kobject type for per-policy "cpufreq" directories */
static struct kobj_type ktype_cpufreq = {
	.sysfs_ops = &sysfs_ops,
	.default_attrs = default_attrs,
	.release = cpufreq_sysfs_release,
};

/* Global /sys/.../cpu/cpufreq kobject, added on demand (see below) */
struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

/* Usage count pairing cpufreq_get_global_kobject()/put_global_kobject() */
static int cpufreq_global_kobject_usage;
864
/*
 * Add the global "cpufreq" kobject under the cpu subsystem root on first
 * use; later calls only bump the usage count.
 * NOTE(review): the counter is not obviously protected here — callers appear
 * to rely on external serialization; confirm.
 */
int cpufreq_get_global_kobject(void)
{
	if (!cpufreq_global_kobject_usage++)
		return kobject_add(cpufreq_global_kobject,
				&cpu_subsys.dev_root->kobj, "%s", "cpufreq");

	return 0;
}
EXPORT_SYMBOL(cpufreq_get_global_kobject);
874
/* Drop one usage; delete the global kobject when the last user goes away */
void cpufreq_put_global_kobject(void)
{
	if (!--cpufreq_global_kobject_usage)
		kobject_del(cpufreq_global_kobject);
}
EXPORT_SYMBOL(cpufreq_put_global_kobject);
881
882int cpufreq_sysfs_create_file(const struct attribute *attr)
883{
884 int ret = cpufreq_get_global_kobject();
885
886 if (!ret) {
887 ret = sysfs_create_file(cpufreq_global_kobject, attr);
888 if (ret)
889 cpufreq_put_global_kobject();
890 }
891
892 return ret;
893}
894EXPORT_SYMBOL(cpufreq_sysfs_create_file);
895
/* Remove @attr and drop the usage reference taken at creation time */
void cpufreq_sysfs_remove_file(const struct attribute *attr)
{
	sysfs_remove_file(cpufreq_global_kobject, attr);
	cpufreq_put_global_kobject();
}
EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
902
/*
 * symlink affected CPUs: every CPU in policy->cpus except the policy owner
 * (which has the real directory) gets a "cpufreq" symlink pointing at the
 * policy kobject. Stops at the first sysfs_create_link() failure and returns
 * its error; earlier links are left for the caller's error path to clean up.
 */
static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	for_each_cpu(j, policy->cpus) {
		struct device *cpu_dev;

		if (j == policy->cpu)
			continue;

		pr_debug("Adding link for CPU: %u\n", j);
		cpu_dev = get_cpu_device(j);
		/* NOTE(review): cpu_dev assumed non-NULL for CPUs in the mask — confirm */
		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					"cpufreq");
		if (ret)
			break;
	}
	return ret;
}
924
/*
 * Populate the policy kobject with its attribute files: the driver's own
 * attributes, cpuinfo_cur_freq (only when the driver can read the HW
 * frequency via ->get), scaling_cur_freq, bios_limit (when supported), and
 * finally the per-CPU symlinks. @dev is unused here but kept for the caller's
 * signature. Returns 0 or the first sysfs error.
 */
static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
				     struct device *dev)
{
	struct freq_attr **drv_attr;
	int ret = 0;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while (drv_attr && *drv_attr) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			return ret;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			return ret;
	}

	ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
	if (ret)
		return ret;

	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			return ret;
	}

	/* On failure the caller's error path tears the whole kobject down */
	return cpufreq_add_dev_symlink(policy);
}
957
958static void cpufreq_init_policy(struct cpufreq_policy *policy)
959{
viresh kumar6e2c89d2014-03-04 11:43:59 +0800960 struct cpufreq_governor *gov = NULL;
Srivatsa S. Bhate18f1682013-07-30 04:24:23 +0530961 struct cpufreq_policy new_policy;
962 int ret = 0;
963
Viresh Kumard5b73cd2013-08-06 22:53:06 +0530964 memcpy(&new_policy, policy, sizeof(*policy));
Jason Barona27a9ab2013-12-19 22:50:50 +0000965
viresh kumar6e2c89d2014-03-04 11:43:59 +0800966 /* Update governor of new_policy to the governor used before hotplug */
Viresh Kumar42f91fa2015-01-02 12:34:26 +0530967 gov = find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
viresh kumar6e2c89d2014-03-04 11:43:59 +0800968 if (gov)
969 pr_debug("Restoring governor %s for cpu %d\n",
970 policy->governor->name, policy->cpu);
971 else
972 gov = CPUFREQ_DEFAULT_GOVERNOR;
973
974 new_policy.governor = gov;
975
Jason Barona27a9ab2013-12-19 22:50:50 +0000976 /* Use the default policy if its valid. */
977 if (cpufreq_driver->setpolicy)
viresh kumar6e2c89d2014-03-04 11:43:59 +0800978 cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
Dave Jonesecf7e462009-07-08 18:48:47 -0400979
980 /* set default policy */
Viresh Kumar037ce832013-10-02 14:13:16 +0530981 ret = cpufreq_set_policy(policy, &new_policy);
Dave Jonesecf7e462009-07-08 18:48:47 -0400982 if (ret) {
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +0200983 pr_debug("setting policy failed\n");
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200984 if (cpufreq_driver->exit)
985 cpufreq_driver->exit(policy);
Dave Jonesecf7e462009-07-08 18:48:47 -0400986 }
Dave Jones909a6942009-07-08 18:05:42 -0400987}
988
/*
 * Link an additional CPU into an existing policy: stop the governor, add the
 * CPU to policy->cpus and publish the policy pointer for it, restart the
 * governor, and finally create the per-CPU "cpufreq" sysfs symlink.
 * Returns 0 if the CPU was already managed, or the first error encountered.
 */
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
				  unsigned int cpu, struct device *dev)
{
	int ret = 0;
	unsigned long flags;

	/* Has this CPU been taken care of already? */
	if (cpumask_test_cpu(cpu, policy->cpus))
		return 0;

	/* The governor must not run while policy->cpus changes under it */
	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			return ret;
		}
	}

	down_write(&policy->rwsem);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpumask_set_cpu(cpu, policy->cpus);
	per_cpu(cpufreq_cpu_data, cpu) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	up_write(&policy->rwsem);

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
		if (!ret)
			ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

		if (ret) {
			pr_err("%s: Failed to start governor\n", __func__);
			return ret;
		}
	}

	return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001030
/*
 * Fetch the policy stashed in cpufreq_cpu_data_fallback for @cpu during a
 * light-weight teardown (suspend path), clearing its governor pointer so
 * cpufreq_init_policy() starts from a clean state. Returns NULL when no
 * saved policy exists.
 */
static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned long flags;

	read_lock_irqsave(&cpufreq_driver_lock, flags);

	policy = per_cpu(cpufreq_cpu_data_fallback, cpu);

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (policy)
		policy->governor = NULL;

	return policy;
}
1047
Srivatsa S. Bhate9698cc2013-07-30 04:24:11 +05301048static struct cpufreq_policy *cpufreq_policy_alloc(void)
1049{
1050 struct cpufreq_policy *policy;
1051
1052 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
1053 if (!policy)
1054 return NULL;
1055
1056 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
1057 goto err_free_policy;
1058
1059 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1060 goto err_free_cpumask;
1061
Lukasz Majewskic88a1f82013-08-06 22:53:08 +05301062 INIT_LIST_HEAD(&policy->policy_list);
viresh kumarad7722d2013-10-18 19:10:15 +05301063 init_rwsem(&policy->rwsem);
Srivatsa S. Bhat12478cf2014-03-24 13:35:44 +05301064 spin_lock_init(&policy->transition_lock);
1065 init_waitqueue_head(&policy->transition_wait);
Viresh Kumar818c5712015-01-02 12:34:38 +05301066 init_completion(&policy->kobj_unregister);
1067 INIT_WORK(&policy->update, handle_update);
viresh kumarad7722d2013-10-18 19:10:15 +05301068
Srivatsa S. Bhate9698cc2013-07-30 04:24:11 +05301069 return policy;
1070
1071err_free_cpumask:
1072 free_cpumask_var(policy->cpus);
1073err_free_policy:
1074 kfree(policy);
1075
1076 return NULL;
1077}
1078
/*
 * Drop the final reference to the policy kobject and wait until its release
 * callback (cpufreq_sysfs_release) has completed kobj_unregister, so no
 * sysfs user can still hold the policy when it is freed. Notifiers are told
 * first via CPUFREQ_REMOVE_POLICY.
 */
static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
{
	struct kobject *kobj;
	struct completion *cmp;

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_REMOVE_POLICY, policy);

	/* snapshot the pointers under the policy lock before dropping the ref */
	down_read(&policy->rwsem);
	kobj = &policy->kobj;
	cmp = &policy->kobj_unregister;
	up_read(&policy->rwsem);
	kobject_put(kobj);

	/*
	 * We need to make sure that the underlying kobj is
	 * actually not referenced anymore by anybody before we
	 * proceed with unloading.
	 */
	pr_debug("waiting for dropping of refcount\n");
	wait_for_completion(cmp);
	pr_debug("wait complete\n");
}
1102
/*
 * Release the cpumasks and the policy itself. Callers tear down the kobject
 * first (see the error paths in cpufreq_add_dev()).
 */
static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
	free_cpumask_var(policy->related_cpus);
	free_cpumask_var(policy->cpus);
	kfree(policy);
}
1109
/*
 * Make @cpu the policy owner: move the policy kobject (and hence its sysfs
 * directory) under @cpu_dev, then update policy->cpu under the policy rwsem.
 * Returns 0 on success or the kobject_move() error. Calling it with the
 * current owner is a caller bug (WARN_ON) but harmless.
 */
static int update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu,
			     struct device *cpu_dev)
{
	int ret;

	if (WARN_ON(cpu == policy->cpu))
		return 0;

	/* Move kobject to the new policy->cpu */
	ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
	if (ret) {
		pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
		return ret;
	}

	down_write(&policy->rwsem);
	policy->cpu = cpu;
	up_write(&policy->rwsem);

	return 0;
}
1131
/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 *
 * Three paths: (1) the CPU is already covered by an existing policy's
 * related_cpus — just link it in via cpufreq_add_policy_cpu(); (2) resume
 * (cpufreq_suspended set) — restore the policy saved at light-weight
 * teardown; (3) otherwise, full initialization of a brand-new policy.
 *
 * The Oracle says: try running cpufreq registration/unregistration
 * concurrently with cpu hotplugging and all hell will break loose. Tried to
 * clean this mess up, but more thorough testing is needed. - Mathieu
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int j, cpu = dev->id;
	int ret = -ENOMEM;
	struct cpufreq_policy *policy;
	unsigned long flags;
	bool recover_policy = cpufreq_suspended;

	if (cpu_is_offline(cpu))
		return 0;

	pr_debug("adding CPU %u\n", cpu);

	/* trylock: give up silently while the driver is unregistering */
	if (!down_read_trylock(&cpufreq_rwsem))
		return 0;

	/* Check if this CPU already has a policy to manage it */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_policy(policy) {
		if (cpumask_test_cpu(cpu, policy->related_cpus)) {
			read_unlock_irqrestore(&cpufreq_driver_lock, flags);
			ret = cpufreq_add_policy_cpu(policy, cpu, dev);
			up_read(&cpufreq_rwsem);
			return ret;
		}
	}
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/*
	 * Restore the saved policy when doing light-weight init and fall back
	 * to the full init if that fails.
	 */
	policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
	if (!policy) {
		recover_policy = false;
		policy = cpufreq_policy_alloc();
		if (!policy)
			goto nomem_out;
	}

	/*
	 * In the resume path, since we restore a saved policy, the assignment
	 * to policy->cpu is like an update of the existing policy, rather than
	 * the creation of a brand new one. So we need to perform this update
	 * by invoking update_policy_cpu().
	 */
	if (recover_policy && cpu != policy->cpu)
		WARN_ON(update_policy_cpu(policy, cpu, dev));
	else
		policy->cpu = cpu;

	cpumask_copy(policy->cpus, cpumask_of(cpu));

	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		pr_debug("initialization failed\n");
		goto err_set_policy_cpu;
	}

	down_write(&policy->rwsem);

	/* related cpus should atleast have policy->cpus */
	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);

	/*
	 * affected cpus must always be the one, which are online. We aren't
	 * managing offline cpus here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	if (!recover_policy) {
		policy->user_policy.min = policy->min;
		policy->user_policy.max = policy->max;

		/* prepare interface data */
		ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
					   &dev->kobj, "cpufreq");
		if (ret) {
			pr_err("%s: failed to init policy->kobj: %d\n",
			       __func__, ret);
			goto err_init_policy_kobj;
		}
	}

	/* publish the policy for every CPU it manages */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
		policy->cur = cpufreq_driver->get(policy->cpu);
		if (!policy->cur) {
			pr_err("%s: ->get() failed\n", __func__);
			goto err_get_freq;
		}
	}

	/*
	 * Sometimes boot loaders set CPU frequency to a value outside of
	 * frequency table present with cpufreq core. In such cases CPU might be
	 * unstable if it has to run on that frequency for long duration of time
	 * and so its better to set it to a frequency which is specified in
	 * freq-table. This also makes cpufreq stats inconsistent as
	 * cpufreq-stats would fail to register because current frequency of CPU
	 * isn't found in freq-table.
	 *
	 * Because we don't want this change to effect boot process badly, we go
	 * for the next freq which is >= policy->cur ('cur' must be set by now,
	 * otherwise we will end up setting freq to lowest of the table as 'cur'
	 * is initialized to zero).
	 *
	 * We are passing target-freq as "policy->cur - 1" otherwise
	 * __cpufreq_driver_target() would simply fail, as policy->cur will be
	 * equal to target-freq.
	 */
	if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
	    && has_target()) {
		/* Are we running at unknown frequency ? */
		ret = cpufreq_frequency_table_get_index(policy, policy->cur);
		if (ret == -EINVAL) {
			/* Warn user and fix it */
			pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
				__func__, policy->cpu, policy->cur);
			ret = __cpufreq_driver_target(policy, policy->cur - 1,
				CPUFREQ_RELATION_L);

			/*
			 * Reaching here after boot in a few seconds may not
			 * mean that system will remain stable at "unknown"
			 * frequency for longer duration. Hence, a BUG_ON().
			 */
			BUG_ON(ret);
			pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
				__func__, policy->cpu, policy->cur);
		}
	}

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_START, policy);

	if (!recover_policy) {
		ret = cpufreq_add_dev_interface(policy, dev);
		if (ret)
			goto err_out_unregister;
		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				CPUFREQ_CREATE_POLICY, policy);
	}

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	list_add(&policy->policy_list, &cpufreq_policy_list);
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpufreq_init_policy(policy);

	if (!recover_policy) {
		policy->user_policy.policy = policy->policy;
		policy->user_policy.governor = policy->governor;
	}
	up_write(&policy->rwsem);

	kobject_uevent(&policy->kobj, KOBJ_ADD);

	up_read(&cpufreq_rwsem);

	/* Callback for handling stuff after policy is ready */
	if (cpufreq_driver->ready)
		cpufreq_driver->ready(policy);

	pr_debug("initialization complete\n");

	return 0;

	/* error unwinding: later labels undo progressively less setup */
err_out_unregister:
err_get_freq:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!recover_policy) {
		kobject_put(&policy->kobj);
		wait_for_completion(&policy->kobj_unregister);
	}
err_init_policy_kobj:
	up_write(&policy->rwsem);

	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);
err_set_policy_cpu:
	if (recover_policy) {
		/* Do not leave stale fallback data behind. */
		per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
		cpufreq_policy_put_kobj(policy);
	}
	cpufreq_policy_free(policy);

nomem_out:
	up_read(&cpufreq_rwsem);

	return ret;
}
1345
/*
 * __cpufreq_remove_dev_prepare - first stage of taking a CPU out of cpufreq.
 * @dev: CPU device being removed
 * @sif: subsystem interface (unused here, kept for the callback signature)
 *
 * Stops the governor for the CPU's policy and, if other CPUs still share the
 * policy, migrates the policy kobject/sysfs ownership to one of them.  Must
 * be followed by __cpufreq_remove_dev_finish().  Returns 0 or -errno.
 */
static int __cpufreq_remove_dev_prepare(struct device *dev,
					struct subsys_interface *sif)
{
	unsigned int cpu = dev->id, cpus;
	int ret;
	unsigned long flags;
	struct cpufreq_policy *policy;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	policy = per_cpu(cpufreq_cpu_data, cpu);

	/* Save the policy somewhere when doing a light-weight tear-down */
	if (cpufreq_suspended)
		per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	if (has_target()) {
		/* Governor must be stopped before the policy CPU can change. */
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			return ret;
		}

		/*
		 * Remember the governor name so it can be restored if this
		 * CPU comes back.  NOTE(review): strncpy() does not
		 * NUL-terminate when the name fills CPUFREQ_NAME_LEN —
		 * relies on governor names being shorter; confirm.
		 */
		strncpy(per_cpu(cpufreq_cpu_governor, cpu),
			policy->governor->name, CPUFREQ_NAME_LEN);
	}

	/* Snapshot the sharing count under the policy lock. */
	down_read(&policy->rwsem);
	cpus = cpumask_weight(policy->cpus);
	up_read(&policy->rwsem);

	if (cpu != policy->cpu) {
		/* Not the policy owner: just drop this CPU's symlink. */
		sysfs_remove_link(&dev->kobj, "cpufreq");
	} else if (cpus > 1) {
		/* Nominate new CPU */
		int new_cpu = cpumask_any_but(policy->cpus, cpu);
		struct device *cpu_dev = get_cpu_device(new_cpu);

		sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
		ret = update_policy_cpu(policy, new_cpu, cpu_dev);
		if (ret) {
			/* Migration failed: try to put the symlink back. */
			if (sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					      "cpufreq"))
				pr_err("%s: Failed to restore kobj link to cpu:%d\n",
				       __func__, cpu_dev->id);
			return ret;
		}

		if (!cpufreq_suspended)
			pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
				 __func__, new_cpu, cpu);
	} else if (cpufreq_driver->stop_cpu) {
		/* Last CPU of the policy: let the driver quiesce it. */
		cpufreq_driver->stop_cpu(policy);
	}

	return 0;
}
1412
/*
 * __cpufreq_remove_dev_finish - second stage of taking a CPU out of cpufreq.
 * @dev: CPU device being removed
 * @sif: subsystem interface (unused here, kept for the callback signature)
 *
 * Clears the per-CPU policy pointer; if this was the last CPU of the policy,
 * exits the governor and frees the policy (unless suspended — then the data
 * is kept for a light-weight re-init), otherwise restarts the governor for
 * the remaining CPUs.  Returns 0 or -errno.
 */
static int __cpufreq_remove_dev_finish(struct device *dev,
				       struct subsys_interface *sif)
{
	unsigned int cpu = dev->id, cpus;
	int ret;
	unsigned long flags;
	struct cpufreq_policy *policy;

	/* Detach the per-CPU pointer atomically w.r.t. other lookups. */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	policy = per_cpu(cpufreq_cpu_data, cpu);
	per_cpu(cpufreq_cpu_data, cpu) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	down_write(&policy->rwsem);
	cpus = cpumask_weight(policy->cpus);

	if (cpus > 1)
		cpumask_clear_cpu(cpu, policy->cpus);
	up_write(&policy->rwsem);

	/* If cpu is last user of policy, free policy */
	if (cpus == 1) {
		if (has_target()) {
			ret = __cpufreq_governor(policy,
					CPUFREQ_GOV_POLICY_EXIT);
			if (ret) {
				pr_err("%s: Failed to exit governor\n",
				       __func__);
				return ret;
			}
		}

		/* Keep the kobject alive across suspend for fast resume. */
		if (!cpufreq_suspended)
			cpufreq_policy_put_kobj(policy);

		/*
		 * Perform the ->exit() even during light-weight tear-down,
		 * since this is a core component, and is essential for the
		 * subsequent light-weight ->init() to succeed.
		 */
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);

		/* Remove policy from list of active policies */
		write_lock_irqsave(&cpufreq_driver_lock, flags);
		list_del(&policy->policy_list);
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		if (!cpufreq_suspended)
			cpufreq_policy_free(policy);
	} else if (has_target()) {
		/* Policy still has users: bring the governor back up. */
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
		if (!ret)
			ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

		if (ret) {
			pr_err("%s: Failed to start governor\n", __func__);
			return ret;
		}
	}

	return 0;
}
1481
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301482/**
Viresh Kumar27a862e2013-10-02 14:13:14 +05301483 * cpufreq_remove_dev - remove a CPU device
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301484 *
1485 * Removes the cpufreq interface for a CPU device.
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301486 */
Kay Sievers8a25a2f2011-12-21 14:29:42 -08001487static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001488{
Kay Sievers8a25a2f2011-12-21 14:29:42 -08001489 unsigned int cpu = dev->id;
Viresh Kumar27a862e2013-10-02 14:13:14 +05301490 int ret;
Venki Pallipadiec282972007-03-26 12:03:19 -07001491
1492 if (cpu_is_offline(cpu))
1493 return 0;
1494
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301495 ret = __cpufreq_remove_dev_prepare(dev, sif);
Viresh Kumar27a862e2013-10-02 14:13:14 +05301496
1497 if (!ret)
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301498 ret = __cpufreq_remove_dev_finish(dev, sif);
Viresh Kumar27a862e2013-10-02 14:13:14 +05301499
1500 return ret;
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001501}
1502
David Howells65f27f32006-11-22 14:55:48 +00001503static void handle_update(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001504{
David Howells65f27f32006-11-22 14:55:48 +00001505 struct cpufreq_policy *policy =
1506 container_of(work, struct cpufreq_policy, update);
1507 unsigned int cpu = policy->cpu;
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02001508 pr_debug("handle_update for cpu %u called\n", cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001509 cpufreq_update_policy(cpu);
1510}
1511
1512/**
Viresh Kumarbb176f72013-06-19 14:19:33 +05301513 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
1514 * in deep trouble.
Viresh Kumara1e1dc42015-01-02 12:34:28 +05301515 * @policy: policy managing CPUs
Linus Torvalds1da177e2005-04-16 15:20:36 -07001516 * @new_freq: CPU frequency the CPU actually runs at
1517 *
Dave Jones29464f22009-01-18 01:37:11 -05001518 * We adjust to current frequency first, and need to clean up later.
1519 * So either call to cpufreq_update_policy() or schedule handle_update()).
Linus Torvalds1da177e2005-04-16 15:20:36 -07001520 */
Viresh Kumara1e1dc42015-01-02 12:34:28 +05301521static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301522 unsigned int new_freq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001523{
1524 struct cpufreq_freqs freqs;
Viresh Kumarb43a7ff2013-03-24 11:56:43 +05301525
Joe Perchese837f9b2014-03-11 10:03:00 -07001526 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
Viresh Kumara1e1dc42015-01-02 12:34:28 +05301527 policy->cur, new_freq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001528
Viresh Kumara1e1dc42015-01-02 12:34:28 +05301529 freqs.old = policy->cur;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001530 freqs.new = new_freq;
Viresh Kumarb43a7ff2013-03-24 11:56:43 +05301531
Viresh Kumar8fec0512014-03-24 13:35:45 +05301532 cpufreq_freq_transition_begin(policy, &freqs);
1533 cpufreq_freq_transition_end(policy, &freqs, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001534}
1535
Dave Jones32ee8c32006-02-28 00:43:23 -05001536/**
Dhaval Giani4ab70df2006-12-13 14:49:15 +05301537 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001538 * @cpu: CPU number
1539 *
1540 * This is the last known freq, without actually getting it from the driver.
1541 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1542 */
1543unsigned int cpufreq_quick_get(unsigned int cpu)
1544{
Dirk Brandewie9e21ba82013-02-06 09:02:08 -08001545 struct cpufreq_policy *policy;
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301546 unsigned int ret_freq = 0;
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001547
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001548 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1549 return cpufreq_driver->get(cpu);
Dirk Brandewie9e21ba82013-02-06 09:02:08 -08001550
1551 policy = cpufreq_cpu_get(cpu);
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001552 if (policy) {
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301553 ret_freq = policy->cur;
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001554 cpufreq_cpu_put(policy);
1555 }
1556
Dave Jones4d34a672008-02-07 16:33:49 -05001557 return ret_freq;
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001558}
1559EXPORT_SYMBOL(cpufreq_quick_get);
1560
Jesse Barnes3d737102011-06-28 10:59:12 -07001561/**
1562 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1563 * @cpu: CPU number
1564 *
1565 * Just return the max possible frequency for a given CPU.
1566 */
1567unsigned int cpufreq_quick_get_max(unsigned int cpu)
1568{
1569 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1570 unsigned int ret_freq = 0;
1571
1572 if (policy) {
1573 ret_freq = policy->max;
1574 cpufreq_cpu_put(policy);
1575 }
1576
1577 return ret_freq;
1578}
1579EXPORT_SYMBOL(cpufreq_quick_get_max);
1580
Viresh Kumard92d50a2015-01-02 12:34:29 +05301581static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001582{
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301583 unsigned int ret_freq = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001584
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001585 if (!cpufreq_driver->get)
Dave Jones4d34a672008-02-07 16:33:49 -05001586 return ret_freq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001587
Viresh Kumard92d50a2015-01-02 12:34:29 +05301588 ret_freq = cpufreq_driver->get(policy->cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001589
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301590 if (ret_freq && policy->cur &&
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001591 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301592 /* verify no discrepancy between actual and
1593 saved value exists */
1594 if (unlikely(ret_freq != policy->cur)) {
Viresh Kumara1e1dc42015-01-02 12:34:28 +05301595 cpufreq_out_of_sync(policy, ret_freq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001596 schedule_work(&policy->update);
1597 }
1598 }
1599
Dave Jones4d34a672008-02-07 16:33:49 -05001600 return ret_freq;
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001601}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001602
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001603/**
1604 * cpufreq_get - get the current CPU frequency (in kHz)
1605 * @cpu: CPU number
1606 *
1607 * Get the CPU current (static) CPU frequency
1608 */
1609unsigned int cpufreq_get(unsigned int cpu)
1610{
Aaron Plattner999976e2014-03-04 12:42:15 -08001611 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001612 unsigned int ret_freq = 0;
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001613
Aaron Plattner999976e2014-03-04 12:42:15 -08001614 if (policy) {
1615 down_read(&policy->rwsem);
Viresh Kumard92d50a2015-01-02 12:34:29 +05301616 ret_freq = __cpufreq_get(policy);
Aaron Plattner999976e2014-03-04 12:42:15 -08001617 up_read(&policy->rwsem);
Viresh Kumar26ca8692013-09-20 22:37:31 +05301618
Aaron Plattner999976e2014-03-04 12:42:15 -08001619 cpufreq_cpu_put(policy);
1620 }
Viresh Kumar6eed9402013-08-06 22:53:11 +05301621
Dave Jones4d34a672008-02-07 16:33:49 -05001622 return ret_freq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001623}
1624EXPORT_SYMBOL(cpufreq_get);
1625
/* Hooks cpufreq into the CPU subsystem for per-CPU device add/remove. */
static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};
1632
Viresh Kumare28867e2014-03-04 11:00:27 +08001633/*
1634 * In case platform wants some specific frequency to be configured
1635 * during suspend..
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001636 */
Viresh Kumare28867e2014-03-04 11:00:27 +08001637int cpufreq_generic_suspend(struct cpufreq_policy *policy)
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001638{
Viresh Kumare28867e2014-03-04 11:00:27 +08001639 int ret;
Dave Jones4bc5d342009-08-04 14:03:25 -04001640
Viresh Kumare28867e2014-03-04 11:00:27 +08001641 if (!policy->suspend_freq) {
1642 pr_err("%s: suspend_freq can't be zero\n", __func__);
1643 return -EINVAL;
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001644 }
1645
Viresh Kumare28867e2014-03-04 11:00:27 +08001646 pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1647 policy->suspend_freq);
1648
1649 ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1650 CPUFREQ_RELATION_H);
1651 if (ret)
1652 pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1653 __func__, policy->suspend_freq, ret);
1654
Dave Jonesc9060492008-02-07 16:32:18 -05001655 return ret;
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001656}
Viresh Kumare28867e2014-03-04 11:00:27 +08001657EXPORT_SYMBOL(cpufreq_generic_suspend);
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001658
1659/**
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001660 * cpufreq_suspend() - Suspend CPUFreq governors
Linus Torvalds1da177e2005-04-16 15:20:36 -07001661 *
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001662 * Called during system wide Suspend/Hibernate cycles for suspending governors
1663 * as some platforms can't change frequency after this point in suspend cycle.
1664 * Because some of the devices (like: i2c, regulators, etc) they use for
1665 * changing frequency are suspended quickly after this point.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001666 */
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001667void cpufreq_suspend(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001668{
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301669 struct cpufreq_policy *policy;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001670
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001671 if (!cpufreq_driver)
Rafael J. Wysockie00e56d2011-03-23 22:16:32 +01001672 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001673
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001674 if (!has_target())
Viresh Kumarb1b12ba2014-09-30 09:33:17 +05301675 goto suspend;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001676
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001677 pr_debug("%s: Suspending Governors\n", __func__);
1678
Viresh Kumarb4f06762015-01-27 14:06:08 +05301679 for_each_policy(policy) {
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001680 if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
1681 pr_err("%s: Failed to stop governor for policy: %p\n",
1682 __func__, policy);
1683 else if (cpufreq_driver->suspend
1684 && cpufreq_driver->suspend(policy))
1685 pr_err("%s: Failed to suspend driver: %p\n", __func__,
1686 policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001687 }
Viresh Kumarb1b12ba2014-09-30 09:33:17 +05301688
1689suspend:
1690 cpufreq_suspended = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001691}
1692
/**
 * cpufreq_resume() - Resume CPUFreq governors
 *
 * Called during system wide Suspend/Hibernate cycle for resuming governors that
 * are suspended with cpufreq_suspend().
 */
void cpufreq_resume(void)
{
	struct cpufreq_policy *policy;

	if (!cpufreq_driver)
		return;

	/* Clear the flag before restarting governors. */
	cpufreq_suspended = false;

	if (!has_target())
		return;

	pr_debug("%s: Resuming Governors\n", __func__);

	/* Driver resume first, then restart + re-apply limits per policy. */
	for_each_policy(policy) {
		if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
			pr_err("%s: Failed to resume driver: %p\n", __func__,
				policy);
		else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
		    || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
			pr_err("%s: Failed to start governor for policy: %p\n",
				__func__, policy);
	}

	/*
	 * schedule call cpufreq_update_policy() for first-online CPU, as that
	 * wouldn't be hotplugged-out on suspend. It will verify that the
	 * current freq is in sync with what we believe it to be.
	 */
	policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask));
	if (WARN_ON(!policy))
		return;

	schedule_work(&policy->update);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001734
Borislav Petkov9d950462013-01-20 10:24:28 +00001735/**
1736 * cpufreq_get_current_driver - return current driver's name
1737 *
1738 * Return the name string of the currently loaded cpufreq driver
1739 * or NULL, if none.
1740 */
1741const char *cpufreq_get_current_driver(void)
1742{
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001743 if (cpufreq_driver)
1744 return cpufreq_driver->name;
1745
1746 return NULL;
Borislav Petkov9d950462013-01-20 10:24:28 +00001747}
1748EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001749
Thomas Petazzoni51315cd2014-10-19 11:30:27 +02001750/**
1751 * cpufreq_get_driver_data - return current driver data
1752 *
1753 * Return the private data of the currently loaded cpufreq
1754 * driver, or NULL if no cpufreq driver is loaded.
1755 */
1756void *cpufreq_get_driver_data(void)
1757{
1758 if (cpufreq_driver)
1759 return cpufreq_driver->driver_data;
1760
1761 return NULL;
1762}
1763EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
1764
Linus Torvalds1da177e2005-04-16 15:20:36 -07001765/*********************************************************************
1766 * NOTIFIER LISTS INTERFACE *
1767 *********************************************************************/
1768
1769/**
1770 * cpufreq_register_notifier - register a driver with cpufreq
1771 * @nb: notifier function to register
1772 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1773 *
Dave Jones32ee8c32006-02-28 00:43:23 -05001774 * Add a driver to one of two lists: either a list of drivers that
Linus Torvalds1da177e2005-04-16 15:20:36 -07001775 * are notified about clock rate changes (once before and once after
1776 * the transition), or a list of drivers that are notified about
1777 * changes in cpufreq policy.
1778 *
1779 * This function may sleep, and has the same return conditions as
Alan Sterne041c682006-03-27 01:16:30 -08001780 * blocking_notifier_chain_register.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001781 */
1782int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1783{
1784 int ret;
1785
Dirk Brandewied5aaffa2013-01-17 16:22:21 +00001786 if (cpufreq_disabled())
1787 return -EINVAL;
1788
Cesar Eduardo Barros74212ca2008-02-16 08:41:24 -02001789 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1790
Linus Torvalds1da177e2005-04-16 15:20:36 -07001791 switch (list) {
1792 case CPUFREQ_TRANSITION_NOTIFIER:
Alan Sternb4dfdbb2006-10-04 02:17:06 -07001793 ret = srcu_notifier_chain_register(
Alan Sterne041c682006-03-27 01:16:30 -08001794 &cpufreq_transition_notifier_list, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001795 break;
1796 case CPUFREQ_POLICY_NOTIFIER:
Alan Sterne041c682006-03-27 01:16:30 -08001797 ret = blocking_notifier_chain_register(
1798 &cpufreq_policy_notifier_list, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799 break;
1800 default:
1801 ret = -EINVAL;
1802 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001803
1804 return ret;
1805}
1806EXPORT_SYMBOL(cpufreq_register_notifier);
1807
Linus Torvalds1da177e2005-04-16 15:20:36 -07001808/**
1809 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1810 * @nb: notifier block to be unregistered
Viresh Kumarbb176f72013-06-19 14:19:33 +05301811 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
Linus Torvalds1da177e2005-04-16 15:20:36 -07001812 *
1813 * Remove a driver from the CPU frequency notifier list.
1814 *
1815 * This function may sleep, and has the same return conditions as
Alan Sterne041c682006-03-27 01:16:30 -08001816 * blocking_notifier_chain_unregister.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001817 */
1818int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1819{
1820 int ret;
1821
Dirk Brandewied5aaffa2013-01-17 16:22:21 +00001822 if (cpufreq_disabled())
1823 return -EINVAL;
1824
Linus Torvalds1da177e2005-04-16 15:20:36 -07001825 switch (list) {
1826 case CPUFREQ_TRANSITION_NOTIFIER:
Alan Sternb4dfdbb2006-10-04 02:17:06 -07001827 ret = srcu_notifier_chain_unregister(
Alan Sterne041c682006-03-27 01:16:30 -08001828 &cpufreq_transition_notifier_list, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001829 break;
1830 case CPUFREQ_POLICY_NOTIFIER:
Alan Sterne041c682006-03-27 01:16:30 -08001831 ret = blocking_notifier_chain_unregister(
1832 &cpufreq_policy_notifier_list, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001833 break;
1834 default:
1835 ret = -EINVAL;
1836 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001837
1838 return ret;
1839}
1840EXPORT_SYMBOL(cpufreq_unregister_notifier);
1841
1842
1843/*********************************************************************
1844 * GOVERNORS *
1845 *********************************************************************/
1846
Viresh Kumar1c03a2d2014-06-02 22:49:28 +05301847/* Must set freqs->new to intermediate frequency */
1848static int __target_intermediate(struct cpufreq_policy *policy,
1849 struct cpufreq_freqs *freqs, int index)
1850{
1851 int ret;
1852
1853 freqs->new = cpufreq_driver->get_intermediate(policy, index);
1854
1855 /* We don't need to switch to intermediate freq */
1856 if (!freqs->new)
1857 return 0;
1858
1859 pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
1860 __func__, policy->cpu, freqs->old, freqs->new);
1861
1862 cpufreq_freq_transition_begin(policy, freqs);
1863 ret = cpufreq_driver->target_intermediate(policy, index);
1864 cpufreq_freq_transition_end(policy, freqs, ret);
1865
1866 if (ret)
1867 pr_err("%s: Failed to change to intermediate frequency: %d\n",
1868 __func__, ret);
1869
1870 return ret;
1871}
1872
/*
 * __target_index - switch to freq_table[index], with optional intermediate
 * step and transition notifications.  Returns 0 or -errno.
 */
static int __target_index(struct cpufreq_policy *policy,
			  struct cpufreq_frequency_table *freq_table, int index)
{
	struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
	unsigned int intermediate_freq = 0;
	int retval = -EINVAL;
	bool notify;

	/* Async-notification drivers send their own begin/end events. */
	notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
	if (notify) {
		/* Handle switching to intermediate frequency */
		if (cpufreq_driver->get_intermediate) {
			retval = __target_intermediate(policy, &freqs, index);
			if (retval)
				return retval;

			intermediate_freq = freqs.new;
			/* Set old freq to intermediate */
			if (intermediate_freq)
				freqs.old = freqs.new;
		}

		freqs.new = freq_table[index].frequency;
		pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
			 __func__, policy->cpu, freqs.old, freqs.new);

		cpufreq_freq_transition_begin(policy, &freqs);
	}

	retval = cpufreq_driver->target_index(policy, index);
	if (retval)
		pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
		       retval);

	if (notify) {
		cpufreq_freq_transition_end(policy, &freqs, retval);

		/*
		 * Failed after setting to intermediate freq? Driver should have
		 * reverted back to initial frequency and so should we. Check
		 * here for intermediate_freq instead of get_intermediate, in
		 * case we haven't switched to intermediate freq at all.
		 */
		if (unlikely(retval && intermediate_freq)) {
			freqs.old = intermediate_freq;
			freqs.new = policy->restore_freq;
			cpufreq_freq_transition_begin(policy, &freqs);
			cpufreq_freq_transition_end(policy, &freqs, 0);
		}
	}

	return retval;
}
1926
/*
 * __cpufreq_driver_target - set a new target frequency for a policy.
 * @policy: policy to act on (caller holds policy->rwsem)
 * @target_freq: requested frequency in kHz, clamped to [policy->min, max]
 * @relation: CPUFREQ_RELATION_* rounding rule
 *
 * Dispatches to the driver's ->target() or, for table-based drivers,
 * resolves an index and uses __target_index().  Returns 0 or -errno.
 */
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	unsigned int old_target_freq = target_freq;
	int retval = -EINVAL;

	if (cpufreq_disabled())
		return -ENODEV;

	/* Make sure that target_freq is within supported range */
	if (target_freq > policy->max)
		target_freq = policy->max;
	if (target_freq < policy->min)
		target_freq = policy->min;

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
		 policy->cpu, target_freq, relation, old_target_freq);

	/*
	 * This might look like a redundant call as we are checking it again
	 * after finding index. But it is left intentionally for cases where
	 * exactly same freq is called again and so we can save on few function
	 * calls.
	 */
	if (target_freq == policy->cur)
		return 0;

	/* Save last value to restore later on errors */
	policy->restore_freq = policy->cur;

	if (cpufreq_driver->target)
		retval = cpufreq_driver->target(policy, target_freq, relation);
	else if (cpufreq_driver->target_index) {
		struct cpufreq_frequency_table *freq_table;
		int index;

		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (unlikely(!freq_table)) {
			pr_err("%s: Unable to find freq_table\n", __func__);
			goto out;
		}

		retval = cpufreq_frequency_table_target(policy, freq_table,
				target_freq, relation, &index);
		if (unlikely(retval)) {
			pr_err("%s: Unable to find matching freq\n", __func__);
			goto out;
		}

		/* Already at the resolved frequency: nothing to do. */
		if (freq_table[index].frequency == policy->cur) {
			retval = 0;
			goto out;
		}

		retval = __target_index(policy, freq_table, index);
	}

out:
	return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1989
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990int cpufreq_driver_target(struct cpufreq_policy *policy,
1991 unsigned int target_freq,
1992 unsigned int relation)
1993{
Julia Lawallf1829e42008-07-25 22:44:53 +02001994 int ret = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001995
viresh kumarad7722d2013-10-18 19:10:15 +05301996 down_write(&policy->rwsem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001997
1998 ret = __cpufreq_driver_target(policy, target_freq, relation);
1999
viresh kumarad7722d2013-10-18 19:10:15 +05302000 up_write(&policy->rwsem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002001
Linus Torvalds1da177e2005-04-16 15:20:36 -07002002 return ret;
2003}
2004EXPORT_SYMBOL_GPL(cpufreq_driver_target);
2005
/*
 * __cpufreq_governor - deliver a governor event for @policy.
 *
 * @event is one of CPUFREQ_GOV_POLICY_INIT/EXIT, CPUFREQ_GOV_START/STOP or
 * CPUFREQ_GOV_LIMITS.  Returns 0 on success, -EINVAL/-EBUSY on refusal, or
 * whatever the governor callback returns.
 *
 * NOTE(review): callers are expected to hold policy->rwsem; not verifiable
 * from this function alone.  The governor_enabled bookkeeping below guards
 * against START/STOP being delivered twice in a row.
 */
static int __cpufreq_governor(struct cpufreq_policy *policy,
					unsigned int event)
{
	int ret;

	/* Only must be defined when default governor is known to have latency
	   restrictions, like e.g. conservative or ondemand.
	   That this is the case is already ensured in Kconfig
	 */
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
	struct cpufreq_governor *gov = &cpufreq_gov_performance;
#else
	struct cpufreq_governor *gov = NULL;
#endif

	/* Don't start any governor operations if we are entering suspend */
	if (cpufreq_suspended)
		return 0;
	/*
	 * Governor might not be initiated here if ACPI _PPC changed
	 * notification happened, so check it.
	 */
	if (!policy->governor)
		return -EINVAL;

	/*
	 * If the hardware's transition latency exceeds what the chosen
	 * governor can tolerate, fall back to the performance governor
	 * (when built in); otherwise refuse the request.
	 */
	if (policy->governor->max_transition_latency &&
	    policy->cpuinfo.transition_latency >
	    policy->governor->max_transition_latency) {
		if (!gov)
			return -EINVAL;
		else {
			pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
				policy->governor->name, gov->name);
			policy->governor = gov;
		}
	}

	/* Pin the governor module for the lifetime of the policy binding. */
	if (event == CPUFREQ_GOV_POLICY_INIT)
		if (!try_module_get(policy->governor->owner))
			return -EINVAL;

	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
		 policy->cpu, event);

	/*
	 * Reject redundant START, and STOP/LIMITS on a governor that is not
	 * running; flip governor_enabled under the lock so concurrent
	 * callers observe a consistent state.
	 */
	mutex_lock(&cpufreq_governor_lock);
	if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
	    || (!policy->governor_enabled
	    && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
		mutex_unlock(&cpufreq_governor_lock);
		return -EBUSY;
	}

	if (event == CPUFREQ_GOV_STOP)
		policy->governor_enabled = false;
	else if (event == CPUFREQ_GOV_START)
		policy->governor_enabled = true;

	mutex_unlock(&cpufreq_governor_lock);

	ret = policy->governor->governor(policy, event);

	if (!ret) {
		if (event == CPUFREQ_GOV_POLICY_INIT)
			policy->governor->initialized++;
		else if (event == CPUFREQ_GOV_POLICY_EXIT)
			policy->governor->initialized--;
	} else {
		/* Restore original values */
		mutex_lock(&cpufreq_governor_lock);
		if (event == CPUFREQ_GOV_STOP)
			policy->governor_enabled = true;
		else if (event == CPUFREQ_GOV_START)
			policy->governor_enabled = false;
		mutex_unlock(&cpufreq_governor_lock);
	}

	/* Drop the module reference on failed INIT or successful EXIT. */
	if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
			((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
		module_put(policy->governor->owner);

	return ret;
}
2088
Linus Torvalds1da177e2005-04-16 15:20:36 -07002089int cpufreq_register_governor(struct cpufreq_governor *governor)
2090{
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -07002091 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002092
2093 if (!governor)
2094 return -EINVAL;
2095
Konrad Rzeszutek Wilka7b422c2012-03-13 19:18:39 -04002096 if (cpufreq_disabled())
2097 return -ENODEV;
2098
akpm@osdl.org3fc54d32006-01-13 15:54:22 -08002099 mutex_lock(&cpufreq_governor_mutex);
Dave Jones32ee8c32006-02-28 00:43:23 -05002100
Viresh Kumarb3940582013-02-01 05:42:58 +00002101 governor->initialized = 0;
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -07002102 err = -EBUSY;
Viresh Kumar42f91fa2015-01-02 12:34:26 +05302103 if (!find_governor(governor->name)) {
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -07002104 err = 0;
2105 list_add(&governor->governor_list, &cpufreq_governor_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002106 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002107
Dave Jones32ee8c32006-02-28 00:43:23 -05002108 mutex_unlock(&cpufreq_governor_mutex);
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -07002109 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002110}
2111EXPORT_SYMBOL_GPL(cpufreq_register_governor);
2112
Linus Torvalds1da177e2005-04-16 15:20:36 -07002113void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2114{
Prarit Bhargava90e41ba2009-11-12 09:18:46 -05002115 int cpu;
Prarit Bhargava90e41ba2009-11-12 09:18:46 -05002116
Linus Torvalds1da177e2005-04-16 15:20:36 -07002117 if (!governor)
2118 return;
2119
Konrad Rzeszutek Wilka7b422c2012-03-13 19:18:39 -04002120 if (cpufreq_disabled())
2121 return;
2122
Prarit Bhargava90e41ba2009-11-12 09:18:46 -05002123 for_each_present_cpu(cpu) {
2124 if (cpu_online(cpu))
2125 continue;
2126 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
2127 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
2128 }
Prarit Bhargava90e41ba2009-11-12 09:18:46 -05002129
akpm@osdl.org3fc54d32006-01-13 15:54:22 -08002130 mutex_lock(&cpufreq_governor_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002131 list_del(&governor->governor_list);
akpm@osdl.org3fc54d32006-01-13 15:54:22 -08002132 mutex_unlock(&cpufreq_governor_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002133 return;
2134}
2135EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2136
2137
Linus Torvalds1da177e2005-04-16 15:20:36 -07002138/*********************************************************************
2139 * POLICY INTERFACE *
2140 *********************************************************************/
2141
2142/**
2143 * cpufreq_get_policy - get the current cpufreq_policy
Dave Jones29464f22009-01-18 01:37:11 -05002144 * @policy: struct cpufreq_policy into which the current cpufreq_policy
2145 * is written
Linus Torvalds1da177e2005-04-16 15:20:36 -07002146 *
2147 * Reads the current cpufreq policy.
2148 */
2149int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2150{
2151 struct cpufreq_policy *cpu_policy;
2152 if (!policy)
2153 return -EINVAL;
2154
2155 cpu_policy = cpufreq_cpu_get(cpu);
2156 if (!cpu_policy)
2157 return -EINVAL;
2158
Viresh Kumard5b73cd2013-08-06 22:53:06 +05302159 memcpy(policy, cpu_policy, sizeof(*policy));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002160
2161 cpufreq_cpu_put(cpu_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002162 return 0;
2163}
2164EXPORT_SYMBOL(cpufreq_get_policy);
2165
Arjan van de Ven153d7f32006-07-26 15:40:07 +02002166/*
Viresh Kumar037ce832013-10-02 14:13:16 +05302167 * policy : current policy.
2168 * new_policy: policy to be set.
Arjan van de Ven153d7f32006-07-26 15:40:07 +02002169 */
/*
 * cpufreq_set_policy - apply @new_policy to @policy.
 *
 * Runs the verify/ADJUST/INCOMPATIBLE/verify/NOTIFY notifier sequence (the
 * order is part of the notifier contract), installs the new limits, and
 * either forwards to ->setpolicy() drivers or performs a governor switch.
 *
 * NOTE(review): caller appears to hold policy->rwsem for writing (see
 * cpufreq_update_policy()); the governor-switch path below temporarily
 * drops it around POLICY_EXIT.
 */
static int cpufreq_set_policy(struct cpufreq_policy *policy,
				struct cpufreq_policy *new_policy)
{
	struct cpufreq_governor *old_gov;
	int ret;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
		 new_policy->cpu, new_policy->min, new_policy->max);

	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));

	/* Requested range must overlap the currently active range. */
	if (new_policy->min > policy->max || new_policy->max < policy->min)
		return -EINVAL;

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		return ret;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, new_policy);

	/* adjust if necessary - hardware incompatibility*/
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, new_policy);

	/*
	 * verify the cpu speed can be set within this limit, which might be
	 * different to the first one
	 */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		return ret;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, new_policy);

	policy->min = new_policy->min;
	policy->max = new_policy->max;

	pr_debug("new min and max freqs are %u - %u kHz\n",
		 policy->min, policy->max);

	/* ->setpolicy drivers manage frequency themselves; no governor. */
	if (cpufreq_driver->setpolicy) {
		policy->policy = new_policy->policy;
		pr_debug("setting range\n");
		return cpufreq_driver->setpolicy(new_policy);
	}

	/* Same governor: just propagate the new limits. */
	if (new_policy->governor == policy->governor)
		goto out;

	pr_debug("governor switch\n");

	/* save old, working values */
	old_gov = policy->governor;
	/* end old governor */
	if (old_gov) {
		__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		/*
		 * rwsem is dropped around POLICY_EXIT; the governor may
		 * remove sysfs files here, which must not be done while
		 * holding the policy lock.
		 */
		up_write(&policy->rwsem);
		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
		down_write(&policy->rwsem);
	}

	/* start new governor */
	policy->governor = new_policy->governor;
	if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
		if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
			goto out;

		/* START failed: tear the new governor down again. */
		up_write(&policy->rwsem);
		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
		down_write(&policy->rwsem);
	}

	/* new governor failed, so re-start old one */
	pr_debug("starting governor %s failed\n", policy->governor->name);
	if (old_gov) {
		policy->governor = old_gov;
		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
		__cpufreq_governor(policy, CPUFREQ_GOV_START);
	}

	return -EINVAL;

 out:
	pr_debug("governor: change or update limits\n");
	return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
}
2261
2262/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002263 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
2264 * @cpu: CPU which shall be re-evaluated
2265 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002266 * Useful for policy notifiers which have different necessities
Linus Torvalds1da177e2005-04-16 15:20:36 -07002267 * at different times.
2268 */
int cpufreq_update_policy(unsigned int cpu)
{
	/* Takes a reference; dropped at the end of the function. */
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct cpufreq_policy new_policy;
	int ret;

	if (!policy)
		return -ENODEV;

	down_write(&policy->rwsem);

	pr_debug("updating policy for CPU %u\n", cpu);
	/* Re-evaluate starting from the user-requested limits/governor. */
	memcpy(&new_policy, policy, sizeof(*policy));
	new_policy.min = policy->user_policy.min;
	new_policy.max = policy->user_policy.max;
	new_policy.policy = policy->user_policy.policy;
	new_policy.governor = policy->user_policy.governor;

	/*
	 * BIOS might change freq behind our back
	 * -> ask driver for current freq and notify governors about a change
	 */
	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
		new_policy.cur = cpufreq_driver->get(cpu);
		/* A zero frequency from the driver is treated as an error. */
		if (WARN_ON(!new_policy.cur)) {
			ret = -EIO;
			goto unlock;
		}

		if (!policy->cur) {
			pr_debug("Driver did not initialize current freq\n");
			policy->cur = new_policy.cur;
		} else {
			/* Resync bookkeeping if hardware drifted. */
			if (policy->cur != new_policy.cur && has_target())
				cpufreq_out_of_sync(policy, new_policy.cur);
		}
	}

	ret = cpufreq_set_policy(policy, &new_policy);

unlock:
	up_write(&policy->rwsem);

	cpufreq_cpu_put(policy);
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
2316
Paul Gortmaker27609842013-06-19 13:54:04 -04002317static int cpufreq_cpu_callback(struct notifier_block *nfb,
Ashok Rajc32b6b82005-10-30 14:59:54 -08002318 unsigned long action, void *hcpu)
2319{
2320 unsigned int cpu = (unsigned long)hcpu;
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002321 struct device *dev;
Ashok Rajc32b6b82005-10-30 14:59:54 -08002322
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002323 dev = get_cpu_device(cpu);
2324 if (dev) {
Srivatsa S. Bhat5302c3f2013-07-30 04:25:25 +05302325 switch (action & ~CPU_TASKS_FROZEN) {
Ashok Rajc32b6b82005-10-30 14:59:54 -08002326 case CPU_ONLINE:
Viresh Kumar23faf0b2015-02-19 17:02:04 +05302327 cpufreq_add_dev(dev, NULL);
Ashok Rajc32b6b82005-10-30 14:59:54 -08002328 break;
Srivatsa S. Bhat5302c3f2013-07-30 04:25:25 +05302329
Ashok Rajc32b6b82005-10-30 14:59:54 -08002330 case CPU_DOWN_PREPARE:
Viresh Kumar96bbbe42014-03-10 14:53:35 +05302331 __cpufreq_remove_dev_prepare(dev, NULL);
Srivatsa S. Bhat1aee40a2013-09-07 01:23:27 +05302332 break;
2333
2334 case CPU_POST_DEAD:
Viresh Kumar96bbbe42014-03-10 14:53:35 +05302335 __cpufreq_remove_dev_finish(dev, NULL);
Ashok Rajc32b6b82005-10-30 14:59:54 -08002336 break;
Srivatsa S. Bhat5302c3f2013-07-30 04:25:25 +05302337
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08002338 case CPU_DOWN_FAILED:
Viresh Kumar23faf0b2015-02-19 17:02:04 +05302339 cpufreq_add_dev(dev, NULL);
Ashok Rajc32b6b82005-10-30 14:59:54 -08002340 break;
2341 }
2342 }
2343 return NOTIFY_OK;
2344}
2345
/* Hotplug notifier wiring CPU online/offline events to cpufreq_cpu_callback. */
static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};
Linus Torvalds1da177e2005-04-16 15:20:36 -07002349
2350/*********************************************************************
Lukasz Majewski6f19efc2013-12-20 15:24:49 +01002351 * BOOST *
2352 *********************************************************************/
2353static int cpufreq_boost_set_sw(int state)
2354{
2355 struct cpufreq_frequency_table *freq_table;
2356 struct cpufreq_policy *policy;
2357 int ret = -EINVAL;
2358
Viresh Kumarb4f06762015-01-27 14:06:08 +05302359 for_each_policy(policy) {
Lukasz Majewski6f19efc2013-12-20 15:24:49 +01002360 freq_table = cpufreq_frequency_get_table(policy->cpu);
2361 if (freq_table) {
2362 ret = cpufreq_frequency_table_cpuinfo(policy,
2363 freq_table);
2364 if (ret) {
2365 pr_err("%s: Policy frequency update failed\n",
2366 __func__);
2367 break;
2368 }
2369 policy->user_policy.max = policy->max;
2370 __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2371 }
2372 }
2373
2374 return ret;
2375}
2376
2377int cpufreq_boost_trigger_state(int state)
2378{
2379 unsigned long flags;
2380 int ret = 0;
2381
2382 if (cpufreq_driver->boost_enabled == state)
2383 return 0;
2384
2385 write_lock_irqsave(&cpufreq_driver_lock, flags);
2386 cpufreq_driver->boost_enabled = state;
2387 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2388
2389 ret = cpufreq_driver->set_boost(state);
2390 if (ret) {
2391 write_lock_irqsave(&cpufreq_driver_lock, flags);
2392 cpufreq_driver->boost_enabled = !state;
2393 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2394
Joe Perchese837f9b2014-03-11 10:03:00 -07002395 pr_err("%s: Cannot %s BOOST\n",
2396 __func__, state ? "enable" : "disable");
Lukasz Majewski6f19efc2013-12-20 15:24:49 +01002397 }
2398
2399 return ret;
2400}
2401
2402int cpufreq_boost_supported(void)
2403{
2404 if (likely(cpufreq_driver))
2405 return cpufreq_driver->boost_supported;
2406
2407 return 0;
2408}
2409EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
2410
/*
 * Current boost state of the registered driver.  Unlike
 * cpufreq_boost_supported(), this does not NULL-check cpufreq_driver;
 * callers must only use it while a driver is registered.
 */
int cpufreq_boost_enabled(void)
{
	return cpufreq_driver->boost_enabled;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2416
2417/*********************************************************************
Linus Torvalds1da177e2005-04-16 15:20:36 -07002418 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2419 *********************************************************************/
2420
2421/**
2422 * cpufreq_register_driver - register a CPU Frequency driver
2423 * @driver_data: A struct cpufreq_driver containing the values#
2424 * submitted by the CPU Frequency driver.
2425 *
Viresh Kumarbb176f72013-06-19 14:19:33 +05302426 * Registers a CPU Frequency driver to this core code. This code
Linus Torvalds1da177e2005-04-16 15:20:36 -07002427 * returns zero on success, -EBUSY when another driver got here first
Dave Jones32ee8c32006-02-28 00:43:23 -05002428 * (and isn't unregistered in the meantime).
Linus Torvalds1da177e2005-04-16 15:20:36 -07002429 *
2430 */
Linus Torvalds221dee22007-02-26 14:55:48 -08002431int cpufreq_register_driver(struct cpufreq_driver *driver_data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002432{
2433 unsigned long flags;
2434 int ret;
2435
Konrad Rzeszutek Wilka7b422c2012-03-13 19:18:39 -04002436 if (cpufreq_disabled())
2437 return -ENODEV;
2438
Linus Torvalds1da177e2005-04-16 15:20:36 -07002439 if (!driver_data || !driver_data->verify || !driver_data->init ||
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05302440 !(driver_data->setpolicy || driver_data->target_index ||
Rafael J. Wysocki98322352014-03-19 12:48:30 +01002441 driver_data->target) ||
2442 (driver_data->setpolicy && (driver_data->target_index ||
Viresh Kumar1c03a2d2014-06-02 22:49:28 +05302443 driver_data->target)) ||
2444 (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002445 return -EINVAL;
2446
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02002447 pr_debug("trying to register driver %s\n", driver_data->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002448
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00002449 write_lock_irqsave(&cpufreq_driver_lock, flags);
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02002450 if (cpufreq_driver) {
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00002451 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
Yinghai Lu4dea58062013-09-18 21:05:20 -07002452 return -EEXIST;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002453 }
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02002454 cpufreq_driver = driver_data;
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00002455 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002456
Viresh Kumarbc68b7d2015-01-02 12:34:30 +05302457 if (driver_data->setpolicy)
2458 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2459
Lukasz Majewski6f19efc2013-12-20 15:24:49 +01002460 if (cpufreq_boost_supported()) {
2461 /*
2462 * Check if driver provides function to enable boost -
2463 * if not, use cpufreq_boost_set_sw as default
2464 */
2465 if (!cpufreq_driver->set_boost)
2466 cpufreq_driver->set_boost = cpufreq_boost_set_sw;
2467
2468 ret = cpufreq_sysfs_create_file(&boost.attr);
2469 if (ret) {
2470 pr_err("%s: cannot register global BOOST sysfs file\n",
Joe Perchese837f9b2014-03-11 10:03:00 -07002471 __func__);
Lukasz Majewski6f19efc2013-12-20 15:24:49 +01002472 goto err_null_driver;
2473 }
2474 }
2475
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002476 ret = subsys_interface_register(&cpufreq_interface);
Jiri Slaby8f5bc2a2011-03-01 17:41:10 +01002477 if (ret)
Lukasz Majewski6f19efc2013-12-20 15:24:49 +01002478 goto err_boost_unreg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002479
Viresh Kumarce1bcfe2015-01-02 12:34:35 +05302480 if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
2481 list_empty(&cpufreq_policy_list)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002482 /* if all ->init() calls failed, unregister */
Viresh Kumarce1bcfe2015-01-02 12:34:35 +05302483 pr_debug("%s: No CPU initialized for driver %s\n", __func__,
2484 driver_data->name);
2485 goto err_if_unreg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002486 }
2487
Jiri Slaby8f5bc2a2011-03-01 17:41:10 +01002488 register_hotcpu_notifier(&cpufreq_cpu_notifier);
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02002489 pr_debug("driver %s up and running\n", driver_data->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002490
Jiri Slaby8f5bc2a2011-03-01 17:41:10 +01002491 return 0;
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002492err_if_unreg:
2493 subsys_interface_unregister(&cpufreq_interface);
Lukasz Majewski6f19efc2013-12-20 15:24:49 +01002494err_boost_unreg:
2495 if (cpufreq_boost_supported())
2496 cpufreq_sysfs_remove_file(&boost.attr);
Jiri Slaby8f5bc2a2011-03-01 17:41:10 +01002497err_null_driver:
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00002498 write_lock_irqsave(&cpufreq_driver_lock, flags);
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02002499 cpufreq_driver = NULL;
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00002500 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
Dave Jones4d34a672008-02-07 16:33:49 -05002501 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002502}
2503EXPORT_SYMBOL_GPL(cpufreq_register_driver);
2504
Linus Torvalds1da177e2005-04-16 15:20:36 -07002505/**
2506 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2507 *
Viresh Kumarbb176f72013-06-19 14:19:33 +05302508 * Unregister the current CPUFreq driver. Only call this if you have
Linus Torvalds1da177e2005-04-16 15:20:36 -07002509 * the right to do so, i.e. if you have succeeded in initialising before!
2510 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2511 * currently not initialised.
2512 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	/* Only the currently registered driver may unregister itself. */
	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	subsys_interface_unregister(&cpufreq_interface);
	if (cpufreq_boost_supported())
		cpufreq_sysfs_remove_file(&boost.attr);

	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	/*
	 * Lock order: cpufreq_rwsem (waits out in-flight policy users)
	 * outside cpufreq_driver_lock.  Do not reorder.
	 */
	down_write(&cpufreq_rwsem);
	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpufreq_driver = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	up_write(&cpufreq_rwsem);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08002539
/*
 * Stop cpufreq at shutdown to make sure it isn't holding any locks
 * or mutexes when secondary CPUs are halted.  Registered from
 * cpufreq_core_init(); only the ->shutdown hook is needed.
 */
static struct syscore_ops cpufreq_syscore_ops = {
	.shutdown = cpufreq_suspend,
};
2547
/* Core initcall: set up the global cpufreq kobject and shutdown hook. */
static int __init cpufreq_core_init(void)
{
	if (cpufreq_disabled())
		return -ENODEV;

	/* Global sysfs anchor shared by drivers and governors. */
	cpufreq_global_kobject = kobject_create();
	BUG_ON(!cpufreq_global_kobject);

	/* Quiesce cpufreq before secondary CPUs are halted at shutdown. */
	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
core_initcall(cpufreq_core_init);