/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *            (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *        Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *        Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <trace/events/power.h>

/* Macros to iterate over lists */
/* Iterate over online CPUs policies */
static LIST_HEAD(cpufreq_policy_list);
#define for_each_policy(__policy)                               \
        list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)

/* Iterate over governors */
static LIST_HEAD(cpufreq_governor_list);
#define for_each_governor(__governor)                           \
        list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)

/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
static DEFINE_RWLOCK(cpufreq_driver_lock);
DEFINE_MUTEX(cpufreq_governor_lock);

/* This one keeps track of the previously set governor of a removed CPU */
static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);

/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;

static inline bool has_target(void)
{
        return cpufreq_driver->target_index || cpufreq_driver->target;
}

/*
 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
 * sections
 */
static DECLARE_RWSEM(cpufreq_rwsem);

/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
                unsigned int event);
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static void handle_update(struct work_struct *work);

/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
        srcu_init_notifier_head(&cpufreq_transition_notifier_list);
        init_cpufreq_transition_notifier_list_called = true;
        return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);

static int off __read_mostly;
static int cpufreq_disabled(void)
{
        return off;
}
void disable_cpufreq(void)
{
        off = 1;
}
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
        return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
        if (have_governor_per_policy())
                return &policy->kobj;
        else
                return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);

static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
        u64 idle_time;
        u64 cur_wall_time;
        u64 busy_time;

        cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

        busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

        idle_time = cur_wall_time - busy_time;
        if (wall)
                *wall = cputime_to_usecs(cur_wall_time);

        return cputime_to_usecs(idle_time);
}

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
        u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

        if (idle_time == -1ULL)
                return get_cpu_idle_time_jiffy(cpu, wall);
        else if (!io_busy)
                idle_time += get_cpu_iowait_time_us(cpu, wall);

        return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
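
/*
 * Illustrative usage sketch (not part of this file): sampling governors call
 * get_cpu_idle_time() at the start and end of a sampling period and work with
 * the deltas, roughly like this (variable names are hypothetical):
 *
 *        idle_prev = get_cpu_idle_time(cpu, &wall_prev, io_busy);
 *        ... one sampling period later ...
 *        idle_now = get_cpu_idle_time(cpu, &wall_now, io_busy);
 *        busy = (wall_now - wall_prev) - (idle_now - idle_prev);
 *        load = 100 * busy / (wall_now - wall_prev);
 *
 * All values are in microseconds; the ondemand and conservative governors are
 * the real consumers of this helper.
 */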
/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It will:
 * - validate & show the frequency table passed in
 * - set the policy's transition latency
 * - fill policy->cpus with all possible CPUs
 */
int cpufreq_generic_init(struct cpufreq_policy *policy,
                struct cpufreq_frequency_table *table,
                unsigned int transition_latency)
{
        int ret;

        ret = cpufreq_table_validate_and_show(policy, table);
        if (ret) {
                pr_err("%s: invalid frequency table: %d\n", __func__, ret);
                return ret;
        }

        policy->cpuinfo.transition_latency = transition_latency;

        /*
         * The driver only supports the SMP configuration where all processors
         * share the clock and voltage.
         */
        cpumask_setall(policy->cpus);

        return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
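
/*
 * Illustrative usage sketch (not part of this file): a simple clock-based
 * driver would typically call cpufreq_generic_init() from its ->init()
 * callback, roughly:
 *
 *        static int foo_cpufreq_init(struct cpufreq_policy *policy)
 *        {
 *                policy->clk = foo_clk;
 *                return cpufreq_generic_init(policy, foo_freq_table, 100000);
 *        }
 *
 * Here foo_clk, foo_freq_table and the 100000 ns transition latency are
 * hypothetical driver-specific values, not names defined in this file.
 */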
unsigned int cpufreq_generic_get(unsigned int cpu)
{
        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

        if (!policy || IS_ERR(policy->clk)) {
                pr_err("%s: No %s associated to cpu: %d\n",
                       __func__, policy ? "clk" : "policy", cpu);
                return 0;
        }

        return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);
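
/*
 * Note (illustrative sketch, not part of this file): cpufreq_generic_get()
 * only works when the driver stored a valid clock in policy->clk during
 * ->init(), as in the sketch above; such a driver can then plug this helper
 * straight into its driver structure, e.g.:
 *
 *        static struct cpufreq_driver foo_cpufreq_driver = {
 *                .name = "foo-cpufreq",
 *                .init = foo_cpufreq_init,
 *                .get  = cpufreq_generic_get,
 *        };
 *
 * (The foo_* names are hypothetical.)
 */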
/* Only for cpufreq core internal use */
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
        return per_cpu(cpufreq_cpu_data, cpu);
}

/**
 * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
 *
 * @cpu: cpu to find policy for.
 *
 * This returns the policy for 'cpu', or NULL if it doesn't exist.
 * It also increments the kobject reference count to mark the policy busy,
 * so a corresponding call to cpufreq_cpu_put() is required to decrement it
 * again. If that cpufreq_cpu_put() call isn't made, the policy will never be
 * freed, as freeing depends on the kobject count dropping to zero.
 *
 * It also takes a read-lock of 'cpufreq_rwsem' and doesn't put it back if a
 * valid policy is found. This is done to make sure the driver doesn't get
 * unregistered while the policy is being used.
 *
 * Return: A valid policy on success, otherwise NULL on failure.
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
        struct cpufreq_policy *policy = NULL;
        unsigned long flags;

        if (WARN_ON(cpu >= nr_cpu_ids))
                return NULL;

        if (!down_read_trylock(&cpufreq_rwsem))
                return NULL;

        /* get the cpufreq driver */
        read_lock_irqsave(&cpufreq_driver_lock, flags);

        if (cpufreq_driver) {
                /* get the CPU */
                policy = per_cpu(cpufreq_cpu_data, cpu);
                if (policy)
                        kobject_get(&policy->kobj);
        }

        read_unlock_irqrestore(&cpufreq_driver_lock, flags);

        if (!policy)
                up_read(&cpufreq_rwsem);

        return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

/**
 * cpufreq_cpu_put: Decrements the usage count of a policy
 *
 * @policy: policy earlier returned by cpufreq_cpu_get().
 *
 * This decrements the kobject reference count incremented earlier by calling
 * cpufreq_cpu_get().
 *
 * It also drops the read-lock of 'cpufreq_rwsem' taken at cpufreq_cpu_get().
 */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
        kobject_put(&policy->kobj);
        up_read(&cpufreq_rwsem);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
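
/*
 * Illustrative usage sketch (not part of this file): code outside the core is
 * expected to pair the two helpers above:
 *
 *        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *        if (policy) {
 *                ... read policy->cur, policy->min, policy->max, ...
 *                cpufreq_cpu_put(policy);
 *        }
 *
 * Skipping cpufreq_cpu_put() leaks both the kobject reference and the read
 * side of cpufreq_rwsem, which pins the cpufreq driver module.
 */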

/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
#ifndef CONFIG_SMP
        static unsigned long l_p_j_ref;
        static unsigned int l_p_j_ref_freq;

        if (ci->flags & CPUFREQ_CONST_LOOPS)
                return;

        if (!l_p_j_ref_freq) {
                l_p_j_ref = loops_per_jiffy;
                l_p_j_ref_freq = ci->old;
                pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
                         l_p_j_ref, l_p_j_ref_freq);
        }
        if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
                loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
                                                ci->new);
                pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
                         loops_per_jiffy, ci->new);
        }
#endif
}

static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, unsigned int state)
{
        BUG_ON(irqs_disabled());

        if (cpufreq_disabled())
                return;

        freqs->flags = cpufreq_driver->flags;
        pr_debug("notification %u of frequency transition to %u kHz\n",
                 state, freqs->new);

        switch (state) {

        case CPUFREQ_PRECHANGE:
                /* detect if the driver reported a value as "old frequency"
                 * which is not equal to what the cpufreq core thinks is
                 * "old frequency".
                 */
                if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
                        if ((policy) && (policy->cpu == freqs->cpu) &&
                            (policy->cur) && (policy->cur != freqs->old)) {
                                pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
                                         freqs->old, policy->cur);
                                freqs->old = policy->cur;
                        }
                }
                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                CPUFREQ_PRECHANGE, freqs);
                adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
                break;

        case CPUFREQ_POSTCHANGE:
                adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
                pr_debug("FREQ: %lu - CPU: %lu\n",
                         (unsigned long)freqs->new, (unsigned long)freqs->cpu);
                trace_cpu_frequency(freqs->new, freqs->cpu);
                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                CPUFREQ_POSTCHANGE, freqs);
                if (likely(policy) && likely(policy->cpu == freqs->cpu))
                        policy->cur = freqs->new;
                break;
        }
}

/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, unsigned int state)
{
        for_each_cpu(freqs->cpu, policy->cpus)
                __cpufreq_notify_transition(policy, freqs, state);
}

/* Do post notifications when there are chances that transition has failed */
static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, int transition_failed)
{
        cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
        if (!transition_failed)
                return;

        swap(freqs->old, freqs->new);
        cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
        cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
}

void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs)
{

        /*
         * Catch double invocations of _begin() which lead to self-deadlock.
         * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
         * doesn't invoke _begin() on their behalf, and hence the chances of
         * double invocations are very low. Moreover, there are scenarios
         * where these checks can emit false-positive warnings in these
         * drivers; so we avoid that by skipping them altogether.
         */
        WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
                                && current == policy->transition_task);

wait:
        wait_event(policy->transition_wait, !policy->transition_ongoing);

        spin_lock(&policy->transition_lock);

        if (unlikely(policy->transition_ongoing)) {
                spin_unlock(&policy->transition_lock);
                goto wait;
        }

        policy->transition_ongoing = true;
        policy->transition_task = current;

        spin_unlock(&policy->transition_lock);

        cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);

void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, int transition_failed)
{
        if (unlikely(WARN_ON(!policy->transition_ongoing)))
                return;

        cpufreq_notify_post_transition(policy, freqs, transition_failed);

        policy->transition_ongoing = false;
        policy->transition_task = NULL;

        wake_up(&policy->transition_wait);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
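
/*
 * Illustrative usage sketch (not part of this file): the code performing a
 * synchronous frequency switch - the core for ->target_index() drivers, or
 * the driver itself for ->target() drivers - brackets the hardware
 * programming with the begin/end pair, roughly:
 *
 *        struct cpufreq_freqs freqs = { .old = policy->cur, .new = target_khz };
 *        int ret;
 *
 *        cpufreq_freq_transition_begin(policy, &freqs);
 *        ret = foo_write_frequency(target_khz);   (hypothetical helper)
 *        cpufreq_freq_transition_end(policy, &freqs, ret);
 *
 * A non-zero 'ret' passed to _end() makes the core swap freqs->old/new and
 * resend the PRE/POSTCHANGE notifications, so listeners see the roll back.
 */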

/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
                                 struct attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
                                  const char *buf, size_t count)
{
        int ret, enable;

        ret = sscanf(buf, "%d", &enable);
        if (ret != 1 || enable < 0 || enable > 1)
                return -EINVAL;

        if (cpufreq_boost_trigger_state(enable)) {
                pr_err("%s: Cannot %s BOOST!\n",
                       __func__, enable ? "enable" : "disable");
                return -EINVAL;
        }

        pr_debug("%s: cpufreq BOOST %s\n",
                 __func__, enable ? "enabled" : "disabled");

        return count;
}
define_one_global_rw(boost);
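
/*
 * Note (illustrative, not part of this file): when the cpufreq driver
 * advertises boost support, this attribute appears on the global cpufreq
 * kobject registered below and can be toggled from user space, e.g.:
 *
 *        echo 1 > /sys/devices/system/cpu/cpufreq/boost
 */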

static struct cpufreq_governor *find_governor(const char *str_governor)
{
        struct cpufreq_governor *t;

        for_each_governor(t)
                if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
                        return t;

        return NULL;
}

/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
                                struct cpufreq_governor **governor)
{
        int err = -EINVAL;

        if (!cpufreq_driver)
                goto out;

        if (cpufreq_driver->setpolicy) {
                if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
                        *policy = CPUFREQ_POLICY_PERFORMANCE;
                        err = 0;
                } else if (!strncasecmp(str_governor, "powersave",
                                                CPUFREQ_NAME_LEN)) {
                        *policy = CPUFREQ_POLICY_POWERSAVE;
                        err = 0;
                }
        } else {
                struct cpufreq_governor *t;

                mutex_lock(&cpufreq_governor_mutex);

                t = find_governor(str_governor);

                if (t == NULL) {
                        int ret;

                        mutex_unlock(&cpufreq_governor_mutex);
                        ret = request_module("cpufreq_%s", str_governor);
                        mutex_lock(&cpufreq_governor_mutex);

                        if (ret == 0)
                                t = find_governor(str_governor);
                }

                if (t != NULL) {
                        *governor = t;
                        err = 0;
                }

                mutex_unlock(&cpufreq_governor_mutex);
        }
out:
        return err;
}

/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)                     \
static ssize_t show_##file_name                         \
(struct cpufreq_policy *policy, char *buf)              \
{                                                       \
        return sprintf(buf, "%u\n", policy->object);    \
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);

static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
        ssize_t ret;

        if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
                ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
        else
                ret = sprintf(buf, "%u\n", policy->cur);
        return ret;
}

static int cpufreq_set_policy(struct cpufreq_policy *policy,
                                struct cpufreq_policy *new_policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)                                    \
static ssize_t store_##file_name                                        \
(struct cpufreq_policy *policy, const char *buf, size_t count)          \
{                                                                       \
        int ret, temp;                                                  \
        struct cpufreq_policy new_policy;                               \
                                                                        \
        ret = cpufreq_get_policy(&new_policy, policy->cpu);             \
        if (ret)                                                        \
                return -EINVAL;                                         \
                                                                        \
        ret = sscanf(buf, "%u", &new_policy.object);                    \
        if (ret != 1)                                                   \
                return -EINVAL;                                         \
                                                                        \
        temp = new_policy.object;                                       \
        ret = cpufreq_set_policy(policy, &new_policy);                  \
        if (!ret)                                                       \
                policy->user_policy.object = temp;                      \
                                                                        \
        return ret ? ret : count;                                       \
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);

/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
                                        char *buf)
{
        unsigned int cur_freq = __cpufreq_get(policy);
        if (!cur_freq)
                return sprintf(buf, "<unknown>");
        return sprintf(buf, "%u\n", cur_freq);
}

/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
        if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
                return sprintf(buf, "powersave\n");
        else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
                return sprintf(buf, "performance\n");
        else if (policy->governor)
                return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
                                policy->governor->name);
        return -EINVAL;
}

/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
                                        const char *buf, size_t count)
{
        int ret;
        char str_governor[16];
        struct cpufreq_policy new_policy;

        ret = cpufreq_get_policy(&new_policy, policy->cpu);
        if (ret)
                return ret;

        ret = sscanf(buf, "%15s", str_governor);
        if (ret != 1)
                return -EINVAL;

        if (cpufreq_parse_governor(str_governor, &new_policy.policy,
                                                &new_policy.governor))
                return -EINVAL;

        ret = cpufreq_set_policy(policy, &new_policy);

        policy->user_policy.policy = policy->policy;
        policy->user_policy.governor = policy->governor;

        if (ret)
                return ret;
        else
                return count;
}

/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
        return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
                                                char *buf)
{
        ssize_t i = 0;
        struct cpufreq_governor *t;

        if (!has_target()) {
                i += sprintf(buf, "performance powersave");
                goto out;
        }

        for_each_governor(t) {
                if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
                    - (CPUFREQ_NAME_LEN + 2)))
                        goto out;
                i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
        }
out:
        i += sprintf(&buf[i], "\n");
        return i;
}

ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
        ssize_t i = 0;
        unsigned int cpu;

        for_each_cpu(cpu, mask) {
                if (i)
                        i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
                i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
                if (i >= (PAGE_SIZE - 5))
                        break;
        }
        i += sprintf(&buf[i], "\n");
        return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);

/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
        return cpufreq_show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
        return cpufreq_show_cpus(policy->cpus, buf);
}

static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
                                        const char *buf, size_t count)
{
        unsigned int freq = 0;
        unsigned int ret;

        if (!policy->governor || !policy->governor->store_setspeed)
                return -EINVAL;

        ret = sscanf(buf, "%u", &freq);
        if (ret != 1)
                return -EINVAL;

        policy->governor->store_setspeed(policy, freq);

        return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
        if (!policy->governor || !policy->governor->show_setspeed)
                return sprintf(buf, "<unsupported>\n");

        return policy->governor->show_setspeed(policy, buf);
}

/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
        unsigned int limit;
        int ret;
        if (cpufreq_driver->bios_limit) {
                ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
                if (!ret)
                        return sprintf(buf, "%u\n", limit);
        }
        return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}

cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
        &cpuinfo_min_freq.attr,
        &cpuinfo_max_freq.attr,
        &cpuinfo_transition_latency.attr,
        &scaling_min_freq.attr,
        &scaling_max_freq.attr,
        &affected_cpus.attr,
        &related_cpus.attr,
        &scaling_governor.attr,
        &scaling_driver.attr,
        &scaling_available_governors.attr,
        &scaling_setspeed.attr,
        NULL
};

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        struct freq_attr *fattr = to_attr(attr);
        ssize_t ret;

        if (!down_read_trylock(&cpufreq_rwsem))
                return -EINVAL;

        down_read(&policy->rwsem);

        if (fattr->show)
                ret = fattr->show(policy, buf);
        else
                ret = -EIO;

        up_read(&policy->rwsem);
        up_read(&cpufreq_rwsem);

        return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
                     const char *buf, size_t count)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        struct freq_attr *fattr = to_attr(attr);
        ssize_t ret = -EINVAL;

        get_online_cpus();

        if (!cpu_online(policy->cpu))
                goto unlock;

        if (!down_read_trylock(&cpufreq_rwsem))
                goto unlock;

        down_write(&policy->rwsem);

        if (fattr->store)
                ret = fattr->store(policy, buf, count);
        else
                ret = -EIO;

        up_write(&policy->rwsem);

        up_read(&cpufreq_rwsem);
unlock:
        put_online_cpus();

        return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        pr_debug("last reference is dropped\n");
        complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
        .show   = show,
        .store  = store,
};

static struct kobj_type ktype_cpufreq = {
        .sysfs_ops      = &sysfs_ops,
        .default_attrs  = default_attrs,
        .release        = cpufreq_sysfs_release,
};

struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

static int cpufreq_global_kobject_usage;

int cpufreq_get_global_kobject(void)
{
        if (!cpufreq_global_kobject_usage++)
                return kobject_add(cpufreq_global_kobject,
                                &cpu_subsys.dev_root->kobj, "%s", "cpufreq");

        return 0;
}
EXPORT_SYMBOL(cpufreq_get_global_kobject);

void cpufreq_put_global_kobject(void)
{
        if (!--cpufreq_global_kobject_usage)
                kobject_del(cpufreq_global_kobject);
}
EXPORT_SYMBOL(cpufreq_put_global_kobject);

int cpufreq_sysfs_create_file(const struct attribute *attr)
{
        int ret = cpufreq_get_global_kobject();

        if (!ret) {
                ret = sysfs_create_file(cpufreq_global_kobject, attr);
                if (ret)
                        cpufreq_put_global_kobject();
        }

        return ret;
}
EXPORT_SYMBOL(cpufreq_sysfs_create_file);

void cpufreq_sysfs_remove_file(const struct attribute *attr)
{
        sysfs_remove_file(cpufreq_global_kobject, attr);
        cpufreq_put_global_kobject();
}
EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
/* symlink affected CPUs */
static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
{
        unsigned int j;
        int ret = 0;

        for_each_cpu(j, policy->cpus) {
                struct device *cpu_dev;

                if (j == policy->cpu)
                        continue;

                pr_debug("Adding link for CPU: %u\n", j);
                cpu_dev = get_cpu_device(j);
                ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
                                        "cpufreq");
                if (ret)
                        break;
        }
        return ret;
}

static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
                                     struct device *dev)
{
        struct freq_attr **drv_attr;
        int ret = 0;

        /* set up files for this cpu device */
        drv_attr = cpufreq_driver->attr;
        while (drv_attr && *drv_attr) {
                ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
                if (ret)
                        return ret;
                drv_attr++;
        }
        if (cpufreq_driver->get) {
                ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
                if (ret)
                        return ret;
        }

        ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
        if (ret)
                return ret;

        if (cpufreq_driver->bios_limit) {
                ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
                if (ret)
                        return ret;
        }

        return cpufreq_add_dev_symlink(policy);
}

static void cpufreq_init_policy(struct cpufreq_policy *policy)
{
        struct cpufreq_governor *gov = NULL;
        struct cpufreq_policy new_policy;
        int ret = 0;

        memcpy(&new_policy, policy, sizeof(*policy));

        /* Update governor of new_policy to the governor used before hotplug */
        gov = find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
        if (gov)
                pr_debug("Restoring governor %s for cpu %d\n",
                                policy->governor->name, policy->cpu);
        else
                gov = CPUFREQ_DEFAULT_GOVERNOR;

        new_policy.governor = gov;

        /* Use the default policy if it's valid. */
        if (cpufreq_driver->setpolicy)
                cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);

        /* set default policy */
        ret = cpufreq_set_policy(policy, &new_policy);
        if (ret) {
                pr_debug("setting policy failed\n");
                if (cpufreq_driver->exit)
                        cpufreq_driver->exit(policy);
        }
}

static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
                                  unsigned int cpu, struct device *dev)
{
        int ret = 0;
        unsigned long flags;

        if (has_target()) {
                ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
                if (ret) {
                        pr_err("%s: Failed to stop governor\n", __func__);
                        return ret;
                }
        }

        down_write(&policy->rwsem);

        write_lock_irqsave(&cpufreq_driver_lock, flags);

        cpumask_set_cpu(cpu, policy->cpus);
        per_cpu(cpufreq_cpu_data, cpu) = policy;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        up_write(&policy->rwsem);

        if (has_target()) {
                ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
                if (!ret)
                        ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

                if (ret) {
                        pr_err("%s: Failed to start governor\n", __func__);
                        return ret;
                }
        }

        return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
}

static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
{
        struct cpufreq_policy *policy;
        unsigned long flags;

        read_lock_irqsave(&cpufreq_driver_lock, flags);

        policy = per_cpu(cpufreq_cpu_data_fallback, cpu);

        read_unlock_irqrestore(&cpufreq_driver_lock, flags);

        if (policy)
                policy->governor = NULL;

        return policy;
}

static struct cpufreq_policy *cpufreq_policy_alloc(void)
{
        struct cpufreq_policy *policy;

        policy = kzalloc(sizeof(*policy), GFP_KERNEL);
        if (!policy)
                return NULL;

        if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
                goto err_free_policy;

        if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
                goto err_free_cpumask;

        INIT_LIST_HEAD(&policy->policy_list);
        init_rwsem(&policy->rwsem);
        spin_lock_init(&policy->transition_lock);
        init_waitqueue_head(&policy->transition_wait);
        init_completion(&policy->kobj_unregister);
        INIT_WORK(&policy->update, handle_update);

        return policy;

err_free_cpumask:
        free_cpumask_var(policy->cpus);
err_free_policy:
        kfree(policy);

        return NULL;
}

static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
{
        struct kobject *kobj;
        struct completion *cmp;

        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                        CPUFREQ_REMOVE_POLICY, policy);

        down_read(&policy->rwsem);
        kobj = &policy->kobj;
        cmp = &policy->kobj_unregister;
        up_read(&policy->rwsem);
        kobject_put(kobj);

        /*
         * We need to make sure that the underlying kobj is
         * actually not referenced anymore by anybody before we
         * proceed with unloading.
         */
        pr_debug("waiting for dropping of refcount\n");
        wait_for_completion(cmp);
        pr_debug("wait complete\n");
}

static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
        free_cpumask_var(policy->related_cpus);
        free_cpumask_var(policy->cpus);
        kfree(policy);
}

static int update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu,
                             struct device *cpu_dev)
{
        int ret;

        if (WARN_ON(cpu == policy->cpu))
                return 0;

        /* Move kobject to the new policy->cpu */
        ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
        if (ret) {
                pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
                return ret;
        }

        down_write(&policy->rwsem);
        policy->cpu = cpu;
        up_write(&policy->rwsem);

        return 0;
}
Viresh Kumar23faf0b2015-02-19 17:02:04 +05301128/**
1129 * cpufreq_add_dev - add a CPU device
1130 *
1131 * Adds the cpufreq interface for a CPU device.
1132 *
1133 * The Oracle says: try running cpufreq registration/unregistration concurrently
1134 * with with cpu hotplugging and all hell will break loose. Tried to clean this
1135 * mess up, but more thorough testing is needed. - Mathieu
1136 */
1137static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001138{
Viresh Kumarfcf80582013-01-29 14:39:08 +00001139 unsigned int j, cpu = dev->id;
Viresh Kumar65922462013-02-07 10:56:03 +05301140 int ret = -ENOMEM;
Viresh Kumar7f0c0202015-01-02 12:34:32 +05301141 struct cpufreq_policy *policy;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001142 unsigned long flags;
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301143 bool recover_policy = cpufreq_suspended;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001144
Ashok Rajc32b6b82005-10-30 14:59:54 -08001145 if (cpu_is_offline(cpu))
1146 return 0;
1147
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02001148 pr_debug("adding CPU %u\n", cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001149
Linus Torvalds1da177e2005-04-16 15:20:36 -07001150 /* check whether a different CPU already registered this
1151 * CPU because it is in the same boat. */
Viresh Kumard7a97712015-01-02 12:34:33 +05301152 policy = cpufreq_cpu_get_raw(cpu);
1153 if (unlikely(policy))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001154 return 0;
Viresh Kumarfcf80582013-01-29 14:39:08 +00001155
Viresh Kumar6eed9402013-08-06 22:53:11 +05301156 if (!down_read_trylock(&cpufreq_rwsem))
1157 return 0;
1158
Viresh Kumarfcf80582013-01-29 14:39:08 +00001159 /* Check if this cpu was hot-unplugged earlier and has siblings */
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00001160 read_lock_irqsave(&cpufreq_driver_lock, flags);
Viresh Kumarb4f06762015-01-27 14:06:08 +05301161 for_each_policy(policy) {
Viresh Kumar7f0c0202015-01-02 12:34:32 +05301162 if (cpumask_test_cpu(cpu, policy->related_cpus)) {
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00001163 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
Viresh Kumar7f0c0202015-01-02 12:34:32 +05301164 ret = cpufreq_add_policy_cpu(policy, cpu, dev);
Viresh Kumar6eed9402013-08-06 22:53:11 +05301165 up_read(&cpufreq_rwsem);
1166 return ret;
Viresh Kumar2eaa3e22013-02-07 10:55:00 +05301167 }
Viresh Kumarfcf80582013-01-29 14:39:08 +00001168 }
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00001169 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001170
Rafael J. Wysocki72368d12013-12-27 01:07:11 +01001171 /*
1172 * Restore the saved policy when doing light-weight init and fall back
1173 * to the full init if that fails.
1174 */
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301175 policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
Rafael J. Wysocki72368d12013-12-27 01:07:11 +01001176 if (!policy) {
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301177 recover_policy = false;
Srivatsa S. Bhat84148092013-07-30 04:25:10 +05301178 policy = cpufreq_policy_alloc();
Rafael J. Wysocki72368d12013-12-27 01:07:11 +01001179 if (!policy)
1180 goto nomem_out;
1181 }
Srivatsa S. Bhat0d66b912013-09-12 01:42:59 +05301182
1183 /*
1184 * In the resume path, since we restore a saved policy, the assignment
1185 * to policy->cpu is like an update of the existing policy, rather than
1186 * the creation of a brand new one. So we need to perform this update
1187 * by invoking update_policy_cpu().
1188 */
Viresh Kumar1bfb4252014-07-17 10:48:28 +05301189 if (recover_policy && cpu != policy->cpu)
1190 WARN_ON(update_policy_cpu(policy, cpu, dev));
1191 else
Srivatsa S. Bhat0d66b912013-09-12 01:42:59 +05301192 policy->cpu = cpu;
1193
Rusty Russell835481d2009-01-04 05:18:06 -08001194 cpumask_copy(policy->cpus, cpumask_of(cpu));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001195
Linus Torvalds1da177e2005-04-16 15:20:36 -07001196	/* Call the driver. From then on the cpufreq driver must be able
1197 * to accept all calls to ->verify and ->setpolicy for this CPU
1198 */
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001199 ret = cpufreq_driver->init(policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001200 if (ret) {
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02001201 pr_debug("initialization failed\n");
Viresh Kumar2eaa3e22013-02-07 10:55:00 +05301202 goto err_set_policy_cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001203 }
Viresh Kumar643ae6e2013-01-12 05:14:38 +00001204
Tomeu Vizoso6d4e81e2014-11-24 10:08:03 +01001205 down_write(&policy->rwsem);
1206
Viresh Kumar5a7e56a2014-03-04 11:44:00 +08001207	/* related_cpus should at least include policy->cpus */
1208 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1209
1210 /*
1211	 * affected cpus must always be the ones that are online. We aren't
1212 * managing offline cpus here.
1213 */
1214 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1215
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301216 if (!recover_policy) {
Viresh Kumar5a7e56a2014-03-04 11:44:00 +08001217 policy->user_policy.min = policy->min;
1218 policy->user_policy.max = policy->max;
Tomeu Vizoso6d4e81e2014-11-24 10:08:03 +01001219
1220 /* prepare interface data */
1221 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
1222 &dev->kobj, "cpufreq");
1223 if (ret) {
1224 pr_err("%s: failed to init policy->kobj: %d\n",
1225 __func__, ret);
1226 goto err_init_policy_kobj;
1227 }
Viresh Kumar5a7e56a2014-03-04 11:44:00 +08001228 }
1229
Viresh Kumar652ed952014-01-09 20:38:43 +05301230 write_lock_irqsave(&cpufreq_driver_lock, flags);
1231 for_each_cpu(j, policy->cpus)
1232 per_cpu(cpufreq_cpu_data, j) = policy;
1233 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1234
Rafael J. Wysocki2ed99e32014-03-12 21:49:33 +01001235 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
Viresh Kumarda60ce92013-10-03 20:28:30 +05301236 policy->cur = cpufreq_driver->get(policy->cpu);
1237 if (!policy->cur) {
1238 pr_err("%s: ->get() failed\n", __func__);
1239 goto err_get_freq;
1240 }
1241 }
1242
Viresh Kumard3916692013-12-03 11:20:46 +05301243 /*
1244	 * Sometimes boot loaders set the CPU frequency to a value outside of
1245	 * the frequency table known to the cpufreq core. In such cases the CPU
1246	 * might be unstable if it has to run at that frequency for a long time,
1247	 * so it is better to set it to a frequency that is specified in the
1248	 * freq-table. An out-of-table frequency also makes cpufreq-stats
1249	 * inconsistent, as cpufreq-stats would fail to register because the
1250	 * current frequency of the CPU isn't found in the freq-table.
1251	 *
1252	 * Because we don't want this change to affect the boot process badly,
1253	 * we go for the next freq which is >= policy->cur ('cur' must be set by
1254	 * now, otherwise we will end up setting the freq to the lowest entry of
1255	 * the table, as 'cur' is initialized to zero).
1256	 *
1257	 * We pass the target freq as "policy->cur - 1", since otherwise
1258	 * __cpufreq_driver_target() would simply return early, as policy->cur
1259	 * would already be equal to the target freq.
1260 */
1261 if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1262 && has_target()) {
1263 /* Are we running at unknown frequency ? */
1264 ret = cpufreq_frequency_table_get_index(policy, policy->cur);
1265 if (ret == -EINVAL) {
1266 /* Warn user and fix it */
1267			pr_warn("%s: CPU%d: Running at unlisted freq: %u kHz\n",
1268 __func__, policy->cpu, policy->cur);
1269 ret = __cpufreq_driver_target(policy, policy->cur - 1,
1270 CPUFREQ_RELATION_L);
1271
1272 /*
1273			 * Reaching here a few seconds after boot does not
1274			 * guarantee that the system will remain stable at the
1275			 * "unknown" frequency for a longer duration. Hence, the BUG_ON().
1276 */
1277 BUG_ON(ret);
1278			pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u kHz\n",
1279 __func__, policy->cpu, policy->cur);
1280 }
1281 }
1282
Thomas Renningera1531ac2008-07-29 22:32:58 -07001283 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1284 CPUFREQ_START, policy);
1285
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301286 if (!recover_policy) {
Viresh Kumar308b60e2013-07-31 14:35:14 +02001287 ret = cpufreq_add_dev_interface(policy, dev);
Srivatsa S. Bhata82fab22013-07-30 04:24:49 +05301288 if (ret)
1289 goto err_out_unregister;
Viresh Kumarfcd7af92014-01-07 07:10:10 +05301290 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1291 CPUFREQ_CREATE_POLICY, policy);
Srivatsa S. Bhata82fab22013-07-30 04:24:49 +05301292 }
Dave Jones8ff69732006-03-05 03:37:23 -05001293
Viresh Kumar9515f4d2013-08-20 12:08:23 +05301294 write_lock_irqsave(&cpufreq_driver_lock, flags);
1295 list_add(&policy->policy_list, &cpufreq_policy_list);
1296 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1297
Srivatsa S. Bhate18f1682013-07-30 04:24:23 +05301298 cpufreq_init_policy(policy);
1299
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301300 if (!recover_policy) {
Viresh Kumar08fd8c1c2013-12-24 07:11:01 +05301301 policy->user_policy.policy = policy->policy;
1302 policy->user_policy.governor = policy->governor;
1303 }
Viresh Kumar4e97b632014-03-04 11:44:01 +08001304 up_write(&policy->rwsem);
Viresh Kumar08fd8c1c2013-12-24 07:11:01 +05301305
Greg Kroah-Hartman038c5b32007-12-17 15:54:39 -04001306 kobject_uevent(&policy->kobj, KOBJ_ADD);
Viresh Kumar7c45cf32014-11-27 06:07:51 +05301307
Viresh Kumar6eed9402013-08-06 22:53:11 +05301308 up_read(&cpufreq_rwsem);
1309
Viresh Kumar7c45cf32014-11-27 06:07:51 +05301310 /* Callback for handling stuff after policy is ready */
1311 if (cpufreq_driver->ready)
1312 cpufreq_driver->ready(policy);
1313
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02001314 pr_debug("initialization complete\n");
Dave Jones87c32272006-03-29 01:48:37 -05001315
Linus Torvalds1da177e2005-04-16 15:20:36 -07001316 return 0;
1317
Linus Torvalds1da177e2005-04-16 15:20:36 -07001318err_out_unregister:
Viresh Kumar652ed952014-01-09 20:38:43 +05301319err_get_freq:
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00001320 write_lock_irqsave(&cpufreq_driver_lock, flags);
Viresh Kumar474deff2013-08-20 12:08:25 +05301321 for_each_cpu(j, policy->cpus)
Mike Travis7a6aedf2008-03-25 15:06:53 -07001322 per_cpu(cpufreq_cpu_data, j) = NULL;
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00001323 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001324
Tomeu Vizoso6d4e81e2014-11-24 10:08:03 +01001325 if (!recover_policy) {
1326 kobject_put(&policy->kobj);
1327 wait_for_completion(&policy->kobj_unregister);
1328 }
1329err_init_policy_kobj:
Prarit Bhargava7106e022014-09-10 10:12:08 -04001330 up_write(&policy->rwsem);
1331
Viresh Kumarda60ce92013-10-03 20:28:30 +05301332 if (cpufreq_driver->exit)
1333 cpufreq_driver->exit(policy);
Viresh Kumar2eaa3e22013-02-07 10:55:00 +05301334err_set_policy_cpu:
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301335 if (recover_policy) {
Rafael J. Wysocki72368d12013-12-27 01:07:11 +01001336 /* Do not leave stale fallback data behind. */
1337 per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
Viresh Kumar42f921a2013-12-20 21:26:02 +05301338 cpufreq_policy_put_kobj(policy);
Rafael J. Wysocki72368d12013-12-27 01:07:11 +01001339 }
Srivatsa S. Bhate9698cc2013-07-30 04:24:11 +05301340 cpufreq_policy_free(policy);
Viresh Kumar42f921a2013-12-20 21:26:02 +05301341
Linus Torvalds1da177e2005-04-16 15:20:36 -07001342nomem_out:
Viresh Kumar6eed9402013-08-06 22:53:11 +05301343 up_read(&cpufreq_rwsem);
1344
Linus Torvalds1da177e2005-04-16 15:20:36 -07001345 return ret;
1346}
1347
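/*
 * CPU removal is split into two phases: __cpufreq_remove_dev_prepare() stops
 * the governor and, if other CPUs still share the policy, nominates a new
 * policy->cpu and moves the policy kobject there, while
 * __cpufreq_remove_dev_finish() drops the CPU from policy->cpus and, for the
 * last CPU, exits the governor and frees the policy.  The CPU hotplug
 * notifier below maps these phases to CPU_DOWN_PREPARE and CPU_POST_DEAD.
 */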
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301348static int __cpufreq_remove_dev_prepare(struct device *dev,
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301349 struct subsys_interface *sif)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001350{
Srivatsa S. Bhatf9ba6802013-07-30 04:24:36 +05301351 unsigned int cpu = dev->id, cpus;
Viresh Kumar1bfb4252014-07-17 10:48:28 +05301352 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001353 unsigned long flags;
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301354 struct cpufreq_policy *policy;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001355
Viresh Kumarb8eed8a2013-01-14 13:23:03 +00001356 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001357
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00001358 write_lock_irqsave(&cpufreq_driver_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001359
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301360 policy = per_cpu(cpufreq_cpu_data, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001361
Srivatsa S. Bhat84148092013-07-30 04:25:10 +05301362 /* Save the policy somewhere when doing a light-weight tear-down */
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301363 if (cpufreq_suspended)
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301364 per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
Srivatsa S. Bhat84148092013-07-30 04:25:10 +05301365
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00001366 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001367
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301368 if (!policy) {
Viresh Kumarb8eed8a2013-01-14 13:23:03 +00001369 pr_debug("%s: No cpu_data found\n", __func__);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001370 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001371 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001372
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05301373 if (has_target()) {
Viresh Kumar3de9bde2013-08-06 22:53:13 +05301374 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1375 if (ret) {
1376 pr_err("%s: Failed to stop governor\n", __func__);
1377 return ret;
1378 }
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001379
Dirk Brandewiefa69e332013-02-06 09:02:11 -08001380 strncpy(per_cpu(cpufreq_cpu_governor, cpu),
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301381 policy->governor->name, CPUFREQ_NAME_LEN);
Viresh Kumardb5f2992015-01-02 12:34:25 +05301382 }
Jacob Shin27ecddc2011-04-27 13:32:11 -05001383
viresh kumarad7722d2013-10-18 19:10:15 +05301384 down_read(&policy->rwsem);
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301385 cpus = cpumask_weight(policy->cpus);
viresh kumarad7722d2013-10-18 19:10:15 +05301386 up_read(&policy->rwsem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001387
Srivatsa S. Bhat61173f22013-09-12 01:43:25 +05301388 if (cpu != policy->cpu) {
viresh kumar6964d912014-02-17 14:52:11 +05301389 sysfs_remove_link(&dev->kobj, "cpufreq");
Viresh Kumar73bf0fc2013-02-05 22:21:14 +01001390 } else if (cpus > 1) {
Viresh Kumar1bfb4252014-07-17 10:48:28 +05301391 /* Nominate new CPU */
1392 int new_cpu = cpumask_any_but(policy->cpus, cpu);
1393 struct device *cpu_dev = get_cpu_device(new_cpu);
Srivatsa S. Bhata82fab22013-07-30 04:24:49 +05301394
Viresh Kumar1bfb4252014-07-17 10:48:28 +05301395 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1396 ret = update_policy_cpu(policy, new_cpu, cpu_dev);
1397 if (ret) {
1398 if (sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
1399 "cpufreq"))
1400 pr_err("%s: Failed to restore kobj link to cpu:%d\n",
1401 __func__, cpu_dev->id);
1402 return ret;
Viresh Kumarb8eed8a2013-01-14 13:23:03 +00001403 }
Viresh Kumar1bfb4252014-07-17 10:48:28 +05301404
1405 if (!cpufreq_suspended)
1406 pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
1407 __func__, new_cpu, cpu);
Preeti U Murthy789ca242014-09-29 15:47:12 +02001408 } else if (cpufreq_driver->stop_cpu) {
Dirk Brandewie367dc4a2014-03-19 08:45:53 -07001409 cpufreq_driver->stop_cpu(policy);
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001410 }
Venki Pallipadiec282972007-03-26 12:03:19 -07001411
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301412 return 0;
1413}
1414
1415static int __cpufreq_remove_dev_finish(struct device *dev,
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301416 struct subsys_interface *sif)
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301417{
1418 unsigned int cpu = dev->id, cpus;
1419 int ret;
1420 unsigned long flags;
1421 struct cpufreq_policy *policy;
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301422
Viresh Kumar6ffae8c2015-01-31 06:02:44 +05301423 write_lock_irqsave(&cpufreq_driver_lock, flags);
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301424 policy = per_cpu(cpufreq_cpu_data, cpu);
Viresh Kumar6ffae8c2015-01-31 06:02:44 +05301425 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1426 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301427
1428 if (!policy) {
1429 pr_debug("%s: No cpu_data found\n", __func__);
1430 return -EINVAL;
1431 }
1432
viresh kumarad7722d2013-10-18 19:10:15 +05301433 down_write(&policy->rwsem);
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301434 cpus = cpumask_weight(policy->cpus);
Viresh Kumar9c8f1ee2013-09-12 17:06:33 +05301435
1436 if (cpus > 1)
1437 cpumask_clear_cpu(cpu, policy->cpus);
viresh kumarad7722d2013-10-18 19:10:15 +05301438 up_write(&policy->rwsem);
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301439
Viresh Kumarb8eed8a2013-01-14 13:23:03 +00001440 /* If cpu is last user of policy, free policy */
1441 if (cpus == 1) {
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05301442 if (has_target()) {
Viresh Kumar3de9bde2013-08-06 22:53:13 +05301443 ret = __cpufreq_governor(policy,
1444 CPUFREQ_GOV_POLICY_EXIT);
1445 if (ret) {
1446 pr_err("%s: Failed to exit governor\n",
Joe Perchese837f9b2014-03-11 10:03:00 -07001447 __func__);
Viresh Kumar3de9bde2013-08-06 22:53:13 +05301448 return ret;
1449 }
Viresh Kumaredab2fb2013-08-20 12:08:22 +05301450 }
Rafael J. Wysocki2a998592013-07-30 00:32:00 +02001451
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301452 if (!cpufreq_suspended)
Viresh Kumar42f921a2013-12-20 21:26:02 +05301453 cpufreq_policy_put_kobj(policy);
Srivatsa S. Bhat84148092013-07-30 04:25:10 +05301454
1455 /*
1456 * Perform the ->exit() even during light-weight tear-down,
1457 * since this is a core component, and is essential for the
1458 * subsequent light-weight ->init() to succeed.
Viresh Kumarb8eed8a2013-01-14 13:23:03 +00001459 */
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001460 if (cpufreq_driver->exit)
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301461 cpufreq_driver->exit(policy);
Viresh Kumarb8eed8a2013-01-14 13:23:03 +00001462
Viresh Kumar9515f4d2013-08-20 12:08:23 +05301463 /* Remove policy from list of active policies */
1464 write_lock_irqsave(&cpufreq_driver_lock, flags);
1465 list_del(&policy->policy_list);
1466 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1467
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301468 if (!cpufreq_suspended)
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301469 cpufreq_policy_free(policy);
Stratos Karafotise5c87b72014-03-19 23:29:17 +02001470 } else if (has_target()) {
1471 ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
1472 if (!ret)
1473 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1474
1475 if (ret) {
1476 pr_err("%s: Failed to start governor\n", __func__);
1477 return ret;
Rafael J. Wysocki2a998592013-07-30 00:32:00 +02001478 }
Viresh Kumarb8eed8a2013-01-14 13:23:03 +00001479 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001480
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481 return 0;
1482}
1483
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301484/**
Viresh Kumar27a862e2013-10-02 14:13:14 +05301485 * cpufreq_remove_dev - remove a CPU device
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301486 *
1487 * Removes the cpufreq interface for a CPU device.
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301488 */
Kay Sievers8a25a2f2011-12-21 14:29:42 -08001489static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001490{
Kay Sievers8a25a2f2011-12-21 14:29:42 -08001491 unsigned int cpu = dev->id;
Viresh Kumar27a862e2013-10-02 14:13:14 +05301492 int ret;
Venki Pallipadiec282972007-03-26 12:03:19 -07001493
1494 if (cpu_is_offline(cpu))
1495 return 0;
1496
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301497 ret = __cpufreq_remove_dev_prepare(dev, sif);
Viresh Kumar27a862e2013-10-02 14:13:14 +05301498
1499 if (!ret)
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301500 ret = __cpufreq_remove_dev_finish(dev, sif);
Viresh Kumar27a862e2013-10-02 14:13:14 +05301501
1502 return ret;
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001503}
1504
David Howells65f27f32006-11-22 14:55:48 +00001505static void handle_update(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001506{
David Howells65f27f32006-11-22 14:55:48 +00001507 struct cpufreq_policy *policy =
1508 container_of(work, struct cpufreq_policy, update);
1509 unsigned int cpu = policy->cpu;
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02001510 pr_debug("handle_update for cpu %u called\n", cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001511 cpufreq_update_policy(cpu);
1512}
1513
1514/**
Viresh Kumarbb176f72013-06-19 14:19:33 +05301515 * cpufreq_out_of_sync - If the actual and saved CPU frequencies differ, we're
1516 * in deep trouble.
Viresh Kumara1e1dc42015-01-02 12:34:28 +05301517 * @policy: policy managing CPUs
Linus Torvalds1da177e2005-04-16 15:20:36 -07001518 * @new_freq: CPU frequency the CPU actually runs at
1519 *
Dave Jones29464f22009-01-18 01:37:11 -05001520 * We adjust to the current frequency first, and clean up later by either
1521 * calling cpufreq_update_policy() or scheduling handle_update().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001522 */
Viresh Kumara1e1dc42015-01-02 12:34:28 +05301523static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301524 unsigned int new_freq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001525{
1526 struct cpufreq_freqs freqs;
Viresh Kumarb43a7ff2013-03-24 11:56:43 +05301527
Joe Perchese837f9b2014-03-11 10:03:00 -07001528 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
Viresh Kumara1e1dc42015-01-02 12:34:28 +05301529 policy->cur, new_freq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001530
Viresh Kumara1e1dc42015-01-02 12:34:28 +05301531 freqs.old = policy->cur;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001532 freqs.new = new_freq;
Viresh Kumarb43a7ff2013-03-24 11:56:43 +05301533
Viresh Kumar8fec0512014-03-24 13:35:45 +05301534 cpufreq_freq_transition_begin(policy, &freqs);
1535 cpufreq_freq_transition_end(policy, &freqs, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001536}
1537
Dave Jones32ee8c32006-02-28 00:43:23 -05001538/**
Dhaval Giani4ab70df2006-12-13 14:49:15 +05301539 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001540 * @cpu: CPU number
1541 *
1542 * This is the last known freq, without actually getting it from the driver.
1543 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1544 */
1545unsigned int cpufreq_quick_get(unsigned int cpu)
1546{
Dirk Brandewie9e21ba82013-02-06 09:02:08 -08001547 struct cpufreq_policy *policy;
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301548 unsigned int ret_freq = 0;
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001549
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001550 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1551 return cpufreq_driver->get(cpu);
Dirk Brandewie9e21ba82013-02-06 09:02:08 -08001552
1553 policy = cpufreq_cpu_get(cpu);
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001554 if (policy) {
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301555 ret_freq = policy->cur;
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001556 cpufreq_cpu_put(policy);
1557 }
1558
Dave Jones4d34a672008-02-07 16:33:49 -05001559 return ret_freq;
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001560}
1561EXPORT_SYMBOL(cpufreq_quick_get);
1562
Jesse Barnes3d737102011-06-28 10:59:12 -07001563/**
1564 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1565 * @cpu: CPU number
1566 *
1567 * Just return the max possible frequency for a given CPU.
1568 */
1569unsigned int cpufreq_quick_get_max(unsigned int cpu)
1570{
1571 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1572 unsigned int ret_freq = 0;
1573
1574 if (policy) {
1575 ret_freq = policy->max;
1576 cpufreq_cpu_put(policy);
1577 }
1578
1579 return ret_freq;
1580}
1581EXPORT_SYMBOL(cpufreq_quick_get_max);
1582
Viresh Kumard92d50a2015-01-02 12:34:29 +05301583static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001584{
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301585 unsigned int ret_freq = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001586
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001587 if (!cpufreq_driver->get)
Dave Jones4d34a672008-02-07 16:33:49 -05001588 return ret_freq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001589
Viresh Kumard92d50a2015-01-02 12:34:29 +05301590 ret_freq = cpufreq_driver->get(policy->cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001591
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301592 if (ret_freq && policy->cur &&
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001593 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301594 /* verify no discrepancy between actual and
1595 saved value exists */
1596 if (unlikely(ret_freq != policy->cur)) {
Viresh Kumara1e1dc42015-01-02 12:34:28 +05301597 cpufreq_out_of_sync(policy, ret_freq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001598 schedule_work(&policy->update);
1599 }
1600 }
1601
Dave Jones4d34a672008-02-07 16:33:49 -05001602 return ret_freq;
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001603}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001604
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001605/**
1606 * cpufreq_get - get the current CPU frequency (in kHz)
1607 * @cpu: CPU number
1608 *
1609 * Get the current frequency of the CPU.
1610 */
1611unsigned int cpufreq_get(unsigned int cpu)
1612{
Aaron Plattner999976e2014-03-04 12:42:15 -08001613 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001614 unsigned int ret_freq = 0;
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001615
Aaron Plattner999976e2014-03-04 12:42:15 -08001616 if (policy) {
1617 down_read(&policy->rwsem);
Viresh Kumard92d50a2015-01-02 12:34:29 +05301618 ret_freq = __cpufreq_get(policy);
Aaron Plattner999976e2014-03-04 12:42:15 -08001619 up_read(&policy->rwsem);
Viresh Kumar26ca8692013-09-20 22:37:31 +05301620
Aaron Plattner999976e2014-03-04 12:42:15 -08001621 cpufreq_cpu_put(policy);
1622 }
Viresh Kumar6eed9402013-08-06 22:53:11 +05301623
Dave Jones4d34a672008-02-07 16:33:49 -05001624 return ret_freq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001625}
1626EXPORT_SYMBOL(cpufreq_get);
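/*
 * Note the difference from cpufreq_quick_get() above: cpufreq_get() asks the
 * driver for the real hardware frequency (under policy->rwsem) and fixes the
 * policy up via cpufreq_out_of_sync() if it disagrees with policy->cur,
 * whereas cpufreq_quick_get() normally just returns the cached policy->cur.
 */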
1627
Kay Sievers8a25a2f2011-12-21 14:29:42 -08001628static struct subsys_interface cpufreq_interface = {
1629 .name = "cpufreq",
1630 .subsys = &cpu_subsys,
1631 .add_dev = cpufreq_add_dev,
1632 .remove_dev = cpufreq_remove_dev,
Rafael J. Wysockie00e56d2011-03-23 22:16:32 +01001633};
1634
Viresh Kumare28867e2014-03-04 11:00:27 +08001635/*
1636 * Used when the platform wants a specific frequency to be configured
1637 * during suspend.
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001638 */
Viresh Kumare28867e2014-03-04 11:00:27 +08001639int cpufreq_generic_suspend(struct cpufreq_policy *policy)
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001640{
Viresh Kumare28867e2014-03-04 11:00:27 +08001641 int ret;
Dave Jones4bc5d342009-08-04 14:03:25 -04001642
Viresh Kumare28867e2014-03-04 11:00:27 +08001643 if (!policy->suspend_freq) {
1644 pr_err("%s: suspend_freq can't be zero\n", __func__);
1645 return -EINVAL;
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001646 }
1647
Viresh Kumare28867e2014-03-04 11:00:27 +08001648 pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1649 policy->suspend_freq);
1650
1651 ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1652 CPUFREQ_RELATION_H);
1653 if (ret)
1654 pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1655 __func__, policy->suspend_freq, ret);
1656
Dave Jonesc9060492008-02-07 16:32:18 -05001657 return ret;
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001658}
Viresh Kumare28867e2014-03-04 11:00:27 +08001659EXPORT_SYMBOL(cpufreq_generic_suspend);
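/*
 * Illustrative sketch (not taken from this file) of how a platform driver
 * might use this helper: point the driver's ->suspend callback at
 * cpufreq_generic_suspend() and fill in policy->suspend_freq from ->init().
 * All "foo_*" names and the frequency value are made up for the example.
 *
 *	static int foo_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		policy->suspend_freq = 800000;	// kHz, platform specific
 *		return foo_setup_freq_table(policy);
 *	}
 *
 *	static struct cpufreq_driver foo_cpufreq_driver = {
 *		.name		= "foo-cpufreq",
 *		.init		= foo_cpufreq_init,
 *		.target_index	= foo_cpufreq_target_index,
 *		.suspend	= cpufreq_generic_suspend,
 *	};
 */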
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001660
1661/**
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001662 * cpufreq_suspend() - Suspend CPUFreq governors
Linus Torvalds1da177e2005-04-16 15:20:36 -07001663 *
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001664 * Called during system-wide suspend/hibernate cycles to suspend governors,
1665 * as some platforms can't change the frequency after this point in the
1666 * suspend cycle: the devices they use for changing the frequency (e.g. i2c,
1667 * regulators) are suspended shortly after this point.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001668 */
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001669void cpufreq_suspend(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001670{
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301671 struct cpufreq_policy *policy;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001672
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001673 if (!cpufreq_driver)
Rafael J. Wysockie00e56d2011-03-23 22:16:32 +01001674 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001675
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001676 if (!has_target())
Viresh Kumarb1b12ba2014-09-30 09:33:17 +05301677 goto suspend;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001678
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001679 pr_debug("%s: Suspending Governors\n", __func__);
1680
Viresh Kumarb4f06762015-01-27 14:06:08 +05301681 for_each_policy(policy) {
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001682 if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
1683 pr_err("%s: Failed to stop governor for policy: %p\n",
1684 __func__, policy);
1685 else if (cpufreq_driver->suspend
1686 && cpufreq_driver->suspend(policy))
1687 pr_err("%s: Failed to suspend driver: %p\n", __func__,
1688 policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001689 }
Viresh Kumarb1b12ba2014-09-30 09:33:17 +05301690
1691suspend:
1692 cpufreq_suspended = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001693}
1694
Linus Torvalds1da177e2005-04-16 15:20:36 -07001695/**
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001696 * cpufreq_resume() - Resume CPUFreq governors
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697 *
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001698 * Called during system-wide suspend/hibernate cycles to resume the governors
1699 * that were suspended by cpufreq_suspend().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001700 */
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001701void cpufreq_resume(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001702{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001703 struct cpufreq_policy *policy;
1704
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001705 if (!cpufreq_driver)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001706 return;
1707
Lan Tianyu8e304442014-09-18 15:03:07 +08001708 cpufreq_suspended = false;
1709
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001710 if (!has_target())
1711 return;
1712
1713 pr_debug("%s: Resuming Governors\n", __func__);
1714
Viresh Kumarb4f06762015-01-27 14:06:08 +05301715 for_each_policy(policy) {
Viresh Kumar0c5aa402014-03-24 12:30:29 +05301716 if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
1717 pr_err("%s: Failed to resume driver: %p\n", __func__,
1718 policy);
1719 else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001720 || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
1721 pr_err("%s: Failed to start governor for policy: %p\n",
1722 __func__, policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001723 }
Viresh Kumarc75de0a2015-04-02 10:21:33 +05301724
1725 /*
1726	 * Schedule a call to cpufreq_update_policy() for the first online CPU,
1727	 * as it won't be hotplugged out on suspend. It will verify that the
1728 * current freq is in sync with what we believe it to be.
1729 */
1730 policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask));
1731 if (WARN_ON(!policy))
1732 return;
1733
1734 schedule_work(&policy->update);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001735}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001736
Borislav Petkov9d950462013-01-20 10:24:28 +00001737/**
1738 * cpufreq_get_current_driver - return current driver's name
1739 *
1740 * Return the name string of the currently loaded cpufreq driver
1741 * or NULL, if none.
1742 */
1743const char *cpufreq_get_current_driver(void)
1744{
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001745 if (cpufreq_driver)
1746 return cpufreq_driver->name;
1747
1748 return NULL;
Borislav Petkov9d950462013-01-20 10:24:28 +00001749}
1750EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001751
Thomas Petazzoni51315cd2014-10-19 11:30:27 +02001752/**
1753 * cpufreq_get_driver_data - return current driver data
1754 *
1755 * Return the private data of the currently loaded cpufreq
1756 * driver, or NULL if no cpufreq driver is loaded.
1757 */
1758void *cpufreq_get_driver_data(void)
1759{
1760 if (cpufreq_driver)
1761 return cpufreq_driver->driver_data;
1762
1763 return NULL;
1764}
1765EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
1766
Linus Torvalds1da177e2005-04-16 15:20:36 -07001767/*********************************************************************
1768 * NOTIFIER LISTS INTERFACE *
1769 *********************************************************************/
1770
1771/**
1772 * cpufreq_register_notifier - register a driver with cpufreq
1773 * @nb: notifier function to register
1774 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1775 *
Dave Jones32ee8c32006-02-28 00:43:23 -05001776 * Add a notifier to one of two lists: either a list of notifiers that
1777 * are notified about clock rate changes (once before and once after
1778 * the transition), or a list of notifiers that are notified about
1779 * changes in cpufreq policy.
1780 *
1781 * This function may sleep, and has the same return conditions as
Alan Sterne041c682006-03-27 01:16:30 -08001782 * blocking_notifier_chain_register.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001783 */
1784int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1785{
1786 int ret;
1787
Dirk Brandewied5aaffa2013-01-17 16:22:21 +00001788 if (cpufreq_disabled())
1789 return -EINVAL;
1790
Cesar Eduardo Barros74212ca2008-02-16 08:41:24 -02001791 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1792
Linus Torvalds1da177e2005-04-16 15:20:36 -07001793 switch (list) {
1794 case CPUFREQ_TRANSITION_NOTIFIER:
Alan Sternb4dfdbb2006-10-04 02:17:06 -07001795 ret = srcu_notifier_chain_register(
Alan Sterne041c682006-03-27 01:16:30 -08001796 &cpufreq_transition_notifier_list, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001797 break;
1798 case CPUFREQ_POLICY_NOTIFIER:
Alan Sterne041c682006-03-27 01:16:30 -08001799 ret = blocking_notifier_chain_register(
1800 &cpufreq_policy_notifier_list, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001801 break;
1802 default:
1803 ret = -EINVAL;
1804 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001805
1806 return ret;
1807}
1808EXPORT_SYMBOL(cpufreq_register_notifier);
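/*
 * Illustrative sketch of a transition notifier (the "foo_*" names are made
 * up, not part of this file): the callback receives a struct cpufreq_freqs
 * describing the change and is invoked once before (CPUFREQ_PRECHANGE) and
 * once after (CPUFREQ_POSTCHANGE) each frequency transition.
 *
 *	static int foo_transition_cb(struct notifier_block *nb,
 *				     unsigned long action, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (action == CPUFREQ_POSTCHANGE)
 *			pr_info("cpu%u: %u -> %u kHz\n",
 *				freqs->cpu, freqs->old, freqs->new);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_transition_nb = {
 *		.notifier_call = foo_transition_cb,
 *	};
 *
 *	cpufreq_register_notifier(&foo_transition_nb,
 *				  CPUFREQ_TRANSITION_NOTIFIER);
 */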
1809
Linus Torvalds1da177e2005-04-16 15:20:36 -07001810/**
1811 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1812 * @nb: notifier block to be unregistered
Viresh Kumarbb176f72013-06-19 14:19:33 +05301813 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814 *
1815 * Remove a notifier from the CPU frequency notifier list.
1816 *
1817 * This function may sleep, and has the same return conditions as
Alan Sterne041c682006-03-27 01:16:30 -08001818 * blocking_notifier_chain_unregister.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001819 */
1820int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1821{
1822 int ret;
1823
Dirk Brandewied5aaffa2013-01-17 16:22:21 +00001824 if (cpufreq_disabled())
1825 return -EINVAL;
1826
Linus Torvalds1da177e2005-04-16 15:20:36 -07001827 switch (list) {
1828 case CPUFREQ_TRANSITION_NOTIFIER:
Alan Sternb4dfdbb2006-10-04 02:17:06 -07001829 ret = srcu_notifier_chain_unregister(
Alan Sterne041c682006-03-27 01:16:30 -08001830 &cpufreq_transition_notifier_list, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001831 break;
1832 case CPUFREQ_POLICY_NOTIFIER:
Alan Sterne041c682006-03-27 01:16:30 -08001833 ret = blocking_notifier_chain_unregister(
1834 &cpufreq_policy_notifier_list, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001835 break;
1836 default:
1837 ret = -EINVAL;
1838 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001839
1840 return ret;
1841}
1842EXPORT_SYMBOL(cpufreq_unregister_notifier);
1843
1844
1845/*********************************************************************
1846 * GOVERNORS *
1847 *********************************************************************/
1848
Viresh Kumar1c03a2d2014-06-02 22:49:28 +05301849/* Must set freqs->new to intermediate frequency */
1850static int __target_intermediate(struct cpufreq_policy *policy,
1851 struct cpufreq_freqs *freqs, int index)
1852{
1853 int ret;
1854
1855 freqs->new = cpufreq_driver->get_intermediate(policy, index);
1856
1857 /* We don't need to switch to intermediate freq */
1858 if (!freqs->new)
1859 return 0;
1860
1861 pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
1862 __func__, policy->cpu, freqs->old, freqs->new);
1863
1864 cpufreq_freq_transition_begin(policy, freqs);
1865 ret = cpufreq_driver->target_intermediate(policy, index);
1866 cpufreq_freq_transition_end(policy, freqs, ret);
1867
1868 if (ret)
1869 pr_err("%s: Failed to change to intermediate frequency: %d\n",
1870 __func__, ret);
1871
1872 return ret;
1873}
1874
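/*
 * __target_index() performs the actual switch for ->target_index() drivers.
 * If the driver implements ->get_intermediate()/->target_intermediate(), the
 * CPU is first moved to a stable intermediate frequency via
 * __target_intermediate() above, and only then to the requested table entry.
 * On failure the driver is expected to have reverted to the initial
 * frequency, and the core issues matching transition notifications back to
 * policy->restore_freq.
 */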
Viresh Kumar8d657752014-05-21 14:29:29 +05301875static int __target_index(struct cpufreq_policy *policy,
1876 struct cpufreq_frequency_table *freq_table, int index)
1877{
Viresh Kumar1c03a2d2014-06-02 22:49:28 +05301878 struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
1879 unsigned int intermediate_freq = 0;
Viresh Kumar8d657752014-05-21 14:29:29 +05301880 int retval = -EINVAL;
1881 bool notify;
1882
1883 notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
Viresh Kumar8d657752014-05-21 14:29:29 +05301884 if (notify) {
Viresh Kumar1c03a2d2014-06-02 22:49:28 +05301885 /* Handle switching to intermediate frequency */
1886 if (cpufreq_driver->get_intermediate) {
1887 retval = __target_intermediate(policy, &freqs, index);
1888 if (retval)
1889 return retval;
Viresh Kumar8d657752014-05-21 14:29:29 +05301890
Viresh Kumar1c03a2d2014-06-02 22:49:28 +05301891 intermediate_freq = freqs.new;
1892 /* Set old freq to intermediate */
1893 if (intermediate_freq)
1894 freqs.old = freqs.new;
1895 }
1896
1897 freqs.new = freq_table[index].frequency;
Viresh Kumar8d657752014-05-21 14:29:29 +05301898 pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
1899 __func__, policy->cpu, freqs.old, freqs.new);
1900
1901 cpufreq_freq_transition_begin(policy, &freqs);
1902 }
1903
1904 retval = cpufreq_driver->target_index(policy, index);
1905 if (retval)
1906 pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
1907 retval);
1908
Viresh Kumar1c03a2d2014-06-02 22:49:28 +05301909 if (notify) {
Viresh Kumar8d657752014-05-21 14:29:29 +05301910 cpufreq_freq_transition_end(policy, &freqs, retval);
1911
Viresh Kumar1c03a2d2014-06-02 22:49:28 +05301912 /*
1913 * Failed after setting to intermediate freq? Driver should have
1914 * reverted back to initial frequency and so should we. Check
1915 * here for intermediate_freq instead of get_intermediate, in
1916		 * case we haven't switched to the intermediate freq at all.
1917 */
1918 if (unlikely(retval && intermediate_freq)) {
1919 freqs.old = intermediate_freq;
1920 freqs.new = policy->restore_freq;
1921 cpufreq_freq_transition_begin(policy, &freqs);
1922 cpufreq_freq_transition_end(policy, &freqs, 0);
1923 }
1924 }
1925
Viresh Kumar8d657752014-05-21 14:29:29 +05301926 return retval;
1927}
1928
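/*
 * Core frequency-change entry point.  target_freq is first clamped to
 * [policy->min, policy->max]; for ->target_index() drivers, 'relation'
 * selects how the frequency table is searched: CPUFREQ_RELATION_L picks the
 * lowest table frequency at or above the target, CPUFREQ_RELATION_H the
 * highest table frequency at or below it.  cpufreq_driver_target() below is
 * the variant that takes policy->rwsem around this call.
 */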
Linus Torvalds1da177e2005-04-16 15:20:36 -07001929int __cpufreq_driver_target(struct cpufreq_policy *policy,
1930 unsigned int target_freq,
1931 unsigned int relation)
1932{
Viresh Kumar72499242012-10-31 01:28:21 +01001933 unsigned int old_target_freq = target_freq;
Viresh Kumar8d657752014-05-21 14:29:29 +05301934 int retval = -EINVAL;
Ashok Rajc32b6b82005-10-30 14:59:54 -08001935
Konrad Rzeszutek Wilka7b422c2012-03-13 19:18:39 -04001936 if (cpufreq_disabled())
1937 return -ENODEV;
1938
Viresh Kumar72499242012-10-31 01:28:21 +01001939 /* Make sure that target_freq is within supported range */
1940 if (target_freq > policy->max)
1941 target_freq = policy->max;
1942 if (target_freq < policy->min)
1943 target_freq = policy->min;
1944
1945 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
Joe Perchese837f9b2014-03-11 10:03:00 -07001946 policy->cpu, target_freq, relation, old_target_freq);
Viresh Kumar5a1c0222012-10-31 01:28:15 +01001947
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05301948 /*
1949	 * This might look like a redundant call, as we check the frequency again
1950	 * after finding the index. But it is left here intentionally for cases
1951	 * where exactly the same freq is requested again, so that we can save a
1952	 * few function calls.
1953 */
Viresh Kumar5a1c0222012-10-31 01:28:15 +01001954 if (target_freq == policy->cur)
1955 return 0;
1956
Viresh Kumar1c03a2d2014-06-02 22:49:28 +05301957 /* Save last value to restore later on errors */
1958 policy->restore_freq = policy->cur;
1959
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001960 if (cpufreq_driver->target)
1961 retval = cpufreq_driver->target(policy, target_freq, relation);
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05301962 else if (cpufreq_driver->target_index) {
1963 struct cpufreq_frequency_table *freq_table;
1964 int index;
Ashok Raj90d45d12005-11-08 21:34:24 -08001965
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05301966 freq_table = cpufreq_frequency_get_table(policy->cpu);
1967 if (unlikely(!freq_table)) {
1968 pr_err("%s: Unable to find freq_table\n", __func__);
1969 goto out;
1970 }
1971
1972 retval = cpufreq_frequency_table_target(policy, freq_table,
1973 target_freq, relation, &index);
1974 if (unlikely(retval)) {
1975 pr_err("%s: Unable to find matching freq\n", __func__);
1976 goto out;
1977 }
1978
Viresh Kumard4019f02013-08-14 19:38:24 +05301979 if (freq_table[index].frequency == policy->cur) {
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05301980 retval = 0;
Viresh Kumard4019f02013-08-14 19:38:24 +05301981 goto out;
1982 }
1983
Viresh Kumar8d657752014-05-21 14:29:29 +05301984 retval = __target_index(policy, freq_table, index);
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05301985 }
1986
1987out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001988 return retval;
1989}
1990EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1991
Linus Torvalds1da177e2005-04-16 15:20:36 -07001992int cpufreq_driver_target(struct cpufreq_policy *policy,
1993 unsigned int target_freq,
1994 unsigned int relation)
1995{
Julia Lawallf1829e42008-07-25 22:44:53 +02001996 int ret = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001997
viresh kumarad7722d2013-10-18 19:10:15 +05301998 down_write(&policy->rwsem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001999
2000 ret = __cpufreq_driver_target(policy, target_freq, relation);
2001
viresh kumarad7722d2013-10-18 19:10:15 +05302002 up_write(&policy->rwsem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002003
Linus Torvalds1da177e2005-04-16 15:20:36 -07002004 return ret;
2005}
2006EXPORT_SYMBOL_GPL(cpufreq_driver_target);
2007
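/*
 * __cpufreq_governor() forwards one of the CPUFREQ_GOV_* events to the
 * current governor.  The usual lifecycle, visible in cpufreq_set_policy()
 * and the add/remove paths above, is POLICY_INIT -> START -> LIMITS while in
 * use, then STOP -> POLICY_EXIT on teardown; the governor_enabled flag
 * rejects starting an already started governor or stopping one that is not
 * running.
 */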
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05302008static int __cpufreq_governor(struct cpufreq_policy *policy,
2009 unsigned int event)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002010{
Dave Jonescc993ca2005-07-28 09:43:56 -07002011 int ret;
Thomas Renninger6afde102007-10-02 13:28:13 -07002012
2013	/* Only needs to be defined when the default governor is known to have
2014	   latency restrictions, e.g. conservative or ondemand.
2015	   That this is the case is already ensured in Kconfig.
2016 */
2017#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
2018 struct cpufreq_governor *gov = &cpufreq_gov_performance;
2019#else
2020 struct cpufreq_governor *gov = NULL;
2021#endif
Thomas Renninger1c256242007-10-02 13:28:12 -07002022
Viresh Kumar2f0aea92014-03-04 11:00:26 +08002023 /* Don't start any governor operations if we are entering suspend */
2024 if (cpufreq_suspended)
2025 return 0;
Ethan Zhaocb577202014-12-18 15:28:19 +09002026 /*
2027	 * The governor might not be initialized here if an ACPI _PPC change
2028	 * notification happened, so check for it.
2029 */
2030 if (!policy->governor)
2031 return -EINVAL;
Viresh Kumar2f0aea92014-03-04 11:00:26 +08002032
Thomas Renninger1c256242007-10-02 13:28:12 -07002033 if (policy->governor->max_transition_latency &&
2034 policy->cpuinfo.transition_latency >
2035 policy->governor->max_transition_latency) {
Thomas Renninger6afde102007-10-02 13:28:13 -07002036 if (!gov)
2037 return -EINVAL;
2038 else {
Joe Perchese837f9b2014-03-11 10:03:00 -07002039 pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
2040 policy->governor->name, gov->name);
Thomas Renninger6afde102007-10-02 13:28:13 -07002041 policy->governor = gov;
2042 }
Thomas Renninger1c256242007-10-02 13:28:12 -07002043 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044
Viresh Kumarfe492f32013-08-06 22:53:10 +05302045 if (event == CPUFREQ_GOV_POLICY_INIT)
2046 if (!try_module_get(policy->governor->owner))
2047 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002048
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02002049 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
Joe Perchese837f9b2014-03-11 10:03:00 -07002050 policy->cpu, event);
Xiaoguang Chen95731eb2013-06-19 15:00:07 +08002051
2052 mutex_lock(&cpufreq_governor_lock);
Srivatsa S. Bhat56d07db2013-09-07 01:23:55 +05302053 if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
Viresh Kumarf73d3932013-08-31 17:53:40 +05302054 || (!policy->governor_enabled
2055 && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
Xiaoguang Chen95731eb2013-06-19 15:00:07 +08002056 mutex_unlock(&cpufreq_governor_lock);
2057 return -EBUSY;
2058 }
2059
2060 if (event == CPUFREQ_GOV_STOP)
2061 policy->governor_enabled = false;
2062 else if (event == CPUFREQ_GOV_START)
2063 policy->governor_enabled = true;
2064
2065 mutex_unlock(&cpufreq_governor_lock);
2066
Linus Torvalds1da177e2005-04-16 15:20:36 -07002067 ret = policy->governor->governor(policy, event);
2068
Viresh Kumar4d5dcc42013-03-27 15:58:58 +00002069 if (!ret) {
2070 if (event == CPUFREQ_GOV_POLICY_INIT)
2071 policy->governor->initialized++;
2072 else if (event == CPUFREQ_GOV_POLICY_EXIT)
2073 policy->governor->initialized--;
Xiaoguang Chen95731eb2013-06-19 15:00:07 +08002074 } else {
2075 /* Restore original values */
2076 mutex_lock(&cpufreq_governor_lock);
2077 if (event == CPUFREQ_GOV_STOP)
2078 policy->governor_enabled = true;
2079 else if (event == CPUFREQ_GOV_START)
2080 policy->governor_enabled = false;
2081 mutex_unlock(&cpufreq_governor_lock);
Viresh Kumar4d5dcc42013-03-27 15:58:58 +00002082 }
Viresh Kumarb3940582013-02-01 05:42:58 +00002083
Viresh Kumarfe492f32013-08-06 22:53:10 +05302084 if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
2085 ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002086 module_put(policy->governor->owner);
2087
2088 return ret;
2089}
2090
Linus Torvalds1da177e2005-04-16 15:20:36 -07002091int cpufreq_register_governor(struct cpufreq_governor *governor)
2092{
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -07002093 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094
2095 if (!governor)
2096 return -EINVAL;
2097
Konrad Rzeszutek Wilka7b422c2012-03-13 19:18:39 -04002098 if (cpufreq_disabled())
2099 return -ENODEV;
2100
akpm@osdl.org3fc54d32006-01-13 15:54:22 -08002101 mutex_lock(&cpufreq_governor_mutex);
Dave Jones32ee8c32006-02-28 00:43:23 -05002102
Viresh Kumarb3940582013-02-01 05:42:58 +00002103 governor->initialized = 0;
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -07002104 err = -EBUSY;
Viresh Kumar42f91fa2015-01-02 12:34:26 +05302105 if (!find_governor(governor->name)) {
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -07002106 err = 0;
2107 list_add(&governor->governor_list, &cpufreq_governor_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002109
Dave Jones32ee8c32006-02-28 00:43:23 -05002110 mutex_unlock(&cpufreq_governor_mutex);
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -07002111 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112}
2113EXPORT_SYMBOL_GPL(cpufreq_register_governor);
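/*
 * Illustrative sketch of a minimal governor registration (the "foo_*" names
 * are made up; this is roughly what the performance governor does): the
 * ->governor callback is invoked with the CPUFREQ_GOV_* events used
 * throughout this file.
 *
 *	static int foo_governor(struct cpufreq_policy *policy,
 *				unsigned int event)
 *	{
 *		switch (event) {
 *		case CPUFREQ_GOV_START:
 *		case CPUFREQ_GOV_LIMITS:
 *			return __cpufreq_driver_target(policy, policy->max,
 *						       CPUFREQ_RELATION_H);
 *		default:
 *			return 0;
 *		}
 *	}
 *
 *	static struct cpufreq_governor foo_gov = {
 *		.name		= "foo",
 *		.governor	= foo_governor,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	cpufreq_register_governor(&foo_gov);
 */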
2114
Linus Torvalds1da177e2005-04-16 15:20:36 -07002115void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2116{
Prarit Bhargava90e41ba2009-11-12 09:18:46 -05002117 int cpu;
Prarit Bhargava90e41ba2009-11-12 09:18:46 -05002118
Linus Torvalds1da177e2005-04-16 15:20:36 -07002119 if (!governor)
2120 return;
2121
Konrad Rzeszutek Wilka7b422c2012-03-13 19:18:39 -04002122 if (cpufreq_disabled())
2123 return;
2124
Prarit Bhargava90e41ba2009-11-12 09:18:46 -05002125 for_each_present_cpu(cpu) {
2126 if (cpu_online(cpu))
2127 continue;
2128 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
2129 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
2130 }
Prarit Bhargava90e41ba2009-11-12 09:18:46 -05002131
akpm@osdl.org3fc54d32006-01-13 15:54:22 -08002132 mutex_lock(&cpufreq_governor_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002133 list_del(&governor->governor_list);
akpm@osdl.org3fc54d32006-01-13 15:54:22 -08002134 mutex_unlock(&cpufreq_governor_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002135 return;
2136}
2137EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2138
2139
Linus Torvalds1da177e2005-04-16 15:20:36 -07002140/*********************************************************************
2141 * POLICY INTERFACE *
2142 *********************************************************************/
2143
2144/**
2145 * cpufreq_get_policy - get the current cpufreq_policy
Dave Jones29464f22009-01-18 01:37:11 -05002146 * @policy: struct cpufreq_policy into which the current cpufreq_policy
2147 * is written
Linus Torvalds1da177e2005-04-16 15:20:36 -07002148 *
2149 * Reads the current cpufreq policy.
2150 */
2151int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2152{
2153 struct cpufreq_policy *cpu_policy;
2154 if (!policy)
2155 return -EINVAL;
2156
2157 cpu_policy = cpufreq_cpu_get(cpu);
2158 if (!cpu_policy)
2159 return -EINVAL;
2160
Viresh Kumard5b73cd2013-08-06 22:53:06 +05302161 memcpy(policy, cpu_policy, sizeof(*policy));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002162
2163 cpufreq_cpu_put(cpu_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002164 return 0;
2165}
2166EXPORT_SYMBOL(cpufreq_get_policy);
2167
Arjan van de Ven153d7f32006-07-26 15:40:07 +02002168/*
Viresh Kumar037ce832013-10-02 14:13:16 +05302169 * policy : current policy.
2170 * new_policy: policy to be set.
Arjan van de Ven153d7f32006-07-26 15:40:07 +02002171 */
Viresh Kumar037ce832013-10-02 14:13:16 +05302172static int cpufreq_set_policy(struct cpufreq_policy *policy,
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302173 struct cpufreq_policy *new_policy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002174{
Rafael J. Wysockid9a789c2014-02-17 22:56:35 +01002175 struct cpufreq_governor *old_gov;
2176 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002177
Joe Perchese837f9b2014-03-11 10:03:00 -07002178 pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2179 new_policy->cpu, new_policy->min, new_policy->max);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002180
Viresh Kumard5b73cd2013-08-06 22:53:06 +05302181 memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002182
Rafael J. Wysockid9a789c2014-02-17 22:56:35 +01002183 if (new_policy->min > policy->max || new_policy->max < policy->min)
2184 return -EINVAL;
Mattia Dongili9c9a43e2006-07-05 23:12:20 +02002185
Linus Torvalds1da177e2005-04-16 15:20:36 -07002186 /* verify the cpu speed can be set within this limit */
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302187 ret = cpufreq_driver->verify(new_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002188 if (ret)
Rafael J. Wysockid9a789c2014-02-17 22:56:35 +01002189 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002190
Linus Torvalds1da177e2005-04-16 15:20:36 -07002191 /* adjust if necessary - all reasons */
Alan Sterne041c682006-03-27 01:16:30 -08002192 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302193 CPUFREQ_ADJUST, new_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002194
2195 /* adjust if necessary - hardware incompatibility*/
Alan Sterne041c682006-03-27 01:16:30 -08002196 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302197 CPUFREQ_INCOMPATIBLE, new_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002198
Viresh Kumarbb176f72013-06-19 14:19:33 +05302199 /*
2200 * verify the cpu speed can be set within this limit, which might be
2201 * different to the first one
2202 */
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302203 ret = cpufreq_driver->verify(new_policy);
Alan Sterne041c682006-03-27 01:16:30 -08002204 if (ret)
Rafael J. Wysockid9a789c2014-02-17 22:56:35 +01002205 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002206
2207 /* notification of the new policy */
Alan Sterne041c682006-03-27 01:16:30 -08002208 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302209 CPUFREQ_NOTIFY, new_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002210
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302211 policy->min = new_policy->min;
2212 policy->max = new_policy->max;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002213
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02002214 pr_debug("new min and max freqs are %u - %u kHz\n",
Joe Perchese837f9b2014-03-11 10:03:00 -07002215 policy->min, policy->max);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002216
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02002217 if (cpufreq_driver->setpolicy) {
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302218 policy->policy = new_policy->policy;
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02002219 pr_debug("setting range\n");
Rafael J. Wysockid9a789c2014-02-17 22:56:35 +01002220 return cpufreq_driver->setpolicy(new_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002221 }
2222
Rafael J. Wysockid9a789c2014-02-17 22:56:35 +01002223 if (new_policy->governor == policy->governor)
2224 goto out;
2225
2226 pr_debug("governor switch\n");
2227
2228 /* save old, working values */
2229 old_gov = policy->governor;
2230 /* end old governor */
2231 if (old_gov) {
2232 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
2233 up_write(&policy->rwsem);
Stratos Karafotise5c87b72014-03-19 23:29:17 +02002234 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
Rafael J. Wysockid9a789c2014-02-17 22:56:35 +01002235 down_write(&policy->rwsem);
2236 }
2237
2238 /* start new governor */
2239 policy->governor = new_policy->governor;
2240 if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
2241 if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
2242 goto out;
2243
2244 up_write(&policy->rwsem);
2245 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2246 down_write(&policy->rwsem);
2247 }
2248
2249 /* new governor failed, so re-start old one */
2250 pr_debug("starting governor %s failed\n", policy->governor->name);
2251 if (old_gov) {
2252 policy->governor = old_gov;
2253 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
2254 __cpufreq_governor(policy, CPUFREQ_GOV_START);
2255 }
2256
2257 return -EINVAL;
2258
2259 out:
2260 pr_debug("governor: change or update limits\n");
2261 return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002262}
2263
2264/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002265 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
2266 * @cpu: CPU which shall be re-evaluated
2267 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002268 * Useful for policy notifiers which have different requirements
Linus Torvalds1da177e2005-04-16 15:20:36 -07002269 * at different times.
2270 */
2271int cpufreq_update_policy(unsigned int cpu)
2272{
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302273 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
2274 struct cpufreq_policy new_policy;
Julia Lawallf1829e42008-07-25 22:44:53 +02002275 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002276
Aaron Plattnerfefa8ff2014-06-18 11:27:32 -07002277 if (!policy)
2278 return -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002279
viresh kumarad7722d2013-10-18 19:10:15 +05302280 down_write(&policy->rwsem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002281
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02002282 pr_debug("updating policy for CPU %u\n", cpu);
Viresh Kumard5b73cd2013-08-06 22:53:06 +05302283 memcpy(&new_policy, policy, sizeof(*policy));
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302284 new_policy.min = policy->user_policy.min;
2285 new_policy.max = policy->user_policy.max;
2286 new_policy.policy = policy->user_policy.policy;
2287 new_policy.governor = policy->user_policy.governor;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002288
Viresh Kumarbb176f72013-06-19 14:19:33 +05302289 /*
2290 * BIOS might change freq behind our back
2291 * -> ask driver for current freq and notify governors about a change
2292 */
Rafael J. Wysocki2ed99e32014-03-12 21:49:33 +01002293 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302294 new_policy.cur = cpufreq_driver->get(cpu);
Viresh Kumarbd0fa9b2014-02-25 14:29:44 +05302295 if (WARN_ON(!new_policy.cur)) {
2296 ret = -EIO;
Aaron Plattnerfefa8ff2014-06-18 11:27:32 -07002297 goto unlock;
Viresh Kumarbd0fa9b2014-02-25 14:29:44 +05302298 }
2299
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302300 if (!policy->cur) {
Joe Perchese837f9b2014-03-11 10:03:00 -07002301 pr_debug("Driver did not initialize current freq\n");
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302302 policy->cur = new_policy.cur;
Thomas Renningera85f7bd2006-02-01 11:36:04 +01002303 } else {
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05302304 if (policy->cur != new_policy.cur && has_target())
Viresh Kumara1e1dc42015-01-02 12:34:28 +05302305 cpufreq_out_of_sync(policy, new_policy.cur);
Thomas Renningera85f7bd2006-02-01 11:36:04 +01002306 }
Thomas Renninger0961dd02006-01-26 18:46:33 +01002307 }
2308
Viresh Kumar037ce832013-10-02 14:13:16 +05302309 ret = cpufreq_set_policy(policy, &new_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002310
Aaron Plattnerfefa8ff2014-06-18 11:27:32 -07002311unlock:
viresh kumarad7722d2013-10-18 19:10:15 +05302312 up_write(&policy->rwsem);
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08002313
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302314 cpufreq_cpu_put(policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002315 return ret;
2316}
2317EXPORT_SYMBOL(cpufreq_update_policy);

static int cpufreq_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct device *dev;

	dev = get_cpu_device(cpu);
	if (dev) {
		switch (action & ~CPU_TASKS_FROZEN) {
		case CPU_ONLINE:
			cpufreq_add_dev(dev, NULL);
			break;

		case CPU_DOWN_PREPARE:
			__cpufreq_remove_dev_prepare(dev, NULL);
			break;

		case CPU_POST_DEAD:
			__cpufreq_remove_dev_finish(dev, NULL);
			break;

		case CPU_DOWN_FAILED:
			cpufreq_add_dev(dev, NULL);
			break;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};

/*********************************************************************
 *                              BOOST                                *
 *********************************************************************/
static int cpufreq_boost_set_sw(int state)
{
	struct cpufreq_frequency_table *freq_table;
	struct cpufreq_policy *policy;
	int ret = -EINVAL;

	for_each_policy(policy) {
		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (freq_table) {
			ret = cpufreq_frequency_table_cpuinfo(policy,
							freq_table);
			if (ret) {
				pr_err("%s: Policy frequency update failed\n",
				       __func__);
				break;
			}
			policy->user_policy.max = policy->max;
			__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
		}
	}

	return ret;
}

int cpufreq_boost_trigger_state(int state)
{
	unsigned long flags;
	int ret = 0;

	if (cpufreq_driver->boost_enabled == state)
		return 0;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_driver->set_boost(state);
	if (ret) {
		write_lock_irqsave(&cpufreq_driver_lock, flags);
		cpufreq_driver->boost_enabled = !state;
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		pr_err("%s: Cannot %s BOOST\n",
		       __func__, state ? "enable" : "disable");
	}

	return ret;
}

int cpufreq_boost_supported(void)
{
	if (likely(cpufreq_driver))
		return cpufreq_driver->boost_supported;

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_supported);

int cpufreq_boost_enabled(void)
{
	return cpufreq_driver->boost_enabled;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
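
/*
 * Illustrative sketch (not part of the original file): a driver that can
 * run above its nominal frequency advertises boost support and may supply
 * a hardware-specific set_boost() callback; if it does not, the core falls
 * back to cpufreq_boost_set_sw() above.  All "foo_" identifiers below are
 * hypothetical:
 *
 *	static int foo_set_boost(int state)
 *	{
 *		return foo_write_boost_register(state ? 1 : 0);
 *	}
 *
 *	static struct cpufreq_driver foo_cpufreq_driver = {
 *		.name		 = "foo-cpufreq",
 *		.boost_supported = true,
 *		.set_boost	 = foo_set_boost,
 *	};
 *
 * cpufreq_boost_trigger_state() then routes requests to toggle the global
 * "boost" sysfs file to that callback and rolls boost_enabled back if the
 * callback fails.
 */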

/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/

/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver with this core code. Returns zero on
 * success, -EEXIST when another driver got here first (and isn't
 * unregistered in the meantime).
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    !(driver_data->setpolicy || driver_data->target_index ||
		    driver_data->target) ||
	     (driver_data->setpolicy && (driver_data->target_index ||
		    driver_data->target)) ||
	     (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EEXIST;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	if (cpufreq_boost_supported()) {
		/*
		 * Check if driver provides function to enable boost -
		 * if not, use cpufreq_boost_set_sw as default
		 */
		if (!cpufreq_driver->set_boost)
			cpufreq_driver->set_boost = cpufreq_boost_set_sw;

		ret = cpufreq_sysfs_create_file(&boost.attr);
		if (ret) {
			pr_err("%s: cannot register global BOOST sysfs file\n",
			       __func__);
			goto err_null_driver;
		}
	}

	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_boost_unreg;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
	    list_empty(&cpufreq_policy_list)) {
		/* if all ->init() calls failed, unregister */
		pr_debug("%s: No CPU initialized for driver %s\n", __func__,
			 driver_data->name);
		goto err_if_unreg;
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);

	return 0;
err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
	if (cpufreq_boost_supported())
		cpufreq_sysfs_remove_file(&boost.attr);
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
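
/*
 * Registration sketch (editorial illustration, not part of the original
 * file): a minimal frequency-table based driver built on the generic
 * cpufreq helpers.  Every "foo_"/"FOO_" identifier is hypothetical; the
 * cpufreq_generic_* helpers are assumed to be available as in other
 * in-tree drivers of this vintage:
 *
 *	static int foo_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		return cpufreq_generic_init(policy, foo_freq_table,
 *					    FOO_TRANSITION_LATENCY);
 *	}
 *
 *	static struct cpufreq_driver foo_cpufreq_driver = {
 *		.name		= "foo-cpufreq",
 *		.flags		= CPUFREQ_NEED_INITIAL_FREQ_CHECK,
 *		.verify		= cpufreq_generic_frequency_table_verify,
 *		.target_index	= foo_cpufreq_target_index,
 *		.get		= cpufreq_generic_get,
 *		.init		= foo_cpufreq_init,
 *		.attr		= cpufreq_generic_attr,
 *	};
 *
 *	static int __init foo_cpufreq_register(void)
 *	{
 *		return cpufreq_register_driver(&foo_cpufreq_driver);
 *	}
 *	module_init(foo_cpufreq_register);
 *
 * The ->verify/->init/->target_index trio satisfies the validity checks
 * at the top of cpufreq_register_driver() above.
 */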

/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * registered the driver, i.e. if cpufreq_register_driver() succeeded.
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	subsys_interface_unregister(&cpufreq_interface);
	if (cpufreq_boost_supported())
		cpufreq_sysfs_remove_file(&boost.attr);

	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	down_write(&cpufreq_rwsem);
	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpufreq_driver = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	up_write(&cpufreq_rwsem);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
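
/*
 * Matching teardown sketch (editorial illustration) for the hypothetical
 * foo_cpufreq driver from the registration example above:
 *
 *	static void __exit foo_cpufreq_unregister(void)
 *	{
 *		cpufreq_unregister_driver(&foo_cpufreq_driver);
 *	}
 *	module_exit(foo_cpufreq_unregister);
 */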

/*
 * Stop cpufreq at shutdown to make sure it isn't holding any locks
 * or mutexes when secondary CPUs are halted.
 */
static struct syscore_ops cpufreq_syscore_ops = {
	.shutdown = cpufreq_suspend,
};

static int __init cpufreq_core_init(void)
{
	if (cpufreq_disabled())
		return -ENODEV;

	cpufreq_global_kobject = kobject_create();
	BUG_ON(!cpufreq_global_kobject);

	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
core_initcall(cpufreq_core_init);