/*
 * linux/drivers/cpufreq/cpufreq.c
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *           (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *	Added handling for CPU hotplug
 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *	Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <trace/events/power.h>

/* Macros to iterate over lists */
/* Iterate over online CPUs policies */
static LIST_HEAD(cpufreq_policy_list);
#define for_each_policy(__policy)				\
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)

/* Iterate over governors */
static LIST_HEAD(cpufreq_governor_list);
#define for_each_governor(__governor)				\
	list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)

/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
static DEFINE_RWLOCK(cpufreq_driver_lock);
DEFINE_MUTEX(cpufreq_governor_lock);

/* This one keeps track of the previously set governor of a removed CPU */
static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);

/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;

static inline bool has_target(void)
{
	return cpufreq_driver->target_index || cpufreq_driver->target;
}

/*
 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
 * sections
 */
static DECLARE_RWSEM(cpufreq_rwsem);

/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
		unsigned int event);
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static void handle_update(struct work_struct *work);

/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);

static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
void disable_cpufreq(void)
{
	off = 1;
}
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
	return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
	if (have_governor_per_policy())
		return &policy->kobj;
	else
		return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
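
/*
 * Illustrative sketch (not part of this file): a governor that exposes its
 * own sysfs tunables would typically anchor them on the kobject returned by
 * get_governor_parent_kobj(), so they land per-policy or globally depending
 * on CPUFREQ_HAVE_GOVERNOR_PER_POLICY. The helper and group names below are
 * hypothetical.
 *
 *	static int example_gov_add_attrs(struct cpufreq_policy *policy,
 *					 const struct attribute_group *grp)
 *	{
 *		return sysfs_create_group(get_governor_parent_kobj(policy), grp);
 *	}
 */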

static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = cputime_to_usecs(cur_wall_time);

	return cputime_to_usecs(idle_time);
}

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);
	else if (!io_busy)
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
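
/*
 * Illustrative sketch (not part of this file): governors such as ondemand
 * derive a load estimate by sampling get_cpu_idle_time() twice and comparing
 * the deltas. The helper below is hypothetical and only shows the arithmetic.
 *
 *	static unsigned int example_cpu_load(unsigned int cpu,
 *					     u64 *prev_idle, u64 *prev_wall)
 *	{
 *		u64 wall, idle = get_cpu_idle_time(cpu, &wall, 0);
 *		u64 d_idle = idle - *prev_idle, d_wall = wall - *prev_wall;
 *
 *		*prev_idle = idle;
 *		*prev_wall = wall;
 *		return d_wall ? div64_u64(100 * (d_wall - d_idle), d_wall) : 0;
 *	}
 */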

/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It will do the following:
 * - validate & show the frequency table passed in
 * - set the policy's transition latency
 * - fill policy->cpus with all possible CPUs
 */
int cpufreq_generic_init(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table,
		unsigned int transition_latency)
{
	int ret;

	ret = cpufreq_table_validate_and_show(policy, table);
	if (ret) {
		pr_err("%s: invalid frequency table: %d\n", __func__, ret);
		return ret;
	}

	policy->cpuinfo.transition_latency = transition_latency;

	/*
	 * The driver only supports the SMP configuration where all processors
	 * share the same clock and voltage.
	 */
	cpumask_setall(policy->cpus);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
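
/*
 * Illustrative sketch (not part of this file): a minimal platform driver's
 * ->init() callback can reduce to a single call into this helper, here with
 * an assumed 300 us transition latency. The table and function names are
 * hypothetical.
 *
 *	static int example_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		return cpufreq_generic_init(policy, example_freq_table, 300000);
 *	}
 */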

unsigned int cpufreq_generic_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	if (!policy || IS_ERR(policy->clk)) {
		pr_err("%s: No %s associated to cpu: %d\n",
		       __func__, policy ? "clk" : "policy", cpu);
		return 0;
	}

	return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);

/* Only for cpufreq core internal use */
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
	return per_cpu(cpufreq_cpu_data, cpu);
}

struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = NULL;
	unsigned long flags;

	if (cpu >= nr_cpu_ids)
		return NULL;

	if (!down_read_trylock(&cpufreq_rwsem))
		return NULL;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver) {
		/* get the CPU */
		policy = per_cpu(cpufreq_cpu_data, cpu);
		if (policy)
			kobject_get(&policy->kobj);
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!policy)
		up_read(&cpufreq_rwsem);

	return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
	kobject_put(&policy->kobj);
	up_read(&cpufreq_rwsem);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
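
/*
 * Illustrative sketch (not part of this file): users of cpufreq_cpu_get()
 * must pair it with cpufreq_cpu_put(), since _get() takes a reference on the
 * policy kobject and keeps cpufreq_rwsem read-locked until the matching put.
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *	if (policy) {
 *		pr_info("cpu%u: current frequency %u kHz\n", cpu, policy->cur);
 *		cpufreq_cpu_put(policy);
 *	}
 */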

/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
#ifndef CONFIG_SMP
	static unsigned long l_p_j_ref;
	static unsigned int l_p_j_ref_freq;

	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
			 l_p_j_ref, l_p_j_ref_freq);
	}
	if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
			 loops_per_jiffy, ci->new);
	}
#endif
}

static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		 state, freqs->new);

	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
					 freqs->old, policy->cur);
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %lu - CPU: %lu\n",
			 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}

/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	for_each_cpu(freqs->cpu, policy->cpus)
		__cpufreq_notify_transition(policy, freqs, state);
}

/* Do post notifications when there are chances that transition has failed */
static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
	if (!transition_failed)
		return;

	swap(freqs->old, freqs->new);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
}

void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs)
{

	/*
	 * Catch double invocations of _begin() which lead to self-deadlock.
	 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
	 * doesn't invoke _begin() on their behalf, and hence the chances of
	 * double invocations are very low. Moreover, there are scenarios
	 * where these checks can emit false-positive warnings in these
	 * drivers; so we avoid that by skipping them altogether.
	 */
	WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
				&& current == policy->transition_task);

wait:
	wait_event(policy->transition_wait, !policy->transition_ongoing);

	spin_lock(&policy->transition_lock);

	if (unlikely(policy->transition_ongoing)) {
		spin_unlock(&policy->transition_lock);
		goto wait;
	}

	policy->transition_ongoing = true;
	policy->transition_task = current;

	spin_unlock(&policy->transition_lock);

	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);

void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	if (unlikely(WARN_ON(!policy->transition_ongoing)))
		return;

	cpufreq_notify_post_transition(policy, freqs, transition_failed);

	policy->transition_ongoing = false;
	policy->transition_task = NULL;

	wake_up(&policy->transition_wait);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
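
/*
 * Illustrative sketch (not part of this file): code that performs a frequency
 * switch outside the core's ->target_index() path (for example a driver with
 * CPUFREQ_ASYNC_NOTIFICATION that notifies on its own) brackets the actual
 * hardware programming with the two helpers above. The register-write helper
 * below is hypothetical.
 *
 *	static int example_switch_freq(struct cpufreq_policy *policy,
 *				       unsigned int new_freq)
 *	{
 *		struct cpufreq_freqs freqs = {
 *			.old = policy->cur,
 *			.new = new_freq,
 *		};
 *		int ret;
 *
 *		cpufreq_freq_transition_begin(policy, &freqs);
 *		ret = example_write_freq_register(new_freq);
 *		cpufreq_freq_transition_end(policy, &freqs, ret);
 *
 *		return ret;
 *	}
 */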


/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	int ret, enable;

	ret = sscanf(buf, "%d", &enable);
	if (ret != 1 || enable < 0 || enable > 1)
		return -EINVAL;

	if (cpufreq_boost_trigger_state(enable)) {
		pr_err("%s: Cannot %s BOOST!\n",
		       __func__, enable ? "enable" : "disable");
		return -EINVAL;
	}

	pr_debug("%s: cpufreq BOOST %s\n",
		 __func__, enable ? "enabled" : "disabled");

	return count;
}
define_one_global_rw(boost);

static struct cpufreq_governor *find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	for_each_governor(t)
		if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}

/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (!cpufreq_driver)
		goto out;

	if (cpufreq_driver->setpolicy) {
		if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strncasecmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = find_governor(str_governor);

		if (t == NULL) {
			int ret;

			mutex_unlock(&cpufreq_governor_mutex);
			ret = request_module("cpufreq_%s", str_governor);
			mutex_lock(&cpufreq_governor_mutex);

			if (ret == 0)
				t = find_governor(str_governor);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}

/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);

static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
	ssize_t ret;

	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
		ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
	else
		ret = sprintf(buf, "%u\n", policy->cur);
	return ret;
}

static int cpufreq_set_policy(struct cpufreq_policy *policy,
				struct cpufreq_policy *new_policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)					\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	int ret, temp;							\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	temp = new_policy.object;					\
	ret = cpufreq_set_policy(policy, &new_policy);			\
	if (!ret)							\
		policy->user_policy.object = temp;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
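
/*
 * Illustrative sketch (not part of this file): for scaling_max_freq the
 * store_one() macro above expands to roughly the handler below, which copies
 * the current policy, overwrites the "max" field with the value parsed from
 * sysfs and asks the core to apply it via cpufreq_set_policy().
 *
 *	static ssize_t store_scaling_max_freq(struct cpufreq_policy *policy,
 *					      const char *buf, size_t count)
 *	{
 *		struct cpufreq_policy new_policy;
 *		int ret, temp;
 *
 *		ret = cpufreq_get_policy(&new_policy, policy->cpu);
 *		if (ret)
 *			return -EINVAL;
 *		if (sscanf(buf, "%u", &new_policy.max) != 1)
 *			return -EINVAL;
 *		temp = new_policy.max;
 *		ret = cpufreq_set_policy(policy, &new_policy);
 *		if (!ret)
 *			policy->user_policy.max = temp;
 *		return ret ? ret : count;
 *	}
 */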

/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
					char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy);
	if (!cur_freq)
		return sprintf(buf, "<unknown>");
	return sprintf(buf, "%u\n", cur_freq);
}

/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
				policy->governor->name);
	return -EINVAL;
}

/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	int ret;
	char str_governor[16];
	struct cpufreq_policy new_policy;

	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return ret;

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	ret = cpufreq_set_policy(policy, &new_policy);

	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret)
		return ret;
	else
		return count;
}

/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!has_target()) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	for_each_governor(t) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}

ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);

/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->cpus, buf);
}

static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}

/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
	unsigned int limit;
	int ret;
	if (cpufreq_driver->bios_limit) {
		ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
		if (!ret)
			return sprintf(buf, "%u\n", limit);
	}
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}

cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret;

	if (!down_read_trylock(&cpufreq_rwsem))
		return -EINVAL;

	down_read(&policy->rwsem);

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	up_read(&policy->rwsem);
	up_read(&cpufreq_rwsem);

	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;

	get_online_cpus();

	if (!cpu_online(policy->cpu))
		goto unlock;

	if (!down_read_trylock(&cpufreq_rwsem))
		goto unlock;

	down_write(&policy->rwsem);

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

	up_write(&policy->rwsem);

	up_read(&cpufreq_rwsem);
unlock:
	put_online_cpus();

	return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};

struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

static int cpufreq_global_kobject_usage;

int cpufreq_get_global_kobject(void)
{
	if (!cpufreq_global_kobject_usage++)
		return kobject_add(cpufreq_global_kobject,
				&cpu_subsys.dev_root->kobj, "%s", "cpufreq");

	return 0;
}
EXPORT_SYMBOL(cpufreq_get_global_kobject);

void cpufreq_put_global_kobject(void)
{
	if (!--cpufreq_global_kobject_usage)
		kobject_del(cpufreq_global_kobject);
}
EXPORT_SYMBOL(cpufreq_put_global_kobject);

int cpufreq_sysfs_create_file(const struct attribute *attr)
{
	int ret = cpufreq_get_global_kobject();

	if (!ret) {
		ret = sysfs_create_file(cpufreq_global_kobject, attr);
		if (ret)
			cpufreq_put_global_kobject();
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_sysfs_create_file);

void cpufreq_sysfs_remove_file(const struct attribute *attr)
{
	sysfs_remove_file(cpufreq_global_kobject, attr);
	cpufreq_put_global_kobject();
}
EXPORT_SYMBOL(cpufreq_sysfs_remove_file);

/* symlink affected CPUs */
static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	for_each_cpu(j, policy->cpus) {
		struct device *cpu_dev;

		if (j == policy->cpu)
			continue;

		pr_debug("Adding link for CPU: %u\n", j);
		cpu_dev = get_cpu_device(j);
		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					"cpufreq");
		if (ret)
			break;
	}
	return ret;
}

static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
				     struct device *dev)
{
	struct freq_attr **drv_attr;
	int ret = 0;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while (drv_attr && *drv_attr) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			return ret;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			return ret;
	}

	ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
	if (ret)
		return ret;

	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			return ret;
	}

	return cpufreq_add_dev_symlink(policy);
}

static void cpufreq_init_policy(struct cpufreq_policy *policy)
{
	struct cpufreq_governor *gov = NULL;
	struct cpufreq_policy new_policy;
	int ret = 0;

	memcpy(&new_policy, policy, sizeof(*policy));

	/* Update governor of new_policy to the governor used before hotplug */
	gov = find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
	if (gov)
		pr_debug("Restoring governor %s for cpu %d\n",
				policy->governor->name, policy->cpu);
	else
		gov = CPUFREQ_DEFAULT_GOVERNOR;

	new_policy.governor = gov;

	/* Use the default policy if its valid. */
	if (cpufreq_driver->setpolicy)
		cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);

	/* set default policy */
	ret = cpufreq_set_policy(policy, &new_policy);
	if (ret) {
		pr_debug("setting policy failed\n");
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);
	}
}

static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
				  unsigned int cpu, struct device *dev)
{
	int ret = 0;
	unsigned long flags;

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			return ret;
		}
	}

	down_write(&policy->rwsem);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpumask_set_cpu(cpu, policy->cpus);
	per_cpu(cpufreq_cpu_data, cpu) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	up_write(&policy->rwsem);

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
		if (!ret)
			ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

		if (ret) {
			pr_err("%s: Failed to start governor\n", __func__);
			return ret;
		}
	}

	return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
}

static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned long flags;

	read_lock_irqsave(&cpufreq_driver_lock, flags);

	policy = per_cpu(cpufreq_cpu_data_fallback, cpu);

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (policy)
		policy->governor = NULL;

	return policy;
}

static struct cpufreq_policy *cpufreq_policy_alloc(void)
{
	struct cpufreq_policy *policy;

	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
	if (!policy)
		return NULL;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	INIT_LIST_HEAD(&policy->policy_list);
	init_rwsem(&policy->rwsem);
	spin_lock_init(&policy->transition_lock);
	init_waitqueue_head(&policy->transition_wait);
	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	return policy;

err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);

	return NULL;
}

static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
{
	struct kobject *kobj;
	struct completion *cmp;

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_REMOVE_POLICY, policy);

	down_read(&policy->rwsem);
	kobj = &policy->kobj;
	cmp = &policy->kobj_unregister;
	up_read(&policy->rwsem);
	kobject_put(kobj);

	/*
	 * We need to make sure that the underlying kobj is
	 * actually not referenced anymore by anybody before we
	 * proceed with unloading.
	 */
	pr_debug("waiting for dropping of refcount\n");
	wait_for_completion(cmp);
	pr_debug("wait complete\n");
}

static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
	free_cpumask_var(policy->related_cpus);
	free_cpumask_var(policy->cpus);
	kfree(policy);
}

static int update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu,
			     struct device *cpu_dev)
{
	int ret;

	if (WARN_ON(cpu == policy->cpu))
		return 0;

	/* Move kobject to the new policy->cpu */
	ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
	if (ret) {
		pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
		return ret;
	}

	down_write(&policy->rwsem);
	policy->cpu = cpu;
	up_write(&policy->rwsem);

	return 0;
}

static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int j, cpu = dev->id;
	int ret = -ENOMEM;
	struct cpufreq_policy *policy;
	unsigned long flags;
	bool recover_policy = cpufreq_suspended;

	if (cpu_is_offline(cpu))
		return 0;

	pr_debug("adding CPU %u\n", cpu);

	/* check whether a different CPU already registered this
	 * CPU because it is in the same boat. */
	policy = cpufreq_cpu_get_raw(cpu);
	if (unlikely(policy))
		return 0;

	if (!down_read_trylock(&cpufreq_rwsem))
		return 0;

	/* Check if this cpu was hot-unplugged earlier and has siblings */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_policy(policy) {
		if (cpumask_test_cpu(cpu, policy->related_cpus)) {
			read_unlock_irqrestore(&cpufreq_driver_lock, flags);
			ret = cpufreq_add_policy_cpu(policy, cpu, dev);
			up_read(&cpufreq_rwsem);
			return ret;
		}
	}
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/*
	 * Restore the saved policy when doing light-weight init and fall back
	 * to the full init if that fails.
	 */
	policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
	if (!policy) {
		recover_policy = false;
		policy = cpufreq_policy_alloc();
		if (!policy)
			goto nomem_out;
	}

	/*
	 * In the resume path, since we restore a saved policy, the assignment
	 * to policy->cpu is like an update of the existing policy, rather than
	 * the creation of a brand new one. So we need to perform this update
	 * by invoking update_policy_cpu().
	 */
	if (recover_policy && cpu != policy->cpu)
		WARN_ON(update_policy_cpu(policy, cpu, dev));
	else
		policy->cpu = cpu;

	cpumask_copy(policy->cpus, cpumask_of(cpu));

	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		pr_debug("initialization failed\n");
		goto err_set_policy_cpu;
	}

	down_write(&policy->rwsem);

	/* related cpus should at least contain policy->cpus */
	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);

	/*
	 * affected cpus must only contain online CPUs; we aren't
	 * managing offline cpus here.
	 */
1178 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1179
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301180 if (!recover_policy) {
Viresh Kumar5a7e56a2014-03-04 11:44:00 +08001181 policy->user_policy.min = policy->min;
1182 policy->user_policy.max = policy->max;
Tomeu Vizoso6d4e81e2014-11-24 10:08:03 +01001183
1184 /* prepare interface data */
1185 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
1186 &dev->kobj, "cpufreq");
1187 if (ret) {
1188 pr_err("%s: failed to init policy->kobj: %d\n",
1189 __func__, ret);
1190 goto err_init_policy_kobj;
1191 }
Viresh Kumar5a7e56a2014-03-04 11:44:00 +08001192 }
1193
Viresh Kumar652ed952014-01-09 20:38:43 +05301194 write_lock_irqsave(&cpufreq_driver_lock, flags);
1195 for_each_cpu(j, policy->cpus)
1196 per_cpu(cpufreq_cpu_data, j) = policy;
1197 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1198
Rafael J. Wysocki2ed99e32014-03-12 21:49:33 +01001199 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
Viresh Kumarda60ce92013-10-03 20:28:30 +05301200 policy->cur = cpufreq_driver->get(policy->cpu);
1201 if (!policy->cur) {
1202 pr_err("%s: ->get() failed\n", __func__);
1203 goto err_get_freq;
1204 }
1205 }
1206
Viresh Kumard3916692013-12-03 11:20:46 +05301207 /*
1208 * Sometimes boot loaders set the CPU frequency to a value outside of
1209 * the frequency table known to the cpufreq core. In such cases the CPU
1210 * might be unstable if it has to run at that frequency for a long
1211 * time, so it's better to set it to a frequency which is specified in
1212 * the freq-table. This also keeps cpufreq stats consistent, as
1213 * cpufreq-stats would otherwise fail to register because the current
1214 * frequency of the CPU isn't found in the freq-table.
1215 *
1216 * Because we don't want this change to affect the boot process badly,
1217 * we go for the next freq which is >= policy->cur ('cur' must be set
1218 * by now, otherwise we will end up setting freq to the lowest entry of
1219 * the table, as 'cur' is initialized to zero).
1220 *
1221 * We pass the target freq as "policy->cur - 1", otherwise
1222 * __cpufreq_driver_target() would simply return early, as policy->cur
1223 * would be equal to the target freq.
1224 */
1225 if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1226 && has_target()) {
1227 /* Are we running at an unknown frequency? */
1228 ret = cpufreq_frequency_table_get_index(policy, policy->cur);
1229 if (ret == -EINVAL) {
1230 /* Warn user and fix it */
1231 pr_warn("%s: CPU%d: Running at unlisted freq: %u kHz\n",
1232 __func__, policy->cpu, policy->cur);
1233 ret = __cpufreq_driver_target(policy, policy->cur - 1,
1234 CPUFREQ_RELATION_L);
1235
1236 /*
1237 * Reaching here a few seconds after boot does not
1238 * mean that the system will remain stable at this "unknown"
1239 * frequency for a longer duration. Hence, a BUG_ON().
1240 */
1241 BUG_ON(ret);
1242 pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u kHz\n",
1243 __func__, policy->cpu, policy->cur);
1244 }
1245 }
1246
Thomas Renningera1531ac2008-07-29 22:32:58 -07001247 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1248 CPUFREQ_START, policy);
1249
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301250 if (!recover_policy) {
Viresh Kumar308b60e2013-07-31 14:35:14 +02001251 ret = cpufreq_add_dev_interface(policy, dev);
Srivatsa S. Bhata82fab22013-07-30 04:24:49 +05301252 if (ret)
1253 goto err_out_unregister;
Viresh Kumarfcd7af92014-01-07 07:10:10 +05301254 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1255 CPUFREQ_CREATE_POLICY, policy);
Srivatsa S. Bhata82fab22013-07-30 04:24:49 +05301256 }
Dave Jones8ff69732006-03-05 03:37:23 -05001257
Viresh Kumar9515f4d2013-08-20 12:08:23 +05301258 write_lock_irqsave(&cpufreq_driver_lock, flags);
1259 list_add(&policy->policy_list, &cpufreq_policy_list);
1260 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1261
Srivatsa S. Bhate18f1682013-07-30 04:24:23 +05301262 cpufreq_init_policy(policy);
1263
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301264 if (!recover_policy) {
Viresh Kumar08fd8c1c2013-12-24 07:11:01 +05301265 policy->user_policy.policy = policy->policy;
1266 policy->user_policy.governor = policy->governor;
1267 }
Viresh Kumar4e97b632014-03-04 11:44:01 +08001268 up_write(&policy->rwsem);
Viresh Kumar08fd8c1c2013-12-24 07:11:01 +05301269
Greg Kroah-Hartman038c5b32007-12-17 15:54:39 -04001270 kobject_uevent(&policy->kobj, KOBJ_ADD);
Viresh Kumar7c45cf32014-11-27 06:07:51 +05301271
Viresh Kumar6eed9402013-08-06 22:53:11 +05301272 up_read(&cpufreq_rwsem);
1273
Viresh Kumar7c45cf32014-11-27 06:07:51 +05301274 /* Callback for any driver-specific setup once the policy is ready */
1275 if (cpufreq_driver->ready)
1276 cpufreq_driver->ready(policy);
1277
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02001278 pr_debug("initialization complete\n");
Dave Jones87c32272006-03-29 01:48:37 -05001279
Linus Torvalds1da177e2005-04-16 15:20:36 -07001280 return 0;
1281
Linus Torvalds1da177e2005-04-16 15:20:36 -07001282err_out_unregister:
Viresh Kumar652ed952014-01-09 20:38:43 +05301283err_get_freq:
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00001284 write_lock_irqsave(&cpufreq_driver_lock, flags);
Viresh Kumar474deff2013-08-20 12:08:25 +05301285 for_each_cpu(j, policy->cpus)
Mike Travis7a6aedf2008-03-25 15:06:53 -07001286 per_cpu(cpufreq_cpu_data, j) = NULL;
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00001287 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001288
Tomeu Vizoso6d4e81e2014-11-24 10:08:03 +01001289 if (!recover_policy) {
1290 kobject_put(&policy->kobj);
1291 wait_for_completion(&policy->kobj_unregister);
1292 }
1293err_init_policy_kobj:
Prarit Bhargava7106e022014-09-10 10:12:08 -04001294 up_write(&policy->rwsem);
1295
Viresh Kumarda60ce92013-10-03 20:28:30 +05301296 if (cpufreq_driver->exit)
1297 cpufreq_driver->exit(policy);
Viresh Kumar2eaa3e22013-02-07 10:55:00 +05301298err_set_policy_cpu:
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301299 if (recover_policy) {
Rafael J. Wysocki72368d12013-12-27 01:07:11 +01001300 /* Do not leave stale fallback data behind. */
1301 per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
Viresh Kumar42f921a2013-12-20 21:26:02 +05301302 cpufreq_policy_put_kobj(policy);
Rafael J. Wysocki72368d12013-12-27 01:07:11 +01001303 }
Srivatsa S. Bhate9698cc2013-07-30 04:24:11 +05301304 cpufreq_policy_free(policy);
Viresh Kumar42f921a2013-12-20 21:26:02 +05301305
Linus Torvalds1da177e2005-04-16 15:20:36 -07001306nomem_out:
Viresh Kumar6eed9402013-08-06 22:53:11 +05301307 up_read(&cpufreq_rwsem);
1308
Linus Torvalds1da177e2005-04-16 15:20:36 -07001309 return ret;
1310}
1311
Srivatsa S. Bhata82fab22013-07-30 04:24:49 +05301312/**
1313 * cpufreq_add_dev - add a CPU device
1314 *
1315 * Adds the cpufreq interface for a CPU device.
1316 *
1317 * The Oracle says: try running cpufreq registration/unregistration concurrently
1318 * with CPU hotplugging and all hell will break loose. Tried to clean this
1319 * mess up, but more thorough testing is needed. - Mathieu
1320 */
1321static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1322{
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301323 return __cpufreq_add_dev(dev, sif);
Srivatsa S. Bhata82fab22013-07-30 04:24:49 +05301324}
1325
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301326static int __cpufreq_remove_dev_prepare(struct device *dev,
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301327 struct subsys_interface *sif)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001328{
Srivatsa S. Bhatf9ba6802013-07-30 04:24:36 +05301329 unsigned int cpu = dev->id, cpus;
Viresh Kumar1bfb4252014-07-17 10:48:28 +05301330 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001331 unsigned long flags;
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301332 struct cpufreq_policy *policy;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001333
Viresh Kumarb8eed8a2013-01-14 13:23:03 +00001334 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001335
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00001336 write_lock_irqsave(&cpufreq_driver_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001337
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301338 policy = per_cpu(cpufreq_cpu_data, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001339
Srivatsa S. Bhat84148092013-07-30 04:25:10 +05301340 /* Save the policy somewhere when doing a light-weight tear-down */
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301341 if (cpufreq_suspended)
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301342 per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
Srivatsa S. Bhat84148092013-07-30 04:25:10 +05301343
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00001344 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001345
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301346 if (!policy) {
Viresh Kumarb8eed8a2013-01-14 13:23:03 +00001347 pr_debug("%s: No cpu_data found\n", __func__);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001348 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001349 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001350
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05301351 if (has_target()) {
Viresh Kumar3de9bde2013-08-06 22:53:13 +05301352 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1353 if (ret) {
1354 pr_err("%s: Failed to stop governor\n", __func__);
1355 return ret;
1356 }
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001357
Dirk Brandewiefa69e332013-02-06 09:02:11 -08001358 strncpy(per_cpu(cpufreq_cpu_governor, cpu),
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301359 policy->governor->name, CPUFREQ_NAME_LEN);
Viresh Kumardb5f2992015-01-02 12:34:25 +05301360 }
Jacob Shin27ecddc2011-04-27 13:32:11 -05001361
viresh kumarad7722d2013-10-18 19:10:15 +05301362 down_read(&policy->rwsem);
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301363 cpus = cpumask_weight(policy->cpus);
viresh kumarad7722d2013-10-18 19:10:15 +05301364 up_read(&policy->rwsem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001365
Srivatsa S. Bhat61173f22013-09-12 01:43:25 +05301366 if (cpu != policy->cpu) {
viresh kumar6964d912014-02-17 14:52:11 +05301367 sysfs_remove_link(&dev->kobj, "cpufreq");
Viresh Kumar73bf0fc2013-02-05 22:21:14 +01001368 } else if (cpus > 1) {
Viresh Kumar1bfb4252014-07-17 10:48:28 +05301369 /* Nominate new CPU */
1370 int new_cpu = cpumask_any_but(policy->cpus, cpu);
1371 struct device *cpu_dev = get_cpu_device(new_cpu);
Srivatsa S. Bhata82fab22013-07-30 04:24:49 +05301372
Viresh Kumar1bfb4252014-07-17 10:48:28 +05301373 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1374 ret = update_policy_cpu(policy, new_cpu, cpu_dev);
1375 if (ret) {
1376 if (sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
1377 "cpufreq"))
1378 pr_err("%s: Failed to restore kobj link to cpu:%d\n",
1379 __func__, cpu_dev->id);
1380 return ret;
Viresh Kumarb8eed8a2013-01-14 13:23:03 +00001381 }
Viresh Kumar1bfb4252014-07-17 10:48:28 +05301382
1383 if (!cpufreq_suspended)
1384 pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
1385 __func__, new_cpu, cpu);
Preeti U Murthy789ca242014-09-29 15:47:12 +02001386 } else if (cpufreq_driver->stop_cpu) {
Dirk Brandewie367dc4a2014-03-19 08:45:53 -07001387 cpufreq_driver->stop_cpu(policy);
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001388 }
Venki Pallipadiec282972007-03-26 12:03:19 -07001389
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301390 return 0;
1391}
1392
1393static int __cpufreq_remove_dev_finish(struct device *dev,
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301394 struct subsys_interface *sif)
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301395{
1396 unsigned int cpu = dev->id, cpus;
1397 int ret;
1398 unsigned long flags;
1399 struct cpufreq_policy *policy;
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301400
Viresh Kumar6ffae8c2015-01-31 06:02:44 +05301401 write_lock_irqsave(&cpufreq_driver_lock, flags);
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301402 policy = per_cpu(cpufreq_cpu_data, cpu);
Viresh Kumar6ffae8c2015-01-31 06:02:44 +05301403 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1404 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301405
1406 if (!policy) {
1407 pr_debug("%s: No cpu_data found\n", __func__);
1408 return -EINVAL;
1409 }
1410
viresh kumarad7722d2013-10-18 19:10:15 +05301411 down_write(&policy->rwsem);
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301412 cpus = cpumask_weight(policy->cpus);
Viresh Kumar9c8f1ee2013-09-12 17:06:33 +05301413
1414 if (cpus > 1)
1415 cpumask_clear_cpu(cpu, policy->cpus);
viresh kumarad7722d2013-10-18 19:10:15 +05301416 up_write(&policy->rwsem);
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301417
Viresh Kumarb8eed8a2013-01-14 13:23:03 +00001418 /* If cpu is last user of policy, free policy */
1419 if (cpus == 1) {
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05301420 if (has_target()) {
Viresh Kumar3de9bde2013-08-06 22:53:13 +05301421 ret = __cpufreq_governor(policy,
1422 CPUFREQ_GOV_POLICY_EXIT);
1423 if (ret) {
1424 pr_err("%s: Failed to exit governor\n",
Joe Perchese837f9b2014-03-11 10:03:00 -07001425 __func__);
Viresh Kumar3de9bde2013-08-06 22:53:13 +05301426 return ret;
1427 }
Viresh Kumaredab2fb2013-08-20 12:08:22 +05301428 }
Rafael J. Wysocki2a998592013-07-30 00:32:00 +02001429
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301430 if (!cpufreq_suspended)
Viresh Kumar42f921a2013-12-20 21:26:02 +05301431 cpufreq_policy_put_kobj(policy);
Srivatsa S. Bhat84148092013-07-30 04:25:10 +05301432
1433 /*
1434 * Perform the ->exit() even during light-weight tear-down,
1435 * since this is a core component, and is essential for the
1436 * subsequent light-weight ->init() to succeed.
Viresh Kumarb8eed8a2013-01-14 13:23:03 +00001437 */
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001438 if (cpufreq_driver->exit)
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301439 cpufreq_driver->exit(policy);
Viresh Kumarb8eed8a2013-01-14 13:23:03 +00001440
Viresh Kumar9515f4d2013-08-20 12:08:23 +05301441 /* Remove policy from list of active policies */
1442 write_lock_irqsave(&cpufreq_driver_lock, flags);
1443 list_del(&policy->policy_list);
1444 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1445
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301446 if (!cpufreq_suspended)
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301447 cpufreq_policy_free(policy);
Stratos Karafotise5c87b72014-03-19 23:29:17 +02001448 } else if (has_target()) {
1449 ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
1450 if (!ret)
1451 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1452
1453 if (ret) {
1454 pr_err("%s: Failed to start governor\n", __func__);
1455 return ret;
Rafael J. Wysocki2a998592013-07-30 00:32:00 +02001456 }
Viresh Kumarb8eed8a2013-01-14 13:23:03 +00001457 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001458
Linus Torvalds1da177e2005-04-16 15:20:36 -07001459 return 0;
1460}
1461
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301462/**
Viresh Kumar27a862e2013-10-02 14:13:14 +05301463 * cpufreq_remove_dev - remove a CPU device
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301464 *
1465 * Removes the cpufreq interface for a CPU device.
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301466 */
Kay Sievers8a25a2f2011-12-21 14:29:42 -08001467static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001468{
Kay Sievers8a25a2f2011-12-21 14:29:42 -08001469 unsigned int cpu = dev->id;
Viresh Kumar27a862e2013-10-02 14:13:14 +05301470 int ret;
Venki Pallipadiec282972007-03-26 12:03:19 -07001471
1472 if (cpu_is_offline(cpu))
1473 return 0;
1474
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301475 ret = __cpufreq_remove_dev_prepare(dev, sif);
Viresh Kumar27a862e2013-10-02 14:13:14 +05301476
1477 if (!ret)
Viresh Kumar96bbbe42014-03-10 14:53:35 +05301478 ret = __cpufreq_remove_dev_finish(dev, sif);
Viresh Kumar27a862e2013-10-02 14:13:14 +05301479
1480 return ret;
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001481}
1482
David Howells65f27f32006-11-22 14:55:48 +00001483static void handle_update(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001484{
David Howells65f27f32006-11-22 14:55:48 +00001485 struct cpufreq_policy *policy =
1486 container_of(work, struct cpufreq_policy, update);
1487 unsigned int cpu = policy->cpu;
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02001488 pr_debug("handle_update for cpu %u called\n", cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001489 cpufreq_update_policy(cpu);
1490}
1491
1492/**
Viresh Kumarbb176f72013-06-19 14:19:33 +05301493 * cpufreq_out_of_sync - If the actual and saved CPU frequencies differ, we're
1494 * in deep trouble.
Viresh Kumara1e1dc42015-01-02 12:34:28 +05301495 * @policy: policy managing CPUs
Linus Torvalds1da177e2005-04-16 15:20:36 -07001496 * @new_freq: CPU frequency the CPU actually runs at
1497 *
Dave Jones29464f22009-01-18 01:37:11 -05001498 * We adjust to current frequency first, and need to clean up later.
1499 * So either call cpufreq_update_policy() or schedule handle_update().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001500 */
Viresh Kumara1e1dc42015-01-02 12:34:28 +05301501static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301502 unsigned int new_freq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001503{
1504 struct cpufreq_freqs freqs;
Viresh Kumarb43a7ff2013-03-24 11:56:43 +05301505
Joe Perchese837f9b2014-03-11 10:03:00 -07001506 pr_debug("Warning: CPU frequency out of sync: cpufreq and the timing core think it is %u kHz, but it is %u kHz\n",
Viresh Kumara1e1dc42015-01-02 12:34:28 +05301507 policy->cur, new_freq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001508
Viresh Kumara1e1dc42015-01-02 12:34:28 +05301509 freqs.old = policy->cur;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001510 freqs.new = new_freq;
Viresh Kumarb43a7ff2013-03-24 11:56:43 +05301511
Viresh Kumar8fec0512014-03-24 13:35:45 +05301512 cpufreq_freq_transition_begin(policy, &freqs);
1513 cpufreq_freq_transition_end(policy, &freqs, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001514}
1515
Dave Jones32ee8c32006-02-28 00:43:23 -05001516/**
Dhaval Giani4ab70df2006-12-13 14:49:15 +05301517 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001518 * @cpu: CPU number
1519 *
1520 * This is the last known freq, without actually getting it from the driver.
1521 * The return value is the same as what is shown in scaling_cur_freq in sysfs.
1522 */
1523unsigned int cpufreq_quick_get(unsigned int cpu)
1524{
Dirk Brandewie9e21ba82013-02-06 09:02:08 -08001525 struct cpufreq_policy *policy;
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301526 unsigned int ret_freq = 0;
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001527
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001528 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1529 return cpufreq_driver->get(cpu);
Dirk Brandewie9e21ba82013-02-06 09:02:08 -08001530
1531 policy = cpufreq_cpu_get(cpu);
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001532 if (policy) {
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301533 ret_freq = policy->cur;
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001534 cpufreq_cpu_put(policy);
1535 }
1536
Dave Jones4d34a672008-02-07 16:33:49 -05001537 return ret_freq;
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001538}
1539EXPORT_SYMBOL(cpufreq_quick_get);
1540
Jesse Barnes3d737102011-06-28 10:59:12 -07001541/**
1542 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1543 * @cpu: CPU number
1544 *
1545 * Just return the max possible frequency for a given CPU.
1546 */
1547unsigned int cpufreq_quick_get_max(unsigned int cpu)
1548{
1549 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1550 unsigned int ret_freq = 0;
1551
1552 if (policy) {
1553 ret_freq = policy->max;
1554 cpufreq_cpu_put(policy);
1555 }
1556
1557 return ret_freq;
1558}
1559EXPORT_SYMBOL(cpufreq_quick_get_max);
1560
Viresh Kumard92d50a2015-01-02 12:34:29 +05301561static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001562{
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301563 unsigned int ret_freq = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001564
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001565 if (!cpufreq_driver->get)
Dave Jones4d34a672008-02-07 16:33:49 -05001566 return ret_freq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001567
Viresh Kumard92d50a2015-01-02 12:34:29 +05301568 ret_freq = cpufreq_driver->get(policy->cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001569
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301570 if (ret_freq && policy->cur &&
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001571 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301572 /* verify no discrepancy between actual and
1573 saved value exists */
1574 if (unlikely(ret_freq != policy->cur)) {
Viresh Kumara1e1dc42015-01-02 12:34:28 +05301575 cpufreq_out_of_sync(policy, ret_freq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001576 schedule_work(&policy->update);
1577 }
1578 }
1579
Dave Jones4d34a672008-02-07 16:33:49 -05001580 return ret_freq;
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001581}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001582
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001583/**
1584 * cpufreq_get - get the current CPU frequency (in kHz)
1585 * @cpu: CPU number
1586 *
1587 * Get the current frequency of the given CPU, as reported by the driver.
1588 */
1589unsigned int cpufreq_get(unsigned int cpu)
1590{
Aaron Plattner999976e2014-03-04 12:42:15 -08001591 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001592 unsigned int ret_freq = 0;
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001593
Aaron Plattner999976e2014-03-04 12:42:15 -08001594 if (policy) {
1595 down_read(&policy->rwsem);
Viresh Kumard92d50a2015-01-02 12:34:29 +05301596 ret_freq = __cpufreq_get(policy);
Aaron Plattner999976e2014-03-04 12:42:15 -08001597 up_read(&policy->rwsem);
Viresh Kumar26ca8692013-09-20 22:37:31 +05301598
Aaron Plattner999976e2014-03-04 12:42:15 -08001599 cpufreq_cpu_put(policy);
1600 }
Viresh Kumar6eed9402013-08-06 22:53:11 +05301601
Dave Jones4d34a672008-02-07 16:33:49 -05001602 return ret_freq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001603}
1604EXPORT_SYMBOL(cpufreq_get);
1605
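/*
 * Illustrative sketch (not from this file): cpufreq_quick_get() only
 * returns the cached policy->cur without calling into the driver, while
 * cpufreq_get() may query the hardware and resync if a mismatch is found.
 * Both return 0 when no policy exists for the CPU.
 *
 *	unsigned int cached_khz = cpufreq_quick_get(cpu);
 *	unsigned int actual_khz = cpufreq_get(cpu);
 */
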
Kay Sievers8a25a2f2011-12-21 14:29:42 -08001606static struct subsys_interface cpufreq_interface = {
1607 .name = "cpufreq",
1608 .subsys = &cpu_subsys,
1609 .add_dev = cpufreq_add_dev,
1610 .remove_dev = cpufreq_remove_dev,
Rafael J. Wysockie00e56d2011-03-23 22:16:32 +01001611};
1612
Viresh Kumare28867e2014-03-04 11:00:27 +08001613/*
1614 * Helper for platforms that want a specific frequency to be configured
1615 * during suspend.
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001616 */
Viresh Kumare28867e2014-03-04 11:00:27 +08001617int cpufreq_generic_suspend(struct cpufreq_policy *policy)
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001618{
Viresh Kumare28867e2014-03-04 11:00:27 +08001619 int ret;
Dave Jones4bc5d342009-08-04 14:03:25 -04001620
Viresh Kumare28867e2014-03-04 11:00:27 +08001621 if (!policy->suspend_freq) {
1622 pr_err("%s: suspend_freq can't be zero\n", __func__);
1623 return -EINVAL;
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001624 }
1625
Viresh Kumare28867e2014-03-04 11:00:27 +08001626 pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1627 policy->suspend_freq);
1628
1629 ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1630 CPUFREQ_RELATION_H);
1631 if (ret)
1632 pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1633 __func__, policy->suspend_freq, ret);
1634
Dave Jonesc9060492008-02-07 16:32:18 -05001635 return ret;
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001636}
Viresh Kumare28867e2014-03-04 11:00:27 +08001637EXPORT_SYMBOL(cpufreq_generic_suspend);
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001638
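/*
 * Illustrative sketch (not from this file): a platform driver typically
 * opts in to this helper by filling in policy->suspend_freq (in kHz) from
 * its ->init() callback and pointing .suspend at cpufreq_generic_suspend().
 * A real ->init() also sets up the frequency table; the "foo" names and the
 * 800000 kHz value below are made up.
 *
 *	static int foo_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		policy->suspend_freq = 800000;
 *		return 0;
 *	}
 *
 *	static struct cpufreq_driver foo_cpufreq_driver = {
 *		.name		= "foo-cpufreq",
 *		.suspend	= cpufreq_generic_suspend,
 *	};
 */
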
1639/**
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001640 * cpufreq_suspend() - Suspend CPUFreq governors
Linus Torvalds1da177e2005-04-16 15:20:36 -07001641 *
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001642 * Called during system wide Suspend/Hibernate cycles for suspending governors
1643 * as some platforms can't change frequency after this point in the suspend
1644 * cycle, because some of the devices they use for changing the frequency
1645 * (e.g. i2c, regulators) are suspended soon after this point.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001646 */
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001647void cpufreq_suspend(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001648{
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301649 struct cpufreq_policy *policy;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001650
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001651 if (!cpufreq_driver)
Rafael J. Wysockie00e56d2011-03-23 22:16:32 +01001652 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001653
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001654 if (!has_target())
Viresh Kumarb1b12ba2014-09-30 09:33:17 +05301655 goto suspend;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001656
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001657 pr_debug("%s: Suspending Governors\n", __func__);
1658
Viresh Kumarb4f06762015-01-27 14:06:08 +05301659 for_each_policy(policy) {
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001660 if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
1661 pr_err("%s: Failed to stop governor for policy: %p\n",
1662 __func__, policy);
1663 else if (cpufreq_driver->suspend
1664 && cpufreq_driver->suspend(policy))
1665 pr_err("%s: Failed to suspend driver: %p\n", __func__,
1666 policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001667 }
Viresh Kumarb1b12ba2014-09-30 09:33:17 +05301668
1669suspend:
1670 cpufreq_suspended = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001671}
1672
Linus Torvalds1da177e2005-04-16 15:20:36 -07001673/**
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001674 * cpufreq_resume() - Resume CPUFreq governors
Linus Torvalds1da177e2005-04-16 15:20:36 -07001675 *
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001676 * Called during system wide Suspend/Hibernate cycle for resuming governors that
1677 * are suspended with cpufreq_suspend().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001678 */
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001679void cpufreq_resume(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001680{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001681 struct cpufreq_policy *policy;
1682
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001683 if (!cpufreq_driver)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001684 return;
1685
Lan Tianyu8e304442014-09-18 15:03:07 +08001686 cpufreq_suspended = false;
1687
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001688 if (!has_target())
1689 return;
1690
1691 pr_debug("%s: Resuming Governors\n", __func__);
1692
Viresh Kumarb4f06762015-01-27 14:06:08 +05301693 for_each_policy(policy) {
Viresh Kumar0c5aa402014-03-24 12:30:29 +05301694 if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
1695 pr_err("%s: Failed to resume driver: %p\n", __func__,
1696 policy);
1697 else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
Viresh Kumar2f0aea92014-03-04 11:00:26 +08001698 || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
1699 pr_err("%s: Failed to start governor for policy: %p\n",
1700 __func__, policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001701 }
Viresh Kumarc75de0a2015-04-02 10:21:33 +05301702
1703 /*
1704 * Schedule a call to cpufreq_update_policy() for the first online CPU,
1705 * as that one won't be hotplugged out on suspend. It will verify that the
1706 * current freq is in sync with what we believe it to be.
1707 */
1708 policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask));
1709 if (WARN_ON(!policy))
1710 return;
1711
1712 schedule_work(&policy->update);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001713}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001714
Borislav Petkov9d950462013-01-20 10:24:28 +00001715/**
1716 * cpufreq_get_current_driver - return current driver's name
1717 *
1718 * Return the name string of the currently loaded cpufreq driver
1719 * or NULL, if none.
1720 */
1721const char *cpufreq_get_current_driver(void)
1722{
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001723 if (cpufreq_driver)
1724 return cpufreq_driver->name;
1725
1726 return NULL;
Borislav Petkov9d950462013-01-20 10:24:28 +00001727}
1728EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001729
Thomas Petazzoni51315cd2014-10-19 11:30:27 +02001730/**
1731 * cpufreq_get_driver_data - return current driver data
1732 *
1733 * Return the private data of the currently loaded cpufreq
1734 * driver, or NULL if no cpufreq driver is loaded.
1735 */
1736void *cpufreq_get_driver_data(void)
1737{
1738 if (cpufreq_driver)
1739 return cpufreq_driver->driver_data;
1740
1741 return NULL;
1742}
1743EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
1744
Linus Torvalds1da177e2005-04-16 15:20:36 -07001745/*********************************************************************
1746 * NOTIFIER LISTS INTERFACE *
1747 *********************************************************************/
1748
1749/**
1750 * cpufreq_register_notifier - register a driver with cpufreq
1751 * @nb: notifier function to register
1752 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1753 *
Dave Jones32ee8c32006-02-28 00:43:23 -05001754 * Add a driver to one of two lists: either a list of drivers that
Linus Torvalds1da177e2005-04-16 15:20:36 -07001755 * are notified about clock rate changes (once before and once after
1756 * the transition), or a list of drivers that are notified about
1757 * changes in cpufreq policy.
1758 *
1759 * This function may sleep, and has the same return conditions as
Alan Sterne041c682006-03-27 01:16:30 -08001760 * blocking_notifier_chain_register.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001761 */
1762int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1763{
1764 int ret;
1765
Dirk Brandewied5aaffa2013-01-17 16:22:21 +00001766 if (cpufreq_disabled())
1767 return -EINVAL;
1768
Cesar Eduardo Barros74212ca2008-02-16 08:41:24 -02001769 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1770
Linus Torvalds1da177e2005-04-16 15:20:36 -07001771 switch (list) {
1772 case CPUFREQ_TRANSITION_NOTIFIER:
Alan Sternb4dfdbb2006-10-04 02:17:06 -07001773 ret = srcu_notifier_chain_register(
Alan Sterne041c682006-03-27 01:16:30 -08001774 &cpufreq_transition_notifier_list, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001775 break;
1776 case CPUFREQ_POLICY_NOTIFIER:
Alan Sterne041c682006-03-27 01:16:30 -08001777 ret = blocking_notifier_chain_register(
1778 &cpufreq_policy_notifier_list, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001779 break;
1780 default:
1781 ret = -EINVAL;
1782 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001783
1784 return ret;
1785}
1786EXPORT_SYMBOL(cpufreq_register_notifier);
1787
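/*
 * Illustrative sketch (not from this file): a typical transition notifier
 * reacts to CPUFREQ_POSTCHANGE and rescales whatever it derives from the
 * CPU clock.  The "foo" callback and foo_rescale() are hypothetical.
 *
 *	static int foo_cpufreq_notifier(struct notifier_block *nb,
 *					unsigned long event, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (event == CPUFREQ_POSTCHANGE)
 *			foo_rescale(freqs->cpu, freqs->new);
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_cpufreq_notifier,
 *	};
 *
 *	cpufreq_register_notifier(&foo_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */
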
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788/**
1789 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1790 * @nb: notifier block to be unregistered
Viresh Kumarbb176f72013-06-19 14:19:33 +05301791 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
Linus Torvalds1da177e2005-04-16 15:20:36 -07001792 *
1793 * Remove a driver from the CPU frequency notifier list.
1794 *
1795 * This function may sleep, and has the same return conditions as
Alan Sterne041c682006-03-27 01:16:30 -08001796 * blocking_notifier_chain_unregister.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001797 */
1798int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1799{
1800 int ret;
1801
Dirk Brandewied5aaffa2013-01-17 16:22:21 +00001802 if (cpufreq_disabled())
1803 return -EINVAL;
1804
Linus Torvalds1da177e2005-04-16 15:20:36 -07001805 switch (list) {
1806 case CPUFREQ_TRANSITION_NOTIFIER:
Alan Sternb4dfdbb2006-10-04 02:17:06 -07001807 ret = srcu_notifier_chain_unregister(
Alan Sterne041c682006-03-27 01:16:30 -08001808 &cpufreq_transition_notifier_list, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001809 break;
1810 case CPUFREQ_POLICY_NOTIFIER:
Alan Sterne041c682006-03-27 01:16:30 -08001811 ret = blocking_notifier_chain_unregister(
1812 &cpufreq_policy_notifier_list, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001813 break;
1814 default:
1815 ret = -EINVAL;
1816 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001817
1818 return ret;
1819}
1820EXPORT_SYMBOL(cpufreq_unregister_notifier);
1821
1822
1823/*********************************************************************
1824 * GOVERNORS *
1825 *********************************************************************/
1826
Viresh Kumar1c03a2d2014-06-02 22:49:28 +05301827/* Must set freqs->new to intermediate frequency */
1828static int __target_intermediate(struct cpufreq_policy *policy,
1829 struct cpufreq_freqs *freqs, int index)
1830{
1831 int ret;
1832
1833 freqs->new = cpufreq_driver->get_intermediate(policy, index);
1834
1835 /* We don't need to switch to intermediate freq */
1836 if (!freqs->new)
1837 return 0;
1838
1839 pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
1840 __func__, policy->cpu, freqs->old, freqs->new);
1841
1842 cpufreq_freq_transition_begin(policy, freqs);
1843 ret = cpufreq_driver->target_intermediate(policy, index);
1844 cpufreq_freq_transition_end(policy, freqs, ret);
1845
1846 if (ret)
1847 pr_err("%s: Failed to change to intermediate frequency: %d\n",
1848 __func__, ret);
1849
1850 return ret;
1851}
1852
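/*
 * Illustrative sketch (not from this file): a driver that has to pass
 * through a safe intermediate rate (e.g. a PLL bypass clock) advertises it
 * from ->get_intermediate() (in kHz, 0 means "not needed for this index")
 * and performs the switch in ->target_intermediate().  The "foo" names,
 * foo_switch_to_bypass() and the 200000 kHz rate are made up.
 *
 *	static unsigned int foo_get_intermediate(struct cpufreq_policy *policy,
 *						 unsigned int index)
 *	{
 *		return 200000;
 *	}
 *
 *	static int foo_target_intermediate(struct cpufreq_policy *policy,
 *					   unsigned int index)
 *	{
 *		return foo_switch_to_bypass(policy->cpu);
 *	}
 *
 *	static struct cpufreq_driver foo_driver = {
 *		.get_intermediate	= foo_get_intermediate,
 *		.target_intermediate	= foo_target_intermediate,
 *		.target_index		= foo_target_index,
 *	};
 */
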
Viresh Kumar8d657752014-05-21 14:29:29 +05301853static int __target_index(struct cpufreq_policy *policy,
1854 struct cpufreq_frequency_table *freq_table, int index)
1855{
Viresh Kumar1c03a2d2014-06-02 22:49:28 +05301856 struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
1857 unsigned int intermediate_freq = 0;
Viresh Kumar8d657752014-05-21 14:29:29 +05301858 int retval = -EINVAL;
1859 bool notify;
1860
1861 notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
Viresh Kumar8d657752014-05-21 14:29:29 +05301862 if (notify) {
Viresh Kumar1c03a2d2014-06-02 22:49:28 +05301863 /* Handle switching to intermediate frequency */
1864 if (cpufreq_driver->get_intermediate) {
1865 retval = __target_intermediate(policy, &freqs, index);
1866 if (retval)
1867 return retval;
Viresh Kumar8d657752014-05-21 14:29:29 +05301868
Viresh Kumar1c03a2d2014-06-02 22:49:28 +05301869 intermediate_freq = freqs.new;
1870 /* Set old freq to intermediate */
1871 if (intermediate_freq)
1872 freqs.old = freqs.new;
1873 }
1874
1875 freqs.new = freq_table[index].frequency;
Viresh Kumar8d657752014-05-21 14:29:29 +05301876 pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
1877 __func__, policy->cpu, freqs.old, freqs.new);
1878
1879 cpufreq_freq_transition_begin(policy, &freqs);
1880 }
1881
1882 retval = cpufreq_driver->target_index(policy, index);
1883 if (retval)
1884 pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
1885 retval);
1886
Viresh Kumar1c03a2d2014-06-02 22:49:28 +05301887 if (notify) {
Viresh Kumar8d657752014-05-21 14:29:29 +05301888 cpufreq_freq_transition_end(policy, &freqs, retval);
1889
Viresh Kumar1c03a2d2014-06-02 22:49:28 +05301890 /*
1891 * Failed after setting to intermediate freq? Driver should have
1892 * reverted back to initial frequency and so should we. Check
1893 * here for intermediate_freq instead of get_intermediate, in
1894 * case we haven't switched to the intermediate freq at all.
1895 */
1896 if (unlikely(retval && intermediate_freq)) {
1897 freqs.old = intermediate_freq;
1898 freqs.new = policy->restore_freq;
1899 cpufreq_freq_transition_begin(policy, &freqs);
1900 cpufreq_freq_transition_end(policy, &freqs, 0);
1901 }
1902 }
1903
Viresh Kumar8d657752014-05-21 14:29:29 +05301904 return retval;
1905}
1906
Linus Torvalds1da177e2005-04-16 15:20:36 -07001907int __cpufreq_driver_target(struct cpufreq_policy *policy,
1908 unsigned int target_freq,
1909 unsigned int relation)
1910{
Viresh Kumar72499242012-10-31 01:28:21 +01001911 unsigned int old_target_freq = target_freq;
Viresh Kumar8d657752014-05-21 14:29:29 +05301912 int retval = -EINVAL;
Ashok Rajc32b6b82005-10-30 14:59:54 -08001913
Konrad Rzeszutek Wilka7b422c2012-03-13 19:18:39 -04001914 if (cpufreq_disabled())
1915 return -ENODEV;
1916
Viresh Kumar72499242012-10-31 01:28:21 +01001917 /* Make sure that target_freq is within supported range */
1918 if (target_freq > policy->max)
1919 target_freq = policy->max;
1920 if (target_freq < policy->min)
1921 target_freq = policy->min;
1922
1923 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
Joe Perchese837f9b2014-03-11 10:03:00 -07001924 policy->cpu, target_freq, relation, old_target_freq);
Viresh Kumar5a1c0222012-10-31 01:28:15 +01001925
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05301926 /*
1927 * This might look like a redundant call as we are checking it again
1928 * after finding the index. But it is left intentionally for cases where
1929 * exactly the same freq is requested again, so that we can save a few
1930 * function calls.
1931 */
Viresh Kumar5a1c0222012-10-31 01:28:15 +01001932 if (target_freq == policy->cur)
1933 return 0;
1934
Viresh Kumar1c03a2d2014-06-02 22:49:28 +05301935 /* Save last value to restore later on errors */
1936 policy->restore_freq = policy->cur;
1937
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001938 if (cpufreq_driver->target)
1939 retval = cpufreq_driver->target(policy, target_freq, relation);
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05301940 else if (cpufreq_driver->target_index) {
1941 struct cpufreq_frequency_table *freq_table;
1942 int index;
Ashok Raj90d45d12005-11-08 21:34:24 -08001943
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05301944 freq_table = cpufreq_frequency_get_table(policy->cpu);
1945 if (unlikely(!freq_table)) {
1946 pr_err("%s: Unable to find freq_table\n", __func__);
1947 goto out;
1948 }
1949
1950 retval = cpufreq_frequency_table_target(policy, freq_table,
1951 target_freq, relation, &index);
1952 if (unlikely(retval)) {
1953 pr_err("%s: Unable to find matching freq\n", __func__);
1954 goto out;
1955 }
1956
Viresh Kumard4019f02013-08-14 19:38:24 +05301957 if (freq_table[index].frequency == policy->cur) {
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05301958 retval = 0;
Viresh Kumard4019f02013-08-14 19:38:24 +05301959 goto out;
1960 }
1961
Viresh Kumar8d657752014-05-21 14:29:29 +05301962 retval = __target_index(policy, freq_table, index);
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05301963 }
1964
1965out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001966 return retval;
1967}
1968EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1969
Linus Torvalds1da177e2005-04-16 15:20:36 -07001970int cpufreq_driver_target(struct cpufreq_policy *policy,
1971 unsigned int target_freq,
1972 unsigned int relation)
1973{
Julia Lawallf1829e42008-07-25 22:44:53 +02001974 int ret = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001975
viresh kumarad7722d2013-10-18 19:10:15 +05301976 down_write(&policy->rwsem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977
1978 ret = __cpufreq_driver_target(policy, target_freq, relation);
1979
viresh kumarad7722d2013-10-18 19:10:15 +05301980 up_write(&policy->rwsem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001981
Linus Torvalds1da177e2005-04-16 15:20:36 -07001982 return ret;
1983}
1984EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1985
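/*
 * Illustrative sketch (not from this file): cpufreq_driver_target() takes
 * policy->rwsem around the low-level call, so it must not be invoked with
 * that lock already held; CPUFREQ_RELATION_L asks for the lowest listed
 * frequency at or above the target.  foo_next_freq() is hypothetical.
 *
 *	unsigned int next_khz = foo_next_freq(policy);
 *
 *	cpufreq_driver_target(policy, next_khz, CPUFREQ_RELATION_L);
 */
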
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301986static int __cpufreq_governor(struct cpufreq_policy *policy,
1987 unsigned int event)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001988{
Dave Jonescc993ca2005-07-28 09:43:56 -07001989 int ret;
Thomas Renninger6afde102007-10-02 13:28:13 -07001990
1991 /* Must only be defined when the default governor is known to have
1992 latency restrictions, e.g. conservative or ondemand.
1993 That this is the case is already ensured in Kconfig.
1994 */
1995#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1996 struct cpufreq_governor *gov = &cpufreq_gov_performance;
1997#else
1998 struct cpufreq_governor *gov = NULL;
1999#endif
Thomas Renninger1c256242007-10-02 13:28:12 -07002000
Viresh Kumar2f0aea92014-03-04 11:00:26 +08002001 /* Don't start any governor operations if we are entering suspend */
2002 if (cpufreq_suspended)
2003 return 0;
Ethan Zhaocb577202014-12-18 15:28:19 +09002004 /*
2005 * The governor might not have been initialized yet if an ACPI _PPC
2006 * change notification happened, so check for it.
2007 */
2008 if (!policy->governor)
2009 return -EINVAL;
Viresh Kumar2f0aea92014-03-04 11:00:26 +08002010
Thomas Renninger1c256242007-10-02 13:28:12 -07002011 if (policy->governor->max_transition_latency &&
2012 policy->cpuinfo.transition_latency >
2013 policy->governor->max_transition_latency) {
Thomas Renninger6afde102007-10-02 13:28:13 -07002014 if (!gov)
2015 return -EINVAL;
2016 else {
Joe Perchese837f9b2014-03-11 10:03:00 -07002017 pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
2018 policy->governor->name, gov->name);
Thomas Renninger6afde102007-10-02 13:28:13 -07002019 policy->governor = gov;
2020 }
Thomas Renninger1c256242007-10-02 13:28:12 -07002021 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002022
Viresh Kumarfe492f32013-08-06 22:53:10 +05302023 if (event == CPUFREQ_GOV_POLICY_INIT)
2024 if (!try_module_get(policy->governor->owner))
2025 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02002027 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
Joe Perchese837f9b2014-03-11 10:03:00 -07002028 policy->cpu, event);
Xiaoguang Chen95731eb2013-06-19 15:00:07 +08002029
2030 mutex_lock(&cpufreq_governor_lock);
Srivatsa S. Bhat56d07db2013-09-07 01:23:55 +05302031 if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
Viresh Kumarf73d3932013-08-31 17:53:40 +05302032 || (!policy->governor_enabled
2033 && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
Xiaoguang Chen95731eb2013-06-19 15:00:07 +08002034 mutex_unlock(&cpufreq_governor_lock);
2035 return -EBUSY;
2036 }
2037
2038 if (event == CPUFREQ_GOV_STOP)
2039 policy->governor_enabled = false;
2040 else if (event == CPUFREQ_GOV_START)
2041 policy->governor_enabled = true;
2042
2043 mutex_unlock(&cpufreq_governor_lock);
2044
Linus Torvalds1da177e2005-04-16 15:20:36 -07002045 ret = policy->governor->governor(policy, event);
2046
Viresh Kumar4d5dcc42013-03-27 15:58:58 +00002047 if (!ret) {
2048 if (event == CPUFREQ_GOV_POLICY_INIT)
2049 policy->governor->initialized++;
2050 else if (event == CPUFREQ_GOV_POLICY_EXIT)
2051 policy->governor->initialized--;
Xiaoguang Chen95731eb2013-06-19 15:00:07 +08002052 } else {
2053 /* Restore original values */
2054 mutex_lock(&cpufreq_governor_lock);
2055 if (event == CPUFREQ_GOV_STOP)
2056 policy->governor_enabled = true;
2057 else if (event == CPUFREQ_GOV_START)
2058 policy->governor_enabled = false;
2059 mutex_unlock(&cpufreq_governor_lock);
Viresh Kumar4d5dcc42013-03-27 15:58:58 +00002060 }
Viresh Kumarb3940582013-02-01 05:42:58 +00002061
Viresh Kumarfe492f32013-08-06 22:53:10 +05302062 if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
2063 ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002064 module_put(policy->governor->owner);
2065
2066 return ret;
2067}
2068
Linus Torvalds1da177e2005-04-16 15:20:36 -07002069int cpufreq_register_governor(struct cpufreq_governor *governor)
2070{
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -07002071 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002072
2073 if (!governor)
2074 return -EINVAL;
2075
Konrad Rzeszutek Wilka7b422c2012-03-13 19:18:39 -04002076 if (cpufreq_disabled())
2077 return -ENODEV;
2078
akpm@osdl.org3fc54d32006-01-13 15:54:22 -08002079 mutex_lock(&cpufreq_governor_mutex);
Dave Jones32ee8c32006-02-28 00:43:23 -05002080
Viresh Kumarb3940582013-02-01 05:42:58 +00002081 governor->initialized = 0;
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -07002082 err = -EBUSY;
Viresh Kumar42f91fa2015-01-02 12:34:26 +05302083 if (!find_governor(governor->name)) {
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -07002084 err = 0;
2085 list_add(&governor->governor_list, &cpufreq_governor_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002086 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002087
Dave Jones32ee8c32006-02-28 00:43:23 -05002088 mutex_unlock(&cpufreq_governor_mutex);
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -07002089 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002090}
2091EXPORT_SYMBOL_GPL(cpufreq_register_governor);
2092
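/*
 * Illustrative sketch (not from this file): a minimal governor for this
 * interface handles the CPUFREQ_GOV_* events in a single ->governor()
 * callback, much like the performance governor.  The "foo" name is made up.
 *
 *	static int cpufreq_governor_foo(struct cpufreq_policy *policy,
 *					unsigned int event)
 *	{
 *		switch (event) {
 *		case CPUFREQ_GOV_START:
 *		case CPUFREQ_GOV_LIMITS:
 *			__cpufreq_driver_target(policy, policy->max,
 *						CPUFREQ_RELATION_H);
 *			break;
 *		default:
 *			break;
 *		}
 *		return 0;
 *	}
 *
 *	static struct cpufreq_governor cpufreq_gov_foo = {
 *		.name		= "foo",
 *		.governor	= cpufreq_governor_foo,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	cpufreq_register_governor(&cpufreq_gov_foo);
 */
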
Linus Torvalds1da177e2005-04-16 15:20:36 -07002093void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2094{
Prarit Bhargava90e41ba2009-11-12 09:18:46 -05002095 int cpu;
Prarit Bhargava90e41ba2009-11-12 09:18:46 -05002096
Linus Torvalds1da177e2005-04-16 15:20:36 -07002097 if (!governor)
2098 return;
2099
Konrad Rzeszutek Wilka7b422c2012-03-13 19:18:39 -04002100 if (cpufreq_disabled())
2101 return;
2102
Prarit Bhargava90e41ba2009-11-12 09:18:46 -05002103 for_each_present_cpu(cpu) {
2104 if (cpu_online(cpu))
2105 continue;
2106 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
2107 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
2108 }
Prarit Bhargava90e41ba2009-11-12 09:18:46 -05002109
akpm@osdl.org3fc54d32006-01-13 15:54:22 -08002110 mutex_lock(&cpufreq_governor_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002111 list_del(&governor->governor_list);
akpm@osdl.org3fc54d32006-01-13 15:54:22 -08002112 mutex_unlock(&cpufreq_governor_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002113 return;
2114}
2115EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2116
2117
Linus Torvalds1da177e2005-04-16 15:20:36 -07002118/*********************************************************************
2119 * POLICY INTERFACE *
2120 *********************************************************************/
2121
2122/**
2123 * cpufreq_get_policy - get the current cpufreq_policy
Dave Jones29464f22009-01-18 01:37:11 -05002124 * @policy: struct cpufreq_policy into which the current cpufreq_policy
2125 * is written
Linus Torvalds1da177e2005-04-16 15:20:36 -07002126 *
2127 * Reads the current cpufreq policy.
2128 */
2129int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2130{
2131 struct cpufreq_policy *cpu_policy;
2132 if (!policy)
2133 return -EINVAL;
2134
2135 cpu_policy = cpufreq_cpu_get(cpu);
2136 if (!cpu_policy)
2137 return -EINVAL;
2138
Viresh Kumard5b73cd2013-08-06 22:53:06 +05302139 memcpy(policy, cpu_policy, sizeof(*policy));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002140
2141 cpufreq_cpu_put(cpu_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002142 return 0;
2143}
2144EXPORT_SYMBOL(cpufreq_get_policy);
2145
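/*
 * Illustrative sketch (not from this file): callers receive a snapshot in
 * their own struct and do not have to drop any reference afterwards.
 *
 *	struct cpufreq_policy pol;
 *
 *	if (!cpufreq_get_policy(&pol, cpu))
 *		pr_info("cpu%u: %u - %u kHz\n", cpu, pol.min, pol.max);
 */
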
Arjan van de Ven153d7f32006-07-26 15:40:07 +02002146/*
Viresh Kumar037ce832013-10-02 14:13:16 +05302147 * policy: current policy.
2148 * new_policy: policy to be set.
Arjan van de Ven153d7f32006-07-26 15:40:07 +02002149 */
Viresh Kumar037ce832013-10-02 14:13:16 +05302150static int cpufreq_set_policy(struct cpufreq_policy *policy,
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302151 struct cpufreq_policy *new_policy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002152{
Rafael J. Wysockid9a789c2014-02-17 22:56:35 +01002153 struct cpufreq_governor *old_gov;
2154 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155
Joe Perchese837f9b2014-03-11 10:03:00 -07002156 pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2157 new_policy->cpu, new_policy->min, new_policy->max);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002158
Viresh Kumard5b73cd2013-08-06 22:53:06 +05302159 memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002160
Rafael J. Wysockid9a789c2014-02-17 22:56:35 +01002161 if (new_policy->min > policy->max || new_policy->max < policy->min)
2162 return -EINVAL;
Mattia Dongili9c9a43e2006-07-05 23:12:20 +02002163
Linus Torvalds1da177e2005-04-16 15:20:36 -07002164 /* verify the cpu speed can be set within this limit */
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302165 ret = cpufreq_driver->verify(new_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002166 if (ret)
Rafael J. Wysockid9a789c2014-02-17 22:56:35 +01002167 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002168
Linus Torvalds1da177e2005-04-16 15:20:36 -07002169 /* adjust if necessary - all reasons */
Alan Sterne041c682006-03-27 01:16:30 -08002170 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302171 CPUFREQ_ADJUST, new_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002172
2173 /* adjust if necessary - hardware incompatibility*/
Alan Sterne041c682006-03-27 01:16:30 -08002174 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302175 CPUFREQ_INCOMPATIBLE, new_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002176
Viresh Kumarbb176f72013-06-19 14:19:33 +05302177 /*
2178 * verify the cpu speed can be set within this limit, which might be
2179 * different to the first one
2180 */
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302181 ret = cpufreq_driver->verify(new_policy);
Alan Sterne041c682006-03-27 01:16:30 -08002182 if (ret)
Rafael J. Wysockid9a789c2014-02-17 22:56:35 +01002183 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002184
2185 /* notification of the new policy */
Alan Sterne041c682006-03-27 01:16:30 -08002186 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302187 CPUFREQ_NOTIFY, new_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002188
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302189 policy->min = new_policy->min;
2190 policy->max = new_policy->max;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002191
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02002192 pr_debug("new min and max freqs are %u - %u kHz\n",
Joe Perchese837f9b2014-03-11 10:03:00 -07002193 policy->min, policy->max);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002194
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02002195 if (cpufreq_driver->setpolicy) {
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302196 policy->policy = new_policy->policy;
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02002197 pr_debug("setting range\n");
Rafael J. Wysockid9a789c2014-02-17 22:56:35 +01002198 return cpufreq_driver->setpolicy(new_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002199 }
2200
Rafael J. Wysockid9a789c2014-02-17 22:56:35 +01002201 if (new_policy->governor == policy->governor)
2202 goto out;
2203
2204 pr_debug("governor switch\n");
2205
2206 /* save old, working values */
2207 old_gov = policy->governor;
2208 /* end old governor */
2209 if (old_gov) {
2210 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
2211 up_write(&policy->rwsem);
Stratos Karafotise5c87b72014-03-19 23:29:17 +02002212 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
Rafael J. Wysockid9a789c2014-02-17 22:56:35 +01002213 down_write(&policy->rwsem);
2214 }
2215
2216 /* start new governor */
2217 policy->governor = new_policy->governor;
2218 if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
2219 if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
2220 goto out;
2221
2222 up_write(&policy->rwsem);
2223 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2224 down_write(&policy->rwsem);
2225 }
2226
2227 /* new governor failed, so re-start old one */
2228 pr_debug("starting governor %s failed\n", policy->governor->name);
2229 if (old_gov) {
2230 policy->governor = old_gov;
2231 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
2232 __cpufreq_governor(policy, CPUFREQ_GOV_START);
2233 }
2234
2235 return -EINVAL;
2236
2237 out:
2238 pr_debug("governor: change or update limits\n");
2239 return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002240}
2241
2242/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002243 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
2244 * @cpu: CPU which shall be re-evaluated
2245 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002246 * Useful for policy notifiers which have different needs
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247 * at different times.
2248 */
2249int cpufreq_update_policy(unsigned int cpu)
2250{
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302251 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
2252 struct cpufreq_policy new_policy;
Julia Lawallf1829e42008-07-25 22:44:53 +02002253 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002254
Aaron Plattnerfefa8ff2014-06-18 11:27:32 -07002255 if (!policy)
2256 return -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002257
viresh kumarad7722d2013-10-18 19:10:15 +05302258 down_write(&policy->rwsem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002259
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02002260 pr_debug("updating policy for CPU %u\n", cpu);
Viresh Kumard5b73cd2013-08-06 22:53:06 +05302261 memcpy(&new_policy, policy, sizeof(*policy));
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302262 new_policy.min = policy->user_policy.min;
2263 new_policy.max = policy->user_policy.max;
2264 new_policy.policy = policy->user_policy.policy;
2265 new_policy.governor = policy->user_policy.governor;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002266
Viresh Kumarbb176f72013-06-19 14:19:33 +05302267 /*
2268 * BIOS might change freq behind our back
2269 * -> ask driver for current freq and notify governors about a change
2270 */
Rafael J. Wysocki2ed99e32014-03-12 21:49:33 +01002271 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302272 new_policy.cur = cpufreq_driver->get(cpu);
Viresh Kumarbd0fa9b2014-02-25 14:29:44 +05302273 if (WARN_ON(!new_policy.cur)) {
2274 ret = -EIO;
Aaron Plattnerfefa8ff2014-06-18 11:27:32 -07002275 goto unlock;
Viresh Kumarbd0fa9b2014-02-25 14:29:44 +05302276 }
2277
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302278 if (!policy->cur) {
Joe Perchese837f9b2014-03-11 10:03:00 -07002279 pr_debug("Driver did not initialize current freq\n");
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302280 policy->cur = new_policy.cur;
Thomas Renningera85f7bd2006-02-01 11:36:04 +01002281 } else {
Viresh Kumar9c0ebcf2013-10-25 19:45:48 +05302282 if (policy->cur != new_policy.cur && has_target())
Viresh Kumara1e1dc42015-01-02 12:34:28 +05302283 cpufreq_out_of_sync(policy, new_policy.cur);
Thomas Renningera85f7bd2006-02-01 11:36:04 +01002284 }
Thomas Renninger0961dd02006-01-26 18:46:33 +01002285 }
2286
Viresh Kumar037ce832013-10-02 14:13:16 +05302287 ret = cpufreq_set_policy(policy, &new_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002288
Aaron Plattnerfefa8ff2014-06-18 11:27:32 -07002289unlock:
viresh kumarad7722d2013-10-18 19:10:15 +05302290 up_write(&policy->rwsem);
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08002291
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302292 cpufreq_cpu_put(policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002293 return ret;
2294}
2295EXPORT_SYMBOL(cpufreq_update_policy);
2296
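/*
 * Illustrative sketch (not from this file): platform code that learns of a
 * new frequency limit (e.g. a thermal cap or an ACPI _PPC change) nudges
 * the core to re-evaluate each affected CPU.  foo_limit_changed() is
 * hypothetical.
 *
 *	static void foo_limit_changed(const struct cpumask *cpus)
 *	{
 *		unsigned int cpu;
 *
 *		for_each_cpu(cpu, cpus)
 *			cpufreq_update_policy(cpu);
 *	}
 */
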
Paul Gortmaker27609842013-06-19 13:54:04 -04002297static int cpufreq_cpu_callback(struct notifier_block *nfb,
Ashok Rajc32b6b82005-10-30 14:59:54 -08002298 unsigned long action, void *hcpu)
2299{
2300 unsigned int cpu = (unsigned long)hcpu;
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002301 struct device *dev;
Ashok Rajc32b6b82005-10-30 14:59:54 -08002302
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002303 dev = get_cpu_device(cpu);
2304 if (dev) {
Srivatsa S. Bhat5302c3f2013-07-30 04:25:25 +05302305 switch (action & ~CPU_TASKS_FROZEN) {
Ashok Rajc32b6b82005-10-30 14:59:54 -08002306 case CPU_ONLINE:
Viresh Kumar96bbbe42014-03-10 14:53:35 +05302307 __cpufreq_add_dev(dev, NULL);
Ashok Rajc32b6b82005-10-30 14:59:54 -08002308 break;
Srivatsa S. Bhat5302c3f2013-07-30 04:25:25 +05302309
Ashok Rajc32b6b82005-10-30 14:59:54 -08002310 case CPU_DOWN_PREPARE:
Viresh Kumar96bbbe42014-03-10 14:53:35 +05302311 __cpufreq_remove_dev_prepare(dev, NULL);
Srivatsa S. Bhat1aee40a2013-09-07 01:23:27 +05302312 break;
2313
2314 case CPU_POST_DEAD:
Viresh Kumar96bbbe42014-03-10 14:53:35 +05302315 __cpufreq_remove_dev_finish(dev, NULL);
Ashok Rajc32b6b82005-10-30 14:59:54 -08002316 break;
Srivatsa S. Bhat5302c3f2013-07-30 04:25:25 +05302317
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08002318 case CPU_DOWN_FAILED:
Viresh Kumar96bbbe42014-03-10 14:53:35 +05302319 __cpufreq_add_dev(dev, NULL);
Ashok Rajc32b6b82005-10-30 14:59:54 -08002320 break;
2321 }
2322 }
2323 return NOTIFY_OK;
2324}
2325
Neal Buckendahl9c36f742010-06-22 22:02:44 -05002326static struct notifier_block __refdata cpufreq_cpu_notifier = {
Viresh Kumarbb176f72013-06-19 14:19:33 +05302327 .notifier_call = cpufreq_cpu_callback,
Ashok Rajc32b6b82005-10-30 14:59:54 -08002328};
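
/*
 * The notifier above mirrors CPU hotplug events onto cpufreq policy state:
 * CPU_TASKS_FROZEN is masked off so CPUs going away during suspend/resume
 * are handled like runtime hotplug, policy teardown is split between
 * CPU_DOWN_PREPARE and CPU_POST_DEAD, and CPU_DOWN_FAILED re-adds the
 * device so an aborted offline leaves the policy usable.
 */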

/*********************************************************************
 *                              BOOST                                *
 *********************************************************************/
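/*
 * Software boost fallback, used when the driver does not supply its own
 * set_boost() callback: after cpufreq_driver->boost_enabled has been
 * flipped, every policy's limits are recomputed from its frequency table
 * (the table helpers skip entries marked as boost frequencies while boost
 * is disabled) and the new limits are pushed to the governor.
 */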
static int cpufreq_boost_set_sw(int state)
{
	struct cpufreq_frequency_table *freq_table;
	struct cpufreq_policy *policy;
	int ret = -EINVAL;

	for_each_policy(policy) {
		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (freq_table) {
			ret = cpufreq_frequency_table_cpuinfo(policy,
							      freq_table);
			if (ret) {
				pr_err("%s: Policy frequency update failed\n",
				       __func__);
				break;
			}
			policy->user_policy.max = policy->max;
			__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
		}
	}

	return ret;
}

int cpufreq_boost_trigger_state(int state)
{
	unsigned long flags;
	int ret = 0;

	if (cpufreq_driver->boost_enabled == state)
		return 0;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_driver->set_boost(state);
	if (ret) {
		write_lock_irqsave(&cpufreq_driver_lock, flags);
		cpufreq_driver->boost_enabled = !state;
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		pr_err("%s: Cannot %s BOOST\n",
		       __func__, state ? "enable" : "disable");
	}

	return ret;
}
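
/*
 * Note: the usual path into cpufreq_boost_trigger_state() is the global
 * "boost" sysfs attribute registered in cpufreq_register_driver() below.
 * If the driver's set_boost() callback fails, boost_enabled is rolled
 * back under cpufreq_driver_lock so sysfs keeps reporting the state that
 * is actually in effect.
 */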

int cpufreq_boost_supported(void)
{
	if (likely(cpufreq_driver))
		return cpufreq_driver->boost_supported;

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_supported);

int cpufreq_boost_enabled(void)
{
	return cpufreq_driver->boost_enabled;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
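
/*
 * Illustrative sketch (hypothetical driver table, not part of this file,
 * assuming the CPUFREQ_BOOST_FREQ table flag available in this kernel):
 * a driver exposes boost frequencies by flagging them in its table and
 * lets the core decide, via cpufreq_boost_enabled(), whether they may be
 * used:
 *
 *	static struct cpufreq_frequency_table my_freq_table[] = {
 *		{ .flags = CPUFREQ_BOOST_FREQ, .frequency = 2400000 },
 *		{ .frequency = 2000000 },
 *		{ .frequency = 1600000 },
 *		{ .frequency = CPUFREQ_TABLE_END },
 *	};
 */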

/*********************************************************************
 *              REGISTER / UNREGISTER CPUFREQ DRIVER                 *
 *********************************************************************/

/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver with this core code. Returns zero on
 * success, -EEXIST when another driver is already registered (and has not
 * been unregistered in the meantime), and -EINVAL if the driver does not
 * provide a consistent set of callbacks.
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    !(driver_data->setpolicy || driver_data->target_index ||
		    driver_data->target) ||
	     (driver_data->setpolicy && (driver_data->target_index ||
		    driver_data->target)) ||
	     (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EEXIST;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	if (cpufreq_boost_supported()) {
		/*
		 * Check if driver provides function to enable boost -
		 * if not, use cpufreq_boost_set_sw as default
		 */
		if (!cpufreq_driver->set_boost)
			cpufreq_driver->set_boost = cpufreq_boost_set_sw;

		ret = cpufreq_sysfs_create_file(&boost.attr);
		if (ret) {
			pr_err("%s: cannot register global BOOST sysfs file\n",
			       __func__);
			goto err_null_driver;
		}
	}

	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_boost_unreg;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
	    list_empty(&cpufreq_policy_list)) {
		/* if all ->init() calls failed, unregister */
		ret = -ENODEV;
		pr_debug("%s: No CPU initialized for driver %s\n", __func__,
			 driver_data->name);
		goto err_if_unreg;
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);

	return 0;
err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
	if (cpufreq_boost_supported())
		cpufreq_sysfs_remove_file(&boost.attr);
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
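
/*
 * Illustrative sketch (hypothetical driver, not part of this file): a
 * minimal ->target_index driver registers itself from module init.  The
 * "my_*" names are made up for the example; cpufreq_register_driver(),
 * the mandatory callbacks checked above (.init, .verify, plus either
 * .setpolicy or a .target/.target_index callback) and the generic
 * helpers cpufreq_generic_frequency_table_verify()/cpufreq_generic_get()
 * are the real interfaces being exercised.
 *
 *	static struct cpufreq_driver my_cpufreq_driver = {
 *		.name		= "my-cpufreq",
 *		.flags		= CPUFREQ_STICKY,
 *		.init		= my_cpufreq_init,
 *		.verify		= cpufreq_generic_frequency_table_verify,
 *		.target_index	= my_cpufreq_target_index,
 *		.get		= cpufreq_generic_get,
 *	};
 *
 *	static int __init my_cpufreq_module_init(void)
 *	{
 *		return cpufreq_register_driver(&my_cpufreq_driver);
 *	}
 *	module_init(my_cpufreq_module_init);
 */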

/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 * @driver: the driver being unregistered
 *
 * Unregister the current CPUFreq driver. Only call this if the driver
 * passed in was previously registered successfully. Returns zero on
 * success and -EINVAL if @driver is not the currently registered driver.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	subsys_interface_unregister(&cpufreq_interface);
	if (cpufreq_boost_supported())
		cpufreq_sysfs_remove_file(&boost.attr);

	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	down_write(&cpufreq_rwsem);
	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpufreq_driver = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	up_write(&cpufreq_rwsem);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
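
/*
 * Companion to the registration sketch above (same hypothetical names):
 *
 *	static void __exit my_cpufreq_module_exit(void)
 *	{
 *		cpufreq_unregister_driver(&my_cpufreq_driver);
 *	}
 *	module_exit(my_cpufreq_module_exit);
 */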

/*
 * Stop cpufreq at shutdown to make sure it isn't holding any locks
 * or mutexes when secondary CPUs are halted.
 */
static struct syscore_ops cpufreq_syscore_ops = {
	.shutdown = cpufreq_suspend,
};

static int __init cpufreq_core_init(void)
{
	if (cpufreq_disabled())
		return -ENODEV;

	cpufreq_global_kobject = kobject_create();
	BUG_ON(!cpufreq_global_kobject);

	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
core_initcall(cpufreq_core_init);