/*
 * acpi-cpufreq.c - ACPI Processor P-States Driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2006       Denis Sadykov <denis.m.sadykov@intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/compiler.h>
#include <linux/dmi.h>
#include <linux/slab.h>

#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/uaccess.h>

#include <acpi/processor.h>

#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>

MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
MODULE_DESCRIPTION("ACPI Processor P-States Driver");
MODULE_LICENSE("GPL");

#define PFX "acpi-cpufreq: "

enum {
	UNDEFINED_CAPABLE = 0,
	SYSTEM_INTEL_MSR_CAPABLE,
	SYSTEM_AMD_MSR_CAPABLE,
	SYSTEM_IO_CAPABLE,
};

#define INTEL_MSR_RANGE		(0xffff)
#define AMD_MSR_RANGE		(0x7)

#define MSR_K7_HWCR_CPB_DIS	(1ULL << 25)

struct acpi_cpufreq_data {
	struct acpi_processor_performance *acpi_data;
	struct cpufreq_frequency_table *freq_table;
	unsigned int resume;
	unsigned int cpu_feature;
	cpumask_var_t freqdomain_cpus;
};

static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data);

/* acpi_perf_data is a pointer to percpu data. */
static struct acpi_processor_performance __percpu *acpi_perf_data;

static struct cpufreq_driver acpi_cpufreq_driver;

static unsigned int acpi_pstate_strict;
static bool boost_enabled, boost_supported;
static struct msr __percpu *msrs;

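/*
 * Query whether hardware boost (Intel Turbo Boost / AMD Core Performance
 * Boost) is currently enabled on @cpu by checking the vendor-specific
 * boost-disable bit; returns false for unknown vendors.
 */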
static bool boost_state(unsigned int cpu)
{
	u32 lo, hi;
	u64 msr;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
		msr = lo | ((u64)hi << 32);
		return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
	case X86_VENDOR_AMD:
		rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
		msr = lo | ((u64)hi << 32);
		return !(msr & MSR_K7_HWCR_CPB_DIS);
	}
	return false;
}

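/*
 * Set or clear the boost-disable bit for every CPU in @cpumask with one
 * rdmsr_on_cpus()/wrmsr_on_cpus() pair, using the preallocated per-CPU
 * 'msrs' buffer as scratch space for the read-modify-write.
 */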
static void boost_set_msrs(bool enable, const struct cpumask *cpumask)
{
	u32 cpu;
	u32 msr_addr;
	u64 msr_mask;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		msr_addr = MSR_IA32_MISC_ENABLE;
		msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
		break;
	case X86_VENDOR_AMD:
		msr_addr = MSR_K7_HWCR;
		msr_mask = MSR_K7_HWCR_CPB_DIS;
		break;
	default:
		return;
	}

	rdmsr_on_cpus(cpumask, msr_addr, msrs);

	for_each_cpu(cpu, cpumask) {
		struct msr *reg = per_cpu_ptr(msrs, cpu);
		if (enable)
			reg->q &= ~msr_mask;
		else
			reg->q |= msr_mask;
	}

	wrmsr_on_cpus(cpumask, msr_addr, msrs);
}

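/*
 * Common sysfs store handler shared by the global "boost" attribute and
 * the legacy per-policy "cpb" attribute: accepts "0" or "1" and flips the
 * boost-disable MSRs on all online CPUs when the state actually changes.
 */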
static ssize_t _store_boost(const char *buf, size_t count)
{
	int ret;
	unsigned long val = 0;

	if (!boost_supported)
		return -EINVAL;

	ret = kstrtoul(buf, 10, &val);
	if (ret || (val > 1))
		return -EINVAL;

	if ((val && boost_enabled) || (!val && !boost_enabled))
		return count;

	get_online_cpus();

	boost_set_msrs(val, cpu_online_mask);

	put_online_cpus();

	boost_enabled = val;
	pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis");

	return count;
}

static ssize_t store_global_boost(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	return _store_boost(buf, count);
}

static ssize_t show_global_boost(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", boost_enabled);
}

static struct global_attr global_boost = __ATTR(boost, 0644,
						show_global_boost,
						store_global_boost);

static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);

	return cpufreq_show_cpus(data->freqdomain_cpus, buf);
}

cpufreq_freq_attr_ro(freqdomain_cpus);

#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
			 size_t count)
{
	return _store_boost(buf, count);
}

static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", boost_enabled);
}

cpufreq_freq_attr_rw(cpb);
#endif

static int check_est_cpu(unsigned int cpuid)
{
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

	return cpu_has(cpu, X86_FEATURE_EST);
}

static int check_amd_hwpstate_cpu(unsigned int cpuid)
{
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

	return cpu_has(cpu, X86_FEATURE_HW_PSTATE);
}

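/* Map a P-state status value read from the ACPI I/O port to a frequency. */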
static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
{
	struct acpi_processor_performance *perf;
	int i;

	perf = data->acpi_data;

	for (i = 0; i < perf->state_count; i++) {
		if (value == perf->states[i].status)
			return data->freq_table[i].frequency;
	}
	return 0;
}

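/*
 * Map a value read from the vendor's PERF_CTL MSR to a frequency: mask it
 * down to the vendor-specific ID range, then look it up in the frequency
 * table; falls back to the first table entry when no state matches.
 */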
static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
{
	int i;
	struct acpi_processor_performance *perf;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		msr &= AMD_MSR_RANGE;
	else
		msr &= INTEL_MSR_RANGE;

	perf = data->acpi_data;

	for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
		if (msr == perf->states[data->freq_table[i].driver_data].status)
			return data->freq_table[i].frequency;
	}
	return data->freq_table[0].frequency;
}

static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
{
	switch (data->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
	case SYSTEM_AMD_MSR_CAPABLE:
		return extract_msr(val, data);
	case SYSTEM_IO_CAPABLE:
		return extract_io(val, data);
	default:
		return 0;
	}
}

struct msr_addr {
	u32 reg;
};

struct io_addr {
	u16 port;
	u8 bit_width;
};

struct drv_cmd {
	unsigned int type;
	const struct cpumask *mask;
	union {
		struct msr_addr msr;
		struct io_addr io;
	} addr;
	u32 val;
};

/* Called via smp_call_function_single(), on the target CPU */
static void do_drv_read(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;
	u32 h;

	switch (cmd->type) {
	case SYSTEM_INTEL_MSR_CAPABLE:
	case SYSTEM_AMD_MSR_CAPABLE:
		rdmsr(cmd->addr.msr.reg, cmd->val, h);
		break;
	case SYSTEM_IO_CAPABLE:
		acpi_os_read_port((acpi_io_address)cmd->addr.io.port,
				&cmd->val,
				(u32)cmd->addr.io.bit_width);
		break;
	default:
		break;
	}
}

/* Called via smp_call_function_many(), on the target CPUs */
static void do_drv_write(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;
	u32 lo, hi;

	switch (cmd->type) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		rdmsr(cmd->addr.msr.reg, lo, hi);
		lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
		wrmsr(cmd->addr.msr.reg, lo, hi);
		break;
	case SYSTEM_AMD_MSR_CAPABLE:
		wrmsr(cmd->addr.msr.reg, cmd->val, 0);
		break;
	case SYSTEM_IO_CAPABLE:
		acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
				cmd->val,
				(u32)cmd->addr.io.bit_width);
		break;
	default:
		break;
	}
}

static void drv_read(struct drv_cmd *cmd)
{
	int err;
	cmd->val = 0;

	err = smp_call_function_any(cmd->mask, do_drv_read, cmd, 1);
	WARN_ON_ONCE(err);	/* smp_call_function_any() was buggy? */
}

static void drv_write(struct drv_cmd *cmd)
{
	int this_cpu;

	this_cpu = get_cpu();
	if (cpumask_test_cpu(this_cpu, cmd->mask))
		do_drv_write(cmd);
	smp_call_function_many(cmd->mask, do_drv_write, cmd, 1);
	put_cpu();
}

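/*
 * Read the raw P-state control value for the policy that the first CPU in
 * @mask belongs to, either from the vendor PERF_CTL MSR or from the ACPI
 * control I/O port, executed on one of the CPUs in @mask.
 */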
static u32 get_cur_val(const struct cpumask *mask)
{
	struct acpi_processor_performance *perf;
	struct drv_cmd cmd;

	if (unlikely(cpumask_empty(mask)))
		return 0;

	switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
		break;
	case SYSTEM_AMD_MSR_CAPABLE:
		cmd.type = SYSTEM_AMD_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
		break;
	case SYSTEM_IO_CAPABLE:
		cmd.type = SYSTEM_IO_CAPABLE;
		perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
		cmd.addr.io.port = perf->control_register.address;
		cmd.addr.io.bit_width = perf->control_register.bit_width;
		break;
	default:
		return 0;
	}

	cmd.mask = mask;
	drv_read(&cmd);

	pr_debug("get_cur_val = %u\n", cmd.val);

	return cmd.val;
}

static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu);
	unsigned int freq;
	unsigned int cached_freq;

	pr_debug("get_cur_freq_on_cpu (%d)\n", cpu);

	if (unlikely(data == NULL ||
		     data->acpi_data == NULL || data->freq_table == NULL)) {
		return 0;
	}

	cached_freq = data->freq_table[data->acpi_data->state].frequency;
	freq = extract_freq(get_cur_val(cpumask_of(cpu)), data);
	if (freq != cached_freq) {
		/*
		 * The dreaded BIOS frequency change behind our back.
		 * Force set the frequency on next target call.
		 */
		data->resume = 1;
	}

	pr_debug("cur freq = %u\n", freq);

	return freq;
}

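/*
 * Poll the hardware (up to 100 times, 10us apart) until the effective
 * frequency matches @freq; used only when acpi_pstate_strict is set.
 */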
static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
				struct acpi_cpufreq_data *data)
{
	unsigned int cur_freq;
	unsigned int i;

	for (i = 0; i < 100; i++) {
		cur_freq = extract_freq(get_cur_val(mask), data);
		if (cur_freq == freq)
			return 1;
		udelay(10);
	}
	return 0;
}

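/*
 * cpufreq ->target() callback: pick the table entry that satisfies
 * @target_freq/@relation, program the new P-state through the MSR or I/O
 * method recorded at init time, and emit the PRE/POSTCHANGE notifications.
 */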
static int acpi_cpufreq_target(struct cpufreq_policy *policy,
			       unsigned int target_freq, unsigned int relation)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
	struct acpi_processor_performance *perf;
	struct cpufreq_freqs freqs;
	struct drv_cmd cmd;
	unsigned int next_state = 0; /* Index into freq_table */
	unsigned int next_perf_state = 0; /* Index into perf table */
	int result = 0;

	pr_debug("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu);

	if (unlikely(data == NULL ||
	     data->acpi_data == NULL || data->freq_table == NULL)) {
		return -ENODEV;
	}

	perf = data->acpi_data;
	result = cpufreq_frequency_table_target(policy,
						data->freq_table,
						target_freq,
						relation, &next_state);
	if (unlikely(result)) {
		result = -ENODEV;
		goto out;
	}

	next_perf_state = data->freq_table[next_state].driver_data;
	if (perf->state == next_perf_state) {
		if (unlikely(data->resume)) {
			pr_debug("Called after resume, resetting to P%d\n",
				next_perf_state);
			data->resume = 0;
		} else {
			pr_debug("Already at target state (P%d)\n",
				next_perf_state);
			goto out;
		}
	}

	switch (data->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	case SYSTEM_AMD_MSR_CAPABLE:
		cmd.type = SYSTEM_AMD_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	case SYSTEM_IO_CAPABLE:
		cmd.type = SYSTEM_IO_CAPABLE;
		cmd.addr.io.port = perf->control_register.address;
		cmd.addr.io.bit_width = perf->control_register.bit_width;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	default:
		result = -ENODEV;
		goto out;
	}

	/* cpufreq holds the hotplug lock, so we are safe from here on */
	if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
		cmd.mask = policy->cpus;
	else
		cmd.mask = cpumask_of(policy->cpu);

	freqs.old = perf->states[perf->state].core_frequency * 1000;
	freqs.new = data->freq_table[next_state].frequency;
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);

	drv_write(&cmd);

	if (acpi_pstate_strict) {
		if (!check_freqs(cmd.mask, freqs.new, data)) {
			pr_debug("acpi_cpufreq_target failed (%d)\n",
				policy->cpu);
			result = -EAGAIN;
			freqs.new = freqs.old;
		}
	}

	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);

	if (!result)
		perf->state = next_perf_state;

out:
	return result;
}

static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);

	pr_debug("acpi_cpufreq_verify\n");

	return cpufreq_frequency_table_verify(policy, data->freq_table);
}

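/*
 * Estimate the current P-state when it cannot be read back (I/O port
 * systems): pick the table entry whose frequency is closest to cpu_khz,
 * or assume P0 if cpu_khz is unknown.
 */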
static unsigned long
acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
{
	struct acpi_processor_performance *perf = data->acpi_data;

	if (cpu_khz) {
		/* search the closest match to cpu_khz */
		unsigned int i;
		unsigned long freq;
		unsigned long freqn = perf->states[0].core_frequency * 1000;

		for (i = 0; i < (perf->state_count-1); i++) {
			freq = freqn;
			freqn = perf->states[i+1].core_frequency * 1000;
			if ((2 * cpu_khz) > (freqn + freq)) {
				perf->state = i;
				return freq;
			}
		}
		perf->state = perf->state_count-1;
		return freqn;
	} else {
		/* assume CPU is at P0... */
		perf->state = 0;
		return perf->states[0].core_frequency * 1000;
	}
}

static void free_acpi_perf_data(void)
{
	unsigned int i;

	/* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
	for_each_possible_cpu(i)
		free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
				 ->shared_cpu_map);
	free_percpu(acpi_perf_data);
}

static int boost_notify(struct notifier_block *nb, unsigned long action,
		      void *hcpu)
{
	unsigned cpu = (long)hcpu;
	const struct cpumask *cpumask;

	cpumask = get_cpu_mask(cpu);

	/*
	 * Clear the boost-disable bit on the CPU_DOWN path so that
	 * this cpu cannot block the remaining ones from boosting. On
	 * the CPU_UP path we simply keep the boost-disable flag in
	 * sync with the current global state.
	 */

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		boost_set_msrs(boost_enabled, cpumask);
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		boost_set_msrs(1, cpumask);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}


static struct notifier_block boost_nb = {
	.notifier_call		= boost_notify,
};

/*
 * acpi_cpufreq_early_init - initialize ACPI P-States library
 *
 * Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c)
 * in order to determine correct frequency and voltage pairings. We can
 * do _PDC and _PSD and find out the processor dependency for the
 * actual init that will happen later...
 */
static int __init acpi_cpufreq_early_init(void)
{
	unsigned int i;
	pr_debug("acpi_cpufreq_early_init\n");

	acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
	if (!acpi_perf_data) {
		pr_debug("Memory allocation error for acpi_perf_data.\n");
		return -ENOMEM;
	}
	for_each_possible_cpu(i) {
		if (!zalloc_cpumask_var_node(
			&per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
			GFP_KERNEL, cpu_to_node(i))) {

			/* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
			free_acpi_perf_data();
			return -ENOMEM;
		}
	}

	/* Do initialization in ACPI core */
	acpi_processor_preregister_performance(acpi_perf_data);
	return 0;
}

#ifdef CONFIG_SMP
/*
 * Some BIOSes do SW_ANY coordination internally, either set it up in hw
 * or do it in BIOS firmware and won't inform about it to OS. If not
 * detected, this has a side effect of making CPU run at a different speed
 * than OS intended it to run at. Detect it and handle it cleanly.
 */
static int bios_with_sw_any_bug;

static int sw_any_bug_found(const struct dmi_system_id *d)
{
	bios_with_sw_any_bug = 1;
	return 0;
}

static const struct dmi_system_id sw_any_bug_dmi_table[] = {
	{
		.callback = sw_any_bug_found,
		.ident = "Supermicro Server X6DLP",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
			DMI_MATCH(DMI_BIOS_VERSION, "080010"),
			DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"),
		},
	},
	{ }
};

static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
{
	/* Intel Xeon Processor 7100 Series Specification Update
	 * http://www.intel.com/Assets/PDF/specupdate/314554.pdf
	 * AL30: A Machine Check Exception (MCE) Occurring during an
	 * Enhanced Intel SpeedStep Technology Ratio Change May Cause
	 * Both Processor Cores to Lock Up. */
	if (c->x86_vendor == X86_VENDOR_INTEL) {
		if ((c->x86 == 15) &&
		    (c->x86_model == 6) &&
		    (c->x86_mask == 8)) {
			printk(KERN_INFO "acpi-cpufreq: Intel(R) "
			    "Xeon(R) 7100 Errata AL30, processors may "
			    "lock up on frequency changes: disabling "
			    "acpi-cpufreq.\n");
			return -ENODEV;
		}
	}
	return 0;
}
#endif

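/*
 * Per-policy init: register ACPI performance data for the CPU, work out
 * whether P-states are driven by MSR or I/O port accesses, build the
 * cpufreq frequency table from the _PSS states and set up the sharing
 * (_PSD) relationships for the policy.
 */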
static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	unsigned int i;
	unsigned int valid_states = 0;
	unsigned int cpu = policy->cpu;
	struct acpi_cpufreq_data *data;
	unsigned int result = 0;
	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
	struct acpi_processor_performance *perf;
#ifdef CONFIG_SMP
	static int blacklisted;
#endif

	pr_debug("acpi_cpufreq_cpu_init\n");

#ifdef CONFIG_SMP
	if (blacklisted)
		return blacklisted;
	blacklisted = acpi_cpufreq_blacklist(c);
	if (blacklisted)
		return blacklisted;
#endif

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	if (!zalloc_cpumask_var(&data->freqdomain_cpus, GFP_KERNEL)) {
		result = -ENOMEM;
		goto err_free;
	}

	data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
	per_cpu(acfreq_data, cpu) = data;

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
		acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;

	result = acpi_processor_register_performance(data->acpi_data, cpu);
	if (result)
		goto err_free_mask;

	perf = data->acpi_data;
	policy->shared_type = perf->shared_type;

	/*
	 * Will let policy->cpus know about dependency only when software
	 * coordination is required.
	 */
	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
	    policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
		cpumask_copy(policy->cpus, perf->shared_cpu_map);
	}
	cpumask_copy(data->freqdomain_cpus, perf->shared_cpu_map);

#ifdef CONFIG_SMP
	dmi_check_system(sw_any_bug_dmi_table);
	if (bios_with_sw_any_bug && !policy_is_shared(policy)) {
		policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		cpumask_copy(policy->cpus, cpu_core_mask(cpu));
	}

	if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
		cpumask_clear(policy->cpus);
		cpumask_set_cpu(cpu, policy->cpus);
		cpumask_copy(data->freqdomain_cpus, cpu_sibling_mask(cpu));
		policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
		pr_info_once(PFX "overriding BIOS provided _PSD data\n");
	}
#endif

	/* capability check */
	if (perf->state_count <= 1) {
		pr_debug("No P-States\n");
		result = -ENODEV;
		goto err_unreg;
	}

	if (perf->control_register.space_id != perf->status_register.space_id) {
		result = -ENODEV;
		goto err_unreg;
	}

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		    boot_cpu_data.x86 == 0xf) {
			pr_debug("AMD K8 systems must use native drivers.\n");
			result = -ENODEV;
			goto err_unreg;
		}
		pr_debug("SYSTEM IO addr space\n");
		data->cpu_feature = SYSTEM_IO_CAPABLE;
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		pr_debug("HARDWARE addr space\n");
		if (check_est_cpu(cpu)) {
			data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
			break;
		}
		if (check_amd_hwpstate_cpu(cpu)) {
			data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
			break;
		}
		result = -ENODEV;
		goto err_unreg;
	default:
		pr_debug("Unknown addr space %d\n",
			(u32) (perf->control_register.space_id));
		result = -ENODEV;
		goto err_unreg;
	}

	data->freq_table = kmalloc(sizeof(*data->freq_table) *
		    (perf->state_count+1), GFP_KERNEL);
	if (!data->freq_table) {
		result = -ENOMEM;
		goto err_unreg;
	}

	/* detect transition latency */
	policy->cpuinfo.transition_latency = 0;
	for (i = 0; i < perf->state_count; i++) {
		if ((perf->states[i].transition_latency * 1000) >
		    policy->cpuinfo.transition_latency)
			policy->cpuinfo.transition_latency =
			    perf->states[i].transition_latency * 1000;
	}

	/* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
	if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
	    policy->cpuinfo.transition_latency > 20 * 1000) {
		policy->cpuinfo.transition_latency = 20 * 1000;
		printk_once(KERN_INFO
			    "P-state transition latency capped at 20 uS\n");
	}

	/* table init */
	for (i = 0; i < perf->state_count; i++) {
		if (i > 0 && perf->states[i].core_frequency >=
		    data->freq_table[valid_states-1].frequency / 1000)
			continue;

		data->freq_table[valid_states].driver_data = i;
		data->freq_table[valid_states].frequency =
		    perf->states[i].core_frequency * 1000;
		valid_states++;
	}
	data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
	perf->state = 0;

	result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
	if (result)
		goto err_freqfree;

	if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
		printk(KERN_WARNING FW_WARN "P-state 0 is not max freq\n");

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		/* Current speed is unknown and not detectable by IO port */
		policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
		policy->cur = get_cur_freq_on_cpu(cpu);
		break;
	default:
		break;
	}

	/* notify BIOS that we exist */
	acpi_processor_notify_smm(THIS_MODULE);

	pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
	for (i = 0; i < perf->state_count; i++)
		pr_debug("     %cP%d: %d MHz, %d mW, %d uS\n",
			(i == perf->state ? '*' : ' '), i,
			(u32) perf->states[i].core_frequency,
			(u32) perf->states[i].power,
			(u32) perf->states[i].transition_latency);

	cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);

	/*
	 * the first call to ->target() should result in us actually
	 * writing something to the appropriate registers.
	 */
	data->resume = 1;

	return result;

err_freqfree:
	kfree(data->freq_table);
err_unreg:
	acpi_processor_unregister_performance(perf, cpu);
err_free_mask:
	free_cpumask_var(data->freqdomain_cpus);
err_free:
	kfree(data);
	per_cpu(acfreq_data, cpu) = NULL;

	return result;
}

static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);

	pr_debug("acpi_cpufreq_cpu_exit\n");

	if (data) {
		cpufreq_frequency_table_put_attr(policy->cpu);
		per_cpu(acfreq_data, policy->cpu) = NULL;
		acpi_processor_unregister_performance(data->acpi_data,
						      policy->cpu);
		free_cpumask_var(data->freqdomain_cpus);
		kfree(data->freq_table);
		kfree(data);
	}

	return 0;
}

static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);

	pr_debug("acpi_cpufreq_resume\n");

	data->resume = 1;

	return 0;
}

static struct freq_attr *acpi_cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	&freqdomain_cpus,
	NULL,	/* this is a placeholder for cpb, do not remove */
	NULL,
};

static struct cpufreq_driver acpi_cpufreq_driver = {
	.verify		= acpi_cpufreq_verify,
	.target		= acpi_cpufreq_target,
	.bios_limit	= acpi_processor_get_bios_limit,
	.init		= acpi_cpufreq_cpu_init,
	.exit		= acpi_cpufreq_cpu_exit,
	.resume		= acpi_cpufreq_resume,
	.name		= "acpi-cpufreq",
	.attr		= acpi_cpufreq_attr,
};

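/*
 * If the CPU advertises boost support (CPB on AMD, IDA on Intel), read the
 * current boost state, propagate it to all online CPUs, register the
 * hotplug notifier and expose the global "boost" sysfs file; without
 * hardware support the file is still created, but read-only.
 */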
static void __init acpi_cpufreq_boost_init(void)
{
	if (boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)) {
		msrs = msrs_alloc();

		if (!msrs)
			return;

		boost_supported = true;
		boost_enabled = boost_state(0);

		get_online_cpus();

		/* Force all MSRs to the same value */
		boost_set_msrs(boost_enabled, cpu_online_mask);

		register_cpu_notifier(&boost_nb);

		put_online_cpus();
	} else
		global_boost.attr.mode = 0444;

	/* We create the boost file in any case, though for systems without
	 * hardware support it will be read-only and hardwired to return 0.
	 */
	if (cpufreq_sysfs_create_file(&(global_boost.attr)))
		pr_warn(PFX "could not register global boost sysfs file\n");
	else
		pr_debug("registered global boost sysfs file\n");
}

static void __exit acpi_cpufreq_boost_exit(void)
{
	cpufreq_sysfs_remove_file(&(global_boost.attr));

	if (msrs) {
		unregister_cpu_notifier(&boost_nb);

		msrs_free(msrs);
		msrs = NULL;
	}
}

static int __init acpi_cpufreq_init(void)
{
	int ret;

	if (acpi_disabled)
		return -ENODEV;

	/* don't keep reloading if cpufreq_driver exists */
	if (cpufreq_get_current_driver())
		return -EEXIST;

	pr_debug("acpi_cpufreq_init\n");

	ret = acpi_cpufreq_early_init();
	if (ret)
		return ret;

#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
	/* this is a sysfs file with a strange name and an even stranger
	 * semantic - per CPU instantiation, but system global effect.
	 * Lets enable it only on AMD CPUs for compatibility reasons and
	 * only if configured. This is considered legacy code, which
	 * will probably be removed at some point in the future.
	 */
	if (check_amd_hwpstate_cpu(0)) {
		struct freq_attr **iter;

		pr_debug("adding sysfs entry for cpb\n");

		for (iter = acpi_cpufreq_attr; *iter != NULL; iter++)
			;

		/* make sure there is a terminator behind it */
		if (iter[1] == NULL)
			*iter = &cpb;
	}
#endif

	ret = cpufreq_register_driver(&acpi_cpufreq_driver);
	if (ret)
		free_acpi_perf_data();
	else
		acpi_cpufreq_boost_init();

	return ret;
}

static void __exit acpi_cpufreq_exit(void)
{
	pr_debug("acpi_cpufreq_exit\n");

	acpi_cpufreq_boost_exit();

	cpufreq_unregister_driver(&acpi_cpufreq_driver);

	free_acpi_perf_data();
}

module_param(acpi_pstate_strict, uint, 0644);
MODULE_PARM_DESC(acpi_pstate_strict,
	"value 0 or non-zero. non-zero -> strict ACPI checks are "
	"performed during frequency changes.");

late_initcall(acpi_cpufreq_init);
module_exit(acpi_cpufreq_exit);

static const struct x86_cpu_id acpi_cpufreq_ids[] = {
	X86_FEATURE_MATCH(X86_FEATURE_ACPI),
	X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids);

static const struct acpi_device_id processor_device_ids[] = {
	{ACPI_PROCESSOR_OBJECT_HID, },
	{ACPI_PROCESSOR_DEVICE_HID, },
	{},
};
MODULE_DEVICE_TABLE(acpi, processor_device_ids);

MODULE_ALIAS("acpi");