/*
 * x86_pkg_temp_thermal driver
 * Copyright (c) 2013, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/param.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/pm.h>
#include <linux/thermal.h>
#include <linux/debugfs.h>
#include <asm/cpu_device_id.h>
#include <asm/mce.h>

/*
 * Rate control delay: the idea is to introduce a debounce effect.
 * The delay should be long enough to avoid a flood of events when a
 * threshold is set to a temperature that is constantly violated, but
 * short enough that user space can still react, e.g. by removing the
 * threshold or moving it to the next interesting setting. Based on
 * experiments, a period of about 5 seconds under load yields a
 * significant temperature change.
 */
#define PKG_TEMP_THERMAL_NOTIFY_DELAY	5000
static int notify_delay_ms = PKG_TEMP_THERMAL_NOTIFY_DELAY;
module_param(notify_delay_ms, int, 0644);
MODULE_PARM_DESC(notify_delay_ms,
	"User space notification delay in milliseconds.");
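/*
 * Example usage (an assumed typical invocation, not mandated by the driver):
 * the delay can be set at load time, e.g.
 *	modprobe x86_pkg_temp_thermal notify_delay_ms=2000
 * or changed at run time via
 *	/sys/module/x86_pkg_temp_thermal/parameters/notify_delay_ms
 */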

/*
 * Number of trip points in the thermal zone. Currently it can't be more
 * than 2: the MSR only supports setting and getting notifications for
 * two thresholds. This define enforces that limit even if cpuid returns
 * a bogus number of thresholds.
 */
#define MAX_NUMBER_OF_TRIPS	2

struct pkg_device {
	int cpu;
	bool work_scheduled;
	u32 tj_max;
	u32 msr_pkg_therm_low;
	u32 msr_pkg_therm_high;
	struct delayed_work work;
	struct thermal_zone_device *tzone;
	struct cpumask cpumask;
};

static struct thermal_zone_params pkg_temp_tz_params = {
	.no_hwmon = true,
};

/* Keep track of how many package pointers we allocated in init() */
static int max_packages __read_mostly;
/* Array of package pointers */
static struct pkg_device **packages;
/* Serializes interrupt notification, work and hotplug */
static DEFINE_SPINLOCK(pkg_temp_lock);
/* Protects zone operation in the work function against hotplug removal */
static DEFINE_MUTEX(thermal_zone_mutex);

/* Debug counters to show using debugfs */
static struct dentry *debugfs;
static unsigned int pkg_interrupt_cnt;
static unsigned int pkg_work_cnt;

static int pkg_temp_debugfs_init(void)
{
	struct dentry *d;

	debugfs = debugfs_create_dir("pkg_temp_thermal", NULL);
	if (!debugfs)
		return -ENOENT;

	d = debugfs_create_u32("pkg_thres_interrupt", S_IRUGO, debugfs,
			       (u32 *)&pkg_interrupt_cnt);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("pkg_thres_work", S_IRUGO, debugfs,
			       (u32 *)&pkg_work_cnt);
	if (!d)
		goto err_out;

	return 0;

err_out:
	debugfs_remove_recursive(debugfs);
	return -ENOENT;
}

/*
 * Protection:
 *
 * - cpu hotplug: Read serialized by cpu hotplug lock
 *		  Write must hold pkg_temp_lock
 *
 * - Other callsites: Must hold pkg_temp_lock
 */
static struct pkg_device *pkg_temp_thermal_get_dev(unsigned int cpu)
{
	int pkgid = topology_logical_package_id(cpu);

	if (pkgid >= 0 && pkgid < max_packages)
		return packages[pkgid];
	return NULL;
}

/*
 * tj-max is interesting because threshold is set relative to this
 * temperature.
 */
static int get_tj_max(int cpu, u32 *tj_max)
{
	u32 eax, edx, val;
	int err;

	err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
	if (err)
		return err;

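	/* Bits 23:16 of MSR_IA32_TEMPERATURE_TARGET hold TJ max in degrees C */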
	val = (eax >> 16) & 0xff;
	*tj_max = val * 1000;

	return val ? 0 : -EINVAL;
}

static int sys_get_curr_temp(struct thermal_zone_device *tzd, int *temp)
{
	struct pkg_device *pkgdev = tzd->devdata;
	u32 eax, edx;

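	/*
	 * Bit 31 of MSR_IA32_PACKAGE_THERM_STATUS indicates a valid reading;
	 * bits 22:16 encode the temperature as an offset below TJ max.
	 */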
	rdmsr_on_cpu(pkgdev->cpu, MSR_IA32_PACKAGE_THERM_STATUS, &eax, &edx);
	if (eax & 0x80000000) {
		*temp = pkgdev->tj_max - ((eax >> 16) & 0x7f) * 1000;
		pr_debug("sys_get_curr_temp %d\n", *temp);
		return 0;
	}
	return -EINVAL;
}

static int sys_get_trip_temp(struct thermal_zone_device *tzd,
			     int trip, int *temp)
{
	struct pkg_device *pkgdev = tzd->devdata;
	unsigned long thres_reg_value;
	u32 mask, shift, eax, edx;
	int ret;

	if (trip >= MAX_NUMBER_OF_TRIPS)
		return -EINVAL;

	if (trip) {
		mask = THERM_MASK_THRESHOLD1;
		shift = THERM_SHIFT_THRESHOLD1;
	} else {
		mask = THERM_MASK_THRESHOLD0;
		shift = THERM_SHIFT_THRESHOLD0;
	}

	ret = rdmsr_on_cpu(pkgdev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
			   &eax, &edx);
	if (ret < 0)
		return ret;

	thres_reg_value = (eax & mask) >> shift;
	if (thres_reg_value)
		*temp = pkgdev->tj_max - thres_reg_value * 1000;
	else
		*temp = 0;
	pr_debug("sys_get_trip_temp %d\n", *temp);

	return 0;
}


static int
sys_set_trip_temp(struct thermal_zone_device *tzd, int trip, int temp)
{
	struct pkg_device *pkgdev = tzd->devdata;
	u32 l, h, mask, shift, intr;
	int ret;

	if (trip >= MAX_NUMBER_OF_TRIPS || temp >= pkgdev->tj_max)
		return -EINVAL;

	ret = rdmsr_on_cpu(pkgdev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
			   &l, &h);
	if (ret < 0)
		return ret;

	if (trip) {
		mask = THERM_MASK_THRESHOLD1;
		shift = THERM_SHIFT_THRESHOLD1;
		intr = THERM_INT_THRESHOLD1_ENABLE;
	} else {
		mask = THERM_MASK_THRESHOLD0;
		shift = THERM_SHIFT_THRESHOLD0;
		intr = THERM_INT_THRESHOLD0_ENABLE;
	}
	l &= ~mask;
	/*
	 * A trip temperature of 0 from user space indicates that it is no
	 * longer interested in receiving notifications.
	 */
	if (!temp) {
		l &= ~intr;
	} else {
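		/* Thresholds are programmed as whole degrees C below TJ max */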
		l |= (pkgdev->tj_max - temp)/1000 << shift;
		l |= intr;
	}

	return wrmsr_on_cpu(pkgdev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
}

static int sys_get_trip_type(struct thermal_zone_device *thermal, int trip,
			     enum thermal_trip_type *type)
{
	*type = THERMAL_TRIP_PASSIVE;
	return 0;
}

/* Thermal zone callback registry */
static struct thermal_zone_device_ops tzone_ops = {
	.get_temp = sys_get_curr_temp,
	.get_trip_temp = sys_get_trip_temp,
	.get_trip_type = sys_get_trip_type,
	.set_trip_temp = sys_set_trip_temp,
};

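/*
 * Returning true here tells the x86 package thermal interrupt code that
 * this driver rate-limits notifications itself (via notify_delay_ms and
 * the delayed notification work below), so no additional rate control is
 * needed in the core interrupt handler.
 */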
static bool pkg_thermal_rate_control(void)
{
	return true;
}

/* Enable threshold interrupt on local package/cpu */
static inline void enable_pkg_thres_interrupt(void)
{
	u8 thres_0, thres_1;
	u32 l, h;

	rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
	/* Only enable the interrupt for thresholds that have a valid value */
	thres_0 = (l & THERM_MASK_THRESHOLD0) >> THERM_SHIFT_THRESHOLD0;
	thres_1 = (l & THERM_MASK_THRESHOLD1) >> THERM_SHIFT_THRESHOLD1;
	if (thres_0)
		l |= THERM_INT_THRESHOLD0_ENABLE;
	if (thres_1)
		l |= THERM_INT_THRESHOLD1_ENABLE;
	wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
}

/* Disable threshold interrupt on local package/cpu */
static inline void disable_pkg_thres_interrupt(void)
{
	u32 l, h;

	rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);

	l &= ~(THERM_INT_THRESHOLD0_ENABLE | THERM_INT_THRESHOLD1_ENABLE);
	wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
}

static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
{
	struct thermal_zone_device *tzone = NULL;
	int cpu = smp_processor_id();
	struct pkg_device *pkgdev;
	u64 msr_val, wr_val;

	mutex_lock(&thermal_zone_mutex);
	spin_lock_irq(&pkg_temp_lock);
	++pkg_work_cnt;

	pkgdev = pkg_temp_thermal_get_dev(cpu);
	if (!pkgdev) {
		spin_unlock_irq(&pkg_temp_lock);
		mutex_unlock(&thermal_zone_mutex);
		return;
	}
	pkgdev->work_scheduled = false;

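	/*
	 * Clear the sticky threshold log bits. If either of them was set,
	 * a programmed threshold has been crossed since the last clear and
	 * the thermal zone must be notified.
	 */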
	rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
	wr_val = msr_val & ~(THERM_LOG_THRESHOLD0 | THERM_LOG_THRESHOLD1);
	if (wr_val != msr_val) {
		wrmsrl(MSR_IA32_PACKAGE_THERM_STATUS, wr_val);
		tzone = pkgdev->tzone;
	}

	enable_pkg_thres_interrupt();
	spin_unlock_irq(&pkg_temp_lock);

	/*
	 * If tzone is not NULL, then thermal_zone_mutex will prevent the
	 * concurrent removal in the cpu offline callback.
	 */
	if (tzone)
		thermal_zone_device_update(tzone, THERMAL_EVENT_UNSPECIFIED);

	mutex_unlock(&thermal_zone_mutex);
}

static void pkg_thermal_schedule_work(int cpu, struct delayed_work *work)
{
	unsigned long ms = msecs_to_jiffies(notify_delay_ms);

	schedule_delayed_work_on(cpu, work, ms);
}

static int pkg_thermal_notify(u64 msr_val)
{
	int cpu = smp_processor_id();
	struct pkg_device *pkgdev;
	unsigned long flags;

	spin_lock_irqsave(&pkg_temp_lock, flags);
	++pkg_interrupt_cnt;

	disable_pkg_thres_interrupt();

	/* Work is per package, so scheduling it once is enough. */
	pkgdev = pkg_temp_thermal_get_dev(cpu);
	if (pkgdev && !pkgdev->work_scheduled) {
		pkgdev->work_scheduled = true;
		pkg_thermal_schedule_work(pkgdev->cpu, &pkgdev->work);
	}

	spin_unlock_irqrestore(&pkg_temp_lock, flags);
	return 0;
}

static int pkg_temp_thermal_device_add(unsigned int cpu)
{
	int pkgid = topology_logical_package_id(cpu);
	u32 tj_max, eax, ebx, ecx, edx;
	struct pkg_device *pkgdev;
	int thres_count, err;

	if (pkgid >= max_packages)
		return -ENOMEM;

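	/*
	 * CPUID leaf 6 (thermal and power management) reports the number of
	 * programmable DTS interrupt thresholds in the low bits of EBX.
	 */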
	cpuid(6, &eax, &ebx, &ecx, &edx);
	thres_count = ebx & 0x07;
	if (!thres_count)
		return -ENODEV;

	thres_count = clamp_val(thres_count, 0, MAX_NUMBER_OF_TRIPS);

	err = get_tj_max(cpu, &tj_max);
	if (err)
		return err;

	pkgdev = kzalloc(sizeof(*pkgdev), GFP_KERNEL);
	if (!pkgdev)
		return -ENOMEM;

	INIT_DELAYED_WORK(&pkgdev->work, pkg_temp_thermal_threshold_work_fn);
	pkgdev->cpu = cpu;
	pkgdev->tj_max = tj_max;
	pkgdev->tzone = thermal_zone_device_register("x86_pkg_temp",
			thres_count,
			(thres_count == MAX_NUMBER_OF_TRIPS) ? 0x03 : 0x01,
			pkgdev, &tzone_ops, &pkg_temp_tz_params, 0, 0);
	if (IS_ERR(pkgdev->tzone)) {
		err = PTR_ERR(pkgdev->tzone);
		kfree(pkgdev);
		return err;
	}
	/* Store MSR value for package thermal interrupt, to restore at exit */
	rdmsr_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
		     &pkgdev->msr_pkg_therm_low,
		     &pkgdev->msr_pkg_therm_high);

	cpumask_set_cpu(cpu, &pkgdev->cpumask);
	spin_lock_irq(&pkg_temp_lock);
	packages[pkgid] = pkgdev;
	spin_unlock_irq(&pkg_temp_lock);
	return 0;
}

static void put_core_offline(unsigned int cpu)
{
	struct pkg_device *pkgdev = pkg_temp_thermal_get_dev(cpu);
	bool lastcpu, was_target;
	int target;

	if (!pkgdev)
		return;

	target = cpumask_any_but(&pkgdev->cpumask, cpu);
	cpumask_clear_cpu(cpu, &pkgdev->cpumask);
	lastcpu = target >= nr_cpu_ids;
	/*
	 * If this is the last cpu in the package, remove the sysfs files
	 * before doing further cleanups.
	 */
	if (lastcpu) {
		struct thermal_zone_device *tzone = pkgdev->tzone;

		/*
		 * We must protect against a work function calling
		 * thermal_zone_device_update() during or after unregister.
		 * We null out the pointer under the zone mutex, so the
		 * worker function won't try to call it.
		 */
		mutex_lock(&thermal_zone_mutex);
		pkgdev->tzone = NULL;
		mutex_unlock(&thermal_zone_mutex);

		thermal_zone_device_unregister(tzone);
	}

	/* Protect against work and interrupts */
	spin_lock_irq(&pkg_temp_lock);

	/*
	 * Check whether this cpu was the current target and store the new
	 * one. When we drop the lock, then the interrupt notify function
	 * will see the new target.
	 */
	was_target = pkgdev->cpu == cpu;
	pkgdev->cpu = target;

	/*
	 * If this is the last CPU in the package remove the package
	 * reference from the array and restore the interrupt MSR. When we
	 * drop the lock neither the interrupt notify function nor the
	 * worker will see the package anymore.
	 */
	if (lastcpu) {
		packages[topology_logical_package_id(cpu)] = NULL;
		/*
		 * After this point nothing touches the MSR anymore. We
		 * must drop the lock to make the cross cpu call. This goes
		 * away once we move that code to the hotplug state machine.
		 */
		spin_unlock_irq(&pkg_temp_lock);
		wrmsr_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
			     pkgdev->msr_pkg_therm_low,
			     pkgdev->msr_pkg_therm_high);
		spin_lock_irq(&pkg_temp_lock);
	}

	/*
	 * Check whether there is work scheduled and whether the work is
	 * targeted at the outgoing CPU.
	 */
	if (pkgdev->work_scheduled && was_target) {
		/*
		 * To cancel the work we need to drop the lock, otherwise
		 * we might deadlock if the work needs to be flushed.
		 */
		spin_unlock_irq(&pkg_temp_lock);
		cancel_delayed_work_sync(&pkgdev->work);
		spin_lock_irq(&pkg_temp_lock);
		/*
		 * If this is not the last cpu in the package and the work
		 * did not run after we dropped the lock above, then we
		 * need to reschedule the work, otherwise the interrupt
		 * stays disabled forever.
		 */
		if (!lastcpu && pkgdev->work_scheduled)
			pkg_thermal_schedule_work(target, &pkgdev->work);
	}

	spin_unlock_irq(&pkg_temp_lock);

	/* Final cleanup if this is the last cpu */
	if (lastcpu)
		kfree(pkgdev);
}

static int get_core_online(unsigned int cpu)
{
	struct pkg_device *pkgdev = pkg_temp_thermal_get_dev(cpu);
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	/* Paranoia check */
	if (!cpu_has(c, X86_FEATURE_DTHERM) || !cpu_has(c, X86_FEATURE_PTS))
		return -ENODEV;

	/* If the package exists, nothing to do */
	if (pkgdev) {
		cpumask_set_cpu(cpu, &pkgdev->cpumask);
		return 0;
	}
	return pkg_temp_thermal_device_add(cpu);
}

static int pkg_temp_thermal_cpu_callback(struct notifier_block *nfb,
					 unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long) hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
		get_core_online(cpu);
		break;
	case CPU_DOWN_PREPARE:
		put_core_offline(cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block pkg_temp_thermal_notifier __refdata = {
	.notifier_call = pkg_temp_thermal_cpu_callback,
};


static const struct x86_cpu_id __initconst pkg_temp_thermal_ids[] = {
	{ X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_PTS },
	{}
};
MODULE_DEVICE_TABLE(x86cpu, pkg_temp_thermal_ids);

static int __init pkg_temp_thermal_init(void)
{
	int i;

	if (!x86_match_cpu(pkg_temp_thermal_ids))
		return -ENODEV;

	max_packages = topology_max_packages();
	packages = kzalloc(max_packages * sizeof(struct pkg_device *), GFP_KERNEL);
	if (!packages)
		return -ENOMEM;

	cpu_notifier_register_begin();
	for_each_online_cpu(i)
		if (get_core_online(i))
			goto err_ret;
	__register_hotcpu_notifier(&pkg_temp_thermal_notifier);
	cpu_notifier_register_done();

	platform_thermal_package_notify = pkg_thermal_notify;
	platform_thermal_package_rate_control = pkg_thermal_rate_control;

	/* Don't care if it fails */
	pkg_temp_debugfs_init();
	return 0;

err_ret:
	for_each_online_cpu(i)
		put_core_offline(i);
	cpu_notifier_register_done();
	kfree(packages);
	return -ENODEV;
}
module_init(pkg_temp_thermal_init)

static void __exit pkg_temp_thermal_exit(void)
{
	int i;

	platform_thermal_package_notify = NULL;
	platform_thermal_package_rate_control = NULL;

	cpu_notifier_register_begin();
	__unregister_hotcpu_notifier(&pkg_temp_thermal_notifier);
	for_each_online_cpu(i)
		put_core_offline(i);
	cpu_notifier_register_done();

	debugfs_remove_recursive(debugfs);
	kfree(packages);
}
module_exit(pkg_temp_thermal_exit)

MODULE_DESCRIPTION("X86 PKG TEMP Thermal Driver");
MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
MODULE_LICENSE("GPL v2");