/*
 * intel_powerclamp.c - package c-state idle injection
 *
 * Copyright (c) 2012, Intel Corporation.
 *
 * Authors:
 *	Arjan van de Ven <arjan@linux.intel.com>
 *	Jacob Pan <jacob.jun.pan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 *
 * TODO:
 *	1. better handle wakeup from external interrupts, currently a fixed
 *	   compensation is added to the clamping duration when an excessive
 *	   number of wakeups is observed during idle time. the reason is that
 *	   in case of external interrupts without need for ack, clamping down
 *	   the cpu in non-irq context does not reduce irqs. for the majority
 *	   of cases, clamping down the cpu does help reduce irqs as well; we
 *	   should be able to differentiate the two cases and give a
 *	   quantitative solution for the irqs that we can control. perhaps
 *	   based on get_cpu_iowait_time_us()
 *
 *	2. synchronization with other hw blocks
 *
 *
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/thermal.h>
#include <linux/slab.h>
#include <linux/tick.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/sched/rt.h>

#include <asm/nmi.h>
#include <asm/msr.h>
#include <asm/mwait.h>
#include <asm/cpu_device_id.h>
#include <asm/idle.h>
#include <asm/hardirq.h>

#define MAX_TARGET_RATIO (50U)
/* For each undisturbed clamping period (no extra wakeups during idle time),
 * we increment the confidence counter for the given target ratio.
 * CONFIDENCE_OK defines the level at which runtime calibration results are
 * considered valid.
 */
#define CONFIDENCE_OK (3)
/* Default idle injection duration; the driver adjusts sleep time to meet the
 * target idle ratio. Similar to frequency modulation.
 */
#define DEFAULT_DURATION_JIFFIES (6)

static unsigned int target_mwait;
static struct dentry *debug_dir;

/* user selected target */
static unsigned int set_target_ratio;
static unsigned int current_ratio;
static bool should_skip;
static bool reduce_irq;
static atomic_t idle_wakeup_counter;
static unsigned int control_cpu; /* The cpu assigned to collect stats and
				  * update control parameters. defaults to
				  * the BSP, but the BSP can be offlined.
				  */
static bool clamping;


static struct task_struct * __percpu *powerclamp_thread;
static struct thermal_cooling_device *cooling_dev;
static unsigned long *cpu_clamping_mask; /* bit map for tracking per cpu
					  * clamping threads
					  */

static unsigned int duration;
static unsigned int pkg_cstate_ratio_cur;
static unsigned int window_size;

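/*
 * The module parameter setters below validate user input against the
 * recommended range, then publish the new value with a full memory barrier
 * so the per-cpu clamping threads observe it on their next injection cycle.
 */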
static int duration_set(const char *arg, const struct kernel_param *kp)
{
	int ret = 0;
	unsigned long new_duration;

	ret = kstrtoul(arg, 10, &new_duration);
	if (ret)
		goto exit;
	if (new_duration > 25 || new_duration < 6) {
		pr_err("Out of recommended range %lu, between 6-25ms\n",
			new_duration);
		ret = -EINVAL;
	}

	duration = clamp(new_duration, 6ul, 25ul);
	smp_mb();

exit:

	return ret;
}

static const struct kernel_param_ops duration_ops = {
	.set = duration_set,
	.get = param_get_int,
};


module_param_cb(duration, &duration_ops, &duration, 0644);
MODULE_PARM_DESC(duration, "forced idle time for each attempt in msec.");

struct powerclamp_calibration_data {
	unsigned long confidence;   /* used for calibration, basically a
				     * counter that is incremented each time
				     * a clamping period completes without
				     * extra wakeups. once the counter reaches
				     * a given level, compensation is deemed
				     * usable.
				     */
	unsigned long steady_comp;  /* steady state compensation used when
				     * no extra wakeups occurred.
				     */
	unsigned long dynamic_comp; /* compensates for excessive wakeups from
				     * idle, mostly from external interrupts.
				     */
};

static struct powerclamp_calibration_data cal_data[MAX_TARGET_RATIO];

static int window_size_set(const char *arg, const struct kernel_param *kp)
{
	int ret = 0;
	unsigned long new_window_size;

	ret = kstrtoul(arg, 10, &new_window_size);
	if (ret)
		goto exit_win;
	if (new_window_size > 10 || new_window_size < 2) {
		pr_err("Out of recommended window size %lu, between 2-10\n",
			new_window_size);
		ret = -EINVAL;
	}

	window_size = clamp(new_window_size, 2ul, 10ul);
	smp_mb();

exit_win:

	return ret;
}

static const struct kernel_param_ops window_size_ops = {
	.set = window_size_set,
	.get = param_get_int,
};

module_param_cb(window_size, &window_size_ops, &window_size, 0644);
MODULE_PARM_DESC(window_size, "sliding window in number of clamping cycles\n"
	"\tpowerclamp controls the idle ratio within this window. a larger\n"
	"\twindow size results in slower response time but smoother\n"
	"\tclamping results. defaults to 2.");

static void find_target_mwait(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int highest_cstate = 0;
	unsigned int highest_subcstate = 0;
	int i;

	if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
		return;

	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);

	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
	    !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
		return;

	edx >>= MWAIT_SUBSTATE_SIZE;
	for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
		if (edx & MWAIT_SUBSTATE_MASK) {
			highest_cstate = i;
			highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
		}
	}
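	/*
	 * The MWAIT hint in EAX encodes the C-state in the upper nibble and
	 * the sub-state in the lower nibble. CPUID reports the *number* of
	 * sub-states while the hint field is zero-based, hence the -1.
	 */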
	target_mwait = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
		(highest_subcstate - 1);
}

struct pkg_cstate_info {
	bool skip;
	int msr_index;
	int cstate_id;
};

#define PKG_CSTATE_INIT(id) {				\
		.msr_index = MSR_PKG_C##id##_RESIDENCY, \
		.cstate_id = id				\
	}

static struct pkg_cstate_info pkg_cstates[] = {
	PKG_CSTATE_INIT(2),
	PKG_CSTATE_INIT(3),
	PKG_CSTATE_INIT(6),
	PKG_CSTATE_INIT(7),
	PKG_CSTATE_INIT(8),
	PKG_CSTATE_INIT(9),
	PKG_CSTATE_INIT(10),
	{},	/* terminator */
};

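/*
 * Package C-state residency MSRs vary across CPU generations, so each
 * candidate is probed with rdmsrl_safe(), which returns an error instead
 * of faulting when the MSR does not exist.
 */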
static bool has_pkg_state_counter(void)
{
	u64 val;
	struct pkg_cstate_info *info = pkg_cstates;

	/* check if any one of the counter msrs exists */
	while (info->msr_index) {
		if (!rdmsrl_safe(info->msr_index, &val))
			return true;
		info++;
	}

	return false;
}

static u64 pkg_state_counter(void)
{
	u64 val;
	u64 count = 0;
	struct pkg_cstate_info *info = pkg_cstates;

	while (info->msr_index) {
		if (!info->skip) {
			if (!rdmsrl_safe(info->msr_index, &val))
				count += val;
			else
				info->skip = true;
		}
		info++;
	}

	return count;
}

static void noop_timer(unsigned long foo)
{
	/* empty... just the fact that we get the interrupt wakes us up */
}

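/*
 * Look up the runtime compensation for a given target ratio. Calibration
 * data is only trusted once its confidence counter has reached
 * CONFIDENCE_OK, and neighboring ratios are averaged to smooth out
 * measurement noise; the table edges can only average toward one side.
 */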
static unsigned int get_compensation(int ratio)
{
	unsigned int comp = 0;

	/* we only use compensation if all adjacent ones are good */
	if (ratio == 1 &&
		cal_data[ratio].confidence >= CONFIDENCE_OK &&
		cal_data[ratio + 1].confidence >= CONFIDENCE_OK &&
		cal_data[ratio + 2].confidence >= CONFIDENCE_OK) {
		comp = (cal_data[ratio].steady_comp +
			cal_data[ratio + 1].steady_comp +
			cal_data[ratio + 2].steady_comp) / 3;
	} else if (ratio == MAX_TARGET_RATIO - 1 &&
		cal_data[ratio].confidence >= CONFIDENCE_OK &&
		cal_data[ratio - 1].confidence >= CONFIDENCE_OK &&
		cal_data[ratio - 2].confidence >= CONFIDENCE_OK) {
		comp = (cal_data[ratio].steady_comp +
			cal_data[ratio - 1].steady_comp +
			cal_data[ratio - 2].steady_comp) / 3;
	} else if (cal_data[ratio].confidence >= CONFIDENCE_OK &&
		cal_data[ratio - 1].confidence >= CONFIDENCE_OK &&
		cal_data[ratio + 1].confidence >= CONFIDENCE_OK) {
		comp = (cal_data[ratio].steady_comp +
			cal_data[ratio - 1].steady_comp +
			cal_data[ratio + 1].steady_comp) / 3;
	}

	/* REVISIT: simple penalty of double idle injection */
	if (reduce_irq)
		comp = ratio;
	/* do not exceed limit */
	if (comp + ratio >= MAX_TARGET_RATIO)
		comp = MAX_TARGET_RATIO - ratio - 1;

	return comp;
}

static void adjust_compensation(int target_ratio, unsigned int win)
{
	int delta;
	struct powerclamp_calibration_data *d = &cal_data[target_ratio];

	/*
	 * Skip the adjustment if the confidence level has already been
	 * reached, or if there were too many wakeups during the last idle
	 * injection period; in that case the data cannot be trusted for
	 * compensation.
	 */
	if (d->confidence >= CONFIDENCE_OK ||
		atomic_read(&idle_wakeup_counter) >
		win * num_online_cpus())
		return;

	delta = set_target_ratio - current_ratio;
	/* filter out bad data */
	if (delta >= 0 && delta <= (1 + target_ratio / 10)) {
		if (d->steady_comp)
			d->steady_comp =
				roundup(delta + d->steady_comp, 2) / 2;
		else
			d->steady_comp = delta;
		d->confidence++;
	}
}

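/*
 * Compare actual package C-state residency against elapsed TSC cycles
 * over the last window:
 *
 *	current_ratio = 100 * (pkg_cstate_now - pkg_cstate_last) /
 *			      (tsc_now - tsc_last)
 *
 * Returns true when the package is already idler than target + guard,
 * in which case the caller skips the next injection cycle.
 */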
static bool powerclamp_adjust_controls(unsigned int target_ratio,
				unsigned int guard, unsigned int win)
{
	static u64 msr_last, tsc_last;
	u64 msr_now, tsc_now;
	u64 val64;

	/* check result for the last window */
	msr_now = pkg_state_counter();
	tsc_now = rdtsc();

	/* calculate pkg cstate vs tsc ratio */
	if (!msr_last || !tsc_last)
		current_ratio = 1;
	else if (tsc_now - tsc_last) {
		val64 = 100 * (msr_now - msr_last);
		do_div(val64, (tsc_now - tsc_last));
		current_ratio = val64;
	}

	/* update record */
	msr_last = msr_now;
	tsc_last = tsc_now;

	adjust_compensation(target_ratio, win);
	/*
	 * too many external interrupts; set a flag so that we can take
	 * measures later.
	 */
	reduce_irq = atomic_read(&idle_wakeup_counter) >=
		2 * win * num_online_cpus();

	atomic_set(&idle_wakeup_counter, 0);
	/* if we are above target+guard, skip */
	return set_target_ratio + guard <= current_ratio;
}

static int clamp_thread(void *arg)
{
	int cpunr = (unsigned long)arg;
	DEFINE_TIMER(wakeup_timer, noop_timer, 0, 0);
	static const struct sched_param param = {
		.sched_priority = MAX_USER_RT_PRIO/2,
	};
	unsigned int count = 0;
	unsigned int target_ratio;

	set_bit(cpunr, cpu_clamping_mask);
	set_freezable();
	init_timer_on_stack(&wakeup_timer);
	sched_setscheduler(current, SCHED_FIFO, &param);

	while (true == clamping && !kthread_should_stop() &&
		cpu_online(cpunr)) {
		int sleeptime;
		unsigned long target_jiffies;
		unsigned int guard;
		unsigned int compensated_ratio;
		int interval; /* jiffies to sleep for each attempt */
		unsigned int duration_jiffies = msecs_to_jiffies(duration);
		unsigned int window_size_now;

		try_to_freeze();
		/*
		 * make sure the user selected ratio does not take effect
		 * until the next round; adjust target_ratio if the user has
		 * changed the target so that we can converge quickly.
		 */
		target_ratio = set_target_ratio;
		guard = 1 + target_ratio / 20;
		window_size_now = window_size;
		count++;

		/*
		 * systems may differ in their ability to enter package
		 * level c-states, thus we need to compensate the injected
		 * idle ratio to achieve the actual target reported by
		 * the HW.
		 */
		compensated_ratio = target_ratio +
			get_compensation(target_ratio);
		if (compensated_ratio <= 0)
			compensated_ratio = 1;
		interval = duration_jiffies * 100 / compensated_ratio;
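		/*
		 * Duty-cycle arithmetic: with the default 6-jiffy duration
		 * and a compensated ratio of 25, interval = 6 * 100 / 25 =
		 * 24, i.e. 6 jiffies of injected idle out of every 24.
		 */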

		/*
		 * align idle time: round up to the next interval boundary
		 * so that all clamping threads inject idle at the same
		 * moment, letting the whole package enter a deep c-state.
		 */
		target_jiffies = roundup(jiffies, interval);
		sleeptime = target_jiffies - jiffies;
		if (sleeptime <= 0)
			sleeptime = 1;
		schedule_timeout_interruptible(sleeptime);
		/*
		 * only the elected controlling cpu can collect stats and
		 * update control parameters.
		 */
		if (cpunr == control_cpu && !(count % window_size_now)) {
			should_skip =
				powerclamp_adjust_controls(target_ratio,
						guard, window_size_now);
			smp_mb();
		}

		if (should_skip)
			continue;

		target_jiffies = jiffies + duration_jiffies;
		mod_timer(&wakeup_timer, target_jiffies);
		if (unlikely(local_softirq_pending()))
			continue;
		/*
		 * stop the tick during idle time; interrupts are still
		 * allowed, thus jiffies are updated properly.
		 */
		preempt_disable();
		/* mwait until target jiffies is reached */
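		/*
		 * mwait_idle_with_hints() uses the deepest hint found at
		 * probe time; ecx = 1 makes interrupts (including the
		 * wakeup_timer armed above) break events, so jiffies keeps
		 * advancing and the loop is guaranteed to terminate.
		 */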
		while (time_before(jiffies, target_jiffies)) {
			unsigned long ecx = 1;
			unsigned long eax = target_mwait;

			/*
			 * REVISIT: may call enter_idle() to notify drivers
			 * who can save power during cpu idle. same for
			 * exit_idle()
			 */
			local_touch_nmi();
			stop_critical_timings();
			mwait_idle_with_hints(eax, ecx);
			start_critical_timings();
			atomic_inc(&idle_wakeup_counter);
		}
		preempt_enable();
	}
	del_timer_sync(&wakeup_timer);
	clear_bit(cpunr, cpu_clamping_mask);

	return 0;
}

/*
 * 1 HZ polling while clamping is active, useful for userspace
 * to monitor actual idle ratio.
 */
static void poll_pkg_cstate(struct work_struct *dummy);
static DECLARE_DELAYED_WORK(poll_pkg_cstate_work, poll_pkg_cstate);
static void poll_pkg_cstate(struct work_struct *dummy)
{
	static u64 msr_last;
	static u64 tsc_last;
	static unsigned long jiffies_last;

	u64 msr_now;
	unsigned long jiffies_now;
	u64 tsc_now;
	u64 val64;

	msr_now = pkg_state_counter();
	tsc_now = rdtsc();
	jiffies_now = jiffies;

	/* calculate pkg cstate vs tsc ratio */
	if (!msr_last || !tsc_last)
		pkg_cstate_ratio_cur = 1;
	else {
		if (tsc_now - tsc_last) {
			val64 = 100 * (msr_now - msr_last);
			do_div(val64, (tsc_now - tsc_last));
			pkg_cstate_ratio_cur = val64;
		}
	}

	/* update record */
	msr_last = msr_now;
	jiffies_last = jiffies_now;
	tsc_last = tsc_now;

	if (true == clamping)
		schedule_delayed_work(&poll_pkg_cstate_work, HZ);
}

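/* create a clamping kthread for the given cpu and pin it there */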
static void start_power_clamp_thread(unsigned long cpu)
{
	struct task_struct **p = per_cpu_ptr(powerclamp_thread, cpu);
	struct task_struct *thread;

	thread = kthread_create_on_node(clamp_thread,
					(void *) cpu,
					cpu_to_node(cpu),
					"kidle_inject/%ld", cpu);
	if (IS_ERR(thread))
		return;

	/* bind to cpu here */
	kthread_bind(thread, cpu);
	wake_up_process(thread);
	*p = thread;
}

static int start_power_clamp(void)
{
	unsigned long cpu;

	set_target_ratio = clamp(set_target_ratio, 0U, MAX_TARGET_RATIO - 1);
	/* prevent cpu hotplug */
	get_online_cpus();

	/* prefer BSP */
	control_cpu = 0;
	if (!cpu_online(control_cpu))
		control_cpu = smp_processor_id();

	clamping = true;
	schedule_delayed_work(&poll_pkg_cstate_work, 0);

	/* start one thread per online cpu */
	for_each_online_cpu(cpu) {
		start_power_clamp_thread(cpu);
	}
	put_online_cpus();

	return 0;
}

static void end_power_clamp(void)
{
	int i;
	struct task_struct *thread;

	clamping = false;
	/*
	 * make clamping visible to other cpus and give the per cpu clamping
	 * threads some time to exit; otherwise they get killed below.
	 */
	smp_mb();
	msleep(20);
	if (bitmap_weight(cpu_clamping_mask, num_possible_cpus())) {
		for_each_set_bit(i, cpu_clamping_mask, num_possible_cpus()) {
			pr_debug("clamping thread for cpu %d alive, kill\n",
				 i);
			thread = *per_cpu_ptr(powerclamp_thread, i);
			kthread_stop(thread);
		}
	}
}

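/*
 * CPU hotplug notifier: while clamping, a newly onlined CPU gets its own
 * clamping thread, and if the controlling CPU goes away, control moves
 * to whichever CPU runs the notifier.
 */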
static int powerclamp_cpu_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	unsigned long cpu = (unsigned long)hcpu;
	struct task_struct **percpu_thread =
		per_cpu_ptr(powerclamp_thread, cpu);

	if (false == clamping)
		goto exit_ok;

	switch (action) {
	case CPU_ONLINE:
		start_power_clamp_thread(cpu);
		/* prefer BSP as controlling CPU */
		if (cpu == 0) {
			control_cpu = 0;
			smp_mb();
		}
		break;
	case CPU_DEAD:
		if (test_bit(cpu, cpu_clamping_mask)) {
			pr_err("cpu %lu dead but powerclamping thread is not\n",
				cpu);
			kthread_stop(*percpu_thread);
		}
		if (cpu == control_cpu) {
			control_cpu = smp_processor_id();
			smp_mb();
		}
	}

exit_ok:
	return NOTIFY_OK;
}

static struct notifier_block powerclamp_cpu_notifier = {
	.notifier_call = powerclamp_cpu_callback,
};

static int powerclamp_get_max_state(struct thermal_cooling_device *cdev,
				 unsigned long *state)
{
	*state = MAX_TARGET_RATIO;

	return 0;
}

static int powerclamp_get_cur_state(struct thermal_cooling_device *cdev,
				 unsigned long *state)
{
	if (true == clamping)
		*state = pkg_cstate_ratio_cur;
	else
		/* to save power, do not poll idle ratio while not clamping */
		*state = -1; /* indicates invalid state */

	return 0;
}

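/*
 * set_cur_state doubles as the on/off switch: 0 -> nonzero starts the
 * clamping threads, nonzero -> 0 stops them, and any other change simply
 * retargets the threads that are already running.
 */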
static int powerclamp_set_cur_state(struct thermal_cooling_device *cdev,
				 unsigned long new_target_ratio)
{
	int ret = 0;

	new_target_ratio = clamp(new_target_ratio, 0UL,
				(unsigned long) (MAX_TARGET_RATIO - 1));
	if (set_target_ratio == 0 && new_target_ratio > 0) {
		pr_info("Start idle injection to reduce power\n");
		set_target_ratio = new_target_ratio;
		ret = start_power_clamp();
		goto exit_set;
	} else if (set_target_ratio > 0 && new_target_ratio == 0) {
		pr_info("Stop forced idle injection\n");
		end_power_clamp();
		set_target_ratio = 0;
	} else	/* adjust currently running */ {
		set_target_ratio = new_target_ratio;
		/* make the new set_target_ratio visible to other cpus */
		smp_mb();
	}

exit_set:
	return ret;
}

/* bind to generic thermal layer as cooling device */
static struct thermal_cooling_device_ops powerclamp_cooling_ops = {
	.get_max_state = powerclamp_get_max_state,
	.get_cur_state = powerclamp_get_cur_state,
	.set_cur_state = powerclamp_set_cur_state,
};

static const struct x86_cpu_id intel_powerclamp_ids[] __initconst = {
	{ X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_MWAIT },
	{ X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_ARAT },
	{ X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_NONSTOP_TSC },
	{ X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_CONSTANT_TSC },
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);

static int __init powerclamp_probe(void)
{
	if (!x86_match_cpu(intel_powerclamp_ids)) {
		pr_err("Intel powerclamp does not run on family %d model %d\n",
				boot_cpu_data.x86, boot_cpu_data.x86_model);
		return -ENODEV;
	}

	/* The goal of idle time alignment is to achieve package cstate. */
	if (!has_pkg_state_counter()) {
		pr_info("No package C-state available\n");
		return -ENODEV;
	}

	/* find the deepest mwait value */
	find_target_mwait();

	return 0;
}

static int powerclamp_debug_show(struct seq_file *m, void *unused)
{
	int i = 0;

	seq_printf(m, "controlling cpu: %d\n", control_cpu);
	seq_printf(m, "pct confidence steady dynamic (compensation)\n");
	for (i = 0; i < MAX_TARGET_RATIO; i++) {
		seq_printf(m, "%d\t%lu\t%lu\t%lu\n",
			i,
			cal_data[i].confidence,
			cal_data[i].steady_comp,
			cal_data[i].dynamic_comp);
	}

	return 0;
}

static int powerclamp_debug_open(struct inode *inode,
			struct file *file)
{
	return single_open(file, powerclamp_debug_show, inode->i_private);
}

static const struct file_operations powerclamp_debug_fops = {
	.open		= powerclamp_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.owner		= THIS_MODULE,
};

static inline void powerclamp_create_debug_files(void)
{
	debug_dir = debugfs_create_dir("intel_powerclamp", NULL);
	if (!debug_dir)
		return;

	if (!debugfs_create_file("powerclamp_calib", S_IRUGO, debug_dir,
				cal_data, &powerclamp_debug_fops))
		goto file_error;

	return;

file_error:
	debugfs_remove_recursive(debug_dir);
}

static int __init powerclamp_init(void)
{
	int retval;
	int bitmap_size;

	bitmap_size = BITS_TO_LONGS(num_possible_cpus()) * sizeof(long);
	cpu_clamping_mask = kzalloc(bitmap_size, GFP_KERNEL);
	if (!cpu_clamping_mask)
		return -ENOMEM;

	/* probe cpu features and ids here */
	retval = powerclamp_probe();
	if (retval)
		goto exit_free;

	/* set default limit, may be adjusted during runtime based on feedback */
	window_size = 2;
	register_hotcpu_notifier(&powerclamp_cpu_notifier);

	powerclamp_thread = alloc_percpu(struct task_struct *);
	if (!powerclamp_thread) {
		retval = -ENOMEM;
		goto exit_unregister;
	}

	cooling_dev = thermal_cooling_device_register("intel_powerclamp", NULL,
						&powerclamp_cooling_ops);
	if (IS_ERR(cooling_dev)) {
		retval = -ENODEV;
		goto exit_free_thread;
	}

	if (!duration)
		duration = jiffies_to_msecs(DEFAULT_DURATION_JIFFIES);

	powerclamp_create_debug_files();

	return 0;

exit_free_thread:
	free_percpu(powerclamp_thread);
exit_unregister:
	unregister_hotcpu_notifier(&powerclamp_cpu_notifier);
exit_free:
	kfree(cpu_clamping_mask);
	return retval;
}
module_init(powerclamp_init);

static void __exit powerclamp_exit(void)
{
	unregister_hotcpu_notifier(&powerclamp_cpu_notifier);
	end_power_clamp();
	free_percpu(powerclamp_thread);
	thermal_cooling_device_unregister(cooling_dev);
	kfree(cpu_clamping_mask);

	cancel_delayed_work_sync(&poll_pkg_cstate_work);
	debugfs_remove_recursive(debug_dir);
}
module_exit(powerclamp_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arjan van de Ven <arjan@linux.intel.com>");
MODULE_AUTHOR("Jacob Pan <jacob.jun.pan@linux.intel.com>");
MODULE_DESCRIPTION("Package Level C-state Idle Injection for Intel CPUs");