/*
 * Copyright (c) 2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "cpu-boost: " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpufreq.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/moduleparam.h>

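/*
 * Per-CPU bookkeeping for the migration boost: the sync thread that
 * applies the boost, the delayed work that removes it, and the pending
 * migration request (src_cpu) protected by the lock. A non-zero
 * boost_min is the frequency floor enforced by the policy adjust
 * notifier below.
 */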
struct cpu_sync {
	struct task_struct *thread;
	wait_queue_head_t sync_wq;
	struct delayed_work boost_rem;
	int cpu;
	spinlock_t lock;
	bool pending;
	int src_cpu;
	unsigned int boost_min;
};

static DEFINE_PER_CPU(struct cpu_sync, sync_info);
static struct workqueue_struct *boost_rem_wq;

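/*
 * Duration of the migration boost, in milliseconds. Writable at runtime
 * as a module parameter (typically under
 * /sys/module/<module name>/parameters/boost_ms); setting it to 0
 * disables boosting for subsequent migrations.
 */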
static unsigned int boost_ms = 50;
module_param(boost_ms, uint, 0644);

/*
 * The CPUFREQ_ADJUST notifier is used to override the current policy min to
 * make sure policy min >= boost_min. The cpufreq framework then does the job
 * of enforcing the new policy.
 */
static int boost_adjust_notify(struct notifier_block *nb, unsigned long val,
				void *data)
{
	struct cpufreq_policy *policy = data;
	unsigned int cpu = policy->cpu;
	struct cpu_sync *s = &per_cpu(sync_info, cpu);
	unsigned int min = s->boost_min;

	if (val != CPUFREQ_ADJUST)
		return NOTIFY_OK;

	if (min == 0)
		return NOTIFY_OK;

	pr_debug("CPU%u policy min before boost: %u kHz\n", cpu, policy->min);
	pr_debug("CPU%u boost min: %u kHz\n", cpu, min);

	cpufreq_verify_within_limits(policy, min, UINT_MAX);

	pr_debug("CPU%u policy min after boost: %u kHz\n", cpu, policy->min);

	return NOTIFY_OK;
}

static struct notifier_block boost_adjust_nb = {
	.notifier_call = boost_adjust_notify,
};

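/*
 * Delayed-work handler that ends a boost: clear boost_min and force a
 * policy re-evaluation so the adjust notifier restores the original
 * policy minimum.
 */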
static void do_boost_rem(struct work_struct *work)
{
	struct cpu_sync *s = container_of(work, struct cpu_sync,
					  boost_rem.work);

	pr_debug("Removing boost for CPU%d\n", s->cpu);
	s->boost_min = 0;
	/* Force policy re-evaluation to trigger adjust notifier. */
	cpufreq_update_policy(s->cpu);
}

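/*
 * Per-CPU kthread that services migration boost requests. When woken,
 * it compares the source and destination CPUs' current frequencies and,
 * if the task migrated to a slower CPU, boosts the destination's policy
 * minimum to the source's current frequency for boost_ms milliseconds.
 */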
static int boost_mig_sync_thread(void *data)
{
	int dest_cpu = (long) data;
	int src_cpu, ret;
	struct cpu_sync *s = &per_cpu(sync_info, dest_cpu);
	struct cpufreq_policy dest_policy;
	struct cpufreq_policy src_policy;
	unsigned long flags;

	while (1) {
		wait_event(s->sync_wq, s->pending || kthread_should_stop());

		if (kthread_should_stop())
			break;

		spin_lock_irqsave(&s->lock, flags);
		s->pending = false;
		src_cpu = s->src_cpu;
		spin_unlock_irqrestore(&s->lock, flags);

		ret = cpufreq_get_policy(&src_policy, src_cpu);
		if (ret)
			continue;

		ret = cpufreq_get_policy(&dest_policy, dest_cpu);
		if (ret)
			continue;

		if (dest_policy.cur >= src_policy.cur) {
			pr_debug("No sync. CPU%d@%ukHz >= CPU%d@%ukHz\n",
				 dest_cpu, dest_policy.cur,
				 src_cpu, src_policy.cur);
			continue;
		}

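		/*
		 * Apply (or extend) the boost: cancel any pending removal,
		 * raise the floor to the source CPU's current frequency,
		 * and re-arm the delayed work that drops it after boost_ms.
		 */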
		cancel_delayed_work_sync(&s->boost_rem);
		s->boost_min = src_policy.cur;
		/* Force policy re-evaluation to trigger adjust notifier. */
		cpufreq_update_policy(dest_cpu);
		queue_delayed_work_on(s->cpu, boost_rem_wq, &s->boost_rem,
				      msecs_to_jiffies(boost_ms));
	}

	return 0;
}

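/*
 * Migration notifier callback. This is registered on an atomic notifier
 * chain, so it only records the source CPU and wakes the destination
 * CPU's sync thread, which does the actual cpufreq work in process
 * context.
 */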
static int boost_migration_notify(struct notifier_block *nb,
				  unsigned long dest_cpu, void *arg)
{
	unsigned long flags;
	struct cpu_sync *s = &per_cpu(sync_info, dest_cpu);

	if (!boost_ms)
		return NOTIFY_OK;

	pr_debug("Migration: CPU%d --> CPU%d\n", (int)(long) arg,
		 (int) dest_cpu);
	spin_lock_irqsave(&s->lock, flags);
	s->pending = true;
	s->src_cpu = (int)(long) arg;
	spin_unlock_irqrestore(&s->lock, flags);
	wake_up(&s->sync_wq);

	return NOTIFY_OK;
}

static struct notifier_block boost_migration_nb = {
	.notifier_call = boost_migration_notify,
};

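/*
 * Set up the per-CPU state, the boost-removal workqueue, and the
 * per-CPU sync threads, and register for cpufreq policy and task
 * migration notifications.
 */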
static int cpu_boost_init(void)
{
	int cpu;
	struct cpu_sync *s;

	cpufreq_register_notifier(&boost_adjust_nb, CPUFREQ_POLICY_NOTIFIER);

	boost_rem_wq = alloc_workqueue("cpuboost_rem_wq", WQ_HIGHPRI, 0);
	if (!boost_rem_wq)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		s = &per_cpu(sync_info, cpu);
		s->cpu = cpu;
		init_waitqueue_head(&s->sync_wq);
		spin_lock_init(&s->lock);
		INIT_DELAYED_WORK(&s->boost_rem, do_boost_rem);
		s->thread = kthread_run(boost_mig_sync_thread,
					(void *)(long) cpu,
					"boost_sync/%d", cpu);
	}
	atomic_notifier_chain_register(&migration_notifier_head,
					&boost_migration_nb);

	return 0;
}
late_initcall(cpu_boost_init);