/*
 * Copyright (c) 2012, 2016 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/hrtimer.h>
#include <linux/tracefs.h>
#include <linux/ktime.h>
#include <trace/events/power.h>
#include "trace_stat.h"
#include "trace.h"

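/*
 * One node per unique (cpu, start_freq, end_freq) transition observed.
 * Nodes live in freq_trans_tree, keyed lexicographically on those three
 * fields. Frequencies are in kHz, latencies in microseconds; each node
 * accumulates min/max/total switch time plus a hit count.
 */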
struct trans {
	struct rb_node node;
	unsigned int cpu;
	unsigned int start_freq;
	unsigned int end_freq;
	unsigned int min_us;
	unsigned int max_us;
	ktime_t total_t;
	unsigned int count;
};
static struct rb_root freq_trans_tree = RB_ROOT;

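/* Walk the rb-tree for an exact (cpu, start_freq, end_freq) match. */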
static struct trans *tr_search(struct rb_root *root, unsigned int cpu,
			       unsigned int start_freq, unsigned int end_freq)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct trans *tr = container_of(node, struct trans, node);

		if (cpu < tr->cpu)
			node = node->rb_left;
		else if (cpu > tr->cpu)
			node = node->rb_right;
		else if (start_freq < tr->start_freq)
			node = node->rb_left;
		else if (start_freq > tr->start_freq)
			node = node->rb_right;
		else if (end_freq < tr->end_freq)
			node = node->rb_left;
		else if (end_freq > tr->end_freq)
			node = node->rb_right;
		else
			return tr;
	}
	return NULL;
}

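/*
 * Link a new node into the tree using the same key ordering as
 * tr_search(); returns -EINVAL if an equal key already exists.
 */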
static int tr_insert(struct rb_root *root, struct trans *tr)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct trans *this = container_of(*new, struct trans, node);

		parent = *new;
		if (tr->cpu < this->cpu)
			new = &((*new)->rb_left);
		else if (tr->cpu > this->cpu)
			new = &((*new)->rb_right);
		else if (tr->start_freq < this->start_freq)
			new = &((*new)->rb_left);
		else if (tr->start_freq > this->start_freq)
			new = &((*new)->rb_right);
		else if (tr->end_freq < this->end_freq)
			new = &((*new)->rb_left);
		else if (tr->end_freq > this->end_freq)
			new = &((*new)->rb_right);
		else
			return -EINVAL;
	}

	rb_link_node(&tr->node, parent, new);
	rb_insert_color(&tr->node, root);

	return 0;
}

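/*
 * Per-CPU record of an in-flight frequency switch: the requested
 * transition and the timestamp at which it started.
 */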
struct trans_state {
	spinlock_t lock;
	unsigned int start_freq;
	unsigned int end_freq;
	ktime_t start_t;
	bool started;
};
static DEFINE_PER_CPU(struct trans_state, freq_trans_state);

static DEFINE_SPINLOCK(state_lock);

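/* Tracepoint probe: a frequency switch has begun on @cpu; stamp the start. */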
static void probe_start(void *ignore, unsigned int start_freq,
			unsigned int end_freq, unsigned int cpu)
{
	unsigned long flags;

	spin_lock_irqsave(&state_lock, flags);
	per_cpu(freq_trans_state, cpu).start_freq = start_freq;
	per_cpu(freq_trans_state, cpu).end_freq = end_freq;
	per_cpu(freq_trans_state, cpu).start_t = ktime_get();
	per_cpu(freq_trans_state, cpu).started = true;
	spin_unlock_irqrestore(&state_lock, flags);
}

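/*
 * Tracepoint probe: the switch on @cpu completed. Compute the elapsed
 * time and fold it into the matching tree node, allocating one (with
 * GFP_ATOMIC, since state_lock is held with IRQs off) on first sight
 * of this transition.
 */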
static void probe_end(void *ignore, unsigned int cpu)
{
	unsigned long flags;
	struct trans *tr;
	s64 dur_us;
	ktime_t dur_t, end_t = ktime_get();

	spin_lock_irqsave(&state_lock, flags);

	if (!per_cpu(freq_trans_state, cpu).started)
		goto out;

	dur_t = ktime_sub(end_t, per_cpu(freq_trans_state, cpu).start_t);
	dur_us = ktime_to_us(dur_t);

	tr = tr_search(&freq_trans_tree, cpu,
		       per_cpu(freq_trans_state, cpu).start_freq,
		       per_cpu(freq_trans_state, cpu).end_freq);
	if (!tr) {
		tr = kzalloc(sizeof(*tr), GFP_ATOMIC);
		if (!tr) {
			WARN_ONCE(1, "CPU frequency trace is now invalid!\n");
			goto out;
		}

		tr->start_freq = per_cpu(freq_trans_state, cpu).start_freq;
		tr->end_freq = per_cpu(freq_trans_state, cpu).end_freq;
		tr->cpu = cpu;
		tr->min_us = UINT_MAX;
		tr_insert(&freq_trans_tree, tr);
	}
	tr->total_t = ktime_add(tr->total_t, dur_t);
	tr->count++;

	if (dur_us > tr->max_us)
		tr->max_us = dur_us;
	if (dur_us < tr->min_us)
		tr->min_us = dur_us;

	per_cpu(freq_trans_state, cpu).started = false;
out:
	spin_unlock_irqrestore(&state_lock, flags);
}

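/* tracer_stat iterator: return the first node in the stats tree. */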
static void *freq_switch_stat_start(struct tracer_stat *trace)
{
	struct rb_node *n;
	unsigned long flags;

	spin_lock_irqsave(&state_lock, flags);
	n = rb_first(&freq_trans_tree);
	spin_unlock_irqrestore(&state_lock, flags);

	return n;
}

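/* tracer_stat iterator: advance to the next node (@idx is unused). */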
static void *freq_switch_stat_next(void *prev, int idx)
{
	struct rb_node *n;
	unsigned long flags;

	spin_lock_irqsave(&state_lock, flags);
	n = rb_next(prev);
	spin_unlock_irqrestore(&state_lock, flags);

	return n;
}

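/* Emit one row: cpu, start/end kHz, count, and avg/min/max latency in us. */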
static int freq_switch_stat_show(struct seq_file *s, void *p)
{
	unsigned long flags;
	struct trans *tr = p;

	spin_lock_irqsave(&state_lock, flags);
	seq_printf(s, "%3d %9d %8d %5d %6lld %6d %6d\n", tr->cpu,
		   tr->start_freq, tr->end_freq, tr->count,
		   div_s64(ktime_to_us(tr->total_t), tr->count),
		   tr->min_us, tr->max_us);
	spin_unlock_irqrestore(&state_lock, flags);

	return 0;
}

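/*
 * tracer_stat release hook, called per entry when the stat session is
 * torn down: unlink the node from the tree and free it.
 */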
static void freq_switch_stat_release(void *stat)
{
	struct trans *tr = stat;
	unsigned long flags;

	spin_lock_irqsave(&state_lock, flags);
	rb_erase(&tr->node, &freq_trans_tree);
	spin_unlock_irqrestore(&state_lock, flags);
	kfree(tr);
}

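/* Column headers for the trace_stat/cpu_freq_switch output. */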
static int freq_switch_stat_headers(struct seq_file *s)
{
	seq_puts(s, "CPU START_KHZ END_KHZ COUNT AVG_US MIN_US MAX_US\n");
	seq_puts(s, "  |         |       |     |      |      |      |\n");
	return 0;
}

struct tracer_stat freq_switch_stats __read_mostly = {
	.name = "cpu_freq_switch",
	.stat_start = freq_switch_stat_start,
	.stat_next = freq_switch_stat_next,
	.stat_show = freq_switch_stat_show,
	.stat_release = freq_switch_stat_release,
	.stat_headers = freq_switch_stat_headers
};

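/* Tear everything down in the reverse order of trace_freq_switch_enable(). */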
static void trace_freq_switch_disable(void)
{
	unregister_stat_tracer(&freq_switch_stats);
	unregister_trace_cpu_frequency_switch_end(probe_end, NULL);
	unregister_trace_cpu_frequency_switch_start(probe_start, NULL);
	pr_info("disabled cpu frequency switch time profiling\n");
}

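/*
 * Register the start/end tracepoint probes and the stat tracer,
 * unwinding whatever succeeded if a later step fails.
 */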
static int trace_freq_switch_enable(void)
{
	int ret;

	ret = register_trace_cpu_frequency_switch_start(probe_start, NULL);
	if (ret)
		goto out;

	ret = register_trace_cpu_frequency_switch_end(probe_end, NULL);
	if (ret)
		goto err_register_switch_end;

	ret = register_stat_tracer(&freq_switch_stats);
	if (ret)
		goto err_register_stat_tracer;

	pr_info("enabled cpu frequency switch time profiling\n");
	return 0;

err_register_stat_tracer:
	unregister_trace_cpu_frequency_switch_end(probe_end, NULL);
err_register_switch_end:
	unregister_trace_cpu_frequency_switch_start(probe_start, NULL);
out:
	pr_err("failed to enable cpu frequency switch time profiling\n");

	return ret;
}

static DEFINE_MUTEX(debugfs_lock);
static bool trace_freq_switch_enabled;

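/*
 * Write handler for the control file: 1 enables profiling, 0 disables
 * it; any other value is rejected with -EINVAL.
 */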
static int debug_toggle_tracing(void *data, u64 val)
{
	int ret = 0;

	mutex_lock(&debugfs_lock);

	if (val == 1 && !trace_freq_switch_enabled)
		ret = trace_freq_switch_enable();
	else if (val == 0 && trace_freq_switch_enabled)
		trace_freq_switch_disable();
	else if (val > 1)
		ret = -EINVAL;

	if (!ret)
		trace_freq_switch_enabled = val;

	mutex_unlock(&debugfs_lock);

	return ret;
}

static int debug_tracing_state_get(void *data, u64 *val)
{
	mutex_lock(&debugfs_lock);
	*val = trace_freq_switch_enabled;
	mutex_unlock(&debugfs_lock);

	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(debug_tracing_state_fops, debug_tracing_state_get,
			debug_toggle_tracing, "%llu\n");

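/*
 * Create the control file in the tracing directory. If
 * tracing_init_dentry() fails, tracefs is unavailable, so bail quietly.
 */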
static int __init trace_freq_switch_init(void)
{
	struct dentry *d_tracer = tracing_init_dentry();

	if (IS_ERR(d_tracer))
		return 0;

	tracefs_create_file("cpu_freq_switch_profile_enabled",
			    0644, d_tracer, NULL, &debug_tracing_state_fops);

	return 0;
}
late_initcall(trace_freq_switch_init);