blob: 8ec87d11494f1132b639f0aadfa9139babe0fe5d [file] [log] [blame]
Matt Wagantalld64560fe2011-01-26 16:20:54 -08001/*
2 * Copyright (C) 2007 Google, Inc.
Vikram Mulukutlaa0073af2013-04-10 14:24:38 -07003 * Copyright (c) 2007-2013, The Linux Foundation. All rights reserved.
Matt Wagantalld64560fe2011-01-26 16:20:54 -08004 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 */
15
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/ctype.h>
19#include <linux/debugfs.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070020#include <linux/seq_file.h>
Matt Wagantalld64560fe2011-01-26 16:20:54 -080021#include <linux/clk.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070022#include <linux/list.h>
23#include <linux/clkdev.h>
Vikram Mulukutlaa0073af2013-04-10 14:24:38 -070024#include <linux/uaccess.h>
Vikram Mulukutla4785ab62012-12-10 20:51:22 -080025#include <linux/mutex.h>
26
Matt Wagantall33d01f52012-02-23 23:27:44 -080027#include <mach/clk-provider.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070028
Matt Wagantalld64560fe2011-01-26 16:20:54 -080029#include "clock.h"
Matt Wagantalld64560fe2011-01-26 16:20:54 -080030
/* All clock tables registered via clock_debug_register(). */
static LIST_HEAD(clk_list);
/* Protects clk_list against concurrent registration and iteration. */
static DEFINE_SPINLOCK(clk_list_lock);

/* Root "clk" directory in debugfs; NULL until clock_debug_init() runs. */
static struct dentry *debugfs_base;
/* When nonzero, clock_debug_print_enabled() logs enabled clocks. */
static u32 debug_suspend;

/* One registered batch of clock lookups, linked on clk_list. */
struct clk_table {
	struct list_head node;		/* entry in clk_list */
	struct clk_lookup *clocks;	/* caller-owned lookup array */
	size_t num_clocks;		/* number of entries in @clocks */
};
42
Matt Wagantalld64560fe2011-01-26 16:20:54 -080043static int clock_debug_rate_set(void *data, u64 val)
44{
45 struct clk *clock = data;
46 int ret;
47
48 /* Only increases to max rate will succeed, but that's actually good
49 * for debugging purposes so we don't check for error. */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070050 if (clock->flags & CLKFLAG_MAX)
Matt Wagantalld64560fe2011-01-26 16:20:54 -080051 clk_set_max_rate(clock, val);
Matt Wagantall8e6126f2011-11-08 13:34:19 -080052 ret = clk_set_rate(clock, val);
53 if (ret)
Stephen Boyd753ab932012-08-02 13:14:38 -070054 pr_err("clk_set_rate(%s, %lu) failed (%d)\n", clock->dbg_name,
55 (unsigned long)val, ret);
Matt Wagantall8e6126f2011-11-08 13:34:19 -080056
Matt Wagantalld64560fe2011-01-26 16:20:54 -080057 return ret;
58}
59
60static int clock_debug_rate_get(void *data, u64 *val)
61{
62 struct clk *clock = data;
63 *val = clk_get_rate(clock);
64 return 0;
65}
66
67DEFINE_SIMPLE_ATTRIBUTE(clock_rate_fops, clock_debug_rate_get,
68 clock_debug_rate_set, "%llu\n");
69
/* Dummy mux clock used to route rate measurements; NULL if unavailable. */
static struct clk *measure;

/*
 * debugfs "measure" read: measure the clock's actual hardware rate by
 * routing it through the "measure" clock. The hw-gating disable /
 * measure / re-enable sequence below is order-critical.
 */
static int clock_debug_measure_get(void *data, u64 *val)
{
	struct clk *clock = data;
	int ret, is_hw_gated;

	/* Check to see if the clock is in hardware gating mode */
	if (clock->ops->in_hwcg_mode)
		is_hw_gated = clock->ops->in_hwcg_mode(clock);
	else
		is_hw_gated = 0;

	ret = clk_set_parent(measure, clock);
	if (!ret) {
		/*
		 * Disable hw gating to get accurate rate measurements. Only do
		 * this if the clock is explicitly enabled by software. This
		 * allows us to detect errors where clocks are on even though
		 * software is not requesting them to be on due to broken
		 * hardware gating signals.
		 */
		if (is_hw_gated && clock->count)
			clock->ops->disable_hwcg(clock);
		*val = clk_get_rate(measure);
		/* Reenable hwgating if it was disabled */
		if (is_hw_gated && clock->count)
			clock->ops->enable_hwcg(clock);
	}

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(clock_measure_fops, clock_debug_measure_get,
			NULL, "%lld\n");
105
Matt Wagantalld64560fe2011-01-26 16:20:54 -0800106static int clock_debug_enable_set(void *data, u64 val)
107{
108 struct clk *clock = data;
109 int rc = 0;
110
111 if (val)
Stephen Boyd3bbf3462012-01-12 00:19:23 -0800112 rc = clk_prepare_enable(clock);
Matt Wagantalld64560fe2011-01-26 16:20:54 -0800113 else
Stephen Boyd3bbf3462012-01-12 00:19:23 -0800114 clk_disable_unprepare(clock);
Matt Wagantalld64560fe2011-01-26 16:20:54 -0800115
116 return rc;
117}
118
119static int clock_debug_enable_get(void *data, u64 *val)
120{
121 struct clk *clock = data;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700122 int enabled;
Matt Wagantalld64560fe2011-01-26 16:20:54 -0800123
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700124 if (clock->ops->is_enabled)
125 enabled = clock->ops->is_enabled(clock);
126 else
127 enabled = !!(clock->count);
Matt Wagantalld64560fe2011-01-26 16:20:54 -0800128
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700129 *val = enabled;
Matt Wagantalld64560fe2011-01-26 16:20:54 -0800130 return 0;
131}
132
133DEFINE_SIMPLE_ATTRIBUTE(clock_enable_fops, clock_debug_enable_get,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700134 clock_debug_enable_set, "%lld\n");
Matt Wagantalld64560fe2011-01-26 16:20:54 -0800135
136static int clock_debug_local_get(void *data, u64 *val)
137{
138 struct clk *clock = data;
139
Matt Wagantallacb8d022012-02-14 15:28:23 -0800140 if (!clock->ops->is_local)
141 *val = true;
142 else
143 *val = clock->ops->is_local(clock);
Matt Wagantalld64560fe2011-01-26 16:20:54 -0800144
145 return 0;
146}
147
148DEFINE_SIMPLE_ATTRIBUTE(clock_local_fops, clock_debug_local_get,
149 NULL, "%llu\n");
150
Stephen Boyda52d7e32011-11-10 11:59:00 -0800151static int clock_debug_hwcg_get(void *data, u64 *val)
152{
153 struct clk *clock = data;
Stephen Boyd0f7e5642012-08-02 12:59:33 -0700154 if (clock->ops->in_hwcg_mode)
155 *val = !!clock->ops->in_hwcg_mode(clock);
156 else
157 *val = 0;
Stephen Boyda52d7e32011-11-10 11:59:00 -0800158 return 0;
159}
160
161DEFINE_SIMPLE_ATTRIBUTE(clock_hwcg_fops, clock_debug_hwcg_get,
162 NULL, "%llu\n");
163
Matt Wagantall665f0cf2012-02-27 15:54:43 -0800164static int fmax_rates_show(struct seq_file *m, void *unused)
Matt Wagantalld64560fe2011-01-26 16:20:54 -0800165{
Matt Wagantall665f0cf2012-02-27 15:54:43 -0800166 struct clk *clock = m->private;
167 int level = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700168
Matt Wagantall665f0cf2012-02-27 15:54:43 -0800169 int vdd_level = find_vdd_level(clock, clock->rate);
170 if (vdd_level < 0) {
171 seq_printf(m, "could not find_vdd_level for %s, %ld\n",
172 clock->dbg_name, clock->rate);
173 return 0;
174 }
Saravana Kannan55e959d2012-10-15 22:16:04 -0700175 for (level = 0; level < clock->num_fmax; level++) {
Matt Wagantall665f0cf2012-02-27 15:54:43 -0800176 if (vdd_level == level)
177 seq_printf(m, "[%lu] ", clock->fmax[level]);
178 else
179 seq_printf(m, "%lu ", clock->fmax[level]);
180 }
181 seq_printf(m, "\n");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700182
Stephen Boyd31c01e82012-04-13 15:22:00 -0700183 return 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700184}
185
/* Open hook: bind fmax_rates_show() to the clock stored in i_private. */
static int fmax_rates_open(struct inode *inode, struct file *file)
{
	return single_open(file, fmax_rates_show, inode->i_private);
}

/* Read-only seq_file node listing per-voltage-level max frequencies. */
static const struct file_operations fmax_rates_fops = {
	.open = fmax_rates_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700197
198static int list_rates_show(struct seq_file *m, void *unused)
199{
200 struct clk *clock = m->private;
Matt Wagantalle18bbc82011-10-06 10:07:28 -0700201 int rate, level, fmax = 0, i = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700202
Matt Wagantalle18bbc82011-10-06 10:07:28 -0700203 /* Find max frequency supported within voltage constraints. */
204 if (!clock->vdd_class) {
Matt Wagantalle426f9042011-11-01 16:21:34 -0700205 fmax = INT_MAX;
Matt Wagantalle18bbc82011-10-06 10:07:28 -0700206 } else {
Saravana Kannan55e959d2012-10-15 22:16:04 -0700207 for (level = 0; level < clock->num_fmax; level++)
Matt Wagantalle18bbc82011-10-06 10:07:28 -0700208 if (clock->fmax[level])
209 fmax = clock->fmax[level];
210 }
211
212 /*
213 * List supported frequencies <= fmax. Higher frequencies may appear in
214 * the frequency table, but are not valid and should not be listed.
215 */
216 while ((rate = clock->ops->list_rate(clock, i++)) >= 0) {
217 if (rate <= fmax)
218 seq_printf(m, "%u\n", rate);
219 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700220
Matt Wagantalld64560fe2011-01-26 16:20:54 -0800221 return 0;
Matt Wagantalld64560fe2011-01-26 16:20:54 -0800222}
223
/* Open hook: bind list_rates_show() to the clock stored in i_private. */
static int list_rates_open(struct inode *inode, struct file *file)
{
	return single_open(file, list_rates_show, inode->i_private);
}

/* Read-only seq_file node listing the clock's supported rates. */
static const struct file_operations list_rates_fops = {
	.open = list_rates_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
235
Vikram Mulukutlaa0073af2013-04-10 14:24:38 -0700236static ssize_t clock_parent_read(struct file *filp, char __user *ubuf,
237 size_t cnt, loff_t *ppos)
Saravana Kannan531051f2012-09-27 16:19:07 -0700238{
Vikram Mulukutlaa0073af2013-04-10 14:24:38 -0700239 struct clk *clock = filp->private_data;
240 struct clk *p = clock->parent;
241 char name[256] = {0};
Saravana Kannan531051f2012-09-27 16:19:07 -0700242
Vikram Mulukutlaa0073af2013-04-10 14:24:38 -0700243 snprintf(name, sizeof(name), "%s\n", p ? p->dbg_name : "None\n");
Saravana Kannan531051f2012-09-27 16:19:07 -0700244
Vikram Mulukutlaa0073af2013-04-10 14:24:38 -0700245 return simple_read_from_buffer(ubuf, cnt, ppos, name, strlen(name));
Saravana Kannan531051f2012-09-27 16:19:07 -0700246}
247
Vikram Mulukutlaa0073af2013-04-10 14:24:38 -0700248
249static ssize_t clock_parent_write(struct file *filp,
250 const char __user *ubuf, size_t cnt, loff_t *ppos)
Saravana Kannan531051f2012-09-27 16:19:07 -0700251{
Vikram Mulukutlaa0073af2013-04-10 14:24:38 -0700252 struct clk *clock = filp->private_data;
253 char buf[256];
254 char *cmp;
255 unsigned long flags;
256 struct clk_table *table;
257 int i, ret;
258 struct clk *parent = NULL;
259
260 cnt = min(cnt, sizeof(buf) - 1);
261 if (copy_from_user(&buf, ubuf, cnt))
262 return -EFAULT;
263 buf[cnt] = '\0';
264 cmp = strstrip(buf);
265
266 spin_lock_irqsave(&clk_list_lock, flags);
267 list_for_each_entry(table, &clk_list, node) {
268 for (i = 0; i < table->num_clocks; i++)
269 if (!strcmp(cmp, table->clocks[i].clk->dbg_name)) {
270 parent = table->clocks[i].clk;
271 break;
272 }
273 if (parent)
274 break;
275 }
276
277 if (!parent) {
278 ret = -EINVAL;
279 goto err;
280 }
281
282 spin_unlock_irqrestore(&clk_list_lock, flags);
283 ret = clk_set_parent(clock, table->clocks[i].clk);
284 if (ret)
285 return ret;
286
287 return cnt;
288err:
289 spin_unlock_irqrestore(&clk_list_lock, flags);
290 return ret;
Saravana Kannan531051f2012-09-27 16:19:07 -0700291}
292
Vikram Mulukutlaa0073af2013-04-10 14:24:38 -0700293
/*
 * "parent" node: read reports the current parent's name, write
 * re-parents the clock. simple_open copies i_private (the struct clk)
 * into file->private_data for the read/write handlers.
 */
static const struct file_operations clock_parent_fops = {
	.open = simple_open,
	.read = clock_parent_read,
	.write = clock_parent_write,
};
299
/*
 * clock_debug_add() - Create the per-clock debugfs directory and nodes.
 *
 * Creates <debugfs>/clk/<name>/ (name lower-cased) containing rate,
 * enable, is_local and has_hw_gating nodes, plus measure, list_rates,
 * fmax_rates and parent nodes where the clock supports them.
 *
 * Returns 0 on success, -ENOMEM on any debugfs failure; nodes created
 * before the failure are removed again.
 */
static int clock_debug_add(struct clk *clock)
{
	char temp[50], *ptr;
	struct dentry *clk_dir;

	if (!debugfs_base)
		return -ENOMEM;

	/* debugfs convention: lower-case directory names. */
	strlcpy(temp, clock->dbg_name, ARRAY_SIZE(temp));
	for (ptr = temp; *ptr; ptr++)
		*ptr = tolower(*ptr);

	clk_dir = debugfs_create_dir(temp, debugfs_base);
	if (!clk_dir)
		return -ENOMEM;

	if (!debugfs_create_file("rate", S_IRUGO | S_IWUSR, clk_dir,
				clock, &clock_rate_fops))
		goto error;

	if (!debugfs_create_file("enable", S_IRUGO | S_IWUSR, clk_dir,
				clock, &clock_enable_fops))
		goto error;

	if (!debugfs_create_file("is_local", S_IRUGO, clk_dir, clock,
				&clock_local_fops))
		goto error;

	if (!debugfs_create_file("has_hw_gating", S_IRUGO, clk_dir, clock,
				&clock_hwcg_fops))
		goto error;

	/* Only expose "measure" if this clock can feed the measure mux. */
	if (measure &&
	    !clk_set_parent(measure, clock) &&
	    !debugfs_create_file("measure", S_IRUGO, clk_dir, clock,
				&clock_measure_fops))
		goto error;

	if (clock->ops->list_rate)
		if (!debugfs_create_file("list_rates",
				S_IRUGO, clk_dir, clock, &list_rates_fops))
			goto error;

	if (clock->vdd_class && !debugfs_create_file("fmax_rates",
				S_IRUGO, clk_dir, clock, &fmax_rates_fops))
		goto error;

	if (!debugfs_create_file("parent", S_IRUGO, clk_dir, clock,
				&clock_parent_fops))
		goto error;

	return 0;
error:
	/* Tear down any nodes created before the failure. */
	debugfs_remove_recursive(clk_dir);
	return -ENOMEM;
}
Vikram Mulukutla4785ab62012-12-10 20:51:22 -0800356static DEFINE_MUTEX(clk_debug_lock);
357static int clk_debug_init_once;
Matt Wagantall665f0cf2012-02-27 15:54:43 -0800358
359/**
360 * clock_debug_init() - Initialize clock debugfs
Vikram Mulukutla4785ab62012-12-10 20:51:22 -0800361 * Lock clk_debug_lock before invoking this function.
Matt Wagantall665f0cf2012-02-27 15:54:43 -0800362 */
Vikram Mulukutla4785ab62012-12-10 20:51:22 -0800363static int clock_debug_init(void)
Matt Wagantall665f0cf2012-02-27 15:54:43 -0800364{
Vikram Mulukutla4785ab62012-12-10 20:51:22 -0800365 if (clk_debug_init_once)
366 return 0;
367
368 clk_debug_init_once = 1;
369
Matt Wagantall665f0cf2012-02-27 15:54:43 -0800370 debugfs_base = debugfs_create_dir("clk", NULL);
371 if (!debugfs_base)
372 return -ENOMEM;
Vikram Mulukutla4785ab62012-12-10 20:51:22 -0800373
Matt Wagantall665f0cf2012-02-27 15:54:43 -0800374 if (!debugfs_create_u32("debug_suspend", S_IRUGO | S_IWUSR,
375 debugfs_base, &debug_suspend)) {
376 debugfs_remove_recursive(debugfs_base);
377 return -ENOMEM;
378 }
379
380 measure = clk_get_sys("debug", "measure");
381 if (IS_ERR(measure))
382 measure = NULL;
383
384 return 0;
385}
386
Vikram Mulukutla4785ab62012-12-10 20:51:22 -0800387/**
388 * clock_debug_register() - Add additional clocks to clock debugfs hierarchy
389 * @table: Table of clocks to create debugfs nodes for
390 * @size: Size of @table
391 *
392 */
393int clock_debug_register(struct clk_lookup *table, size_t size)
394{
395 struct clk_table *clk_table;
396 unsigned long flags;
397 int i, ret;
398
399 mutex_lock(&clk_debug_lock);
400
401 ret = clock_debug_init();
402 if (ret)
403 goto out;
404
405 clk_table = kmalloc(sizeof(*clk_table), GFP_KERNEL);
406 if (!clk_table) {
407 ret = -ENOMEM;
408 goto out;
409 }
410
411 clk_table->clocks = table;
412 clk_table->num_clocks = size;
413
414 spin_lock_irqsave(&clk_list_lock, flags);
415 list_add_tail(&clk_table->node, &clk_list);
416 spin_unlock_irqrestore(&clk_list_lock, flags);
417
418 for (i = 0; i < size; i++)
419 clock_debug_add(table[i].clk);
420
421out:
422 mutex_unlock(&clk_debug_lock);
423 return ret;
424}
425
Matt Wagantall665f0cf2012-02-27 15:54:43 -0800426static int clock_debug_print_clock(struct clk *c)
427{
428 char *start = "";
429
430 if (!c || !c->prepare_count)
431 return 0;
432
433 pr_info("\t");
434 do {
435 if (c->vdd_class)
436 pr_cont("%s%s:%u:%u [%ld, %lu]", start, c->dbg_name,
437 c->prepare_count, c->count, c->rate,
438 c->vdd_class->cur_level);
439 else
440 pr_cont("%s%s:%u:%u [%ld]", start, c->dbg_name,
441 c->prepare_count, c->count, c->rate);
442 start = " -> ";
443 } while ((c = clk_get_parent(c)));
444
445 pr_cont("\n");
446
447 return 1;
448}
449
450/**
451 * clock_debug_print_enabled() - Print names of enabled clocks for suspend debug
452 *
453 * Print the names of enabled clocks and their parents if debug_suspend is set
454 */
455void clock_debug_print_enabled(void)
456{
457 struct clk_table *table;
458 unsigned long flags;
459 int i, cnt = 0;
460
461 if (likely(!debug_suspend))
462 return;
463
464 pr_info("Enabled clocks:\n");
465 spin_lock_irqsave(&clk_list_lock, flags);
466 list_for_each_entry(table, &clk_list, node) {
467 for (i = 0; i < table->num_clocks; i++)
468 cnt += clock_debug_print_clock(table->clocks[i].clk);
469 }
470 spin_unlock_irqrestore(&clk_list_lock, flags);
471
472 if (cnt)
473 pr_info("Enabled clock count: %d\n", cnt);
474 else
475 pr_info("No clocks enabled.\n");
476
477}