/*
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2007-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/clk.h>
#include <linux/list.h>
#include <linux/clkdev.h>
#include <linux/uaccess.h>
#include <linux/mutex.h>
#include <linux/io.h>

#include <mach/clk-provider.h>

#include "clock.h"

static LIST_HEAD(clk_list);
static DEFINE_SPINLOCK(clk_list_lock);

static struct dentry *debugfs_base;
static u32 debug_suspend;

struct clk_table {
	struct list_head node;
	struct clk_lookup *clocks;
	size_t num_clocks;
};

static int clock_debug_rate_set(void *data, u64 val)
{
	struct clk *clock = data;
	int ret;

	/*
	 * Only increases to the max rate will succeed, but that's actually
	 * good for debugging purposes, so the return value is not checked.
	 */
	if (clock->flags & CLKFLAG_MAX)
		clk_set_max_rate(clock, val);
	ret = clk_set_rate(clock, val);
	if (ret)
		pr_err("clk_set_rate(%s, %lu) failed (%d)\n", clock->dbg_name,
			(unsigned long)val, ret);

	return ret;
}

static int clock_debug_rate_get(void *data, u64 *val)
{
	struct clk *clock = data;
	*val = clk_get_rate(clock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(clock_rate_fops, clock_debug_rate_get,
			clock_debug_rate_set, "%llu\n");

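/*
 * Example (illustrative; assumes debugfs is mounted at /sys/kernel/debug and
 * a clock with dbg_name "gp0_clk" has been registered — the name and rate
 * below are not taken from this file):
 *
 *	cat /sys/kernel/debug/clk/gp0_clk/rate
 *	echo 19200000 > /sys/kernel/debug/clk/gp0_clk/rate
 */
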
static struct clk *measure;

static int clock_debug_measure_get(void *data, u64 *val)
{
	struct clk *clock = data;
	int ret, is_hw_gated;

	/* Check to see if the clock is in hardware gating mode */
	if (clock->ops->in_hwcg_mode)
		is_hw_gated = clock->ops->in_hwcg_mode(clock);
	else
		is_hw_gated = 0;

	ret = clk_set_parent(measure, clock);
	if (!ret) {
		/*
		 * Disable hw gating to get accurate rate measurements. Only do
		 * this if the clock is explicitly enabled by software. This
		 * allows us to detect errors where clocks are on even though
		 * software is not requesting them to be on due to broken
		 * hardware gating signals.
		 */
		if (is_hw_gated && clock->count)
			clock->ops->disable_hwcg(clock);
		*val = clk_get_rate(measure);
		/* Re-enable hw gating if it was disabled */
		if (is_hw_gated && clock->count)
			clock->ops->enable_hwcg(clock);
	}

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(clock_measure_fops, clock_debug_measure_get,
			NULL, "%lld\n");

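/*
 * Example (illustrative): reading the per-clock "measure" node reports the
 * rate measured through the platform's "measure" clock, which is obtained in
 * clock_debug_init() below via clk_get_sys("debug", "measure"). The clock
 * name is assumed:
 *
 *	cat /sys/kernel/debug/clk/gp0_clk/measure
 */
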
static int clock_debug_enable_set(void *data, u64 val)
{
	struct clk *clock = data;
	int rc = 0;

	if (val)
		rc = clk_prepare_enable(clock);
	else
		clk_disable_unprepare(clock);

	return rc;
}

static int clock_debug_enable_get(void *data, u64 *val)
{
	struct clk *clock = data;
	int enabled;

	if (clock->ops->is_enabled)
		enabled = clock->ops->is_enabled(clock);
	else
		enabled = !!(clock->count);

	*val = enabled;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(clock_enable_fops, clock_debug_enable_get,
			clock_debug_enable_set, "%lld\n");

static int clock_debug_local_get(void *data, u64 *val)
{
	struct clk *clock = data;

	if (!clock->ops->is_local)
		*val = true;
	else
		*val = clock->ops->is_local(clock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(clock_local_fops, clock_debug_local_get,
			NULL, "%llu\n");

static int clock_debug_hwcg_get(void *data, u64 *val)
{
	struct clk *clock = data;

	if (clock->ops->in_hwcg_mode)
		*val = !!clock->ops->in_hwcg_mode(clock);
	else
		*val = 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(clock_hwcg_fops, clock_debug_hwcg_get,
			NULL, "%llu\n");

static void clock_print_fmax_by_level(struct seq_file *m, int level)
{
	struct clk *clock = m->private;
	struct clk_vdd_class *vdd_class = clock->vdd_class;
	int off, i, vdd_level, nregs = vdd_class->num_regulators;

	vdd_level = find_vdd_level(clock, clock->rate);

	seq_printf(m, "%2s%10lu", vdd_level == level ? "[" : "",
		   clock->fmax[level]);
	for (i = 0; i < nregs; i++) {
		off = nregs * level + i;
		if (vdd_class->vdd_uv)
			seq_printf(m, "%10u", vdd_class->vdd_uv[off]);
		if (vdd_class->vdd_ua)
			seq_printf(m, "%10u", vdd_class->vdd_ua[off]);
	}

	if (vdd_level == level)
		seq_puts(m, "]");
	seq_puts(m, "\n");
}

static int fmax_rates_show(struct seq_file *m, void *unused)
{
	struct clk *clock = m->private;
	struct clk_vdd_class *vdd_class = clock->vdd_class;
	int level = 0, i, nregs = vdd_class->num_regulators;
	char reg_name[10];
	int vdd_level = find_vdd_level(clock, clock->rate);

	if (vdd_level < 0) {
		seq_printf(m, "could not find_vdd_level for %s, %ld\n",
			   clock->dbg_name, clock->rate);
		return 0;
	}

	seq_printf(m, "%12s", "");
	for (i = 0; i < nregs; i++) {
		snprintf(reg_name, ARRAY_SIZE(reg_name), "reg %d", i);
		seq_printf(m, "%10s", reg_name);
		if (vdd_class->vdd_ua)
			seq_printf(m, "%10s", "");
	}

	seq_printf(m, "\n%12s", "freq");
	for (i = 0; i < nregs; i++) {
		seq_printf(m, "%10s", "uV");
		if (vdd_class->vdd_ua)
			seq_printf(m, "%10s", "uA");
	}
	seq_printf(m, "\n");

	for (level = 0; level < clock->num_fmax; level++)
		clock_print_fmax_by_level(m, level);

	return 0;
}

static int fmax_rates_open(struct inode *inode, struct file *file)
{
	return single_open(file, fmax_rates_show, inode->i_private);
}

static const struct file_operations fmax_rates_fops = {
	.open = fmax_rates_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

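/*
 * Emit debug output to a seq_file when one is provided (debugfs reads);
 * otherwise log to the kernel log. Passing a non-zero 'c' selects pr_cont()
 * so a logical line built up by repeated calls is continued rather than
 * split across log entries.
 */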
#define clock_debug_output(m, c, fmt, ...)		\
do {							\
	if (m)						\
		seq_printf(m, fmt, ##__VA_ARGS__);	\
	else if (c)					\
		pr_cont(fmt, ##__VA_ARGS__);		\
	else						\
		pr_info(fmt, ##__VA_ARGS__);		\
} while (0)

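/*
 * Print one enabled clock and its parent chain as
 * "name:prepare_count:count [rate, vdd_level]". Sample output (clock names
 * and values below are illustrative, not taken from this file):
 *
 *	gp0_clk:1:1 [19200000, 1] -> cxo:5:3 [19200000]
 */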
static int clock_debug_print_clock(struct clk *c, struct seq_file *m)
{
	char *start = "";

	if (!c || !c->prepare_count)
		return 0;

	clock_debug_output(m, 0, "\t");
	do {
		if (c->vdd_class)
			clock_debug_output(m, 1, "%s%s:%u:%u [%ld, %lu]", start,
				c->dbg_name, c->prepare_count, c->count,
				c->rate, c->vdd_class->cur_level);
		else
			clock_debug_output(m, 1, "%s%s:%u:%u [%ld]", start,
				c->dbg_name, c->prepare_count, c->count,
				c->rate);
		start = " -> ";
	} while ((c = clk_get_parent(c)));

	clock_debug_output(m, 1, "\n");

	return 1;
}

/**
 * clock_debug_print_enabled_clocks() - Print names of enabled clocks
 * @m: seq_file to print to, or NULL to print to the kernel log
 */
static void clock_debug_print_enabled_clocks(struct seq_file *m)
{
	struct clk_table *table;
	unsigned long flags;
	int i, cnt = 0;

	clock_debug_output(m, 0, "Enabled clocks:\n");
	spin_lock_irqsave(&clk_list_lock, flags);
	list_for_each_entry(table, &clk_list, node) {
		for (i = 0; i < table->num_clocks; i++)
			cnt += clock_debug_print_clock(table->clocks[i].clk, m);
	}
	spin_unlock_irqrestore(&clk_list_lock, flags);

	if (cnt)
		clock_debug_output(m, 0, "Enabled clock count: %d\n", cnt);
	else
		clock_debug_output(m, 0, "No clocks enabled.\n");
}

static int enabled_clocks_show(struct seq_file *m, void *unused)
{
	clock_debug_print_enabled_clocks(m);
	return 0;
}

static int enabled_clocks_open(struct inode *inode, struct file *file)
{
	return single_open(file, enabled_clocks_show, inode->i_private);
}

static const struct file_operations enabled_clocks_fops = {
	.open = enabled_clocks_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static int list_rates_show(struct seq_file *m, void *unused)
{
	struct clk *clock = m->private;
	int level, i = 0;
	unsigned long rate, fmax = 0;

	/* Find max frequency supported within voltage constraints. */
	if (!clock->vdd_class) {
		fmax = ULONG_MAX;
	} else {
		for (level = 0; level < clock->num_fmax; level++)
			if (clock->fmax[level])
				fmax = clock->fmax[level];
	}

	/*
	 * List supported frequencies <= fmax. Higher frequencies may appear in
	 * the frequency table, but are not valid and should not be listed.
	 */
	while (!IS_ERR_VALUE(rate = clock->ops->list_rate(clock, i++))) {
		if (rate <= fmax)
			seq_printf(m, "%lu\n", rate);
	}

	return 0;
}

static int list_rates_open(struct inode *inode, struct file *file)
{
	return single_open(file, list_rates_show, inode->i_private);
}

static const struct file_operations list_rates_fops = {
	.open = list_rates_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

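/*
 * The "parent" debugfs node reads back the current parent's dbg_name and
 * accepts the dbg_name of any registered clock to request a reparent via
 * clk_set_parent(). Example (the clock names are illustrative):
 *
 *	cat /sys/kernel/debug/clk/gp0_clk/parent
 *	echo pll8 > /sys/kernel/debug/clk/gp0_clk/parent
 */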
static ssize_t clock_parent_read(struct file *filp, char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct clk *clock = filp->private_data;
	struct clk *p = clock->parent;
	char name[256] = {0};

	snprintf(name, sizeof(name), "%s\n", p ? p->dbg_name : "None");

	return simple_read_from_buffer(ubuf, cnt, ppos, name, strlen(name));
}

static ssize_t clock_parent_write(struct file *filp,
		const char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct clk *clock = filp->private_data;
	char buf[256];
	char *cmp;
	unsigned long flags;
	struct clk_table *table;
	int i, ret;
	struct clk *parent = NULL;

	cnt = min(cnt, sizeof(buf) - 1);
	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;
	buf[cnt] = '\0';
	cmp = strstrip(buf);

	spin_lock_irqsave(&clk_list_lock, flags);
	list_for_each_entry(table, &clk_list, node) {
		for (i = 0; i < table->num_clocks; i++)
			if (!strcmp(cmp, table->clocks[i].clk->dbg_name)) {
				parent = table->clocks[i].clk;
				break;
			}
		if (parent)
			break;
	}

	if (!parent) {
		ret = -EINVAL;
		goto err;
	}

	spin_unlock_irqrestore(&clk_list_lock, flags);
	ret = clk_set_parent(clock, table->clocks[i].clk);
	if (ret)
		return ret;

	return cnt;
err:
	spin_unlock_irqrestore(&clk_list_lock, flags);
	return ret;
}

static const struct file_operations clock_parent_fops = {
	.open = simple_open,
	.read = clock_parent_read,
	.write = clock_parent_write,
};

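/*
 * Dump the hardware state of the given clock and each of its ancestors: the
 * function recurses up the parent chain and, for every clock that implements
 * list_registers(), prints the named registers. Sample output (the register
 * names and values below are illustrative):
 *
 *	gp0_clk
 *	            CMD_RCGR: 0x00000003
 *	            CFG_RCGR: 0x00000105
 */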
void clk_debug_print_hw(struct clk *clk, struct seq_file *f)
{
	void __iomem *base;
	struct clk_register_data *regs;
	u32 i, j, size;

	if (IS_ERR_OR_NULL(clk))
		return;

	clk_debug_print_hw(clk->parent, f);

	clock_debug_output(f, false, "%s\n", clk->dbg_name);

	if (!clk->ops->list_registers)
		return;

	j = 0;
	base = clk->ops->list_registers(clk, j, &regs, &size);
	while (!IS_ERR(base)) {
		for (i = 0; i < size; i++) {
			u32 val = readl_relaxed(base + regs[i].offset);

			clock_debug_output(f, false, "%20s: 0x%.8x\n",
				regs[i].name, val);
		}
		j++;
		base = clk->ops->list_registers(clk, j, &regs, &size);
	}
}

static int print_hw_show(struct seq_file *m, void *unused)
{
	struct clk *c = m->private;

	clk_debug_print_hw(c, m);

	return 0;
}

static int print_hw_open(struct inode *inode, struct file *file)
{
	return single_open(file, print_hw_show, inode->i_private);
}

static const struct file_operations clock_print_hw_fops = {
	.open = print_hw_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

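/*
 * Create the per-clock debugfs directory (named after the lower-cased
 * dbg_name) and populate it: "rate", "enable", "is_local", "has_hw_gating",
 * "parent" and "print" are always created, while "measure", "list_rates" and
 * "fmax_rates" are only created when the clock supports them.
 */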
static int clock_debug_add(struct clk *clock)
{
	char temp[50], *ptr;
	struct dentry *clk_dir;

	if (!debugfs_base)
		return -ENOMEM;

	strlcpy(temp, clock->dbg_name, ARRAY_SIZE(temp));
	for (ptr = temp; *ptr; ptr++)
		*ptr = tolower(*ptr);

	clk_dir = debugfs_create_dir(temp, debugfs_base);
	if (!clk_dir)
		return -ENOMEM;

	if (!debugfs_create_file("rate", S_IRUGO | S_IWUSR, clk_dir,
				clock, &clock_rate_fops))
		goto error;

	if (!debugfs_create_file("enable", S_IRUGO | S_IWUSR, clk_dir,
				clock, &clock_enable_fops))
		goto error;

	if (!debugfs_create_file("is_local", S_IRUGO, clk_dir, clock,
				&clock_local_fops))
		goto error;

	if (!debugfs_create_file("has_hw_gating", S_IRUGO, clk_dir, clock,
				&clock_hwcg_fops))
		goto error;

	if (measure &&
	    !clk_set_parent(measure, clock) &&
	    !debugfs_create_file("measure", S_IRUGO, clk_dir, clock,
				&clock_measure_fops))
		goto error;

	if (clock->ops->list_rate)
		if (!debugfs_create_file("list_rates",
				S_IRUGO, clk_dir, clock, &list_rates_fops))
			goto error;

	if (clock->vdd_class && !debugfs_create_file("fmax_rates",
				S_IRUGO, clk_dir, clock, &fmax_rates_fops))
		goto error;

	if (!debugfs_create_file("parent", S_IRUGO, clk_dir, clock,
				&clock_parent_fops))
		goto error;

	if (!debugfs_create_file("print", S_IRUGO, clk_dir, clock,
				&clock_print_hw_fops))
		goto error;

	return 0;
error:
	debugfs_remove_recursive(clk_dir);
	return -ENOMEM;
}

static DEFINE_MUTEX(clk_debug_lock);
static int clk_debug_init_once;

/**
 * clock_debug_init() - Initialize clock debugfs
 *
 * Lock clk_debug_lock before invoking this function.
 */
static int clock_debug_init(void)
{
	if (clk_debug_init_once)
		return 0;

	clk_debug_init_once = 1;

	debugfs_base = debugfs_create_dir("clk", NULL);
	if (!debugfs_base)
		return -ENOMEM;

	if (!debugfs_create_u32("debug_suspend", S_IRUGO | S_IWUSR,
				debugfs_base, &debug_suspend)) {
		debugfs_remove_recursive(debugfs_base);
		return -ENOMEM;
	}

	if (!debugfs_create_file("enabled_clocks", S_IRUGO, debugfs_base, NULL,
				 &enabled_clocks_fops))
		return -ENOMEM;

	measure = clk_get_sys("debug", "measure");
	if (IS_ERR(measure))
		measure = NULL;

	return 0;
}

/**
 * clock_debug_register() - Add additional clocks to clock debugfs hierarchy
 * @table: Table of clocks to create debugfs nodes for
 * @size: Size of @table
 */
int clock_debug_register(struct clk_lookup *table, size_t size)
{
	struct clk_table *clk_table;
	unsigned long flags;
	int i, ret;

	mutex_lock(&clk_debug_lock);

	ret = clock_debug_init();
	if (ret)
		goto out;

	clk_table = kmalloc(sizeof(*clk_table), GFP_KERNEL);
	if (!clk_table) {
		ret = -ENOMEM;
		goto out;
	}

	clk_table->clocks = table;
	clk_table->num_clocks = size;

	spin_lock_irqsave(&clk_list_lock, flags);
	list_add_tail(&clk_table->node, &clk_list);
	spin_unlock_irqrestore(&clk_list_lock, flags);

	for (i = 0; i < size; i++)
		clock_debug_add(table[i].clk);

out:
	mutex_unlock(&clk_debug_lock);
	return ret;
}
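
/*
 * Typical usage from SoC clock code, sketched with an illustrative table;
 * the CLK_LOOKUP() initializer and the clock/device names below are
 * assumptions and are not defined in this file:
 *
 *	static struct clk_lookup msm_clocks_example[] = {
 *		CLK_LOOKUP("core_clk", gp0_clk.c, "example-device.0"),
 *	};
 *
 *	clock_debug_register(msm_clocks_example,
 *			     ARRAY_SIZE(msm_clocks_example));
 */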

/*
 * Print the names of enabled clocks and their parents if debug_suspend is set.
 */
void clock_debug_print_enabled(void)
{
	if (likely(!debug_suspend))
		return;

	clock_debug_print_enabled_clocks(NULL);
Matt Wagantall665f0cf2012-02-27 15:54:43 -0800609}