| Matt Wagantall | d64560fe | 2011-01-26 16:20:54 -0800 | [diff] [blame] | 1 | /* | 
|  | 2 | * Copyright (C) 2007 Google, Inc. | 
| Vikram Mulukutla | a0073af | 2013-04-10 14:24:38 -0700 | [diff] [blame] | 3 | * Copyright (c) 2007-2013, The Linux Foundation. All rights reserved. | 
| Matt Wagantall | d64560fe | 2011-01-26 16:20:54 -0800 | [diff] [blame] | 4 | * | 
|  | 5 | * This software is licensed under the terms of the GNU General Public | 
|  | 6 | * License version 2, as published by the Free Software Foundation, and | 
|  | 7 | * may be copied, distributed, and modified under those terms. | 
|  | 8 | * | 
|  | 9 | * This program is distributed in the hope that it will be useful, | 
|  | 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 
|  | 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the | 
|  | 12 | * GNU General Public License for more details. | 
|  | 13 | * | 
|  | 14 | */ | 
|  | 15 |  | 
|  | 16 | #include <linux/kernel.h> | 
|  | 17 | #include <linux/module.h> | 
|  | 18 | #include <linux/ctype.h> | 
|  | 19 | #include <linux/debugfs.h> | 
| Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 20 | #include <linux/seq_file.h> | 
| Matt Wagantall | d64560fe | 2011-01-26 16:20:54 -0800 | [diff] [blame] | 21 | #include <linux/clk.h> | 
| Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 22 | #include <linux/list.h> | 
|  | 23 | #include <linux/clkdev.h> | 
| Vikram Mulukutla | a0073af | 2013-04-10 14:24:38 -0700 | [diff] [blame] | 24 | #include <linux/uaccess.h> | 
| Vikram Mulukutla | 4785ab6 | 2012-12-10 20:51:22 -0800 | [diff] [blame] | 25 | #include <linux/mutex.h> | 
|  | 26 |  | 
| Matt Wagantall | 33d01f5 | 2012-02-23 23:27:44 -0800 | [diff] [blame] | 27 | #include <mach/clk-provider.h> | 
| Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 28 |  | 
| Matt Wagantall | d64560fe | 2011-01-26 16:20:54 -0800 | [diff] [blame] | 29 | #include "clock.h" | 
| Matt Wagantall | d64560fe | 2011-01-26 16:20:54 -0800 | [diff] [blame] | 30 |  | 
/* List of all registered clock tables; protected by clk_list_lock. */
static LIST_HEAD(clk_list);
static DEFINE_SPINLOCK(clk_list_lock);

/* Root directory of the clock debugfs hierarchy ("clk"). */
static struct dentry *debugfs_base;
/* Exposed as debugfs "debug_suspend"; non-zero enables enabled-clock dumps. */
static u32 debug_suspend;

/*
 * One entry per clock_debug_register() call: wraps the caller-owned
 * clk_lookup array so it can be linked into clk_list.
 */
struct clk_table {
	struct list_head node;		/* entry in clk_list */
	struct clk_lookup *clocks;	/* caller-owned lookup array */
	size_t num_clocks;		/* number of entries in @clocks */
};
|  | 42 |  | 
| Matt Wagantall | d64560fe | 2011-01-26 16:20:54 -0800 | [diff] [blame] | 43 | static int clock_debug_rate_set(void *data, u64 val) | 
|  | 44 | { | 
|  | 45 | struct clk *clock = data; | 
|  | 46 | int ret; | 
|  | 47 |  | 
|  | 48 | /* Only increases to max rate will succeed, but that's actually good | 
|  | 49 | * for debugging purposes so we don't check for error. */ | 
| Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 50 | if (clock->flags & CLKFLAG_MAX) | 
| Matt Wagantall | d64560fe | 2011-01-26 16:20:54 -0800 | [diff] [blame] | 51 | clk_set_max_rate(clock, val); | 
| Matt Wagantall | 8e6126f | 2011-11-08 13:34:19 -0800 | [diff] [blame] | 52 | ret = clk_set_rate(clock, val); | 
|  | 53 | if (ret) | 
| Stephen Boyd | 753ab93 | 2012-08-02 13:14:38 -0700 | [diff] [blame] | 54 | pr_err("clk_set_rate(%s, %lu) failed (%d)\n", clock->dbg_name, | 
|  | 55 | (unsigned long)val, ret); | 
| Matt Wagantall | 8e6126f | 2011-11-08 13:34:19 -0800 | [diff] [blame] | 56 |  | 
| Matt Wagantall | d64560fe | 2011-01-26 16:20:54 -0800 | [diff] [blame] | 57 | return ret; | 
|  | 58 | } | 
|  | 59 |  | 
|  | 60 | static int clock_debug_rate_get(void *data, u64 *val) | 
|  | 61 | { | 
|  | 62 | struct clk *clock = data; | 
|  | 63 | *val = clk_get_rate(clock); | 
|  | 64 | return 0; | 
|  | 65 | } | 
|  | 66 |  | 
|  | 67 | DEFINE_SIMPLE_ATTRIBUTE(clock_rate_fops, clock_debug_rate_get, | 
|  | 68 | clock_debug_rate_set, "%llu\n"); | 
|  | 69 |  | 
/* Dedicated "measure" clock used to read back rates from hardware. */
static struct clk *measure;

/*
 * debugfs "measure" read handler: mux the clock onto the measurement
 * hardware (via clk_set_parent on the "measure" clock) and report the
 * rate actually observed, temporarily disabling hardware clock gating
 * so the measurement is accurate.
 */
static int clock_debug_measure_get(void *data, u64 *val)
{
	struct clk *clock = data;
	int ret, is_hw_gated;

	/* Check to see if the clock is in hardware gating mode */
	if (clock->ops->in_hwcg_mode)
		is_hw_gated = clock->ops->in_hwcg_mode(clock);
	else
		is_hw_gated = 0;

	ret = clk_set_parent(measure, clock);
	if (!ret) {
		/*
		 * Disable hw gating to get accurate rate measurements. Only do
		 * this if the clock is explicitly enabled by software. This
		 * allows us to detect errors where clocks are on even though
		 * software is not requesting them to be on due to broken
		 * hardware gating signals.
		 */
		if (is_hw_gated && clock->count)
			clock->ops->disable_hwcg(clock);
		*val = clk_get_rate(measure);
		/* Reenable hwgating if it was disabled */
		if (is_hw_gated && clock->count)
			clock->ops->enable_hwcg(clock);
	}

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(clock_measure_fops, clock_debug_measure_get,
			NULL, "%lld\n");
|  | 105 |  | 
| Matt Wagantall | d64560fe | 2011-01-26 16:20:54 -0800 | [diff] [blame] | 106 | static int clock_debug_enable_set(void *data, u64 val) | 
|  | 107 | { | 
|  | 108 | struct clk *clock = data; | 
|  | 109 | int rc = 0; | 
|  | 110 |  | 
|  | 111 | if (val) | 
| Stephen Boyd | 3bbf346 | 2012-01-12 00:19:23 -0800 | [diff] [blame] | 112 | rc = clk_prepare_enable(clock); | 
| Matt Wagantall | d64560fe | 2011-01-26 16:20:54 -0800 | [diff] [blame] | 113 | else | 
| Stephen Boyd | 3bbf346 | 2012-01-12 00:19:23 -0800 | [diff] [blame] | 114 | clk_disable_unprepare(clock); | 
| Matt Wagantall | d64560fe | 2011-01-26 16:20:54 -0800 | [diff] [blame] | 115 |  | 
|  | 116 | return rc; | 
|  | 117 | } | 
|  | 118 |  | 
|  | 119 | static int clock_debug_enable_get(void *data, u64 *val) | 
|  | 120 | { | 
|  | 121 | struct clk *clock = data; | 
| Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 122 | int enabled; | 
| Matt Wagantall | d64560fe | 2011-01-26 16:20:54 -0800 | [diff] [blame] | 123 |  | 
| Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 124 | if (clock->ops->is_enabled) | 
|  | 125 | enabled = clock->ops->is_enabled(clock); | 
|  | 126 | else | 
|  | 127 | enabled = !!(clock->count); | 
| Matt Wagantall | d64560fe | 2011-01-26 16:20:54 -0800 | [diff] [blame] | 128 |  | 
| Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 129 | *val = enabled; | 
| Matt Wagantall | d64560fe | 2011-01-26 16:20:54 -0800 | [diff] [blame] | 130 | return 0; | 
|  | 131 | } | 
|  | 132 |  | 
|  | 133 | DEFINE_SIMPLE_ATTRIBUTE(clock_enable_fops, clock_debug_enable_get, | 
| Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 134 | clock_debug_enable_set, "%lld\n"); | 
| Matt Wagantall | d64560fe | 2011-01-26 16:20:54 -0800 | [diff] [blame] | 135 |  | 
|  | 136 | static int clock_debug_local_get(void *data, u64 *val) | 
|  | 137 | { | 
|  | 138 | struct clk *clock = data; | 
|  | 139 |  | 
| Matt Wagantall | acb8d02 | 2012-02-14 15:28:23 -0800 | [diff] [blame] | 140 | if (!clock->ops->is_local) | 
|  | 141 | *val = true; | 
|  | 142 | else | 
|  | 143 | *val = clock->ops->is_local(clock); | 
| Matt Wagantall | d64560fe | 2011-01-26 16:20:54 -0800 | [diff] [blame] | 144 |  | 
|  | 145 | return 0; | 
|  | 146 | } | 
|  | 147 |  | 
|  | 148 | DEFINE_SIMPLE_ATTRIBUTE(clock_local_fops, clock_debug_local_get, | 
|  | 149 | NULL, "%llu\n"); | 
|  | 150 |  | 
| Stephen Boyd | a52d7e3 | 2011-11-10 11:59:00 -0800 | [diff] [blame] | 151 | static int clock_debug_hwcg_get(void *data, u64 *val) | 
|  | 152 | { | 
|  | 153 | struct clk *clock = data; | 
| Stephen Boyd | 0f7e564 | 2012-08-02 12:59:33 -0700 | [diff] [blame] | 154 | if (clock->ops->in_hwcg_mode) | 
|  | 155 | *val = !!clock->ops->in_hwcg_mode(clock); | 
|  | 156 | else | 
|  | 157 | *val = 0; | 
| Stephen Boyd | a52d7e3 | 2011-11-10 11:59:00 -0800 | [diff] [blame] | 158 | return 0; | 
|  | 159 | } | 
|  | 160 |  | 
|  | 161 | DEFINE_SIMPLE_ATTRIBUTE(clock_hwcg_fops, clock_debug_hwcg_get, | 
|  | 162 | NULL, "%llu\n"); | 
|  | 163 |  | 
| Matt Wagantall | 665f0cf | 2012-02-27 15:54:43 -0800 | [diff] [blame] | 164 | static int fmax_rates_show(struct seq_file *m, void *unused) | 
| Matt Wagantall | d64560fe | 2011-01-26 16:20:54 -0800 | [diff] [blame] | 165 | { | 
| Matt Wagantall | 665f0cf | 2012-02-27 15:54:43 -0800 | [diff] [blame] | 166 | struct clk *clock = m->private; | 
|  | 167 | int level = 0; | 
| Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 168 |  | 
| Matt Wagantall | 665f0cf | 2012-02-27 15:54:43 -0800 | [diff] [blame] | 169 | int vdd_level = find_vdd_level(clock, clock->rate); | 
|  | 170 | if (vdd_level < 0) { | 
|  | 171 | seq_printf(m, "could not find_vdd_level for %s, %ld\n", | 
|  | 172 | clock->dbg_name, clock->rate); | 
|  | 173 | return 0; | 
|  | 174 | } | 
| Saravana Kannan | 55e959d | 2012-10-15 22:16:04 -0700 | [diff] [blame] | 175 | for (level = 0; level < clock->num_fmax; level++) { | 
| Matt Wagantall | 665f0cf | 2012-02-27 15:54:43 -0800 | [diff] [blame] | 176 | if (vdd_level == level) | 
|  | 177 | seq_printf(m, "[%lu] ", clock->fmax[level]); | 
|  | 178 | else | 
|  | 179 | seq_printf(m, "%lu ", clock->fmax[level]); | 
|  | 180 | } | 
|  | 181 | seq_printf(m, "\n"); | 
| Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 182 |  | 
| Stephen Boyd | 31c01e8 | 2012-04-13 15:22:00 -0700 | [diff] [blame] | 183 | return 0; | 
| Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 184 | } | 
|  | 185 |  | 
/* seq_file open hook for the "fmax_rates" debugfs node. */
static int fmax_rates_open(struct inode *inode, struct file *file)
{
	return single_open(file, fmax_rates_show, inode->i_private);
}

static const struct file_operations fmax_rates_fops = {
	.open		= fmax_rates_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
| Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 197 |  | 
|  | 198 | static int list_rates_show(struct seq_file *m, void *unused) | 
|  | 199 | { | 
|  | 200 | struct clk *clock = m->private; | 
| Matt Wagantall | e18bbc8 | 2011-10-06 10:07:28 -0700 | [diff] [blame] | 201 | int rate, level, fmax = 0, i = 0; | 
| Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 202 |  | 
| Matt Wagantall | e18bbc8 | 2011-10-06 10:07:28 -0700 | [diff] [blame] | 203 | /* Find max frequency supported within voltage constraints. */ | 
|  | 204 | if (!clock->vdd_class) { | 
| Matt Wagantall | e426f904 | 2011-11-01 16:21:34 -0700 | [diff] [blame] | 205 | fmax = INT_MAX; | 
| Matt Wagantall | e18bbc8 | 2011-10-06 10:07:28 -0700 | [diff] [blame] | 206 | } else { | 
| Saravana Kannan | 55e959d | 2012-10-15 22:16:04 -0700 | [diff] [blame] | 207 | for (level = 0; level < clock->num_fmax; level++) | 
| Matt Wagantall | e18bbc8 | 2011-10-06 10:07:28 -0700 | [diff] [blame] | 208 | if (clock->fmax[level]) | 
|  | 209 | fmax = clock->fmax[level]; | 
|  | 210 | } | 
|  | 211 |  | 
|  | 212 | /* | 
|  | 213 | * List supported frequencies <= fmax. Higher frequencies may appear in | 
|  | 214 | * the frequency table, but are not valid and should not be listed. | 
|  | 215 | */ | 
|  | 216 | while ((rate = clock->ops->list_rate(clock, i++)) >= 0) { | 
|  | 217 | if (rate <= fmax) | 
|  | 218 | seq_printf(m, "%u\n", rate); | 
|  | 219 | } | 
| Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 220 |  | 
| Matt Wagantall | d64560fe | 2011-01-26 16:20:54 -0800 | [diff] [blame] | 221 | return 0; | 
| Matt Wagantall | d64560fe | 2011-01-26 16:20:54 -0800 | [diff] [blame] | 222 | } | 
|  | 223 |  | 
/* seq_file open hook for the "list_rates" debugfs node. */
static int list_rates_open(struct inode *inode, struct file *file)
{
	return single_open(file, list_rates_show, inode->i_private);
}

static const struct file_operations list_rates_fops = {
	.open		= list_rates_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
|  | 235 |  | 
| Vikram Mulukutla | a0073af | 2013-04-10 14:24:38 -0700 | [diff] [blame] | 236 | static ssize_t clock_parent_read(struct file *filp, char __user *ubuf, | 
|  | 237 | size_t cnt, loff_t *ppos) | 
| Saravana Kannan | 531051f | 2012-09-27 16:19:07 -0700 | [diff] [blame] | 238 | { | 
| Vikram Mulukutla | a0073af | 2013-04-10 14:24:38 -0700 | [diff] [blame] | 239 | struct clk *clock = filp->private_data; | 
|  | 240 | struct clk *p = clock->parent; | 
|  | 241 | char name[256] = {0}; | 
| Saravana Kannan | 531051f | 2012-09-27 16:19:07 -0700 | [diff] [blame] | 242 |  | 
| Vikram Mulukutla | a0073af | 2013-04-10 14:24:38 -0700 | [diff] [blame] | 243 | snprintf(name, sizeof(name), "%s\n", p ? p->dbg_name : "None\n"); | 
| Saravana Kannan | 531051f | 2012-09-27 16:19:07 -0700 | [diff] [blame] | 244 |  | 
| Vikram Mulukutla | a0073af | 2013-04-10 14:24:38 -0700 | [diff] [blame] | 245 | return simple_read_from_buffer(ubuf, cnt, ppos, name, strlen(name)); | 
| Saravana Kannan | 531051f | 2012-09-27 16:19:07 -0700 | [diff] [blame] | 246 | } | 
|  | 247 |  | 
| Vikram Mulukutla | a0073af | 2013-04-10 14:24:38 -0700 | [diff] [blame] | 248 |  | 
|  | 249 | static ssize_t clock_parent_write(struct file *filp, | 
|  | 250 | const char __user *ubuf, size_t cnt, loff_t *ppos) | 
| Saravana Kannan | 531051f | 2012-09-27 16:19:07 -0700 | [diff] [blame] | 251 | { | 
| Vikram Mulukutla | a0073af | 2013-04-10 14:24:38 -0700 | [diff] [blame] | 252 | struct clk *clock = filp->private_data; | 
|  | 253 | char buf[256]; | 
|  | 254 | char *cmp; | 
|  | 255 | unsigned long flags; | 
|  | 256 | struct clk_table *table; | 
|  | 257 | int i, ret; | 
|  | 258 | struct clk *parent = NULL; | 
|  | 259 |  | 
|  | 260 | cnt = min(cnt, sizeof(buf) - 1); | 
|  | 261 | if (copy_from_user(&buf, ubuf, cnt)) | 
|  | 262 | return -EFAULT; | 
|  | 263 | buf[cnt] = '\0'; | 
|  | 264 | cmp = strstrip(buf); | 
|  | 265 |  | 
|  | 266 | spin_lock_irqsave(&clk_list_lock, flags); | 
|  | 267 | list_for_each_entry(table, &clk_list, node) { | 
|  | 268 | for (i = 0; i < table->num_clocks; i++) | 
|  | 269 | if (!strcmp(cmp, table->clocks[i].clk->dbg_name)) { | 
|  | 270 | parent = table->clocks[i].clk; | 
|  | 271 | break; | 
|  | 272 | } | 
|  | 273 | if (parent) | 
|  | 274 | break; | 
|  | 275 | } | 
|  | 276 |  | 
|  | 277 | if (!parent) { | 
|  | 278 | ret = -EINVAL; | 
|  | 279 | goto err; | 
|  | 280 | } | 
|  | 281 |  | 
|  | 282 | spin_unlock_irqrestore(&clk_list_lock, flags); | 
|  | 283 | ret = clk_set_parent(clock, table->clocks[i].clk); | 
|  | 284 | if (ret) | 
|  | 285 | return ret; | 
|  | 286 |  | 
|  | 287 | return cnt; | 
|  | 288 | err: | 
|  | 289 | spin_unlock_irqrestore(&clk_list_lock, flags); | 
|  | 290 | return ret; | 
| Saravana Kannan | 531051f | 2012-09-27 16:19:07 -0700 | [diff] [blame] | 291 | } | 
|  | 292 |  | 
| Vikram Mulukutla | a0073af | 2013-04-10 14:24:38 -0700 | [diff] [blame] | 293 |  | 
/* File operations for the per-clock "parent" debugfs node. */
static const struct file_operations clock_parent_fops = {
	.open		= simple_open,
	.read		= clock_parent_read,
	.write		= clock_parent_write,
};
|  | 299 |  | 
/*
 * Create the per-clock debugfs directory (named after the lowercased
 * dbg_name) and populate it with rate/enable/is_local/has_hw_gating
 * nodes, plus measure, list_rates, fmax_rates and parent nodes where
 * applicable.
 *
 * Returns 0 on success, -ENOMEM on any debugfs failure (in which case
 * the partially-created directory is removed).
 */
static int clock_debug_add(struct clk *clock)
{
	char temp[50], *ptr;
	struct dentry *clk_dir;

	if (!debugfs_base)
		return -ENOMEM;

	/* debugfs directory names are the lowercased clock name. */
	strlcpy(temp, clock->dbg_name, ARRAY_SIZE(temp));
	for (ptr = temp; *ptr; ptr++)
		*ptr = tolower(*ptr);

	clk_dir = debugfs_create_dir(temp, debugfs_base);
	if (!clk_dir)
		return -ENOMEM;

	if (!debugfs_create_file("rate", S_IRUGO | S_IWUSR, clk_dir,
				clock, &clock_rate_fops))
		goto error;

	if (!debugfs_create_file("enable", S_IRUGO | S_IWUSR, clk_dir,
				clock, &clock_enable_fops))
		goto error;

	if (!debugfs_create_file("is_local", S_IRUGO, clk_dir, clock,
				&clock_local_fops))
		goto error;

	if (!debugfs_create_file("has_hw_gating", S_IRUGO, clk_dir, clock,
				&clock_hwcg_fops))
		goto error;

	/* Only expose "measure" if this clock can be muxed for measurement. */
	if (measure &&
	    !clk_set_parent(measure, clock) &&
	    !debugfs_create_file("measure", S_IRUGO, clk_dir, clock,
				&clock_measure_fops))
		goto error;

	if (clock->ops->list_rate)
		if (!debugfs_create_file("list_rates",
				S_IRUGO, clk_dir, clock, &list_rates_fops))
			goto error;

	if (clock->vdd_class && !debugfs_create_file("fmax_rates",
			S_IRUGO, clk_dir, clock, &fmax_rates_fops))
		goto error;

	if (!debugfs_create_file("parent", S_IRUGO, clk_dir, clock,
			&clock_parent_fops))
		goto error;

	return 0;
error:
	debugfs_remove_recursive(clk_dir);
	return -ENOMEM;
}
/* Serializes one-time debugfs initialization and table registration. */
static DEFINE_MUTEX(clk_debug_lock);
static int clk_debug_init_once;

/**
 * clock_debug_init() - Initialize clock debugfs
 * Lock clk_debug_lock before invoking this function.
 */
static int clock_debug_init(void)
{
	/* Idempotent: later callers see the already-initialized state. */
	if (clk_debug_init_once)
		return 0;

	clk_debug_init_once = 1;

	debugfs_base = debugfs_create_dir("clk", NULL);
	if (!debugfs_base)
		return -ENOMEM;

	if (!debugfs_create_u32("debug_suspend", S_IRUGO | S_IWUSR,
				debugfs_base, &debug_suspend)) {
		debugfs_remove_recursive(debugfs_base);
		return -ENOMEM;
	}

	/* The "measure" clock is optional; absence just disables measurement. */
	measure = clk_get_sys("debug", "measure");
	if (IS_ERR(measure))
		measure = NULL;

	return 0;
}
|  | 386 |  | 
/**
 * clock_debug_register() - Add additional clocks to clock debugfs hierarchy
 * @table: Table of clocks to create debugfs nodes for
 * @size: Size of @table
 *
 */
int clock_debug_register(struct clk_lookup *table, size_t size)
{
	struct clk_table *clk_table;
	unsigned long flags;
	int i, ret;

	/* clk_debug_lock serializes init and registration; the spinlock
	 * below only guards readers/writers of clk_list itself. */
	mutex_lock(&clk_debug_lock);

	ret = clock_debug_init();
	if (ret)
		goto out;

	clk_table = kmalloc(sizeof(*clk_table), GFP_KERNEL);
	if (!clk_table) {
		ret = -ENOMEM;
		goto out;
	}

	/* The lookup array is caller-owned; only the wrapper is allocated. */
	clk_table->clocks = table;
	clk_table->num_clocks = size;

	spin_lock_irqsave(&clk_list_lock, flags);
	list_add_tail(&clk_table->node, &clk_list);
	spin_unlock_irqrestore(&clk_list_lock, flags);

	/* Best effort: a failure to add one clock's nodes is not fatal. */
	for (i = 0; i < size; i++)
		clock_debug_add(table[i].clk);

out:
	mutex_unlock(&clk_debug_lock);
	return ret;
}
|  | 425 |  | 
/*
 * Print one prepared clock and its parent chain on a single line, e.g.
 * "\tchild:prep:count [rate] -> parent:prep:count [rate, vdd_level]".
 *
 * Returns 1 if the clock was printed, 0 if it was NULL or not prepared
 * (so callers can count enabled clocks).
 */
static int clock_debug_print_clock(struct clk *c)
{
	char *start = "";

	if (!c || !c->prepare_count)
		return 0;

	/* pr_info starts the line; pr_cont continues it for each parent. */
	pr_info("\t");
	do {
		if (c->vdd_class)
			pr_cont("%s%s:%u:%u [%ld, %lu]", start, c->dbg_name,
				c->prepare_count, c->count, c->rate,
				c->vdd_class->cur_level);
		else
			pr_cont("%s%s:%u:%u [%ld]", start, c->dbg_name,
				c->prepare_count, c->count, c->rate);
		/* Only entries after the first get the arrow separator. */
		start = " -> ";
	} while ((c = clk_get_parent(c)));

	pr_cont("\n");

	return 1;
}
|  | 449 |  | 
|  | 450 | /** | 
|  | 451 | * clock_debug_print_enabled() - Print names of enabled clocks for suspend debug | 
|  | 452 | * | 
|  | 453 | * Print the names of enabled clocks and their parents if debug_suspend is set | 
|  | 454 | */ | 
|  | 455 | void clock_debug_print_enabled(void) | 
|  | 456 | { | 
|  | 457 | struct clk_table *table; | 
|  | 458 | unsigned long flags; | 
|  | 459 | int i, cnt = 0; | 
|  | 460 |  | 
|  | 461 | if (likely(!debug_suspend)) | 
|  | 462 | return; | 
|  | 463 |  | 
|  | 464 | pr_info("Enabled clocks:\n"); | 
|  | 465 | spin_lock_irqsave(&clk_list_lock, flags); | 
|  | 466 | list_for_each_entry(table, &clk_list, node) { | 
|  | 467 | for (i = 0; i < table->num_clocks; i++) | 
|  | 468 | cnt += clock_debug_print_clock(table->clocks[i].clk); | 
|  | 469 | } | 
|  | 470 | spin_unlock_irqrestore(&clk_list_lock, flags); | 
|  | 471 |  | 
|  | 472 | if (cnt) | 
|  | 473 | pr_info("Enabled clock count: %d\n", cnt); | 
|  | 474 | else | 
|  | 475 | pr_info("No clocks enabled.\n"); | 
|  | 476 |  | 
|  | 477 | } |