/*
 * Helper routines for SuperH Clock Pulse Generator blocks (CPG).
 *
 * Copyright (C) 2010 Magnus Damm
 * Copyright (C) 2010 - 2012 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/sh_clk.h>

static unsigned int sh_clk_read(struct clk *clk)
{
	if (clk->flags & CLK_ENABLE_REG_8BIT)
		return ioread8(clk->mapped_reg);
	else if (clk->flags & CLK_ENABLE_REG_16BIT)
		return ioread16(clk->mapped_reg);

	return ioread32(clk->mapped_reg);
}

static void sh_clk_write(int value, struct clk *clk)
{
	if (clk->flags & CLK_ENABLE_REG_8BIT)
		iowrite8(value, clk->mapped_reg);
	else if (clk->flags & CLK_ENABLE_REG_16BIT)
		iowrite16(value, clk->mapped_reg);
	else
		iowrite32(value, clk->mapped_reg);
}
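
/*
 * Illustrative sketch (the register address, bit and clock name below are
 * assumptions, not taken from any particular SoC): a clock whose enable
 * register is narrower than 32 bits is flagged so that the accessors above
 * pick the matching MMIO width:
 *
 *	static struct clk example_clk = {
 *		.enable_reg	= (void __iomem *)0xa415002c,
 *		.enable_bit	= 3,
 *		.flags		= CLK_ENABLE_REG_8BIT,
 *	};
 */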

static int sh_clk_mstp_enable(struct clk *clk)
{
	sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);
	return 0;
}

static void sh_clk_mstp_disable(struct clk *clk)
{
	sh_clk_write(sh_clk_read(clk) | (1 << clk->enable_bit), clk);
}

static struct sh_clk_ops sh_clk_mstp_clk_ops = {
	.enable		= sh_clk_mstp_enable,
	.disable	= sh_clk_mstp_disable,
	.recalc		= followparent_recalc,
};

int __init sh_clk_mstp_register(struct clk *clks, int nr)
{
	struct clk *clkp;
	int ret = 0;
	int k;

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;
		clkp->ops = &sh_clk_mstp_clk_ops;
		ret |= clk_register(clkp);
	}

	return ret;
}
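
/*
 * Usage sketch (illustrative only; the parent clock, register address and
 * bit number are assumptions, not taken from any particular SoC):
 *
 *	static struct clk mstp_clks[] = {
 *		[0] = {
 *			.parent		= &peripheral_clk,
 *			.enable_reg	= (void __iomem *)0xa4150030,
 *			.enable_bit	= 0,
 *		},
 *	};
 *
 *	sh_clk_mstp_register(mstp_clks, ARRAY_SIZE(mstp_clks));
 *
 * Each entry is given sh_clk_mstp_clk_ops, so clk_enable()/clk_disable()
 * clear/set the module stop bit and the rate simply follows the parent.
 */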

static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_table_round(clk, clk->freq_table, rate);
}

/*
 * Div/mult table lookup helpers
 */
static inline struct clk_div_table *clk_to_div_table(struct clk *clk)
{
	return clk->priv;
}

static inline struct clk_div_mult_table *clk_to_div_mult_table(struct clk *clk)
{
	return clk_to_div_table(clk)->div_mult_table;
}

/*
 * div6 support
 */
static int sh_clk_div6_divisors[64] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
	17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
	33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
	49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
};

static struct clk_div_mult_table div6_div_mult_table = {
	.divisors = sh_clk_div6_divisors,
	.nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
};

static struct clk_div_table sh_clk_div6_table = {
	.div_mult_table = &div6_div_mult_table,
};

static unsigned long sh_clk_div6_recalc(struct clk *clk)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	unsigned int idx;

	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, NULL);

	idx = sh_clk_read(clk) & 0x003f;

	return clk->freq_table[idx].frequency;
}

static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	u32 value;
	int ret, i;

	if (!clk->parent_table || !clk->parent_num)
		return -EINVAL;

	/* Search the parent */
	for (i = 0; i < clk->parent_num; i++)
		if (clk->parent_table[i] == parent)
			break;

	if (i == clk->parent_num)
		return -ENODEV;

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	value = sh_clk_read(clk) &
		~(((1 << clk->src_width) - 1) << clk->src_shift);

	sh_clk_write(value | (i << clk->src_shift), clk);

	/* Rebuild the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, NULL);

	return 0;
}

static int sh_clk_div6_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned long value;
	int idx;

	idx = clk_rate_table_find(clk, clk->freq_table, rate);
	if (idx < 0)
		return idx;

	value = sh_clk_read(clk);
	value &= ~0x3f;
	value |= idx;
	sh_clk_write(value, clk);
	return 0;
}

static int sh_clk_div6_enable(struct clk *clk)
{
	unsigned long value;
	int ret;

	ret = sh_clk_div6_set_rate(clk, clk->rate);
	if (ret == 0) {
		value = sh_clk_read(clk);
		value &= ~0x100; /* clear stop bit to enable clock */
		sh_clk_write(value, clk);
	}
	return ret;
}

static void sh_clk_div6_disable(struct clk *clk)
{
	unsigned long value;

	value = sh_clk_read(clk);
	value |= 0x100; /* stop clock */
	value |= 0x3f; /* VDIV bits must be non-zero, overwrite divider */
	sh_clk_write(value, clk);
}

static struct sh_clk_ops sh_clk_div6_clk_ops = {
	.recalc		= sh_clk_div6_recalc,
	.round_rate	= sh_clk_div_round_rate,
	.set_rate	= sh_clk_div6_set_rate,
	.enable		= sh_clk_div6_enable,
	.disable	= sh_clk_div6_disable,
};

static struct sh_clk_ops sh_clk_div6_reparent_clk_ops = {
	.recalc		= sh_clk_div6_recalc,
	.round_rate	= sh_clk_div_round_rate,
	.set_rate	= sh_clk_div6_set_rate,
	.enable		= sh_clk_div6_enable,
	.disable	= sh_clk_div6_disable,
	.set_parent	= sh_clk_div6_set_parent,
};
static int __init sh_clk_init_parent(struct clk *clk)
{
	u32 val;

	if (clk->parent)
		return 0;

	if (!clk->parent_table || !clk->parent_num)
		return 0;

	if (!clk->src_width) {
		pr_err("sh_clk_init_parent: cannot select parent clock\n");
		return -EINVAL;
	}

	val  = (sh_clk_read(clk) >> clk->src_shift);
	val &= (1 << clk->src_width) - 1;

	if (val >= clk->parent_num) {
		pr_err("sh_clk_init_parent: parent index out of range\n");
		return -EINVAL;
	}

	clk_reparent(clk, clk->parent_table[val]);
	if (!clk->parent) {
		pr_err("sh_clk_init_parent: unable to set parent\n");
		return -EINVAL;
	}

	return 0;
}

static int __init sh_clk_div6_register_ops(struct clk *clks, int nr,
					   struct sh_clk_ops *ops)
{
	struct clk *clkp;
	void *freq_table;
	struct clk_div_table *table = &sh_clk_div6_table;
	int nr_divs = table->div_mult_table->nr_divisors;
	int freq_table_size = sizeof(struct cpufreq_frequency_table);
	int ret = 0;
	int k;

	freq_table_size *= (nr_divs + 1);
	freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
	if (!freq_table) {
		pr_err("sh_clk_div6_register: unable to alloc memory\n");
		return -ENOMEM;
	}

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;

		clkp->ops = ops;
		clkp->priv = table;
		clkp->freq_table = freq_table + (k * freq_table_size);
		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
		ret = clk_register(clkp);
		if (ret < 0)
			break;

		ret = sh_clk_init_parent(clkp);
	}

	return ret;
}

int __init sh_clk_div6_register(struct clk *clks, int nr)
{
	return sh_clk_div6_register_ops(clks, nr, &sh_clk_div6_clk_ops);
}

int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
{
	return sh_clk_div6_register_ops(clks, nr,
					&sh_clk_div6_reparent_clk_ops);
}
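
/*
 * Usage sketch for a reparentable DIV6 clock (illustrative only; the
 * register address, parent clocks and source field layout are assumptions):
 *
 *	static struct clk *div6_parents[2] = { &pll_clk, &extal_clk };
 *
 *	static struct clk example_div6_clk = {
 *		.enable_reg	= (void __iomem *)0xa4150078,
 *		.parent_table	= div6_parents,
 *		.parent_num	= ARRAY_SIZE(div6_parents),
 *		.src_shift	= 6,
 *		.src_width	= 1,
 *	};
 *
 *	sh_clk_div6_reparent_register(&example_div6_clk, 1);
 *
 * Registration attaches sh_clk_div6_table, builds the 1..64 divisor
 * frequency table and reads the initial parent back from the source bits
 * via sh_clk_init_parent().
 */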

/*
 * div4 support
 */
static unsigned long sh_clk_div4_recalc(struct clk *clk)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	unsigned int idx;

	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, &clk->arch_flags);

	idx = (sh_clk_read(clk) >> clk->enable_bit) & 0x000f;

	return clk->freq_table[idx].frequency;
}

static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	u32 value;
	int ret;

	/* we really need a better way to determine parent index, but for
	 * now assume internal parent comes with CLK_ENABLE_ON_INIT set,
	 * no CLK_ENABLE_ON_INIT means external clock...
	 */

	if (parent->flags & CLK_ENABLE_ON_INIT)
		value = sh_clk_read(clk) & ~(1 << 7);
	else
		value = sh_clk_read(clk) | (1 << 7);

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	sh_clk_write(value, clk);

	/* Rebuild the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, &clk->arch_flags);

	return 0;
}

static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate)
{
	struct clk_div_table *dt = clk_to_div_table(clk);
	unsigned long value;
	int idx = clk_rate_table_find(clk, clk->freq_table, rate);
	if (idx < 0)
		return idx;

	value = sh_clk_read(clk);
	value &= ~(0xf << clk->enable_bit);
	value |= (idx << clk->enable_bit);
	sh_clk_write(value, clk);

	/* XXX: Should use a post-change notifier */
	if (dt->kick)
		dt->kick(clk);

	return 0;
}

static int sh_clk_div4_enable(struct clk *clk)
{
	sh_clk_write(sh_clk_read(clk) & ~(1 << 8), clk);
	return 0;
}

static void sh_clk_div4_disable(struct clk *clk)
{
	sh_clk_write(sh_clk_read(clk) | (1 << 8), clk);
}

static struct sh_clk_ops sh_clk_div4_clk_ops = {
	.recalc		= sh_clk_div4_recalc,
	.set_rate	= sh_clk_div4_set_rate,
	.round_rate	= sh_clk_div_round_rate,
};

static struct sh_clk_ops sh_clk_div4_enable_clk_ops = {
	.recalc		= sh_clk_div4_recalc,
	.set_rate	= sh_clk_div4_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div4_enable,
	.disable	= sh_clk_div4_disable,
};

static struct sh_clk_ops sh_clk_div4_reparent_clk_ops = {
	.recalc		= sh_clk_div4_recalc,
	.set_rate	= sh_clk_div4_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div4_enable,
	.disable	= sh_clk_div4_disable,
	.set_parent	= sh_clk_div4_set_parent,
};

static int __init sh_clk_div4_register_ops(struct clk *clks, int nr,
			struct clk_div4_table *table, struct sh_clk_ops *ops)
{
	struct clk *clkp;
	void *freq_table;
	int nr_divs = table->div_mult_table->nr_divisors;
	int freq_table_size = sizeof(struct cpufreq_frequency_table);
	int ret = 0;
	int k;

	freq_table_size *= (nr_divs + 1);
	freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
	if (!freq_table) {
		pr_err("sh_clk_div4_register: unable to alloc memory\n");
		return -ENOMEM;
	}

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;

		clkp->ops = ops;
		clkp->priv = table;

		clkp->freq_table = freq_table + (k * freq_table_size);
		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;

		ret = clk_register(clkp);
	}

	return ret;
}

int __init sh_clk_div4_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div4_register_ops(clks, nr, table, &sh_clk_div4_clk_ops);
}

int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
				       struct clk_div4_table *table)
{
	return sh_clk_div4_register_ops(clks, nr, table,
					&sh_clk_div4_enable_clk_ops);
}

int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
					 struct clk_div4_table *table)
{
	return sh_clk_div4_register_ops(clks, nr, table,
					&sh_clk_div4_reparent_clk_ops);
}
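
/*
 * Usage sketch for DIV4 clocks (illustrative only; the divisor set, register
 * address, field position and kick handler below are assumptions):
 *
 *	static int div4_divisors[] = { 2, 3, 4, 6, 8, 12, 16, 24, 32, 48 };
 *
 *	static struct clk_div_mult_table div4_div_mult_table = {
 *		.divisors	= div4_divisors,
 *		.nr_divisors	= ARRAY_SIZE(div4_divisors),
 *	};
 *
 *	static void div4_kick(struct clk *clk)
 *	{
 *		... trigger the SoC-specific frequency change here ...
 *	}
 *
 *	static struct clk_div4_table div4_table = {
 *		.div_mult_table	= &div4_div_mult_table,
 *		.kick		= div4_kick,
 *	};
 *
 *	static struct clk div4_clks[] = {
 *		[0] = { .enable_reg = (void __iomem *)0xa4150000,
 *			.enable_bit = 0, .flags = CLK_ENABLE_ON_INIT },
 *	};
 *
 *	sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks), &div4_table);
 *
 * Note that enable_bit doubles as the bit position of the 4-bit divisor
 * field, as used by sh_clk_div4_recalc() and sh_clk_div4_set_rate() above.
 */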