/*
 * Helper routines for SuperH Clock Pulse Generator blocks (CPG).
 *
 * Copyright (C) 2010 Magnus Damm
 * Copyright (C) 2010 - 2012 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/sh_clk.h>

#define CPG_CKSTP_BIT	BIT(8)

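/*
 * Register access helpers: CPG enable registers come in 8-, 16- and
 * 32-bit flavours depending on the block, so pick the access width
 * from the CLK_ENABLE_REG_*BIT flags, defaulting to 32-bit.
 */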
static unsigned int sh_clk_read(struct clk *clk)
{
	if (clk->flags & CLK_ENABLE_REG_8BIT)
		return ioread8(clk->mapped_reg);
	else if (clk->flags & CLK_ENABLE_REG_16BIT)
		return ioread16(clk->mapped_reg);

	return ioread32(clk->mapped_reg);
}

static void sh_clk_write(int value, struct clk *clk)
{
	if (clk->flags & CLK_ENABLE_REG_8BIT)
		iowrite8(value, clk->mapped_reg);
	else if (clk->flags & CLK_ENABLE_REG_16BIT)
		iowrite16(value, clk->mapped_reg);
	else
		iowrite32(value, clk->mapped_reg);
}

static unsigned int r8(const void __iomem *addr)
{
	return ioread8(addr);
}

static unsigned int r16(const void __iomem *addr)
{
	return ioread16(addr);
}

static unsigned int r32(const void __iomem *addr)
{
	return ioread32(addr);
}

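/*
 * Module stop (MSTP) bits gate the clock supply to individual modules:
 * a set bit holds the module clock stopped, so enabling a clock means
 * clearing its bit. When a status register is provided, poll it until
 * the hardware reports the module clock as running.
 */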
static int sh_clk_mstp_enable(struct clk *clk)
{
	sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);
	if (clk->status_reg) {
		unsigned int (*read)(const void __iomem *addr);
		int i;
		void __iomem *mapped_status = (phys_addr_t)clk->status_reg -
			(phys_addr_t)clk->enable_reg + clk->mapped_reg;

		if (clk->flags & CLK_ENABLE_REG_8BIT)
			read = r8;
		else if (clk->flags & CLK_ENABLE_REG_16BIT)
			read = r16;
		else
			read = r32;

		for (i = 1000;
		     (read(mapped_status) & (1 << clk->enable_bit)) && i;
		     i--)
			cpu_relax();
		if (!i) {
			pr_err("cpg: failed to enable %p[%d]\n",
			       clk->enable_reg, clk->enable_bit);
			return -ETIMEDOUT;
		}
	}
	return 0;
}

static void sh_clk_mstp_disable(struct clk *clk)
{
	sh_clk_write(sh_clk_read(clk) | (1 << clk->enable_bit), clk);
}

static struct sh_clk_ops sh_clk_mstp_clk_ops = {
	.enable		= sh_clk_mstp_enable,
	.disable	= sh_clk_mstp_disable,
	.recalc		= followparent_recalc,
};

int __init sh_clk_mstp_register(struct clk *clks, int nr)
{
	struct clk *clkp;
	int ret = 0;
	int k;

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;
		clkp->ops = &sh_clk_mstp_clk_ops;
		ret |= clk_register(clkp);
	}

	return ret;
}
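
/*
 * Minimal usage sketch (hypothetical board code; the parent clock and
 * the register/bit values below are illustrative, not taken from any
 * particular SoC):
 *
 *	static struct clk mstp_clks[] = {
 *		SH_CLK_MSTP32(&peripheral_clk, MSTPCR0, 15, 0),
 *	};
 *
 *	sh_clk_mstp_register(mstp_clks, ARRAY_SIZE(mstp_clks));
 */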

/*
 * Div/mult table lookup helpers
 */
static inline struct clk_div_table *clk_to_div_table(struct clk *clk)
{
	return clk->priv;
}

static inline struct clk_div_mult_table *clk_to_div_mult_table(struct clk *clk)
{
	return clk_to_div_table(clk)->div_mult_table;
}

/*
 * Common div ops
 */
static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_table_round(clk, clk->freq_table, rate);
}

static unsigned long sh_clk_div_recalc(struct clk *clk)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	unsigned int idx;

	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, clk->arch_flags ? &clk->arch_flags : NULL);

	idx = (sh_clk_read(clk) >> clk->enable_bit) & clk->div_mask;

	return clk->freq_table[idx].frequency;
}

static int sh_clk_div_set_rate(struct clk *clk, unsigned long rate)
{
	struct clk_div_table *dt = clk_to_div_table(clk);
	unsigned long value;
	int idx;

	idx = clk_rate_table_find(clk, clk->freq_table, rate);
	if (idx < 0)
		return idx;

	value = sh_clk_read(clk);
	value &= ~(clk->div_mask << clk->enable_bit);
	value |= (idx << clk->enable_bit);
	sh_clk_write(value, clk);

	/* XXX: Should use a post-change notifier */
	if (dt->kick)
		dt->kick(clk);

	return 0;
}

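/*
 * For div6 clocks the divisor field must hold a valid ratio before the
 * clock is ungated: sh_clk_div_disable() may have forced the field to
 * all ones (CLK_MASK_DIV_ON_DISABLE), so reprogram the cached rate
 * before clearing the CKSTP bit.
 */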
static int sh_clk_div_enable(struct clk *clk)
{
	if (clk->div_mask == SH_CLK_DIV6_MSK) {
		int ret = sh_clk_div_set_rate(clk, clk->rate);
		if (ret < 0)
			return ret;
	}

	sh_clk_write(sh_clk_read(clk) & ~CPG_CKSTP_BIT, clk);
	return 0;
}

static void sh_clk_div_disable(struct clk *clk)
{
	unsigned int val;

	val = sh_clk_read(clk);
	val |= CPG_CKSTP_BIT;

	/*
	 * div6 clocks require the divisor field to be non-zero or the
	 * above CKSTP toggle silently fails. Ensure that the divisor
	 * array is reset to its initial state on disable.
	 */
	if (clk->flags & CLK_MASK_DIV_ON_DISABLE)
		val |= clk->div_mask;

	sh_clk_write(val, clk);
}

static struct sh_clk_ops sh_clk_div_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
};

static struct sh_clk_ops sh_clk_div_enable_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
};

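/*
 * Deduce the parent the boot loader left a div clock on: read the
 * source-select field (src_shift/src_width) and look the value up in
 * the parent table. Clocks that already have a parent, or that have no
 * parent table, pass through untouched.
 */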
static int __init sh_clk_init_parent(struct clk *clk)
{
	u32 val;

	if (clk->parent)
		return 0;

	if (!clk->parent_table || !clk->parent_num)
		return 0;

	if (!clk->src_width) {
		pr_err("sh_clk_init_parent: cannot select parent clock\n");
		return -EINVAL;
	}

	val = (sh_clk_read(clk) >> clk->src_shift);
	val &= (1 << clk->src_width) - 1;

	if (val >= clk->parent_num) {
		pr_err("sh_clk_init_parent: parent table index out of range\n");
		return -EINVAL;
	}

	clk_reparent(clk, clk->parent_table[val]);
	if (!clk->parent) {
		pr_err("sh_clk_init_parent: unable to set parent\n");
		return -EINVAL;
	}

	return 0;
}

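/*
 * Register a block of div clocks against a shared div table. A single
 * allocation backs all per-clock frequency tables; each clock gets
 * nr_divs + 1 entries, with the last slot used as the
 * CPUFREQ_TABLE_END terminator.
 */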
static int __init sh_clk_div_register_ops(struct clk *clks, int nr,
			struct clk_div_table *table, struct sh_clk_ops *ops)
{
	struct clk *clkp;
	void *freq_table;
	int nr_divs = table->div_mult_table->nr_divisors;
	int freq_table_size = sizeof(struct cpufreq_frequency_table);
	int ret = 0;
	int k;

	freq_table_size *= (nr_divs + 1);
	freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
	if (!freq_table) {
		pr_err("%s: unable to alloc memory\n", __func__);
		return -ENOMEM;
	}

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;

		clkp->ops = ops;
		clkp->priv = table;

		clkp->freq_table = freq_table + (k * freq_table_size);
		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;

		ret = clk_register(clkp);
		if (ret == 0)
			ret = sh_clk_init_parent(clkp);
	}

	return ret;
}

/*
 * div6 support
 */
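/* The 6-bit divide-ratio field maps value N to divisor N + 1: */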
static int sh_clk_div6_divisors[64] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
	17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
	33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
	49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
};

static struct clk_div_mult_table div6_div_mult_table = {
	.divisors = sh_clk_div6_divisors,
	.nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
};

static struct clk_div_table sh_clk_div6_table = {
	.div_mult_table	= &div6_div_mult_table,
};

static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	u32 value;
	int ret, i;

	if (!clk->parent_table || !clk->parent_num)
		return -EINVAL;

	/* Search the parent */
	for (i = 0; i < clk->parent_num; i++)
		if (clk->parent_table[i] == parent)
			break;

	if (i == clk->parent_num)
		return -ENODEV;

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	value = sh_clk_read(clk) &
		~(((1 << clk->src_width) - 1) << clk->src_shift);

	sh_clk_write(value | (i << clk->src_shift), clk);

	/* Rebuild the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, NULL);

	return 0;
}

static struct sh_clk_ops sh_clk_div6_reparent_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.round_rate	= sh_clk_div_round_rate,
	.set_rate	= sh_clk_div_set_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
	.set_parent	= sh_clk_div6_set_parent,
};

int __init sh_clk_div6_register(struct clk *clks, int nr)
{
	return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
				       &sh_clk_div_enable_clk_ops);
}

int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
{
	return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
				       &sh_clk_div6_reparent_clk_ops);
}
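
/*
 * Minimal usage sketch (hypothetical board code; SH_CLK_DIV6() takes the
 * parent, the div6 control register and flags - the names below are
 * illustrative):
 *
 *	static struct clk div6_clks[] = {
 *		SH_CLK_DIV6(&pll_clk, VCLKCR, 0),
 *	};
 *
 *	sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks));
 */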

/*
 * div4 support
 */
static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	u32 value;
	int ret;

	/* we really need a better way to determine parent index, but for
	 * now assume internal parent comes with CLK_ENABLE_ON_INIT set,
	 * no CLK_ENABLE_ON_INIT means external clock...
	 */

	if (parent->flags & CLK_ENABLE_ON_INIT)
		value = sh_clk_read(clk) & ~(1 << 7);
	else
		value = sh_clk_read(clk) | (1 << 7);

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	sh_clk_write(value, clk);

	/* Rebuild the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, &clk->arch_flags);

	return 0;
}

static struct sh_clk_ops sh_clk_div4_reparent_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
	.set_parent	= sh_clk_div4_set_parent,
};

int __init sh_clk_div4_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table, &sh_clk_div_clk_ops);
}

int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
				       struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table,
				       &sh_clk_div_enable_clk_ops);
}

int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
					 struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table,
				       &sh_clk_div4_reparent_clk_ops);
}

/* FSI-DIV */
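/*
 * The FSI-DIV register carries the divisor in its upper 16 bits and the
 * enable bits in the low word; a divisor below 2 passes the parent rate
 * through undivided.
 */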
static unsigned long fsidiv_recalc(struct clk *clk)
{
	u32 value;

	value = __raw_readl(clk->mapping->base);

	value >>= 16;
	if (value < 2)
		return clk->parent->rate;

	return clk->parent->rate / value;
}

static long fsidiv_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_div_range_round(clk, 1, 0xffff, rate);
}

static void fsidiv_disable(struct clk *clk)
{
	__raw_writel(0, clk->mapping->base);
}

static int fsidiv_enable(struct clk *clk)
{
	u32 value;

	value = __raw_readl(clk->mapping->base) >> 16;
	if (value < 2)
		return 0;

	__raw_writel((value << 16) | 0x3, clk->mapping->base);

	return 0;
}

static int fsidiv_set_rate(struct clk *clk, unsigned long rate)
{
	int idx;

	idx = (clk->parent->rate / rate) & 0xffff;
	if (idx < 2)
		__raw_writel(0, clk->mapping->base);
	else
		__raw_writel(idx << 16, clk->mapping->base);

	return 0;
}

static struct sh_clk_ops fsidiv_clk_ops = {
	.recalc		= fsidiv_recalc,
	.round_rate	= fsidiv_round_rate,
	.set_rate	= fsidiv_set_rate,
	.enable		= fsidiv_enable,
	.disable	= fsidiv_disable,
};

int __init sh_clk_fsidiv_register(struct clk *clks, int nr)
{
	struct clk_mapping *map;
	int i;

	for (i = 0; i < nr; i++) {

		map = kzalloc(sizeof(struct clk_mapping), GFP_KERNEL);
		if (!map) {
			pr_err("%s: unable to alloc memory\n", __func__);
			return -ENOMEM;
		}

		/* clks[i].enable_reg came from SH_CLK_FSIDIV() */
		map->phys = (phys_addr_t)clks[i].enable_reg;
		map->len = 8;

		clks[i].enable_reg = 0;	/* remove .enable_reg */
		clks[i].ops = &fsidiv_clk_ops;
		clks[i].mapping = map;

		clk_register(&clks[i]);
	}

	return 0;
}
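
/*
 * Minimal usage sketch (hypothetical board code; SH_CLK_FSIDIV() stashes
 * the physical register address in .enable_reg, which the loop above
 * turns into a mapping - the register and clock names are illustrative):
 *
 *	static struct clk fsidiv_clks[] = {
 *		SH_CLK_FSIDIV(FSIDIVA, &fsia_clk),
 *	};
 *
 *	sh_clk_fsidiv_register(fsidiv_clks, ARRAY_SIZE(fsidiv_clks));
 */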