/* Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#ifndef __ARCH_ARM_MACH_MSM_CLOCK_LOCAL_H
#define __ARCH_ARM_MACH_MSM_CLOCK_LOCAL_H

#include <linux/spinlock.h>
#include "clock.h"

/*
 * Bit manipulation macros
 */
#define BM(msb, lsb) (((((uint32_t)-1) << (31-(msb))) >> (31-(msb)+(lsb))) << (lsb))
#define BVAL(msb, lsb, val) (((val) << (lsb)) & BM(msb, lsb))

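/*
 * Worked example (illustrative, not part of the original header): BM()
 * builds a contiguous bit mask from msb down to lsb, and BVAL() shifts a
 * value into that field, truncating any bits that fall outside it:
 *
 *	BM(7, 4)          == 0x000000F0
 *	BVAL(7, 4, 0x5)   == 0x00000050
 *	BVAL(7, 4, 0x1FF) == 0x000000F0
 */
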
#define MN_MODE_DUAL_EDGE 0x2

/* MD Registers */
#define MD4(m_lsb, m, n_lsb, n) \
		(BVAL((m_lsb+3), m_lsb, m) | BVAL((n_lsb+3), n_lsb, ~(n)))
#define MD8(m_lsb, m, n_lsb, n) \
		(BVAL((m_lsb+7), m_lsb, m) | BVAL((n_lsb+7), n_lsb, ~(n)))
#define MD16(m, n) (BVAL(31, 16, m) | BVAL(15, 0, ~(n)))

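/*
 * Worked example (illustrative): the MD register stores M and the bitwise
 * complement of N. For an M/N counter with M = 1, N = 4 in the 16-bit
 * layout, M lands in the upper half-word and ~N in the lower:
 *
 *	MD16(1, 4) == 0x0001FFFB
 */
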
/* NS Registers */
#define NS(n_msb, n_lsb, n, m, mde_lsb, d_msb, d_lsb, d, s_msb, s_lsb, s) \
		(BVAL(n_msb, n_lsb, ~(n-m) * !!(n)) \
		| (BVAL((mde_lsb+1), mde_lsb, MN_MODE_DUAL_EDGE) * !!(n)) \
		| BVAL(d_msb, d_lsb, (d-1)) | BVAL(s_msb, s_lsb, s))

#define NS_MM(n_msb, n_lsb, n, m, d_msb, d_lsb, d, s_msb, s_lsb, s) \
		(BVAL(n_msb, n_lsb, ~(n-m) * !!(n)) | BVAL(d_msb, d_lsb, (d-1)) \
		| BVAL(s_msb, s_lsb, s))

#define NS_DIVSRC(d_msb, d_lsb, d, s_msb, s_lsb, s) \
		(BVAL(d_msb, d_lsb, (d-1)) | BVAL(s_msb, s_lsb, s))

#define NS_DIV(d_msb, d_lsb, d) \
		BVAL(d_msb, d_lsb, (d-1))

#define NS_SRC_SEL(s_msb, s_lsb, s) \
		BVAL(s_msb, s_lsb, s)

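/*
 * Illustrative note (not in the original header): the "* !!(n)" factor
 * clears the N and dual-edge fields whenever N == 0, i.e. when the M/N
 * counter is bypassed. With an 8-bit N field at bits 23:16 and M = 1:
 *
 *	BVAL(23, 16, ~(4-1) * !!(4)) == 0x00FC0000	// N = 4: stores ~(N-M)
 *	BVAL(23, 16, ~(0-1) * !!(0)) == 0x00000000	// N = 0: field cleared
 */
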
#define NS_MND_BANKED4(n0_lsb, n1_lsb, n, m, s0_lsb, s1_lsb, s) \
		(BVAL((n0_lsb+3), n0_lsb, ~(n-m) * !!(n)) \
		| BVAL((n1_lsb+3), n1_lsb, ~(n-m) * !!(n)) \
		| BVAL((s0_lsb+2), s0_lsb, s) \
		| BVAL((s1_lsb+2), s1_lsb, s))

#define NS_MND_BANKED8(n0_lsb, n1_lsb, n, m, s0_lsb, s1_lsb, s) \
		(BVAL((n0_lsb+7), n0_lsb, ~(n-m) * !!(n)) \
		| BVAL((n1_lsb+7), n1_lsb, ~(n-m) * !!(n)) \
		| BVAL((s0_lsb+2), s0_lsb, s) \
		| BVAL((s1_lsb+2), s1_lsb, s))

#define NS_DIVSRC_BANKED(d0_msb, d0_lsb, d1_msb, d1_lsb, d, \
	s0_msb, s0_lsb, s1_msb, s1_lsb, s) \
		(BVAL(d0_msb, d0_lsb, (d-1)) | BVAL(d1_msb, d1_lsb, (d-1)) \
		| BVAL(s0_msb, s0_lsb, s) \
		| BVAL(s1_msb, s1_lsb, s))

/* CC Registers */
#define CC(mde_lsb, n) (BVAL((mde_lsb+1), mde_lsb, MN_MODE_DUAL_EDGE) * !!(n))
#define CC_BANKED(mde0_lsb, mde1_lsb, n) \
		((BVAL((mde0_lsb+1), mde0_lsb, MN_MODE_DUAL_EDGE) \
		| BVAL((mde1_lsb+1), mde1_lsb, MN_MODE_DUAL_EDGE)) \
		* !!(n))

/*
 * Halt/Status Checking Mode Macros
 */
#define HALT		0	/* Bit polarity: 1 = halted */
#define NOCHECK		1	/* No bit to check, do nothing */
#define HALT_VOTED	2	/* Bit polarity: 1 = halted; delay on disable */
#define ENABLE		3	/* Bit polarity: 1 = running */
#define ENABLE_VOTED	4	/* Bit polarity: 1 = running; delay on disable */
#define DELAY		5	/* No bit to check, just delay */

/*
 * Clock Definition Macros
 */
#define DEFINE_CLK_MEASURE(name) \
	struct clk name = { \
		.ops = &clk_ops_measure, \
		.dbg_name = #name, \
		CLK_INIT(name), \
	};

/*
 * Generic frequency-definition structs and macros
 */
struct clk_freq_tbl {
	const uint32_t freq_hz;
	struct clk *src_clk;
	const uint32_t md_val;
	const uint32_t ns_val;
	const uint32_t ctl_val;
	uint32_t mnd_en_mask;
	const unsigned sys_vdd;
	void *const extra_freq_data;
};

/* Some clocks have two banks to avoid glitches when switching frequencies.
 * The unused bank is programmed while running on the other bank, and
 * switched to afterwards. The following two structs describe the banks. */
struct bank_mask_info {
	void *const md_reg;
	const uint32_t ns_mask;
	const uint32_t rst_mask;
	const uint32_t mnd_en_mask;
	const uint32_t mode_mask;
};

struct bank_masks {
	const uint32_t bank_sel_mask;
	const struct bank_mask_info bank0_mask;
	const struct bank_mask_info bank1_mask;
};

#define F_RAW(f, sc, m_v, n_v, c_v, m_m, e) { \
	.freq_hz = f, \
	.src_clk = sc, \
	.md_val = m_v, \
	.ns_val = n_v, \
	.ctl_val = c_v, \
	.mnd_en_mask = m_m, \
	.extra_freq_data = e, \
	}
#define FREQ_END	(UINT_MAX-1)
#define F_END { .freq_hz = FREQ_END }

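/*
 * Usage sketch (hypothetical values, not from this file): rate tables are
 * arrays of clk_freq_tbl entries terminated by F_END. For a simple clock
 * that only selects a source (here an assumed fixed crystal "pxo_clk"):
 *
 *	static struct clk_freq_tbl ftbl_example[] = {
 *		F_RAW(19200000, &pxo_clk.c, 0, NS_SRC_SEL(2, 0, 0), 0, 0, NULL),
 *		F_END,
 *	};
 */
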
/**
 * struct branch - branch on/off
 * @ctl_reg: clock control register
 * @en_mask: ORed with @ctl_reg to enable the clock
 * @hwcg_reg: hardware clock gating register
 * @hwcg_mask: ORed with @hwcg_reg to enable hardware clock gating
 * @halt_reg: halt register
 * @halt_check: type of halt check to perform
 * @halt_bit: ANDed with @halt_reg to test for clock halted
 * @reset_reg: reset register
 * @reset_mask: ORed with @reset_reg to reset the clock domain
 * @retain_reg: memory retention register
 * @retain_mask: ORed with @retain_reg to enable memory retention
 */
struct branch {
	void __iomem *const ctl_reg;
	const u32 en_mask;

	void __iomem *hwcg_reg;
	u32 hwcg_mask;

	void __iomem *const halt_reg;
	const u16 halt_check;
	const u16 halt_bit;

	void __iomem *const reset_reg;
	const u32 reset_mask;

	void __iomem *const retain_reg;
	const u32 retain_mask;
};

int branch_reset(struct branch *b, enum clk_reset_action action);
void __branch_clk_enable_reg(const struct branch *clk, const char *name);
u32 __branch_clk_disable_reg(const struct branch *clk, const char *name);
int branch_clk_handoff(struct clk *c);
int branch_clk_set_flags(struct clk *clk, unsigned flags);

/*
 * Generic clock-definition struct and macros
 */
struct rcg_clk {
	bool enabled;
	void *const ns_reg;
	void *const md_reg;

	const uint32_t root_en_mask;
	uint32_t ns_mask;
	const uint32_t ctl_mask;

	void *bank_info;
	void (*set_rate)(struct rcg_clk *, struct clk_freq_tbl *);

	struct clk_freq_tbl *freq_tbl;
	struct clk_freq_tbl *current_freq;

	struct branch b;
	struct clk c;
};

static inline struct rcg_clk *to_rcg_clk(struct clk *clk)
{
	return container_of(clk, struct rcg_clk, c);
}

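/*
 * Illustrative note (not in the original): to_rcg_clk() recovers the
 * wrapper from the embedded struct clk, so generic clk_ops callbacks can
 * reach the register data. A sketch of how such a callback might use it:
 *
 *	int example_is_enabled(struct clk *clk)
 *	{
 *		return to_rcg_clk(clk)->enabled;
 *	}
 */
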
extern struct clk_freq_tbl rcg_dummy_freq;

int rcg_clk_enable(struct clk *clk);
void rcg_clk_disable(struct clk *clk);
int rcg_clk_set_rate(struct clk *clk, unsigned long rate);
unsigned long rcg_clk_get_rate(struct clk *clk);
int rcg_clk_list_rate(struct clk *clk, unsigned n);
int rcg_clk_is_enabled(struct clk *clk);
long rcg_clk_round_rate(struct clk *clk, unsigned long rate);
struct clk *rcg_clk_get_parent(struct clk *c);
int rcg_clk_handoff(struct clk *c);
int rcg_clk_reset(struct clk *clk, enum clk_reset_action action);
void rcg_clk_enable_hwcg(struct clk *clk);
void rcg_clk_disable_hwcg(struct clk *clk);
int rcg_clk_in_hwcg_mode(struct clk *c);
int rcg_clk_set_flags(struct clk *clk, unsigned flags);

/**
 * struct cdiv_clk - integer divider clock with external source selection
 * @ns_reg: source select and divider settings register
 * @ext_mask: bit to set to select an external source
 * @cur_div: current divider setting (or 0 for external source)
 * @max_div: maximum divider value supported (must be power of 2)
 * @div_offset: number of bits to shift divider left by in @ns_reg
 * @b: branch
 * @c: clock
 */
struct cdiv_clk {
	void __iomem *const ns_reg;
	u32 ext_mask;

	unsigned long cur_div;
	u8 div_offset;
	u32 max_div;

	struct branch b;
	struct clk c;
};

static inline struct cdiv_clk *to_cdiv_clk(struct clk *clk)
{
	return container_of(clk, struct cdiv_clk, c);
}

extern struct clk_ops clk_ops_cdiv;

/**
 * struct fixed_clk - fixed rate clock (used for crystal oscillators)
 * @rate: output rate
 * @c: clk
 */
struct fixed_clk {
	unsigned long rate;
	struct clk c;
};

static inline struct fixed_clk *to_fixed_clk(struct clk *clk)
{
	return container_of(clk, struct fixed_clk, c);
}

static inline unsigned long fixed_clk_get_rate(struct clk *clk)
{
	struct fixed_clk *f = to_fixed_clk(clk);
	return f->rate;
}

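/*
 * Definition sketch (hypothetical names and ops, not from this file): a
 * board file might declare a 19.2 MHz crystal as follows, where
 * "clk_ops_example_fixed" stands in for whatever ops the platform provides:
 *
 *	static struct fixed_clk pxo_clk = {
 *		.rate = 19200000,
 *		.c = {
 *			.dbg_name = "pxo_clk",
 *			.ops = &clk_ops_example_fixed,
 *			CLK_INIT(pxo_clk.c),
 *		},
 *	};
 */
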
/**
 * struct pll_vote_clk - phase locked loop (HW voteable)
 * @rate: output rate
 * @soft_vote: soft voting variable for multiple PLL software instances
 * @soft_vote_mask: soft voting mask for multiple PLL software instances
 * @en_reg: enable register
 * @en_mask: ORed with @en_reg to enable the clock
 * @status_reg: status register
 * @parent: clock source
 * @c: clk
 */
struct pll_vote_clk {
	unsigned long rate;

	u32 *soft_vote;
	const u32 soft_vote_mask;

	void __iomem *const en_reg;
	const u32 en_mask;

	void __iomem *const status_reg;

	struct clk *parent;
	struct clk c;
};

extern struct clk_ops clk_ops_pll_vote;

static inline struct pll_vote_clk *to_pll_vote_clk(struct clk *clk)
{
	return container_of(clk, struct pll_vote_clk, c);
}

/**
 * struct pll_clk - phase locked loop
 * @rate: output rate
 * @mode_reg: PLL mode and enable register
 * @parent: clock source
 * @c: clk
 */
struct pll_clk {
	unsigned long rate;

	void __iomem *const mode_reg;

	struct clk *parent;
	struct clk c;
};

extern struct clk_ops clk_ops_pll;

static inline struct pll_clk *to_pll_clk(struct clk *clk)
{
	return container_of(clk, struct pll_clk, c);
}

int sr_pll_clk_enable(struct clk *clk);

/**
 * struct branch_clk - branch
 * @enabled: true if clock is on, false otherwise
 * @b: branch
 * @parent: clock source
 * @c: clk
 *
 * An on/off switch with a rate derived from the parent.
 */
struct branch_clk {
	bool enabled;
	struct branch b;
	struct clk *parent;
	struct clk c;
};

static inline struct branch_clk *to_branch_clk(struct clk *clk)
{
	return container_of(clk, struct branch_clk, c);
}

int branch_clk_enable(struct clk *clk);
void branch_clk_disable(struct clk *clk);
struct clk *branch_clk_get_parent(struct clk *clk);
int branch_clk_set_parent(struct clk *clk, struct clk *parent);
int branch_clk_is_enabled(struct clk *clk);
int branch_clk_reset(struct clk *c, enum clk_reset_action action);
void branch_clk_enable_hwcg(struct clk *clk);
void branch_clk_disable_hwcg(struct clk *clk);
int branch_clk_in_hwcg_mode(struct clk *c);

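/*
 * Definition sketch (hypothetical register names, bit positions, and ops,
 * not from this file): a branch clock gated by bit 9 of an assumed NS
 * register, with a halt-status bit, might be declared as:
 *
 *	static struct branch_clk gsbi1_uart_clk = {
 *		.b = {
 *			.ctl_reg = GSBI1_UART_APPS_NS_REG,
 *			.en_mask = BIT(9),
 *			.halt_reg = CLK_HALT_CFPB_STATEA_REG,
 *			.halt_check = HALT,
 *			.halt_bit = 10,
 *		},
 *		.parent = &pxo_clk.c,
 *		.c = {
 *			.dbg_name = "gsbi1_uart_clk",
 *			.ops = &clk_ops_example_branch,
 *			CLK_INIT(gsbi1_uart_clk.c),
 *		},
 *	};
 */
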
/**
 * struct measure_clk - for rate measurement debug use
 * @sample_ticks: sample period in reference clock ticks
 * @multiplier: measurement scale-up factor
 * @divider: measurement scale-down factor
 * @c: clk
 */
struct measure_clk {
	u64 sample_ticks;
	u32 multiplier;
	u32 divider;
	struct clk c;
};

extern struct clk_ops clk_ops_measure;

static inline struct measure_clk *to_measure_clk(struct clk *clk)
{
	return container_of(clk, struct measure_clk, c);
}

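/*
 * Illustrative relation (an assumption based on the field names, not taken
 * from this file): a raw counter value captured over a window of
 * sample_ticks reference-clock cycles would be scaled roughly as
 *
 *	rate_hz ~= (raw_count * multiplier) / divider
 */
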
/*
 * Variables from clock-local driver
 */
extern spinlock_t local_clock_reg_lock;
extern struct fixed_clk gnd_clk;

/*
 * Local-clock APIs
 */
bool local_clk_is_local(struct clk *clk);

/*
 * PLL vote clock APIs
 */
int pll_vote_clk_enable(struct clk *clk);
void pll_vote_clk_disable(struct clk *clk);
unsigned long pll_vote_clk_get_rate(struct clk *clk);
struct clk *pll_vote_clk_get_parent(struct clk *clk);
int pll_vote_clk_is_enabled(struct clk *clk);

/*
 * Generic set-rate implementations
 */
void set_rate_mnd(struct rcg_clk *clk, struct clk_freq_tbl *nf);
void set_rate_nop(struct rcg_clk *clk, struct clk_freq_tbl *nf);
void set_rate_mnd_8(struct rcg_clk *clk, struct clk_freq_tbl *nf);
void set_rate_mnd_banked(struct rcg_clk *clk, struct clk_freq_tbl *nf);
void set_rate_div_banked(struct rcg_clk *clk, struct clk_freq_tbl *nf);

#endif /* __ARCH_ARM_MACH_MSM_CLOCK_LOCAL_H */