#ifndef __SH_CLOCK_H
#define __SH_CLOCK_H

#include <linux/list.h>
#include <linux/seq_file.h>
#include <linux/cpufreq.h>
#include <linux/types.h>
#include <linux/kref.h>
#include <linux/clk.h>
#include <linux/err.h>

struct clk;

struct clk_mapping {
	phys_addr_t phys;
	void __iomem *base;
	unsigned long len;
	struct kref ref;
};
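
/*
 * Illustrative sketch, not part of this header: an SoC clock file can
 * describe its clock-generator register window once and point individual
 * clocks at it through clk->mapping; .base and .ref are expected to be
 * filled in by the clock core at registration time.  The address and
 * length below are made up.
 *
 *	static struct clk_mapping cpg_mapping = {
 *		.phys	= 0xe6150000,
 *		.len	= 0x1000,
 *	};
 */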

struct sh_clk_ops {
#ifdef CONFIG_SH_CLK_CPG_LEGACY
	void (*init)(struct clk *clk);
#endif
	int (*enable)(struct clk *clk);
	void (*disable)(struct clk *clk);
	unsigned long (*recalc)(struct clk *clk);
	int (*set_rate)(struct clk *clk, unsigned long rate);
	int (*set_parent)(struct clk *clk, struct clk *parent);
	long (*round_rate)(struct clk *clk, unsigned long rate);
};
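
/*
 * Illustrative sketch with hypothetical helper names: a minimal sh_clk_ops
 * for a fixed divide-by-two clock.  Only the ops a clock actually needs have
 * to be provided; real implementations live in drivers/sh/clk/ and in the
 * SoC/board clock files.
 *
 *	static unsigned long example_div2_recalc(struct clk *clk)
 *	{
 *		return clk->parent->rate / 2;
 *	}
 *
 *	static struct sh_clk_ops example_div2_clk_ops = {
 *		.recalc	= example_div2_recalc,
 *	};
 */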

#define SH_CLK_DIV_MSK(div)	((1 << (div)) - 1)
#define SH_CLK_DIV4_MSK		SH_CLK_DIV_MSK(4)
#define SH_CLK_DIV6_MSK		SH_CLK_DIV_MSK(6)
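
/*
 * For reference: SH_CLK_DIV_MSK() turns a field width into a mask, so
 * SH_CLK_DIV4_MSK evaluates to 0xf and SH_CLK_DIV6_MSK to 0x3f.
 */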

struct clk {
	struct list_head node;
	struct clk *parent;
	struct clk **parent_table;	/* list of parents to */
	unsigned short parent_num;	/* choose between */
	unsigned char src_shift;	/* source clock field in the */
	unsigned char src_width;	/* configuration register */
	struct sh_clk_ops *ops;

	struct list_head children;
	struct list_head sibling;	/* node for children */

	int usecount;

	unsigned long rate;
	unsigned long flags;

	void __iomem *enable_reg;
	unsigned int enable_bit;
	void __iomem *mapped_reg;

	unsigned int div_mask;
	unsigned long arch_flags;
	void *priv;
	struct clk_mapping *mapping;
	struct cpufreq_frequency_table *freq_table;
	unsigned int nr_freqs;
};

#define CLK_ENABLE_ON_INIT	BIT(0)

#define CLK_ENABLE_REG_32BIT	BIT(1)	/* default access size */
#define CLK_ENABLE_REG_16BIT	BIT(2)
#define CLK_ENABLE_REG_8BIT	BIT(3)

#define CLK_MASK_DIV_ON_DISABLE	BIT(4)

#define CLK_ENABLE_REG_MASK	(CLK_ENABLE_REG_32BIT | \
				 CLK_ENABLE_REG_16BIT | \
				 CLK_ENABLE_REG_8BIT)
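
/*
 * Illustrative sketch with a hypothetical clock name: a fixed-rate root
 * clock as an SoC clock file might define it.  CLK_ENABLE_ON_INIT asks
 * clk_enable_init_clocks() to switch the clock on at boot; the access-size
 * flags are normally supplied by the SH_CLK_MSTP*() wrappers further down.
 *
 *	static struct clk extal_clk = {
 *		.rate	= 33333333,
 *		.flags	= CLK_ENABLE_ON_INIT,
 *	};
 */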

/* drivers/sh/clk.c */
unsigned long followparent_recalc(struct clk *);
void recalculate_root_clocks(void);
void propagate_rate(struct clk *);
int clk_reparent(struct clk *child, struct clk *parent);
int clk_register(struct clk *);
void clk_unregister(struct clk *);
void clk_enable_init_clocks(void);
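
/*
 * Illustrative sketch with hypothetical clock names: a typical registration
 * sequence in an SoC clock setup routine.  Root rates are recalculated and
 * CLK_ENABLE_ON_INIT clocks switched on once everything is registered.
 *
 *	static struct clk *main_clks[] = {
 *		&extal_clk,
 *		&div2_clk,
 *	};
 *
 *	int k, ret = 0;
 *
 *	for (k = 0; !ret && k < ARRAY_SIZE(main_clks); k++)
 *		ret = clk_register(main_clks[k]);
 *
 *	if (!ret) {
 *		recalculate_root_clocks();
 *		clk_enable_init_clocks();
 *	}
 */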

struct clk_div_mult_table {
	unsigned int *divisors;
	unsigned int nr_divisors;
	unsigned int *multipliers;
	unsigned int nr_multipliers;
};

struct cpufreq_frequency_table;
void clk_rate_table_build(struct clk *clk,
			  struct cpufreq_frequency_table *freq_table,
			  int nr_freqs,
			  struct clk_div_mult_table *src_table,
			  unsigned long *bitmap);

long clk_rate_table_round(struct clk *clk,
			  struct cpufreq_frequency_table *freq_table,
			  unsigned long rate);

int clk_rate_table_find(struct clk *clk,
			struct cpufreq_frequency_table *freq_table,
			unsigned long rate);

long clk_rate_div_range_round(struct clk *clk, unsigned int div_min,
			      unsigned int div_max, unsigned long rate);

long clk_rate_mult_range_round(struct clk *clk, unsigned int mult_min,
			       unsigned int mult_max, unsigned long rate);

long clk_round_parent(struct clk *clk, unsigned long target,
		      unsigned long *best_freq, unsigned long *parent_freq,
		      unsigned int div_min, unsigned int div_max);
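
/*
 * Illustrative sketch with a hypothetical function name: a round_rate op for
 * a clock with a simple 1..64 divider can defer to
 * clk_rate_div_range_round(), roughly mirroring what the div6 support code
 * does.
 *
 *	static long example_div_round_rate(struct clk *clk, unsigned long rate)
 *	{
 *		return clk_rate_div_range_round(clk, 1, 64, rate);
 *	}
 */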

#define SH_CLK_MSTP(_parent, _enable_reg, _enable_bit, _flags) \
{ \
	.parent = _parent, \
	.enable_reg = (void __iomem *)_enable_reg, \
	.enable_bit = _enable_bit, \
	.flags = _flags, \
}

#define SH_CLK_MSTP32(_p, _r, _b, _f) \
	SH_CLK_MSTP(_p, _r, _b, _f | CLK_ENABLE_REG_32BIT)

#define SH_CLK_MSTP16(_p, _r, _b, _f) \
	SH_CLK_MSTP(_p, _r, _b, _f | CLK_ENABLE_REG_16BIT)

#define SH_CLK_MSTP8(_p, _r, _b, _f) \
	SH_CLK_MSTP(_p, _r, _b, _f | CLK_ENABLE_REG_8BIT)

int sh_clk_mstp_register(struct clk *clks, int nr);

/*
 * MSTP registration never really cared about access size, despite the
 * original enable/disable pairs assuming a 32-bit access. Clocks are
 * responsible for defining their access sizes either directly or via the
 * clock definition wrappers.
 */
static inline int __deprecated sh_clk_mstp32_register(struct clk *clks, int nr)
{
	return sh_clk_mstp_register(clks, nr);
}
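
/*
 * Illustrative sketch with a hypothetical register address, bit and parent:
 * a module stop (MSTP) gate defined with the 32-bit wrapper and registered
 * together with the rest of its bank.
 *
 *	enum { MSTP030, MSTP_NR };
 *
 *	static struct clk mstp_clks[MSTP_NR] = {
 *		[MSTP030] = SH_CLK_MSTP32(&peripheral_clk, 0xe6150130, 30, 0),
 *	};
 *
 *	ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
 */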

#define SH_CLK_DIV4(_parent, _reg, _shift, _div_bitmap, _flags) \
{ \
	.parent = _parent, \
	.enable_reg = (void __iomem *)_reg, \
	.enable_bit = _shift, \
	.arch_flags = _div_bitmap, \
	.div_mask = SH_CLK_DIV4_MSK, \
	.flags = _flags, \
}

struct clk_div_table {
	struct clk_div_mult_table *div_mult_table;
	void (*kick)(struct clk *clk);
};

#define clk_div4_table clk_div_table

int sh_clk_div4_register(struct clk *clks, int nr,
			 struct clk_div4_table *table);
int sh_clk_div4_enable_register(struct clk *clks, int nr,
			 struct clk_div4_table *table);
int sh_clk_div4_reparent_register(struct clk *clks, int nr,
			 struct clk_div4_table *table);
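
/*
 * Illustrative sketch with hypothetical divisors, register and parent: a
 * DIV4 clock backed by a divisor table.  The fourth SH_CLK_DIV4() argument
 * is the bitmap of table entries that are valid for this particular clock.
 *
 *	static int divisors[] = { 2, 4, 8, 16 };
 *
 *	static struct clk_div_mult_table div4_div_mult_table = {
 *		.divisors	= divisors,
 *		.nr_divisors	= ARRAY_SIZE(divisors),
 *	};
 *
 *	static struct clk_div4_table div4_table = {
 *		.div_mult_table	= &div4_div_mult_table,
 *	};
 *
 *	static struct clk div4_clks[] = {
 *		SH_CLK_DIV4(&pll_clk, 0xe6150020, 0, 0xf, CLK_ENABLE_ON_INIT),
 *	};
 *
 *	ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
 *				   &div4_table);
 */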

#define SH_CLK_DIV6_EXT(_reg, _flags, _parents, \
			_num_parents, _src_shift, _src_width) \
{ \
	.enable_reg = (void __iomem *)_reg, \
	.enable_bit = 0, /* unused */ \
	.flags = _flags | CLK_MASK_DIV_ON_DISABLE, \
	.div_mask = SH_CLK_DIV6_MSK, \
	.parent_table = _parents, \
	.parent_num = _num_parents, \
	.src_shift = _src_shift, \
	.src_width = _src_width, \
}

#define SH_CLK_DIV6(_parent, _reg, _flags) \
{ \
	.parent = _parent, \
	.enable_reg = (void __iomem *)_reg, \
	.enable_bit = 0, /* unused */ \
	.div_mask = SH_CLK_DIV6_MSK, \
	.flags = _flags | CLK_MASK_DIV_ON_DISABLE, \
}

int sh_clk_div6_register(struct clk *clks, int nr);
int sh_clk_div6_reparent_register(struct clk *clks, int nr);
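
/*
 * Illustrative sketch with hypothetical registers and parents: a
 * fixed-parent DIV6 clock, plus a reparentable one whose source is selected
 * by a 2-bit field at bit 6 of its control register.
 *
 *	static struct clk *div6_parents[] = { &pll1_div2_clk, &pll2_clk };
 *
 *	static struct clk div6_clks[] = {
 *		SH_CLK_DIV6(&pll1_div2_clk, 0xe6150048, 0),
 *	};
 *
 *	static struct clk div6_reparent_clks[] = {
 *		SH_CLK_DIV6_EXT(0xe615004c, 0, div6_parents,
 *				ARRAY_SIZE(div6_parents), 6, 2),
 *	};
 *
 *	ret = sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks));
 *	if (!ret)
 *		ret = sh_clk_div6_reparent_register(div6_reparent_clks,
 *					ARRAY_SIZE(div6_reparent_clks));
 */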

#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
#define CLKDEV_DEV_ID(_id, _clk) { .dev_id = _id, .clk = _clk }
#define CLKDEV_ICK_ID(_cid, _did, _clk) { .con_id = _cid, .dev_id = _did, .clk = _clk }
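
/*
 * Illustrative sketch with hypothetical device and clock names: the CLKDEV_*
 * macros fill in struct clk_lookup entries (see <linux/clkdev.h>), which an
 * SoC clock file then hands to clkdev_add_table().
 *
 *	static struct clk_lookup lookups[] = {
 *		CLKDEV_CON_ID("extal", &extal_clk),
 *		CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP030]),
 *		CLKDEV_ICK_ID("ick", "sh_fsi2", &div6_clks[0]),
 *	};
 *
 *	clkdev_add_table(lookups, ARRAY_SIZE(lookups));
 */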

/* .enable_reg will be updated to .mapping by sh_clk_fsidiv_register() */
#define SH_CLK_FSIDIV(_reg, _parent) \
{ \
	.enable_reg = (void __iomem *)_reg, \
	.parent = _parent, \
}

int sh_clk_fsidiv_register(struct clk *clks, int nr);
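
/*
 * Illustrative sketch with a hypothetical divider register and parent:
 * FSI-DIV clocks are defined per divider register and registered as a block.
 *
 *	static struct clk fsidivs[] = {
 *		SH_CLK_FSIDIV(0xf3400000, &div6_clks[0]),
 *	};
 *
 *	ret = sh_clk_fsidiv_register(fsidivs, ARRAY_SIZE(fsidivs));
 */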

#endif /* __SH_CLOCK_H */