/*
 * Copyright (C) 2014 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Adjustable fractional divider clock implementation.
 * Output rate = (m / n) * parent_rate.
 * Uses rational best approximation algorithm.
 */
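/*
 * Worked example (arithmetic only): with parent_rate = 100 MHz, m = 3 and
 * n = 4, the output rate is 100 MHz * 3 / 4 = 75 MHz.
 */
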
#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/rational.h>

#define to_clk_fd(_hw) container_of(_hw, struct clk_fractional_divider, hw)

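/*
 * .recalc_rate: read back the m and n fields currently programmed in the
 * register and return parent_rate * m / n. A zero numerator or denominator
 * is treated as a bypass and parent_rate is returned unchanged.
 */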
static unsigned long clk_fd_recalc_rate(struct clk_hw *hw,
					unsigned long parent_rate)
{
	struct clk_fractional_divider *fd = to_clk_fd(hw);
	unsigned long flags = 0;
	unsigned long m, n;
	u32 val;
	u64 ret;

	if (fd->lock)
		spin_lock_irqsave(fd->lock, flags);
	else
		__acquire(fd->lock);

	val = clk_readl(fd->reg);

	if (fd->lock)
		spin_unlock_irqrestore(fd->lock, flags);
	else
		__release(fd->lock);

	m = (val & fd->mmask) >> fd->mshift;
	n = (val & fd->nmask) >> fd->nshift;

	if (!n || !m)
		return parent_rate;

	ret = (u64)parent_rate * m;
	do_div(ret, n);

	return ret;
}

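/*
 * Generic helper used by .round_rate when the driver does not provide its
 * own ->approximation callback: scale the requested rate up if the
 * parent/rate ratio would not fit in the n field, then let
 * rational_best_approximation() pick the best m/n within the configured
 * field widths.
 */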
static void clk_fd_general_approximation(struct clk_hw *hw, unsigned long rate,
					 unsigned long *parent_rate,
					 unsigned long *m, unsigned long *n)
{
	struct clk_fractional_divider *fd = to_clk_fd(hw);
	unsigned long scale;

	/*
	 * Get rate closer to *parent_rate to guarantee there is no overflow
	 * for m and n. In the result it will be the nearest rate left shifted
	 * by (scale - fd->nwidth) bits.
	 */
	scale = fls_long(*parent_rate / rate - 1);
	if (scale > fd->nwidth)
		rate <<= scale - fd->nwidth;

	rational_best_approximation(rate, *parent_rate,
			GENMASK(fd->mwidth - 1, 0), GENMASK(fd->nwidth - 1, 0),
			m, n);
}

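/*
 * .round_rate: pick the best m/n for the requested rate, either through the
 * driver-specific ->approximation hook or the generic helper above, and
 * report the rate that would actually be produced.
 */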
static long clk_fd_round_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long *parent_rate)
{
	struct clk_fractional_divider *fd = to_clk_fd(hw);
	unsigned long m, n;
	u64 ret;

	if (!rate || rate >= *parent_rate)
		return *parent_rate;

	if (fd->approximation)
		fd->approximation(hw, rate, parent_rate, &m, &n);
	else
		clk_fd_general_approximation(hw, rate, parent_rate, &m, &n);

	ret = (u64)*parent_rate * m;
	do_div(ret, n);

	return ret;
}

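/*
 * .set_rate: recompute m/n for the final parent rate and program both fields
 * into the divider register, taking the optional spinlock around the
 * read-modify-write.
 */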
static int clk_fd_set_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long parent_rate)
{
	struct clk_fractional_divider *fd = to_clk_fd(hw);
	unsigned long flags = 0;
	unsigned long m, n;
	u32 val;

	rational_best_approximation(rate, parent_rate,
			GENMASK(fd->mwidth - 1, 0), GENMASK(fd->nwidth - 1, 0),
			&m, &n);

	if (fd->lock)
		spin_lock_irqsave(fd->lock, flags);
	else
		__acquire(fd->lock);

	val = clk_readl(fd->reg);
	val &= ~(fd->mmask | fd->nmask);
	val |= (m << fd->mshift) | (n << fd->nshift);
	clk_writel(val, fd->reg);

	if (fd->lock)
		spin_unlock_irqrestore(fd->lock, flags);
	else
		__release(fd->lock);

	return 0;
}

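/*
 * Operations table implementing the fractional divider; exported so that
 * drivers can also reference it directly (e.g. when building a composite
 * clock) instead of going through the registration helpers below.
 */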
const struct clk_ops clk_fractional_divider_ops = {
	.recalc_rate = clk_fd_recalc_rate,
	.round_rate = clk_fd_round_rate,
	.set_rate = clk_fd_set_rate,
};
EXPORT_SYMBOL_GPL(clk_fractional_divider_ops);

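/*
 * Register a fractional divider clock: mshift/mwidth and nshift/nwidth
 * describe where the numerator and denominator fields live inside the
 * register at @reg.
 *
 * Illustrative sketch (all names, the register layout and the lock are
 * hypothetical, not taken from this file): a driver with an 8-bit numerator
 * in bits [15:8] and an 8-bit denominator in bits [7:0] could do
 *
 *	hw = clk_hw_register_fractional_divider(dev, "uart_frac", "pll_out",
 *			0, base + UART_FRAC_REG, 8, 8, 0, 8, 0, &uart_clk_lock);
 *	if (IS_ERR(hw))
 *		return PTR_ERR(hw);
 */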
struct clk_hw *clk_hw_register_fractional_divider(struct device *dev,
		const char *name, const char *parent_name, unsigned long flags,
		void __iomem *reg, u8 mshift, u8 mwidth, u8 nshift, u8 nwidth,
		u8 clk_divider_flags, spinlock_t *lock)
{
	struct clk_fractional_divider *fd;
	struct clk_init_data init;
	struct clk_hw *hw;
	int ret;

	fd = kzalloc(sizeof(*fd), GFP_KERNEL);
	if (!fd)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &clk_fractional_divider_ops;
	init.flags = flags | CLK_IS_BASIC;
	init.parent_names = parent_name ? &parent_name : NULL;
	init.num_parents = parent_name ? 1 : 0;

	fd->reg = reg;
	fd->mshift = mshift;
	fd->mwidth = mwidth;
	fd->mmask = GENMASK(mwidth - 1, 0) << mshift;
	fd->nshift = nshift;
	fd->nwidth = nwidth;
	fd->nmask = GENMASK(nwidth - 1, 0) << nshift;
	fd->flags = clk_divider_flags;
	fd->lock = lock;
	fd->hw.init = &init;

	hw = &fd->hw;
	ret = clk_hw_register(dev, hw);
	if (ret) {
		kfree(fd);
		hw = ERR_PTR(ret);
	}

	return hw;
}
EXPORT_SYMBOL_GPL(clk_hw_register_fractional_divider);

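/*
 * Wrapper around clk_hw_register_fractional_divider() for callers that want
 * the struct clk rather than the struct clk_hw.
 */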
struct clk *clk_register_fractional_divider(struct device *dev,
		const char *name, const char *parent_name, unsigned long flags,
		void __iomem *reg, u8 mshift, u8 mwidth, u8 nshift, u8 nwidth,
		u8 clk_divider_flags, spinlock_t *lock)
{
	struct clk_hw *hw;

	hw = clk_hw_register_fractional_divider(dev, name, parent_name, flags,
			reg, mshift, mwidth, nshift, nwidth, clk_divider_flags,
			lock);
	if (IS_ERR(hw))
		return ERR_CAST(hw);
	return hw->clk;
}
EXPORT_SYMBOL_GPL(clk_register_fractional_divider);

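/*
 * Undo clk_hw_register_fractional_divider(): unregister the clk_hw and free
 * the struct clk_fractional_divider allocated at registration time.
 */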
void clk_hw_unregister_fractional_divider(struct clk_hw *hw)
{
	struct clk_fractional_divider *fd;

	fd = to_clk_fd(hw);

	clk_hw_unregister(hw);
	kfree(fd);
}