/*
 * Copyright (C) 2014 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Adjustable fractional divider clock implementation.
 * Output rate = (m / n) * parent_rate.
 * Uses rational best approximation algorithm.
 */
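
/*
 * Worked example (illustrative numbers only, not taken from any driver):
 * with parent_rate = 19.2 MHz, m = 5 and n = 48, the output rate is
 * 19200000 * 5 / 48 = 2000000 Hz, i.e. 2 MHz.
 */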

#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/rational.h>

#define to_clk_fd(_hw) container_of(_hw, struct clk_fractional_divider, hw)

/* Read m and n back from the register and return parent_rate * m / n. */
static unsigned long clk_fd_recalc_rate(struct clk_hw *hw,
					unsigned long parent_rate)
{
	struct clk_fractional_divider *fd = to_clk_fd(hw);
	unsigned long flags = 0;
	unsigned long m, n;
	u32 val;
	u64 ret;

	if (fd->lock)
		spin_lock_irqsave(fd->lock, flags);
	else
		__acquire(fd->lock);

	val = clk_readl(fd->reg);

	if (fd->lock)
		spin_unlock_irqrestore(fd->lock, flags);
	else
		__release(fd->lock);

	m = (val & fd->mmask) >> fd->mshift;
	n = (val & fd->nmask) >> fd->nshift;

	if (!n || !m)
		return parent_rate;

	ret = (u64)parent_rate * m;
	do_div(ret, n);

	return ret;
}

static long clk_fd_round_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long *parent_rate)
{
	struct clk_fractional_divider *fd = to_clk_fd(hw);
	unsigned long scale;
	unsigned long m, n;
	u64 ret;

	if (!rate || rate >= *parent_rate)
		return *parent_rate;

	/*
	 * Bring the requested rate closer to *parent_rate so that m and n
	 * cannot overflow their bit fields. The result is then the nearest
	 * rate for the request left-shifted by (scale - fd->nwidth) bits.
	 */
	scale = fls_long(*parent_rate / rate - 1);
	if (scale > fd->nwidth)
		rate <<= scale - fd->nwidth;
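
	/*
	 * Illustrative example (numbers are assumptions, not from the code
	 * above): with *parent_rate = 100 MHz, rate = 1 kHz and
	 * fd->nwidth = 8, fls_long(100000000 / 1000 - 1) = 17, so the
	 * request is shifted left by 17 - 8 = 9 bits to 512 kHz; the
	 * resulting ratio of about 195 now fits in the 8-bit n field.
	 */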

	rational_best_approximation(rate, *parent_rate,
			GENMASK(fd->mwidth - 1, 0), GENMASK(fd->nwidth - 1, 0),
			&m, &n);

	ret = (u64)*parent_rate * m;
	do_div(ret, n);

	return ret;
}

/* Program the closest m/n approximation of rate / parent_rate. */
static int clk_fd_set_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long parent_rate)
{
	struct clk_fractional_divider *fd = to_clk_fd(hw);
	unsigned long flags = 0;
	unsigned long m, n;
	u32 val;

	rational_best_approximation(rate, parent_rate,
			GENMASK(fd->mwidth - 1, 0), GENMASK(fd->nwidth - 1, 0),
			&m, &n);

	if (fd->lock)
		spin_lock_irqsave(fd->lock, flags);
	else
		__acquire(fd->lock);

	val = clk_readl(fd->reg);
	val &= ~(fd->mmask | fd->nmask);
	val |= (m << fd->mshift) | (n << fd->nshift);
	clk_writel(val, fd->reg);

	if (fd->lock)
		spin_unlock_irqrestore(fd->lock, flags);
	else
		__release(fd->lock);

	return 0;
}

const struct clk_ops clk_fractional_divider_ops = {
	.recalc_rate = clk_fd_recalc_rate,
	.round_rate = clk_fd_round_rate,
	.set_rate = clk_fd_set_rate,
};
EXPORT_SYMBOL_GPL(clk_fractional_divider_ops);

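/*
 * Usage sketch (illustrative only; the base address, register offset, bit
 * layout, clock names and lock below are assumptions, not from a real
 * driver): register a divider whose m field occupies bits 23:16 and whose
 * n field occupies bits 7:0 of the register at base + 0x48.
 *
 *	clk = clk_register_fractional_divider(dev, "uart0_frac", "uart_src",
 *					       0, base + 0x48,
 *					       16, 8, 0, 8,
 *					       0, &uart_clk_lock);
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 */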
struct clk *clk_register_fractional_divider(struct device *dev,
		const char *name, const char *parent_name, unsigned long flags,
		void __iomem *reg, u8 mshift, u8 mwidth, u8 nshift, u8 nwidth,
		u8 clk_divider_flags, spinlock_t *lock)
{
	struct clk_fractional_divider *fd;
	struct clk_init_data init;
	struct clk *clk;

	fd = kzalloc(sizeof(*fd), GFP_KERNEL);
	if (!fd)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &clk_fractional_divider_ops;
	init.flags = flags | CLK_IS_BASIC;
	init.parent_names = parent_name ? &parent_name : NULL;
	init.num_parents = parent_name ? 1 : 0;

	fd->reg = reg;
	fd->mshift = mshift;
	fd->mwidth = mwidth;
	fd->mmask = GENMASK(mwidth - 1, 0) << mshift;
	fd->nshift = nshift;
	fd->nwidth = nwidth;
	fd->nmask = GENMASK(nwidth - 1, 0) << nshift;
	fd->flags = clk_divider_flags;
	fd->lock = lock;
	fd->hw.init = &init;

	clk = clk_register(dev, &fd->hw);
	if (IS_ERR(clk))
		kfree(fd);

	return clk;
}
EXPORT_SYMBOL_GPL(clk_register_fractional_divider);