// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 * Copyright (c) 2013 Linaro Ltd.
 * Author: Thomas Abraham <thomas.ab@samsung.com>
 *
 * This file includes utility functions to register clocks to common
 * clock framework for Samsung platforms.
 */

#include <linux/slab.h>
#include <linux/clkdev.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>

#include "clk.h"

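/* list of per-CMU register caches saved/restored across system suspend */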
static LIST_HEAD(clock_reg_cache_list);

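/* snapshot the current value of each register listed in @rd */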
void samsung_clk_save(void __iomem *base,
				    struct samsung_clk_reg_dump *rd,
				    unsigned int num_regs)
{
	for (; num_regs > 0; --num_regs, ++rd)
		rd->value = readl(base + rd->offset);
}

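/* write the previously saved (or preset) values back to the registers */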
void samsung_clk_restore(void __iomem *base,
				      const struct samsung_clk_reg_dump *rd,
				      unsigned int num_regs)
{
	for (; num_regs > 0; --num_regs, ++rd)
		writel(rd->value, base + rd->offset);
}

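/* allocate a register dump array initialized with the given register offsets */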
struct samsung_clk_reg_dump *samsung_clk_alloc_reg_dump(
						const unsigned long *rdump,
						unsigned long nr_rdump)
{
	struct samsung_clk_reg_dump *rd;
	unsigned int i;

	rd = kcalloc(nr_rdump, sizeof(*rd), GFP_KERNEL);
	if (!rd)
		return NULL;

	for (i = 0; i < nr_rdump; ++i)
		rd[i].offset = rdump[i];

	return rd;
}

/* setup the essentials required to support clock lookup using ccf */
struct samsung_clk_provider *__init samsung_clk_init(struct device_node *np,
			void __iomem *base, unsigned long nr_clks)
{
	struct samsung_clk_provider *ctx;
	int i;

	ctx = kzalloc(sizeof(struct samsung_clk_provider) +
		      sizeof(*ctx->clk_data.hws) * nr_clks, GFP_KERNEL);
	if (!ctx)
		panic("could not allocate clock provider context.\n");

	for (i = 0; i < nr_clks; ++i)
		ctx->clk_data.hws[i] = ERR_PTR(-ENOENT);

	ctx->reg_base = base;
	ctx->clk_data.num = nr_clks;
	spin_lock_init(&ctx->lock);

	return ctx;
}

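/* register the clock provider with the common clock framework for DT lookup */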
void __init samsung_clk_of_add_provider(struct device_node *np,
				struct samsung_clk_provider *ctx)
{
	if (np) {
		if (of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
					&ctx->clk_data))
			panic("could not register clk provider\n");
	}
}

/* add a clock instance to the clock lookup table used for dt based lookup */
void samsung_clk_add_lookup(struct samsung_clk_provider *ctx,
			    struct clk_hw *clk_hw, unsigned int id)
{
	if (id)
		ctx->clk_data.hws[id] = clk_hw;
}

/* register a list of aliases */
void __init samsung_clk_register_alias(struct samsung_clk_provider *ctx,
				const struct samsung_clock_alias *list,
				unsigned int nr_clk)
{
	struct clk_hw *clk_hw;
	unsigned int idx, ret;

	for (idx = 0; idx < nr_clk; idx++, list++) {
		if (!list->id) {
			pr_err("%s: clock id missing for index %d\n", __func__,
				idx);
			continue;
		}

		clk_hw = ctx->clk_data.hws[list->id];
		if (!clk_hw) {
			pr_err("%s: failed to find clock %d\n", __func__,
				list->id);
			continue;
		}

		ret = clk_hw_register_clkdev(clk_hw, list->alias,
					list->dev_name);
		if (ret)
			pr_err("%s: failed to register lookup %s\n",
					__func__, list->alias);
	}
}

/* register a list of fixed rate clocks */
void __init samsung_clk_register_fixed_rate(struct samsung_clk_provider *ctx,
		const struct samsung_fixed_rate_clock *list,
		unsigned int nr_clk)
{
	struct clk_hw *clk_hw;
	unsigned int idx, ret;

	for (idx = 0; idx < nr_clk; idx++, list++) {
		clk_hw = clk_hw_register_fixed_rate(ctx->dev, list->name,
			list->parent_name, list->flags, list->fixed_rate);
		if (IS_ERR(clk_hw)) {
			pr_err("%s: failed to register clock %s\n", __func__,
				list->name);
			continue;
		}

		samsung_clk_add_lookup(ctx, clk_hw, list->id);

		/*
		 * Unconditionally add a clock lookup for the fixed rate clocks.
		 * There are not many of these on any of the Samsung platforms.
		 */
		ret = clk_hw_register_clkdev(clk_hw, list->name, NULL);
		if (ret)
			pr_err("%s: failed to register clock lookup for %s\n",
				__func__, list->name);
	}
}

/* register a list of fixed factor clocks */
void __init samsung_clk_register_fixed_factor(struct samsung_clk_provider *ctx,
		const struct samsung_fixed_factor_clock *list, unsigned int nr_clk)
{
	struct clk_hw *clk_hw;
	unsigned int idx;

	for (idx = 0; idx < nr_clk; idx++, list++) {
		clk_hw = clk_hw_register_fixed_factor(ctx->dev, list->name,
			list->parent_name, list->flags, list->mult, list->div);
		if (IS_ERR(clk_hw)) {
			pr_err("%s: failed to register clock %s\n", __func__,
				list->name);
			continue;
		}

		samsung_clk_add_lookup(ctx, clk_hw, list->id);
	}
}

/* register a list of mux clocks */
void __init samsung_clk_register_mux(struct samsung_clk_provider *ctx,
				const struct samsung_mux_clock *list,
				unsigned int nr_clk)
{
	struct clk_hw *clk_hw;
	unsigned int idx;

	for (idx = 0; idx < nr_clk; idx++, list++) {
		clk_hw = clk_hw_register_mux(ctx->dev, list->name,
			list->parent_names, list->num_parents, list->flags,
			ctx->reg_base + list->offset,
			list->shift, list->width, list->mux_flags, &ctx->lock);
		if (IS_ERR(clk_hw)) {
			pr_err("%s: failed to register clock %s\n", __func__,
				list->name);
			continue;
		}

		samsung_clk_add_lookup(ctx, clk_hw, list->id);
	}
}

/* register a list of div clocks */
void __init samsung_clk_register_div(struct samsung_clk_provider *ctx,
				const struct samsung_div_clock *list,
				unsigned int nr_clk)
{
	struct clk_hw *clk_hw;
	unsigned int idx;

	for (idx = 0; idx < nr_clk; idx++, list++) {
		if (list->table)
			clk_hw = clk_hw_register_divider_table(ctx->dev,
				list->name, list->parent_name, list->flags,
				ctx->reg_base + list->offset,
				list->shift, list->width, list->div_flags,
				list->table, &ctx->lock);
		else
			clk_hw = clk_hw_register_divider(ctx->dev, list->name,
				list->parent_name, list->flags,
				ctx->reg_base + list->offset, list->shift,
				list->width, list->div_flags, &ctx->lock);
		if (IS_ERR(clk_hw)) {
			pr_err("%s: failed to register clock %s\n", __func__,
				list->name);
			continue;
		}

		samsung_clk_add_lookup(ctx, clk_hw, list->id);
	}
}

/* register a list of gate clocks */
void __init samsung_clk_register_gate(struct samsung_clk_provider *ctx,
				const struct samsung_gate_clock *list,
				unsigned int nr_clk)
{
	struct clk_hw *clk_hw;
	unsigned int idx;

	for (idx = 0; idx < nr_clk; idx++, list++) {
		clk_hw = clk_hw_register_gate(ctx->dev, list->name, list->parent_name,
				list->flags, ctx->reg_base + list->offset,
				list->bit_idx, list->gate_flags, &ctx->lock);
		if (IS_ERR(clk_hw)) {
			pr_err("%s: failed to register clock %s\n", __func__,
				list->name);
			continue;
		}

		samsung_clk_add_lookup(ctx, clk_hw, list->id);
	}
}

/*
 * obtain the clock speed of all external fixed clock sources from device
 * tree and register them
 */
void __init samsung_clk_of_register_fixed_ext(struct samsung_clk_provider *ctx,
			struct samsung_fixed_rate_clock *fixed_rate_clk,
			unsigned int nr_fixed_rate_clk,
			const struct of_device_id *clk_matches)
{
	const struct of_device_id *match;
	struct device_node *clk_np;
	u32 freq;

	for_each_matching_node_and_match(clk_np, clk_matches, &match) {
		if (of_property_read_u32(clk_np, "clock-frequency", &freq))
			continue;
		fixed_rate_clk[(unsigned long)match->data].fixed_rate = freq;
	}
	samsung_clk_register_fixed_rate(ctx, fixed_rate_clk, nr_fixed_rate_clk);
}

/* utility function to get the rate of a specified clock */
unsigned long _get_rate(const char *clk_name)
{
	struct clk *clk;

	clk = __clk_lookup(clk_name);
	if (!clk) {
		pr_err("%s: could not find clock %s\n", __func__, clk_name);
		return 0;
	}

	return clk_get_rate(clk);
}

#ifdef CONFIG_PM_SLEEP
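/*
 * On suspend, save the current values of all cached clock registers and
 * then program the suspend-safe values; on resume, restore the saved
 * values.
 */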
static int samsung_clk_suspend(void)
{
	struct samsung_clock_reg_cache *reg_cache;

	list_for_each_entry(reg_cache, &clock_reg_cache_list, node) {
		samsung_clk_save(reg_cache->reg_base, reg_cache->rdump,
				reg_cache->rd_num);
		samsung_clk_restore(reg_cache->reg_base, reg_cache->rsuspend,
				reg_cache->rsuspend_num);
	}
	return 0;
}

static void samsung_clk_resume(void)
{
	struct samsung_clock_reg_cache *reg_cache;

	list_for_each_entry(reg_cache, &clock_reg_cache_list, node)
		samsung_clk_restore(reg_cache->reg_base, reg_cache->rdump,
				reg_cache->rd_num);
}

static struct syscore_ops samsung_clk_syscore_ops = {
	.suspend = samsung_clk_suspend,
	.resume = samsung_clk_resume,
};

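/*
 * register a CMU's clock registers for save/restore across system suspend;
 * the syscore ops are registered once, when the first cache is added
 */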
void samsung_clk_extended_sleep_init(void __iomem *reg_base,
			const unsigned long *rdump,
			unsigned long nr_rdump,
			const struct samsung_clk_reg_dump *rsuspend,
			unsigned long nr_rsuspend)
{
	struct samsung_clock_reg_cache *reg_cache;

	reg_cache = kzalloc(sizeof(struct samsung_clock_reg_cache),
			GFP_KERNEL);
	if (!reg_cache)
		panic("could not allocate register reg_cache.\n");
	reg_cache->rdump = samsung_clk_alloc_reg_dump(rdump, nr_rdump);

	if (!reg_cache->rdump)
		panic("could not allocate register dump storage.\n");

	if (list_empty(&clock_reg_cache_list))
		register_syscore_ops(&samsung_clk_syscore_ops);

	reg_cache->reg_base = reg_base;
	reg_cache->rd_num = nr_rdump;
	reg_cache->rsuspend = rsuspend;
	reg_cache->rsuspend_num = nr_rsuspend;
	list_add_tail(&reg_cache->node, &clock_reg_cache_list);
}
#endif

/*
 * Common function which registers PLLs, muxes, dividers and gates
 * for each CMU. It also adds the CMU's register list to the register cache.
 */
struct samsung_clk_provider * __init samsung_cmu_register_one(
			struct device_node *np,
			const struct samsung_cmu_info *cmu)
{
	void __iomem *reg_base;
	struct samsung_clk_provider *ctx;

	reg_base = of_iomap(np, 0);
	if (!reg_base) {
		panic("%s: failed to map registers\n", __func__);
		return NULL;
	}

	ctx = samsung_clk_init(np, reg_base, cmu->nr_clk_ids);
	if (!ctx) {
		panic("%s: unable to allocate ctx\n", __func__);
		return ctx;
	}

	if (cmu->pll_clks)
		samsung_clk_register_pll(ctx, cmu->pll_clks, cmu->nr_pll_clks,
			reg_base);
	if (cmu->mux_clks)
		samsung_clk_register_mux(ctx, cmu->mux_clks,
			cmu->nr_mux_clks);
	if (cmu->div_clks)
		samsung_clk_register_div(ctx, cmu->div_clks, cmu->nr_div_clks);
	if (cmu->gate_clks)
		samsung_clk_register_gate(ctx, cmu->gate_clks,
			cmu->nr_gate_clks);
	if (cmu->fixed_clks)
		samsung_clk_register_fixed_rate(ctx, cmu->fixed_clks,
			cmu->nr_fixed_clks);
	if (cmu->fixed_factor_clks)
		samsung_clk_register_fixed_factor(ctx, cmu->fixed_factor_clks,
			cmu->nr_fixed_factor_clks);
	if (cmu->clk_regs)
		samsung_clk_extended_sleep_init(reg_base,
			cmu->clk_regs, cmu->nr_clk_regs,
			cmu->suspend_regs, cmu->nr_suspend_regs);

	samsung_clk_of_add_provider(np, ctx);

	return ctx;
}