blob: 9cc9b1ca86b99cb031c1a3a608a75e5d63dbb793 [file] [log] [blame]
Vidyakumar Athota5d45f4c2019-03-10 22:35:07 -07001// SPDX-License-Identifier: GPL-2.0-only
2/*
Aditya Bavanari76901542019-12-30 18:20:47 +05303 * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
Vidyakumar Athota5d45f4c2019-03-10 22:35:07 -07004 */
5
6#include <linux/of_platform.h>
7#include <linux/module.h>
8#include <linux/io.h>
9#include <linux/init.h>
10#include <linux/platform_device.h>
11#include <linux/kernel.h>
12#include <linux/clk.h>
Meng Wang8ef0cc22019-05-08 15:12:56 +080013#include <linux/clk-provider.h>
Vidyakumar Athota5d45f4c2019-03-10 22:35:07 -070014#include "bolero-cdc.h"
15#include "bolero-clk-rsc.h"
16
17#define DRV_NAME "bolero-clk-rsc"
18#define BOLERO_CLK_NAME_LENGTH 30
19#define NPL_CLK_OFFSET (TX_NPL_CLK - TX_CORE_CLK)
20
/*
 * DT clock-consumer names, indexed by clock ID (MAX_CLK entries).
 * The ordering must match the core/NPL clock ID enums: each NPL clock
 * sits exactly NPL_CLK_OFFSET entries after its core clock, and the
 * request/reset paths rely on that clk_id + NPL_CLK_OFFSET arithmetic.
 */
static char clk_src_name[MAX_CLK][BOLERO_CLK_NAME_LENGTH] = {
	"tx_core_clk",
	"rx_core_clk",
	"wsa_core_clk",
	"va_core_clk",
	"tx_npl_clk",
	"rx_npl_clk",
	"wsa_npl_clk",
	"va_npl_clk",
};
31
/* Private state of the bolero clock resource manager device. */
struct bolero_clk_rsc {
	struct device *dev;		/* this platform device */
	struct mutex rsc_clk_lock;	/* guards clk_cnt[]/va_tx_clk_cnt and hw clk ops */
	struct mutex fs_gen_lock;	/* guards reg_seq_en_cnt and fs gen regmap writes */
	struct clk *clk[MAX_CLK];	/* handles indexed as clk_src_name[]; NULL if absent */
	int clk_cnt[MAX_CLK];		/* per-clock enable refcounts */
	int reg_seq_en_cnt;		/* fs gen sequence enable refcount */
	int va_tx_clk_cnt;		/* VA requests currently riding on TX_CORE_CLK */
	bool dev_up;			/* cleared on SSR down; enables are rejected */
	bool dev_up_gfmux;		/* gfmux reachable (set on SSR gfmux-up event) */
	u32 num_fs_reg;			/* number of reg/value pairs in fs_gen_seq */
	u32 *fs_gen_seq;		/* fs gen reg/value pairs parsed from DT */
	int default_clk_id[MAX_CLK];	/* mux0 fallback clock per requested clock */
	struct regmap *regmap;
	char __iomem *rx_clk_muxsel;	/* ioremapped mclk mode muxsel registers */
	char __iomem *wsa_clk_muxsel;
	char __iomem *va_clk_muxsel;
};
50
51static int bolero_clk_rsc_cb(struct device *dev, u16 event)
52{
53 struct bolero_clk_rsc *priv;
54
55 if (!dev) {
56 pr_err("%s: Invalid device pointer\n",
57 __func__);
58 return -EINVAL;
59 }
60
61 priv = dev_get_drvdata(dev);
62 if (!priv) {
63 pr_err("%s: Invalid clk rsc priviate data\n",
64 __func__);
65 return -EINVAL;
66 }
67
68 mutex_lock(&priv->rsc_clk_lock);
Aditya Bavanarib258e092020-01-20 20:38:05 +053069 if (event == BOLERO_MACRO_EVT_SSR_UP) {
Vidyakumar Athota5d45f4c2019-03-10 22:35:07 -070070 priv->dev_up = true;
Aditya Bavanarib258e092020-01-20 20:38:05 +053071 } else if (event == BOLERO_MACRO_EVT_SSR_DOWN) {
Vidyakumar Athota5d45f4c2019-03-10 22:35:07 -070072 priv->dev_up = false;
Aditya Bavanarib258e092020-01-20 20:38:05 +053073 priv->dev_up_gfmux = false;
74 } else if (event == BOLERO_MACRO_EVT_SSR_GFMUX_UP) {
75 priv->dev_up_gfmux = true;
76 }
Vidyakumar Athota5d45f4c2019-03-10 22:35:07 -070077 mutex_unlock(&priv->rsc_clk_lock);
78
79 return 0;
80}
81
82static char __iomem *bolero_clk_rsc_get_clk_muxsel(struct bolero_clk_rsc *priv,
83 int clk_id)
84{
85 switch (clk_id) {
86 case RX_CORE_CLK:
87 return priv->rx_clk_muxsel;
88 case WSA_CORE_CLK:
89 return priv->wsa_clk_muxsel;
90 case VA_CORE_CLK:
91 return priv->va_clk_muxsel;
92 case TX_CORE_CLK:
93 default:
94 dev_err_ratelimited(priv->dev, "%s: Invalid case\n", __func__);
95 break;
96 }
97
98 return NULL;
99}
100
Meng Wang8ef0cc22019-05-08 15:12:56 +0800101int bolero_rsc_clk_reset(struct device *dev, int clk_id)
102{
103 struct device *clk_dev = NULL;
104 struct bolero_clk_rsc *priv = NULL;
105 int count = 0;
106
107 if (!dev) {
Xiao Lid8bb93c2020-01-07 12:59:05 +0800108 pr_err("%s: dev is null\n", __func__);
Meng Wang8ef0cc22019-05-08 15:12:56 +0800109 return -EINVAL;
110 }
111
112 if (clk_id < 0 || clk_id >= MAX_CLK - NPL_CLK_OFFSET) {
113 pr_err("%s: Invalid clk_id: %d\n",
114 __func__, clk_id);
115 return -EINVAL;
116 }
117
118 clk_dev = bolero_get_rsc_clk_device_ptr(dev->parent);
119 if (!clk_dev) {
120 pr_err("%s: Invalid rsc clk device\n", __func__);
121 return -EINVAL;
122 }
123
124 priv = dev_get_drvdata(clk_dev);
125 if (!priv) {
126 pr_err("%s: Invalid rsc clk priviate data\n", __func__);
127 return -EINVAL;
128 }
129 mutex_lock(&priv->rsc_clk_lock);
130 while (__clk_is_enabled(priv->clk[clk_id])) {
131 clk_disable_unprepare(priv->clk[clk_id + NPL_CLK_OFFSET]);
132 clk_disable_unprepare(priv->clk[clk_id]);
133 count++;
134 }
135 dev_dbg(priv->dev,
136 "%s: clock reset after ssr, count %d\n", __func__, count);
Aditya Bavanarif500a1d2019-09-16 18:27:51 -0700137
138 trace_printk("%s: clock reset after ssr, count %d\n", __func__, count);
Meng Wang8ef0cc22019-05-08 15:12:56 +0800139 while (count--) {
140 clk_prepare_enable(priv->clk[clk_id]);
141 clk_prepare_enable(priv->clk[clk_id + NPL_CLK_OFFSET]);
142 }
143 mutex_unlock(&priv->rsc_clk_lock);
144 return 0;
145}
146EXPORT_SYMBOL(bolero_rsc_clk_reset);
147
Aditya Bavanaricfc65e82019-10-03 22:34:23 +0530148void bolero_clk_rsc_enable_all_clocks(struct device *dev, bool enable)
149{
150 struct device *clk_dev = NULL;
151 struct bolero_clk_rsc *priv = NULL;
152 int i = 0;
153
154 if (!dev) {
Xiao Lid8bb93c2020-01-07 12:59:05 +0800155 pr_err("%s: dev is null\n", __func__);
Aditya Bavanaricfc65e82019-10-03 22:34:23 +0530156 return;
157 }
158
159 clk_dev = bolero_get_rsc_clk_device_ptr(dev->parent);
160 if (!clk_dev) {
161 pr_err("%s: Invalid rsc clk device\n", __func__);
162 return;
163 }
164
165 priv = dev_get_drvdata(clk_dev);
166 if (!priv) {
167 pr_err("%s: Invalid rsc clk private data\n", __func__);
168 return;
169 }
170 mutex_lock(&priv->rsc_clk_lock);
171 for (i = 0; i < MAX_CLK - NPL_CLK_OFFSET; i++) {
172 if (enable) {
173 if (priv->clk[i])
174 clk_prepare_enable(priv->clk[i]);
175 if (priv->clk[i + NPL_CLK_OFFSET])
176 clk_prepare_enable(
177 priv->clk[i + NPL_CLK_OFFSET]);
178 } else {
179 if (priv->clk[i + NPL_CLK_OFFSET])
180 clk_disable_unprepare(
181 priv->clk[i + NPL_CLK_OFFSET]);
182 if (priv->clk[i])
183 clk_disable_unprepare(priv->clk[i]);
184 }
185 }
186 mutex_unlock(&priv->rsc_clk_lock);
187 return;
188}
189EXPORT_SYMBOL(bolero_clk_rsc_enable_all_clocks);
190
Vidyakumar Athota5d45f4c2019-03-10 22:35:07 -0700191static int bolero_clk_rsc_mux0_clk_request(struct bolero_clk_rsc *priv,
192 int clk_id,
193 bool enable)
194{
195 int ret = 0;
196
197 if (enable) {
198 /* Enable Requested Core clk */
199 if (priv->clk_cnt[clk_id] == 0) {
200 ret = clk_prepare_enable(priv->clk[clk_id]);
201 if (ret < 0) {
202 dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
203 __func__, clk_id);
204 goto done;
205 }
206 if (priv->clk[clk_id + NPL_CLK_OFFSET]) {
207 ret = clk_prepare_enable(
208 priv->clk[clk_id + NPL_CLK_OFFSET]);
209 if (ret < 0) {
210 dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
211 __func__,
212 clk_id + NPL_CLK_OFFSET);
213 goto err;
214 }
215 }
216 }
217 priv->clk_cnt[clk_id]++;
218 } else {
219 if (priv->clk_cnt[clk_id] <= 0) {
220 dev_err_ratelimited(priv->dev, "%s: clk_id: %d is already disabled\n",
221 __func__, clk_id);
222 priv->clk_cnt[clk_id] = 0;
223 goto done;
224 }
225 priv->clk_cnt[clk_id]--;
226 if (priv->clk_cnt[clk_id] == 0) {
227 if (priv->clk[clk_id + NPL_CLK_OFFSET])
228 clk_disable_unprepare(
229 priv->clk[clk_id + NPL_CLK_OFFSET]);
230 clk_disable_unprepare(priv->clk[clk_id]);
231 }
232 }
233 return ret;
234
235err:
236 clk_disable_unprepare(priv->clk[clk_id]);
237done:
238 return ret;
239}
240
/*
 * bolero_clk_rsc_mux1_clk_request - refcounted enable/disable of a clock
 * that is driven through a gfmux from another macro's source (mux1 path).
 * @priv: rsc clk private data
 * @clk_id: requested core clock ID (must have a muxsel register)
 * @enable: true to take a reference, false to drop one
 *
 * On the 0 -> 1 transition the default (mux0) clock is briefly enabled so
 * the gfmux can be switched glitch-free to the requested source, then
 * released again; the 1 -> 0 transition mirrors this to switch back.
 * VA_CORE_CLK skips both the default-clock bracketing and the muxsel
 * write (see the inline workaround comments). The muxsel register is
 * only touched while priv->dev_up_gfmux is set, i.e. not during SSR.
 * Caller is expected to hold priv->rsc_clk_lock.
 *
 * Returns 0 on success or a negative error on failure to enable.
 */
static int bolero_clk_rsc_mux1_clk_request(struct bolero_clk_rsc *priv,
					   int clk_id,
					   bool enable)
{
	char __iomem *clk_muxsel = NULL;
	int ret = 0;
	int default_clk_id = priv->default_clk_id[clk_id];
	u32 muxsel = 0;

	clk_muxsel = bolero_clk_rsc_get_clk_muxsel(priv, clk_id);
	if (!clk_muxsel) {
		ret = -EINVAL;
		goto done;
	}

	if (enable) {
		if (priv->clk_cnt[clk_id] == 0) {
			/* Bracket the mux switch with the default clock */
			if (clk_id != VA_CORE_CLK) {
				ret = bolero_clk_rsc_mux0_clk_request(priv,
								default_clk_id,
								true);
				if (ret < 0)
					goto done;
			}

			ret = clk_prepare_enable(priv->clk[clk_id]);
			if (ret < 0) {
				dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
					__func__, clk_id);
				goto err_clk;
			}
			if (priv->clk[clk_id + NPL_CLK_OFFSET]) {
				ret = clk_prepare_enable(
					priv->clk[clk_id + NPL_CLK_OFFSET]);
				if (ret < 0) {
					dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
						__func__,
						clk_id + NPL_CLK_OFFSET);
					goto err_npl_clk;
				}
			}

			/*
			 * Temp SW workaround to address a glitch issue of
			 * VA GFMux instance responsible for switching from
			 * TX MCLK to VA MCLK. This configuration would be taken
			 * care in DSP itself
			 */
			if (clk_id != VA_CORE_CLK) {
				/* Skip the register write during SSR */
				if (priv->dev_up_gfmux) {
					iowrite32(0x1, clk_muxsel);
					/* read back for the trace only */
					muxsel = ioread32(clk_muxsel);
					trace_printk("%s: muxsel value after enable: %d\n",
							__func__, muxsel);
				}
				bolero_clk_rsc_mux0_clk_request(priv,
								default_clk_id,
								false);
			}
		}
		priv->clk_cnt[clk_id]++;
	} else {
		/* Clamp underflow instead of going negative */
		if (priv->clk_cnt[clk_id] <= 0) {
			dev_err_ratelimited(priv->dev, "%s: clk_id: %d is already disabled\n",
				__func__, clk_id);
			priv->clk_cnt[clk_id] = 0;
			goto done;
		}
		priv->clk_cnt[clk_id]--;
		if (priv->clk_cnt[clk_id] == 0) {
			/* Re-enable default clk to bracket the mux switch back */
			if (clk_id != VA_CORE_CLK) {
				ret = bolero_clk_rsc_mux0_clk_request(priv,
						default_clk_id, true);

				if (!ret) {
					/*
					 * Temp SW workaround to address a glitch issue
					 * of VA GFMux instance responsible for
					 * switching from TX MCLK to VA MCLK.
					 * This configuration would be taken
					 * care in DSP itself.
					 */
					if (priv->dev_up_gfmux) {
						iowrite32(0x0, clk_muxsel);
						muxsel = ioread32(clk_muxsel);
						trace_printk("%s: muxsel value after disable: %d\n",
								__func__, muxsel);
					}
				}
			}
			if (priv->clk[clk_id + NPL_CLK_OFFSET])
				clk_disable_unprepare(
					priv->clk[clk_id + NPL_CLK_OFFSET]);
			clk_disable_unprepare(priv->clk[clk_id]);

			/* Drop the bracketing default-clk reference */
			if (clk_id != VA_CORE_CLK) {
				if (!ret)
					bolero_clk_rsc_mux0_clk_request(priv,
						default_clk_id, false);
			}
		}
	}
	return ret;

err_npl_clk:
	clk_disable_unprepare(priv->clk[clk_id]);

err_clk:
	if (clk_id != VA_CORE_CLK)
		bolero_clk_rsc_mux0_clk_request(priv, default_clk_id, false);
done:
	return ret;
}
354
/*
 * bolero_clk_rsc_check_and_update_va_clk - keep the VA macro clocked from
 * the cheapest valid source as VA and TX usecases come and go.
 * @priv: rsc clk private data
 * @mux_switch: true when the original request went down the mux1 path
 * @clk_id: clock ID of the request that just happened
 * @enable: direction of that request
 *
 * When TX_CORE_CLK is already running, VA rides on it instead of
 * enabling VA_CORE_CLK separately; priv->va_tx_clk_cnt counts those
 * piggybacked references so they can be unwound when TX goes away.
 * Caller is expected to hold priv->rsc_clk_lock.
 *
 * Returns 0 on success or a negative error from the clk request helpers.
 */
static int bolero_clk_rsc_check_and_update_va_clk(struct bolero_clk_rsc *priv,
						  bool mux_switch,
						  int clk_id,
						  bool enable)
{
	int ret = 0;

	if (enable) {
		if (clk_id == VA_CORE_CLK && mux_switch) {
			/*
			 * Handle the following usecase scenarios during enable
			 * 1. VA only, Active clk is VA_CORE_CLK
			 * 2. record -> record + VA, Active clk is TX_CORE_CLK
			 */
			if (priv->clk_cnt[TX_CORE_CLK] == 0) {
				ret = bolero_clk_rsc_mux1_clk_request(priv,
						VA_CORE_CLK, enable);
				if (ret < 0)
					goto err;
			} else {
				ret = bolero_clk_rsc_mux0_clk_request(priv,
						TX_CORE_CLK, enable);
				if (ret < 0)
					goto err;
				priv->va_tx_clk_cnt++;
			}
		} else if ((priv->clk_cnt[TX_CORE_CLK] > 0) &&
			   (priv->clk_cnt[VA_CORE_CLK] > 0)) {
			/*
			 * Handle following concurrency scenario during enable
			 * 1. VA-> Record+VA, Increment TX CLK and Disable VA
			 * 2. VA-> Playback+VA, Increment TX CLK and Disable VA
			 */
			/* Migrate every VA reference onto TX, one at a time */
			while (priv->clk_cnt[VA_CORE_CLK] > 0) {
				ret = bolero_clk_rsc_mux0_clk_request(priv,
						TX_CORE_CLK, true);
				if (ret < 0)
					goto err;

				bolero_clk_rsc_mux1_clk_request(priv,
						VA_CORE_CLK, false);
				priv->va_tx_clk_cnt++;
			}
		}
	} else {
		if (clk_id == VA_CORE_CLK && mux_switch) {
			/*
			 * Handle the following usecase scenarios during disable
			 * 1. VA only, disable VA_CORE_CLK
			 * 2. Record + VA -> Record, decrement TX CLK count
			 */
			if (priv->clk_cnt[VA_CORE_CLK]) {
				bolero_clk_rsc_mux1_clk_request(priv,
						VA_CORE_CLK, enable);
			} else if (priv->va_tx_clk_cnt) {
				bolero_clk_rsc_mux0_clk_request(priv,
						TX_CORE_CLK, enable);
				priv->va_tx_clk_cnt--;
			}
		} else if (priv->va_tx_clk_cnt == priv->clk_cnt[TX_CORE_CLK]) {
			/*
			 * Handle the following usecase scenarios during disable
			 * Record+VA-> VA: enable VA CLK, decrement TX CLK count
			 */
			/* Migrate the piggybacked references back onto VA */
			while (priv->va_tx_clk_cnt) {
				ret = bolero_clk_rsc_mux1_clk_request(priv,
						VA_CORE_CLK, true);
				if (ret < 0)
					goto err;

				bolero_clk_rsc_mux0_clk_request(priv,
						TX_CORE_CLK, false);
				priv->va_tx_clk_cnt--;
			}
		}
	}

err:
	return ret;
}
435
/**
 * bolero_clk_rsc_fs_gen_request - request to enable/disable fs generation
 * sequence
 *
 * @dev: Macro device pointer
 * @enable: enable or disable flag
 *
 * Refcounted: the register writes run only on the 0 -> 1 transition
 * (fs_gen_seq applied forward, each value used as both mask and value)
 * and the 1 -> 0 transition (applied in reverse, clearing the bits).
 * Serialized by priv->fs_gen_lock.
 */
void bolero_clk_rsc_fs_gen_request(struct device *dev, bool enable)
{
	int i;
	struct regmap *regmap;
	struct device *clk_dev = NULL;
	struct bolero_clk_rsc *priv = NULL;

	if (!dev) {
		pr_err("%s: dev is null\n", __func__);
		return;
	}
	clk_dev = bolero_get_rsc_clk_device_ptr(dev->parent);
	if (!clk_dev) {
		pr_err("%s: Invalid rsc clk device\n", __func__);
		return;
	}
	priv = dev_get_drvdata(clk_dev);
	if (!priv) {
		pr_err("%s: Invalid rsc clk priviate data\n", __func__);
		return;
	}
	/* regmap belongs to the bolero parent, not this device */
	regmap = dev_get_regmap(priv->dev->parent, NULL);
	if (!regmap) {
		pr_err("%s: regmap is null\n", __func__);
		return;
	}
	mutex_lock(&priv->fs_gen_lock);
	if (enable) {
		if (priv->reg_seq_en_cnt++ == 0) {
			/* fs_gen_seq is flat [reg, value, reg, value, ...] */
			for (i = 0; i < (priv->num_fs_reg * 2); i += 2) {
				dev_dbg(priv->dev, "%s: Register: %d, value: %d\n",
					__func__, priv->fs_gen_seq[i],
					priv->fs_gen_seq[i + 1]);
				regmap_update_bits(regmap,
						   priv->fs_gen_seq[i],
						   priv->fs_gen_seq[i + 1],
						   priv->fs_gen_seq[i + 1]);
			}
		}
	} else {
		/* Clamp underflow instead of going negative */
		if (priv->reg_seq_en_cnt <= 0) {
			dev_err_ratelimited(priv->dev, "%s: req_seq_cnt: %d is already disabled\n",
				__func__, priv->reg_seq_en_cnt);
			priv->reg_seq_en_cnt = 0;
			mutex_unlock(&priv->fs_gen_lock);
			return;
		}
		if (--priv->reg_seq_en_cnt == 0) {
			/* tear down in reverse order of the enable sequence */
			for (i = ((priv->num_fs_reg - 1) * 2); i >= 0; i -= 2) {
				dev_dbg(priv->dev, "%s: Register: %d, value: %d\n",
					__func__, priv->fs_gen_seq[i],
					priv->fs_gen_seq[i + 1]);
				regmap_update_bits(regmap, priv->fs_gen_seq[i],
						   priv->fs_gen_seq[i + 1], 0x0);
			}
		}
	}
	mutex_unlock(&priv->fs_gen_lock);
}
EXPORT_SYMBOL(bolero_clk_rsc_fs_gen_request);
503
/**
 * bolero_clk_rsc_request_clock - request for clock to
 * enable/disable
 *
 * @dev: Macro device pointer.
 * @default_clk_id: mux0 Core clock ID input.
 * @clk_id_req: Core clock ID requested to enable/disable
 * @enable: enable or disable clock flag
 *
 * When clk_id_req differs from default_clk_id the request is treated as
 * a mux switch and routed through the mux1 path (except VA_CORE_CLK,
 * which is handled entirely by the VA/TX bookkeeping helper). Enables
 * are rejected while SSR is in progress (priv->dev_up cleared).
 *
 * Returns 0 on success or -EINVAL on error.
 */
int bolero_clk_rsc_request_clock(struct device *dev,
				 int default_clk_id,
				 int clk_id_req,
				 bool enable)
{
	int ret = 0;
	struct device *clk_dev = NULL;
	struct bolero_clk_rsc *priv = NULL;
	bool mux_switch = false;

	if (!dev) {
		pr_err("%s: dev is null\n", __func__);
		return -EINVAL;
	}
	if ((clk_id_req < 0 || clk_id_req >= MAX_CLK) &&
	    (default_clk_id < 0 || default_clk_id >= MAX_CLK)) {
		pr_err("%s: Invalid clk_id_req: %d or default_clk_id: %d\n",
			__func__, clk_id_req, default_clk_id);
		return -EINVAL;
	}
	clk_dev = bolero_get_rsc_clk_device_ptr(dev->parent);
	if (!clk_dev) {
		pr_err("%s: Invalid rsc clk device\n", __func__);
		return -EINVAL;
	}
	priv = dev_get_drvdata(clk_dev);
	if (!priv) {
		pr_err("%s: Invalid rsc clk priviate data\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&priv->rsc_clk_lock);
	/* Disables are still allowed during SSR; only enables are refused */
	if (!priv->dev_up && enable) {
		dev_err_ratelimited(priv->dev, "%s: SSR is in progress..\n",
				__func__);
		trace_printk("%s: SSR is in progress..\n", __func__);
		ret = -EINVAL;
		goto err;
	}
	/* Remember the mux0 fallback for later mux1 transitions */
	priv->default_clk_id[clk_id_req] = default_clk_id;
	if (default_clk_id != clk_id_req)
		mux_switch = true;

	if (mux_switch) {
		/* VA_CORE_CLK is handled by the VA/TX helper below */
		if (clk_id_req != VA_CORE_CLK) {
			ret = bolero_clk_rsc_mux1_clk_request(priv, clk_id_req,
							      enable);
			if (ret < 0)
				goto err;
		}
	} else {
		ret = bolero_clk_rsc_mux0_clk_request(priv, clk_id_req, enable);
		if (ret < 0)
			goto err;
	}

	ret = bolero_clk_rsc_check_and_update_va_clk(priv, mux_switch,
						     clk_id_req,
						     enable);
	if (ret < 0)
		goto err;

	dev_dbg(priv->dev, "%s: clk_cnt: %d for requested clk: %d, enable: %d\n",
		__func__, priv->clk_cnt[clk_id_req], clk_id_req,
		enable);
	trace_printk("%s: clk_cnt: %d for requested clk: %d, enable: %d\n",
		__func__, priv->clk_cnt[clk_id_req], clk_id_req,
		enable);

	mutex_unlock(&priv->rsc_clk_lock);

	return 0;

err:
	mutex_unlock(&priv->rsc_clk_lock);
	return ret;
}
EXPORT_SYMBOL(bolero_clk_rsc_request_clock);
593
594
595static int bolero_clk_rsc_probe(struct platform_device *pdev)
596{
597 int ret = 0, fs_gen_size, i, j;
598 const char **clk_name_array;
599 int clk_cnt;
600 struct clk *clk;
601 struct bolero_clk_rsc *priv = NULL;
602 u32 muxsel = 0;
603
604 priv = devm_kzalloc(&pdev->dev, sizeof(struct bolero_clk_rsc),
605 GFP_KERNEL);
606 if (!priv)
607 return -ENOMEM;
608
609 /* Get clk fs gen sequence from device tree */
610 if (!of_find_property(pdev->dev.of_node, "qcom,fs-gen-sequence",
611 &fs_gen_size)) {
612 dev_err(&pdev->dev, "%s: unable to find qcom,fs-gen-sequence property\n",
613 __func__);
614 ret = -EINVAL;
615 goto err;
616 }
617 priv->num_fs_reg = fs_gen_size/(2 * sizeof(u32));
618 priv->fs_gen_seq = devm_kzalloc(&pdev->dev, fs_gen_size, GFP_KERNEL);
619 if (!priv->fs_gen_seq) {
620 ret = -ENOMEM;
621 goto err;
622 }
623 dev_dbg(&pdev->dev, "%s: num_fs_reg %d\n", __func__, priv->num_fs_reg);
624 /* Parse fs-gen-sequence */
625 ret = of_property_read_u32_array(pdev->dev.of_node,
626 "qcom,fs-gen-sequence",
627 priv->fs_gen_seq,
628 priv->num_fs_reg * 2);
629 if (ret < 0) {
630 dev_err(&pdev->dev, "%s: unable to parse fs-gen-sequence, ret = %d\n",
631 __func__, ret);
632 goto err;
633 }
634
635 /* Get clk details from device tree */
636 clk_cnt = of_property_count_strings(pdev->dev.of_node, "clock-names");
637 if (clk_cnt <= 0 || clk_cnt > MAX_CLK) {
638 dev_err(&pdev->dev, "%s: Invalid number of clocks %d",
639 __func__, clk_cnt);
640 ret = -EINVAL;
641 goto err;
642 }
643 clk_name_array = devm_kzalloc(&pdev->dev, clk_cnt * sizeof(char *),
644 GFP_KERNEL);
Karthikeyan Mani9aca5b12019-05-01 10:47:43 -0700645 if (!clk_name_array) {
646 ret = -ENOMEM;
647 goto err;
648 }
Vidyakumar Athota5d45f4c2019-03-10 22:35:07 -0700649
650 ret = of_property_read_string_array(pdev->dev.of_node, "clock-names",
651 clk_name_array, clk_cnt);
652
653 for (i = 0; i < MAX_CLK; i++) {
654 priv->clk[i] = NULL;
655 for (j = 0; j < clk_cnt; j++) {
656 if (!strcmp(clk_src_name[i], clk_name_array[j])) {
657 clk = devm_clk_get(&pdev->dev, clk_src_name[i]);
658 if (IS_ERR(clk)) {
659 ret = PTR_ERR(clk);
660 dev_err(&pdev->dev, "%s: clk get failed for %s with ret %d\n",
661 __func__, clk_src_name[i], ret);
662 goto err;
663 }
664 priv->clk[i] = clk;
665 dev_dbg(&pdev->dev, "%s: clk get success for clk name %s\n",
666 __func__, clk_src_name[i]);
667 }
668 }
669 }
670 ret = of_property_read_u32(pdev->dev.of_node,
671 "qcom,rx_mclk_mode_muxsel", &muxsel);
672 if (ret) {
673 dev_dbg(&pdev->dev, "%s: could not find qcom,rx_mclk_mode_muxsel entry in dt\n",
674 __func__);
675 } else {
676 priv->rx_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
677 if (!priv->rx_clk_muxsel) {
678 dev_err(&pdev->dev, "%s: ioremap failed for rx muxsel\n",
679 __func__);
680 return -ENOMEM;
681 }
682 }
683 ret = of_property_read_u32(pdev->dev.of_node,
684 "qcom,wsa_mclk_mode_muxsel", &muxsel);
685 if (ret) {
686 dev_dbg(&pdev->dev, "%s: could not find qcom,wsa_mclk_mode_muxsel entry in dt\n",
687 __func__);
688 } else {
689 priv->wsa_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
690 if (!priv->wsa_clk_muxsel) {
691 dev_err(&pdev->dev, "%s: ioremap failed for wsa muxsel\n",
692 __func__);
693 return -ENOMEM;
694 }
695 }
696 ret = of_property_read_u32(pdev->dev.of_node,
697 "qcom,va_mclk_mode_muxsel", &muxsel);
698 if (ret) {
699 dev_dbg(&pdev->dev, "%s: could not find qcom,va_mclk_mode_muxsel entry in dt\n",
700 __func__);
701 } else {
702 priv->va_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
703 if (!priv->va_clk_muxsel) {
704 dev_err(&pdev->dev, "%s: ioremap failed for va muxsel\n",
705 __func__);
706 return -ENOMEM;
707 }
708 }
709
710 ret = bolero_register_res_clk(&pdev->dev, bolero_clk_rsc_cb);
711 if (ret < 0) {
712 dev_err(&pdev->dev, "%s: Failed to register cb %d",
713 __func__, ret);
714 goto err;
715 }
716 priv->dev = &pdev->dev;
717 priv->dev_up = true;
Aditya Bavanarib258e092020-01-20 20:38:05 +0530718 priv->dev_up_gfmux = true;
Vidyakumar Athota5d45f4c2019-03-10 22:35:07 -0700719 mutex_init(&priv->rsc_clk_lock);
Aditya Bavanari7100fb82019-12-06 19:49:01 +0530720 mutex_init(&priv->fs_gen_lock);
Vidyakumar Athota5d45f4c2019-03-10 22:35:07 -0700721 dev_set_drvdata(&pdev->dev, priv);
722
723err:
724 return ret;
725}
726
727static int bolero_clk_rsc_remove(struct platform_device *pdev)
728{
729 struct bolero_clk_rsc *priv = dev_get_drvdata(&pdev->dev);
730
731 bolero_unregister_res_clk(&pdev->dev);
732 of_platform_depopulate(&pdev->dev);
733 if (!priv)
734 return -EINVAL;
735 mutex_destroy(&priv->rsc_clk_lock);
Aditya Bavanari7100fb82019-12-06 19:49:01 +0530736 mutex_destroy(&priv->fs_gen_lock);
Vidyakumar Athota5d45f4c2019-03-10 22:35:07 -0700737
738 return 0;
739}
740
/* DT match table binding this driver to the bolero clk rsc manager node */
static const struct of_device_id bolero_clk_rsc_dt_match[] = {
	{.compatible = "qcom,bolero-clk-rsc-mngr"},
	{}
};
MODULE_DEVICE_TABLE(of, bolero_clk_rsc_dt_match);
746
/* Platform driver definition; userspace bind/unbind via sysfs is suppressed */
static struct platform_driver bolero_clk_rsc_mgr = {
	.driver = {
		.name = "bolero-clk-rsc-mngr",
		.owner = THIS_MODULE,
		.of_match_table = bolero_clk_rsc_dt_match,
		.suppress_bind_attrs = true,
	},
	.probe = bolero_clk_rsc_probe,
	.remove = bolero_clk_rsc_remove,
};
757
/* Register the bolero clk rsc manager platform driver */
int bolero_clk_rsc_mgr_init(void)
{
	return platform_driver_register(&bolero_clk_rsc_mgr);
}
762
/* Unregister the bolero clk rsc manager platform driver */
void bolero_clk_rsc_mgr_exit(void)
{
	platform_driver_unregister(&bolero_clk_rsc_mgr);
}
MODULE_DESCRIPTION("Bolero clock resource manager driver");
MODULE_LICENSE("GPL v2");