// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2019, The Linux Foundation. All rights reserved.
 */

#include <linux/of_platform.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include "bolero-cdc.h"
#include "bolero-clk-rsc.h"

#define DRV_NAME "bolero-clk-rsc"
#define BOLERO_CLK_NAME_LENGTH 30
#define NPL_CLK_OFFSET (TX_NPL_CLK - TX_CORE_CLK)

static char clk_src_name[MAX_CLK][BOLERO_CLK_NAME_LENGTH] = {
	"tx_core_clk",
	"rx_core_clk",
	"wsa_core_clk",
	"va_core_clk",
	"tx_npl_clk",
	"rx_npl_clk",
	"wsa_npl_clk",
	"va_npl_clk",
};

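/*
 * struct bolero_clk_rsc - clock resource manager state
 * (descriptive comment added for clarity; field roles inferred from their
 * use in this file)
 * @dev: clock resource manager device
 * @rsc_clk_lock: serializes clock enable/disable requests
 * @fs_gen_lock: serializes fs generation sequence requests
 * @clk: core/NPL clock handles indexed by clock ID
 * @clk_cnt: per clock ID enable reference count
 * @reg_seq_en_cnt: reference count for the fs generation sequence
 * @va_tx_clk_cnt: number of VA requests currently served by TX_CORE_CLK
 * @dev_up: false while subsystem restart (SSR) is in progress
 * @num_fs_reg: number of register/value pairs in @fs_gen_seq
 * @fs_gen_seq: register/value pairs from qcom,fs-gen-sequence
 * @default_clk_id: default (mux0) clock ID recorded per requested clock
 * @regmap: codec regmap handle
 * @rx_clk_muxsel: ioremapped RX MCLK mode mux select register
 * @wsa_clk_muxsel: ioremapped WSA MCLK mode mux select register
 * @va_clk_muxsel: ioremapped VA MCLK mode mux select register
 */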
struct bolero_clk_rsc {
	struct device *dev;
	struct mutex rsc_clk_lock;
	struct mutex fs_gen_lock;
	struct clk *clk[MAX_CLK];
	int clk_cnt[MAX_CLK];
	int reg_seq_en_cnt;
	int va_tx_clk_cnt;
	bool dev_up;
	u32 num_fs_reg;
	u32 *fs_gen_seq;
	int default_clk_id[MAX_CLK];
	struct regmap *regmap;
	char __iomem *rx_clk_muxsel;
	char __iomem *wsa_clk_muxsel;
	char __iomem *va_clk_muxsel;
};

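/*
 * SSR notifier callback registered with the bolero codec driver: tracks
 * subsystem up/down state so that clock requests can be rejected while a
 * subsystem restart is in progress.
 */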
static int bolero_clk_rsc_cb(struct device *dev, u16 event)
{
	struct bolero_clk_rsc *priv;

	if (!dev) {
		pr_err("%s: Invalid device pointer\n",
			__func__);
		return -EINVAL;
	}

	priv = dev_get_drvdata(dev);
	if (!priv) {
		pr_err("%s: Invalid clk rsc private data\n",
			__func__);
		return -EINVAL;
	}

	mutex_lock(&priv->rsc_clk_lock);
	if (event == BOLERO_MACRO_EVT_SSR_UP)
		priv->dev_up = true;
	else if (event == BOLERO_MACRO_EVT_SSR_DOWN)
		priv->dev_up = false;
	mutex_unlock(&priv->rsc_clk_lock);

	return 0;
}

static char __iomem *bolero_clk_rsc_get_clk_muxsel(struct bolero_clk_rsc *priv,
						   int clk_id)
{
	switch (clk_id) {
	case RX_CORE_CLK:
		return priv->rx_clk_muxsel;
	case WSA_CORE_CLK:
		return priv->wsa_clk_muxsel;
	case VA_CORE_CLK:
		return priv->va_clk_muxsel;
	case TX_CORE_CLK:
	default:
		dev_err_ratelimited(priv->dev, "%s: Invalid case\n", __func__);
		break;
	}

	return NULL;
}

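/**
 * bolero_rsc_clk_reset - reset a core clock and its NPL clock after SSR
 *
 * @dev: Macro device pointer
 * @clk_id: Core clock ID to reset
 *
 * Disables the clock pair until the clock framework reports it disabled,
 * then re-enables it the same number of times so the enable count is
 * preserved while the hardware is cycled after subsystem restart.
 *
 * Returns 0 on success or -EINVAL on error.
 */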
int bolero_rsc_clk_reset(struct device *dev, int clk_id)
{
	struct device *clk_dev = NULL;
	struct bolero_clk_rsc *priv = NULL;
	int count = 0;

	if (!dev) {
		pr_err("%s: dev is null\n", __func__);
		return -EINVAL;
	}

	if (clk_id < 0 || clk_id >= MAX_CLK - NPL_CLK_OFFSET) {
		pr_err("%s: Invalid clk_id: %d\n",
			__func__, clk_id);
		return -EINVAL;
	}

	clk_dev = bolero_get_rsc_clk_device_ptr(dev->parent);
	if (!clk_dev) {
		pr_err("%s: Invalid rsc clk device\n", __func__);
		return -EINVAL;
	}

	priv = dev_get_drvdata(clk_dev);
	if (!priv) {
		pr_err("%s: Invalid rsc clk private data\n", __func__);
		return -EINVAL;
	}
	mutex_lock(&priv->rsc_clk_lock);
	while (__clk_is_enabled(priv->clk[clk_id])) {
		clk_disable_unprepare(priv->clk[clk_id + NPL_CLK_OFFSET]);
		clk_disable_unprepare(priv->clk[clk_id]);
		count++;
	}
	dev_dbg(priv->dev,
		"%s: clock reset after ssr, count %d\n", __func__, count);
	while (count--) {
		clk_prepare_enable(priv->clk[clk_id]);
		clk_prepare_enable(priv->clk[clk_id + NPL_CLK_OFFSET]);
	}
	mutex_unlock(&priv->rsc_clk_lock);
	return 0;
}
EXPORT_SYMBOL(bolero_rsc_clk_reset);

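/**
 * bolero_clk_rsc_enable_all_clocks - enable/disable all registered clocks
 *
 * @dev: Macro device pointer
 * @enable: true to enable every available core/NPL clock pair,
 *          false to disable them
 */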
void bolero_clk_rsc_enable_all_clocks(struct device *dev, bool enable)
{
	struct device *clk_dev = NULL;
	struct bolero_clk_rsc *priv = NULL;
	int i = 0;

	if (!dev) {
		pr_err("%s: dev is null\n", __func__);
		return;
	}

	clk_dev = bolero_get_rsc_clk_device_ptr(dev->parent);
	if (!clk_dev) {
		pr_err("%s: Invalid rsc clk device\n", __func__);
		return;
	}

	priv = dev_get_drvdata(clk_dev);
	if (!priv) {
		pr_err("%s: Invalid rsc clk private data\n", __func__);
		return;
	}
	mutex_lock(&priv->rsc_clk_lock);
	for (i = 0; i < MAX_CLK - NPL_CLK_OFFSET; i++) {
		if (enable) {
			if (priv->clk[i])
				clk_prepare_enable(priv->clk[i]);
			if (priv->clk[i + NPL_CLK_OFFSET])
				clk_prepare_enable(
					priv->clk[i + NPL_CLK_OFFSET]);
		} else {
			if (priv->clk[i + NPL_CLK_OFFSET])
				clk_disable_unprepare(
					priv->clk[i + NPL_CLK_OFFSET]);
			if (priv->clk[i])
				clk_disable_unprepare(priv->clk[i]);
		}
	}
	mutex_unlock(&priv->rsc_clk_lock);
	return;
}
EXPORT_SYMBOL(bolero_clk_rsc_enable_all_clocks);

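/*
 * Reference-counted enable/disable of a core clock and its paired NPL
 * clock on the default (mux0) clock source.
 */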
static int bolero_clk_rsc_mux0_clk_request(struct bolero_clk_rsc *priv,
					   int clk_id,
					   bool enable)
{
	int ret = 0;

	if (enable) {
		/* Enable Requested Core clk */
		if (priv->clk_cnt[clk_id] == 0) {
			ret = clk_prepare_enable(priv->clk[clk_id]);
			if (ret < 0) {
				dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
						    __func__, clk_id);
				goto done;
			}
			if (priv->clk[clk_id + NPL_CLK_OFFSET]) {
				ret = clk_prepare_enable(
					priv->clk[clk_id + NPL_CLK_OFFSET]);
				if (ret < 0) {
					dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
							    __func__,
							    clk_id + NPL_CLK_OFFSET);
					goto err;
				}
			}
		}
		priv->clk_cnt[clk_id]++;
	} else {
		if (priv->clk_cnt[clk_id] <= 0) {
			dev_err_ratelimited(priv->dev, "%s: clk_id: %d is already disabled\n",
					    __func__, clk_id);
			priv->clk_cnt[clk_id] = 0;
			goto done;
		}
		priv->clk_cnt[clk_id]--;
		if (priv->clk_cnt[clk_id] == 0) {
			if (priv->clk[clk_id + NPL_CLK_OFFSET])
				clk_disable_unprepare(
					priv->clk[clk_id + NPL_CLK_OFFSET]);
			clk_disable_unprepare(priv->clk[clk_id]);
		}
	}
	return ret;

err:
	clk_disable_unprepare(priv->clk[clk_id]);
done:
	return ret;
}

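/*
 * Enable/disable a core clock routed through mux1. The default (mux0)
 * clock is enabled temporarily while the MCLK mode mux select register is
 * written, then released again.
 */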
static int bolero_clk_rsc_mux1_clk_request(struct bolero_clk_rsc *priv,
					   int clk_id,
					   bool enable)
{
	char __iomem *clk_muxsel = NULL;
	int ret = 0;
	int default_clk_id = priv->default_clk_id[clk_id];

	clk_muxsel = bolero_clk_rsc_get_clk_muxsel(priv, clk_id);
	if (!clk_muxsel) {
		ret = -EINVAL;
		goto done;
	}

	if (enable) {
		if (priv->clk_cnt[clk_id] == 0) {
			ret = bolero_clk_rsc_mux0_clk_request(priv, default_clk_id,
							      true);
			if (ret < 0)
				goto done;

			ret = clk_prepare_enable(priv->clk[clk_id]);
			if (ret < 0) {
				dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
						    __func__, clk_id);
				goto err_clk;
			}
			if (priv->clk[clk_id + NPL_CLK_OFFSET]) {
				ret = clk_prepare_enable(
					priv->clk[clk_id + NPL_CLK_OFFSET]);
				if (ret < 0) {
					dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
							    __func__,
							    clk_id + NPL_CLK_OFFSET);
					goto err_npl_clk;
				}
			}
			iowrite32(0x1, clk_muxsel);
			bolero_clk_rsc_mux0_clk_request(priv, default_clk_id,
							false);
		}
		priv->clk_cnt[clk_id]++;
	} else {
		if (priv->clk_cnt[clk_id] <= 0) {
			dev_err_ratelimited(priv->dev, "%s: clk_id: %d is already disabled\n",
					    __func__, clk_id);
			priv->clk_cnt[clk_id] = 0;
			goto done;
		}
		priv->clk_cnt[clk_id]--;
		if (priv->clk_cnt[clk_id] == 0) {
			ret = bolero_clk_rsc_mux0_clk_request(priv,
						default_clk_id, true);

			if (!ret)
				iowrite32(0x0, clk_muxsel);

			if (priv->clk[clk_id + NPL_CLK_OFFSET])
				clk_disable_unprepare(
					priv->clk[clk_id + NPL_CLK_OFFSET]);
			clk_disable_unprepare(priv->clk[clk_id]);

			if (!ret)
				bolero_clk_rsc_mux0_clk_request(priv,
						default_clk_id, false);
		}
	}
	return ret;

err_npl_clk:
	clk_disable_unprepare(priv->clk[clk_id]);

err_clk:
	bolero_clk_rsc_mux0_clk_request(priv, default_clk_id, false);
done:
	return ret;
}

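/*
 * Keep VA_CORE_CLK and TX_CORE_CLK usage consistent when VA and record
 * use cases run concurrently: VA users are migrated onto TX_CORE_CLK while
 * the TX clock is active and moved back when it goes away.
 */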
static int bolero_clk_rsc_check_and_update_va_clk(struct bolero_clk_rsc *priv,
						  bool mux_switch,
						  int clk_id,
						  bool enable)
{
	int ret = 0;

	if (enable) {
		if (clk_id == VA_CORE_CLK && mux_switch) {
			/*
			 * Handle the following usecase scenarios during enable
			 * 1. VA only, Active clk is VA_CORE_CLK
			 * 2. record -> record + VA, Active clk is TX_CORE_CLK
			 */
			if (priv->clk_cnt[TX_CORE_CLK] == 0) {
				ret = bolero_clk_rsc_mux1_clk_request(priv,
						VA_CORE_CLK, enable);
				if (ret < 0)
					goto err;
			} else {
				ret = bolero_clk_rsc_mux0_clk_request(priv,
						TX_CORE_CLK, enable);
				if (ret < 0)
					goto err;
				priv->va_tx_clk_cnt++;
			}
		} else if ((priv->clk_cnt[TX_CORE_CLK] > 0) &&
			   (priv->clk_cnt[VA_CORE_CLK] > 0)) {
			/*
			 * Handle following concurrency scenario during enable
			 * 1. VA-> Record+VA, Increment TX CLK and Disable VA
			 * 2. VA-> Playback+VA, Increment TX CLK and Disable VA
			 */
			while (priv->clk_cnt[VA_CORE_CLK] > 0) {
				ret = bolero_clk_rsc_mux0_clk_request(priv,
						TX_CORE_CLK, true);
				if (ret < 0)
					goto err;

				bolero_clk_rsc_mux1_clk_request(priv,
						VA_CORE_CLK, false);
				priv->va_tx_clk_cnt++;
			}
		}
	} else {
		if (clk_id == VA_CORE_CLK && mux_switch) {
			/*
			 * Handle the following usecase scenarios during disable
			 * 1. VA only, disable VA_CORE_CLK
			 * 2. Record + VA -> Record, decrement TX CLK count
			 */
			if (priv->clk_cnt[VA_CORE_CLK]) {
				bolero_clk_rsc_mux1_clk_request(priv,
						VA_CORE_CLK, enable);
			} else if (priv->va_tx_clk_cnt) {
				bolero_clk_rsc_mux0_clk_request(priv,
						TX_CORE_CLK, enable);
				priv->va_tx_clk_cnt--;
			}
		} else if (priv->va_tx_clk_cnt == priv->clk_cnt[TX_CORE_CLK]) {
			/*
			 * Handle the following usecase scenarios during disable
			 * Record+VA-> VA: enable VA CLK, decrement TX CLK count
			 */
			while (priv->va_tx_clk_cnt) {
				ret = bolero_clk_rsc_mux1_clk_request(priv,
						VA_CORE_CLK, true);
				if (ret < 0)
					goto err;

				bolero_clk_rsc_mux0_clk_request(priv,
						TX_CORE_CLK, false);
				priv->va_tx_clk_cnt--;
			}
		}
	}

err:
	return ret;
}

/**
 * bolero_clk_rsc_fs_gen_request - request to enable/disable fs generation
 * sequence
 *
 * @dev: Macro device pointer
 * @enable: enable or disable flag
 */
void bolero_clk_rsc_fs_gen_request(struct device *dev, bool enable)
{
	int i;
	struct regmap *regmap;
	struct device *clk_dev = NULL;
	struct bolero_clk_rsc *priv = NULL;

	if (!dev) {
		pr_err("%s: dev is null\n", __func__);
		return;
	}
	clk_dev = bolero_get_rsc_clk_device_ptr(dev->parent);
	if (!clk_dev) {
		pr_err("%s: Invalid rsc clk device\n", __func__);
		return;
	}
	priv = dev_get_drvdata(clk_dev);
	if (!priv) {
		pr_err("%s: Invalid rsc clk private data\n", __func__);
		return;
	}
	regmap = dev_get_regmap(priv->dev->parent, NULL);
	if (!regmap) {
		pr_err("%s: regmap is null\n", __func__);
		return;
	}
	mutex_lock(&priv->fs_gen_lock);
	if (enable) {
		if (priv->reg_seq_en_cnt++ == 0) {
			for (i = 0; i < (priv->num_fs_reg * 2); i += 2) {
				dev_dbg(priv->dev, "%s: Register: %d, value: %d\n",
					__func__, priv->fs_gen_seq[i],
					priv->fs_gen_seq[i + 1]);
				regmap_update_bits(regmap,
						   priv->fs_gen_seq[i],
						   priv->fs_gen_seq[i + 1],
						   priv->fs_gen_seq[i + 1]);
			}
		}
	} else {
		if (priv->reg_seq_en_cnt <= 0) {
			dev_err_ratelimited(priv->dev, "%s: reg_seq_en_cnt: %d is already disabled\n",
					    __func__, priv->reg_seq_en_cnt);
			priv->reg_seq_en_cnt = 0;
			mutex_unlock(&priv->fs_gen_lock);
			return;
		}
		if (--priv->reg_seq_en_cnt == 0) {
			for (i = ((priv->num_fs_reg - 1) * 2); i >= 0; i -= 2) {
				dev_dbg(priv->dev, "%s: Register: %d, value: %d\n",
					__func__, priv->fs_gen_seq[i],
					priv->fs_gen_seq[i + 1]);
				regmap_update_bits(regmap, priv->fs_gen_seq[i],
						   priv->fs_gen_seq[i + 1], 0x0);
			}
		}
	}
	mutex_unlock(&priv->fs_gen_lock);
}
EXPORT_SYMBOL(bolero_clk_rsc_fs_gen_request);
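/*
 * Illustrative call sequence only (not taken from a specific macro driver):
 * a caller is expected to bracket sample-rate dependent register
 * programming with a matched enable/disable pair, e.g.
 *
 *	bolero_clk_rsc_fs_gen_request(dev, true);
 *	...program sample-rate dependent registers...
 *	bolero_clk_rsc_fs_gen_request(dev, false);
 */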

/**
 * bolero_clk_rsc_request_clock - request to enable/disable a core clock
 *
 * @dev: Macro device pointer.
 * @default_clk_id: mux0 Core clock ID input.
 * @clk_id_req: Core clock ID requested to enable/disable
 * @enable: enable or disable clock flag
 *
 * Returns 0 on success or -EINVAL on error.
 */
int bolero_clk_rsc_request_clock(struct device *dev,
				 int default_clk_id,
				 int clk_id_req,
				 bool enable)
{
	int ret = 0;
	struct device *clk_dev = NULL;
	struct bolero_clk_rsc *priv = NULL;
	bool mux_switch = false;

	if (!dev) {
		pr_err("%s: dev is null\n", __func__);
		return -EINVAL;
	}
	if (clk_id_req < 0 || clk_id_req >= MAX_CLK ||
	    default_clk_id < 0 || default_clk_id >= MAX_CLK) {
		pr_err("%s: Invalid clk_id_req: %d or default_clk_id: %d\n",
			__func__, clk_id_req, default_clk_id);
		return -EINVAL;
	}
	clk_dev = bolero_get_rsc_clk_device_ptr(dev->parent);
	if (!clk_dev) {
		pr_err("%s: Invalid rsc clk device\n", __func__);
		return -EINVAL;
	}
	priv = dev_get_drvdata(clk_dev);
	if (!priv) {
		pr_err("%s: Invalid rsc clk private data\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&priv->rsc_clk_lock);
	if (!priv->dev_up && enable) {
		dev_err_ratelimited(priv->dev, "%s: SSR is in progress..\n",
				    __func__);
		ret = -EINVAL;
		goto err;
	}
	priv->default_clk_id[clk_id_req] = default_clk_id;
	if (default_clk_id != clk_id_req)
		mux_switch = true;

	if (mux_switch) {
		if (clk_id_req != VA_CORE_CLK) {
			ret = bolero_clk_rsc_mux1_clk_request(priv, clk_id_req,
							      enable);
			if (ret < 0)
				goto err;
		}
	} else {
		ret = bolero_clk_rsc_mux0_clk_request(priv, clk_id_req, enable);
		if (ret < 0)
			goto err;
	}

	ret = bolero_clk_rsc_check_and_update_va_clk(priv, mux_switch,
						     clk_id_req,
						     enable);
	if (ret < 0)
		goto err;

	dev_dbg(priv->dev, "%s: clk_cnt: %d for requested clk: %d, enable: %d\n",
		__func__, priv->clk_cnt[clk_id_req], clk_id_req,
		enable);

	mutex_unlock(&priv->rsc_clk_lock);

	return 0;

err:
	mutex_unlock(&priv->rsc_clk_lock);
	return ret;
}
EXPORT_SYMBOL(bolero_clk_rsc_request_clock);
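/*
 * Illustrative call sequence only (clock ID chosen as an example): a macro
 * driver requests its core clock before use and releases it symmetrically,
 * passing the same ID for default_clk_id when no mux switch is needed, e.g.
 *
 *	ret = bolero_clk_rsc_request_clock(dev, RX_CORE_CLK, RX_CORE_CLK,
 *					   true);
 *	if (ret < 0)
 *		return ret;
 *	...use the clock...
 *	bolero_clk_rsc_request_clock(dev, RX_CORE_CLK, RX_CORE_CLK, false);
 */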
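/*
 * Probe: parse qcom,fs-gen-sequence, the named clocks and the optional
 * MCLK mode mux select registers from the device tree, then register with
 * the bolero codec driver for SSR notifications.
 */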
static int bolero_clk_rsc_probe(struct platform_device *pdev)
{
	int ret = 0, fs_gen_size, i, j;
	const char **clk_name_array;
	int clk_cnt;
	struct clk *clk;
	struct bolero_clk_rsc *priv = NULL;
	u32 muxsel = 0;

	priv = devm_kzalloc(&pdev->dev, sizeof(struct bolero_clk_rsc),
			    GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Get clk fs gen sequence from device tree */
	if (!of_find_property(pdev->dev.of_node, "qcom,fs-gen-sequence",
			      &fs_gen_size)) {
		dev_err(&pdev->dev, "%s: unable to find qcom,fs-gen-sequence property\n",
			__func__);
		ret = -EINVAL;
		goto err;
	}
	priv->num_fs_reg = fs_gen_size / (2 * sizeof(u32));
	priv->fs_gen_seq = devm_kzalloc(&pdev->dev, fs_gen_size, GFP_KERNEL);
	if (!priv->fs_gen_seq) {
		ret = -ENOMEM;
		goto err;
	}
	dev_dbg(&pdev->dev, "%s: num_fs_reg %d\n", __func__, priv->num_fs_reg);
	/* Parse fs-gen-sequence */
	ret = of_property_read_u32_array(pdev->dev.of_node,
					 "qcom,fs-gen-sequence",
					 priv->fs_gen_seq,
					 priv->num_fs_reg * 2);
	if (ret < 0) {
		dev_err(&pdev->dev, "%s: unable to parse fs-gen-sequence, ret = %d\n",
			__func__, ret);
		goto err;
	}

	/* Get clk details from device tree */
	clk_cnt = of_property_count_strings(pdev->dev.of_node, "clock-names");
	if (clk_cnt <= 0 || clk_cnt > MAX_CLK) {
		dev_err(&pdev->dev, "%s: Invalid number of clocks %d\n",
			__func__, clk_cnt);
		ret = -EINVAL;
		goto err;
	}
	clk_name_array = devm_kzalloc(&pdev->dev, clk_cnt * sizeof(char *),
				      GFP_KERNEL);
	if (!clk_name_array) {
		ret = -ENOMEM;
		goto err;
	}

	ret = of_property_read_string_array(pdev->dev.of_node, "clock-names",
					    clk_name_array, clk_cnt);

	for (i = 0; i < MAX_CLK; i++) {
		priv->clk[i] = NULL;
		for (j = 0; j < clk_cnt; j++) {
			if (!strcmp(clk_src_name[i], clk_name_array[j])) {
				clk = devm_clk_get(&pdev->dev, clk_src_name[i]);
				if (IS_ERR(clk)) {
					ret = PTR_ERR(clk);
					dev_err(&pdev->dev, "%s: clk get failed for %s with ret %d\n",
						__func__, clk_src_name[i], ret);
					goto err;
				}
				priv->clk[i] = clk;
				dev_dbg(&pdev->dev, "%s: clk get success for clk name %s\n",
					__func__, clk_src_name[i]);
			}
		}
	}
	ret = of_property_read_u32(pdev->dev.of_node,
				   "qcom,rx_mclk_mode_muxsel", &muxsel);
	if (ret) {
		dev_dbg(&pdev->dev, "%s: could not find qcom,rx_mclk_mode_muxsel entry in dt\n",
			__func__);
	} else {
		priv->rx_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
		if (!priv->rx_clk_muxsel) {
			dev_err(&pdev->dev, "%s: ioremap failed for rx muxsel\n",
				__func__);
			return -ENOMEM;
		}
	}
	ret = of_property_read_u32(pdev->dev.of_node,
				   "qcom,wsa_mclk_mode_muxsel", &muxsel);
	if (ret) {
		dev_dbg(&pdev->dev, "%s: could not find qcom,wsa_mclk_mode_muxsel entry in dt\n",
			__func__);
	} else {
		priv->wsa_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
		if (!priv->wsa_clk_muxsel) {
			dev_err(&pdev->dev, "%s: ioremap failed for wsa muxsel\n",
				__func__);
			return -ENOMEM;
		}
	}
	ret = of_property_read_u32(pdev->dev.of_node,
				   "qcom,va_mclk_mode_muxsel", &muxsel);
	if (ret) {
		dev_dbg(&pdev->dev, "%s: could not find qcom,va_mclk_mode_muxsel entry in dt\n",
			__func__);
	} else {
		priv->va_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
		if (!priv->va_clk_muxsel) {
			dev_err(&pdev->dev, "%s: ioremap failed for va muxsel\n",
				__func__);
			return -ENOMEM;
		}
	}

	ret = bolero_register_res_clk(&pdev->dev, bolero_clk_rsc_cb);
	if (ret < 0) {
		dev_err(&pdev->dev, "%s: Failed to register cb %d\n",
			__func__, ret);
		goto err;
	}
	priv->dev = &pdev->dev;
	priv->dev_up = true;
	mutex_init(&priv->rsc_clk_lock);
	mutex_init(&priv->fs_gen_lock);
	dev_set_drvdata(&pdev->dev, priv);

err:
	return ret;
}

static int bolero_clk_rsc_remove(struct platform_device *pdev)
{
	struct bolero_clk_rsc *priv = dev_get_drvdata(&pdev->dev);

	bolero_unregister_res_clk(&pdev->dev);
	of_platform_depopulate(&pdev->dev);
	if (!priv)
		return -EINVAL;
	mutex_destroy(&priv->rsc_clk_lock);
	mutex_destroy(&priv->fs_gen_lock);

	return 0;
}

static const struct of_device_id bolero_clk_rsc_dt_match[] = {
	{.compatible = "qcom,bolero-clk-rsc-mngr"},
	{}
};
MODULE_DEVICE_TABLE(of, bolero_clk_rsc_dt_match);

static struct platform_driver bolero_clk_rsc_mgr = {
	.driver = {
		.name = "bolero-clk-rsc-mngr",
		.owner = THIS_MODULE,
		.of_match_table = bolero_clk_rsc_dt_match,
		.suppress_bind_attrs = true,
	},
	.probe = bolero_clk_rsc_probe,
	.remove = bolero_clk_rsc_remove,
};

int bolero_clk_rsc_mgr_init(void)
{
	return platform_driver_register(&bolero_clk_rsc_mgr);
}

void bolero_clk_rsc_mgr_exit(void)
{
	platform_driver_unregister(&bolero_clk_rsc_mgr);
}
MODULE_DESCRIPTION("Bolero clock resource manager driver");
MODULE_LICENSE("GPL v2");