blob: b80a2671253e2450a6a27d8f2f8b116a27f0c8d8 [file] [log] [blame]
Vidyakumar Athota5d45f4c2019-03-10 22:35:07 -07001// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (c) 2019, The Linux Foundation. All rights reserved.
4 */
5
6#include <linux/of_platform.h>
7#include <linux/module.h>
8#include <linux/io.h>
9#include <linux/init.h>
10#include <linux/platform_device.h>
11#include <linux/kernel.h>
12#include <linux/clk.h>
Meng Wang8ef0cc22019-05-08 15:12:56 +080013#include <linux/clk-provider.h>
Vidyakumar Athota5d45f4c2019-03-10 22:35:07 -070014#include "bolero-cdc.h"
15#include "bolero-clk-rsc.h"
16
#define DRV_NAME "bolero-clk-rsc"
#define BOLERO_CLK_NAME_LENGTH 30
/* Distance between a core clk id and its matching NPL clk id in the clk
 * id enumeration (e.g. TX_CORE_CLK + NPL_CLK_OFFSET == TX_NPL_CLK).
 */
#define NPL_CLK_OFFSET (TX_NPL_CLK - TX_CORE_CLK)

/* Device-tree "clock-names" strings, indexed by clk id: entries must stay
 * in the same order as the clk id enum (core clks first, NPL clks at
 * NPL_CLK_OFFSET), since priv->clk[] is filled by matching these names.
 */
static char clk_src_name[MAX_CLK][BOLERO_CLK_NAME_LENGTH] = {
	"tx_core_clk",
	"rx_core_clk",
	"wsa_core_clk",
	"va_core_clk",
	"tx_npl_clk",
	"rx_npl_clk",
	"wsa_npl_clk",
	"va_npl_clk",
};
31
/* Per-device state of the bolero clock resource manager */
struct bolero_clk_rsc {
	struct device *dev;		/* this platform device */
	struct mutex rsc_clk_lock;	/* serializes clk counts and muxsel writes */
	struct clk *clk[MAX_CLK];	/* clk handles, indexed by clk id */
	int clk_cnt[MAX_CLK];		/* software enable refcount per clk id */
	int reg_seq_en_cnt;		/* fs-gen register sequence refcount */
	int va_tx_clk_cnt;		/* VA usecases currently served by TX clk */
	bool dev_up;			/* false between SSR_DOWN and SSR_UP events */
	u32 num_fs_reg;			/* number of reg/value pairs in fs_gen_seq */
	u32 *fs_gen_seq;		/* qcom,fs-gen-sequence reg/value pairs */
	int default_clk_id[MAX_CLK];	/* mux0 fallback clk id per requested clk */
	struct regmap *regmap;
	char __iomem *rx_clk_muxsel;	/* mapped RX mclk mode muxsel register */
	char __iomem *wsa_clk_muxsel;	/* mapped WSA mclk mode muxsel register */
	char __iomem *va_clk_muxsel;	/* mapped VA mclk mode muxsel register */
};
48
49static int bolero_clk_rsc_cb(struct device *dev, u16 event)
50{
51 struct bolero_clk_rsc *priv;
52
53 if (!dev) {
54 pr_err("%s: Invalid device pointer\n",
55 __func__);
56 return -EINVAL;
57 }
58
59 priv = dev_get_drvdata(dev);
60 if (!priv) {
61 pr_err("%s: Invalid clk rsc priviate data\n",
62 __func__);
63 return -EINVAL;
64 }
65
66 mutex_lock(&priv->rsc_clk_lock);
67 if (event == BOLERO_MACRO_EVT_SSR_UP)
68 priv->dev_up = true;
69 else if (event == BOLERO_MACRO_EVT_SSR_DOWN)
70 priv->dev_up = false;
71 mutex_unlock(&priv->rsc_clk_lock);
72
73 return 0;
74}
75
76static char __iomem *bolero_clk_rsc_get_clk_muxsel(struct bolero_clk_rsc *priv,
77 int clk_id)
78{
79 switch (clk_id) {
80 case RX_CORE_CLK:
81 return priv->rx_clk_muxsel;
82 case WSA_CORE_CLK:
83 return priv->wsa_clk_muxsel;
84 case VA_CORE_CLK:
85 return priv->va_clk_muxsel;
86 case TX_CORE_CLK:
87 default:
88 dev_err_ratelimited(priv->dev, "%s: Invalid case\n", __func__);
89 break;
90 }
91
92 return NULL;
93}
94
Meng Wang8ef0cc22019-05-08 15:12:56 +080095int bolero_rsc_clk_reset(struct device *dev, int clk_id)
96{
97 struct device *clk_dev = NULL;
98 struct bolero_clk_rsc *priv = NULL;
99 int count = 0;
100
101 if (!dev) {
102 pr_err("%s: dev is null %d\n", __func__);
103 return -EINVAL;
104 }
105
106 if (clk_id < 0 || clk_id >= MAX_CLK - NPL_CLK_OFFSET) {
107 pr_err("%s: Invalid clk_id: %d\n",
108 __func__, clk_id);
109 return -EINVAL;
110 }
111
112 clk_dev = bolero_get_rsc_clk_device_ptr(dev->parent);
113 if (!clk_dev) {
114 pr_err("%s: Invalid rsc clk device\n", __func__);
115 return -EINVAL;
116 }
117
118 priv = dev_get_drvdata(clk_dev);
119 if (!priv) {
120 pr_err("%s: Invalid rsc clk priviate data\n", __func__);
121 return -EINVAL;
122 }
123 mutex_lock(&priv->rsc_clk_lock);
124 while (__clk_is_enabled(priv->clk[clk_id])) {
125 clk_disable_unprepare(priv->clk[clk_id + NPL_CLK_OFFSET]);
126 clk_disable_unprepare(priv->clk[clk_id]);
127 count++;
128 }
129 dev_dbg(priv->dev,
130 "%s: clock reset after ssr, count %d\n", __func__, count);
131 while (count--) {
132 clk_prepare_enable(priv->clk[clk_id]);
133 clk_prepare_enable(priv->clk[clk_id + NPL_CLK_OFFSET]);
134 }
135 mutex_unlock(&priv->rsc_clk_lock);
136 return 0;
137}
138EXPORT_SYMBOL(bolero_rsc_clk_reset);
139
Aditya Bavanaricfc65e82019-10-03 22:34:23 +0530140void bolero_clk_rsc_enable_all_clocks(struct device *dev, bool enable)
141{
142 struct device *clk_dev = NULL;
143 struct bolero_clk_rsc *priv = NULL;
144 int i = 0;
145
146 if (!dev) {
147 pr_err("%s: dev is null %d\n", __func__);
148 return;
149 }
150
151 clk_dev = bolero_get_rsc_clk_device_ptr(dev->parent);
152 if (!clk_dev) {
153 pr_err("%s: Invalid rsc clk device\n", __func__);
154 return;
155 }
156
157 priv = dev_get_drvdata(clk_dev);
158 if (!priv) {
159 pr_err("%s: Invalid rsc clk private data\n", __func__);
160 return;
161 }
162 mutex_lock(&priv->rsc_clk_lock);
163 for (i = 0; i < MAX_CLK - NPL_CLK_OFFSET; i++) {
164 if (enable) {
165 if (priv->clk[i])
166 clk_prepare_enable(priv->clk[i]);
167 if (priv->clk[i + NPL_CLK_OFFSET])
168 clk_prepare_enable(
169 priv->clk[i + NPL_CLK_OFFSET]);
170 } else {
171 if (priv->clk[i + NPL_CLK_OFFSET])
172 clk_disable_unprepare(
173 priv->clk[i + NPL_CLK_OFFSET]);
174 if (priv->clk[i])
175 clk_disable_unprepare(priv->clk[i]);
176 }
177 }
178 mutex_unlock(&priv->rsc_clk_lock);
179 return;
180}
181EXPORT_SYMBOL(bolero_clk_rsc_enable_all_clocks);
182
Vidyakumar Athota5d45f4c2019-03-10 22:35:07 -0700183static int bolero_clk_rsc_mux0_clk_request(struct bolero_clk_rsc *priv,
184 int clk_id,
185 bool enable)
186{
187 int ret = 0;
188
189 if (enable) {
190 /* Enable Requested Core clk */
191 if (priv->clk_cnt[clk_id] == 0) {
192 ret = clk_prepare_enable(priv->clk[clk_id]);
193 if (ret < 0) {
194 dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
195 __func__, clk_id);
196 goto done;
197 }
198 if (priv->clk[clk_id + NPL_CLK_OFFSET]) {
199 ret = clk_prepare_enable(
200 priv->clk[clk_id + NPL_CLK_OFFSET]);
201 if (ret < 0) {
202 dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
203 __func__,
204 clk_id + NPL_CLK_OFFSET);
205 goto err;
206 }
207 }
208 }
209 priv->clk_cnt[clk_id]++;
210 } else {
211 if (priv->clk_cnt[clk_id] <= 0) {
212 dev_err_ratelimited(priv->dev, "%s: clk_id: %d is already disabled\n",
213 __func__, clk_id);
214 priv->clk_cnt[clk_id] = 0;
215 goto done;
216 }
217 priv->clk_cnt[clk_id]--;
218 if (priv->clk_cnt[clk_id] == 0) {
219 if (priv->clk[clk_id + NPL_CLK_OFFSET])
220 clk_disable_unprepare(
221 priv->clk[clk_id + NPL_CLK_OFFSET]);
222 clk_disable_unprepare(priv->clk[clk_id]);
223 }
224 }
225 return ret;
226
227err:
228 clk_disable_unprepare(priv->clk[clk_id]);
229done:
230 return ret;
231}
232
/* Refcounted enable/disable of a clk sourced through mux1 (alternate
 * source). Flipping the muxsel register requires the mux0 default clk to
 * be running, so it is enabled around each muxsel write and released
 * afterwards. Caller holds priv->rsc_clk_lock.
 * Returns 0 on success or a negative errno.
 */
static int bolero_clk_rsc_mux1_clk_request(struct bolero_clk_rsc *priv,
					   int clk_id,
					   bool enable)
{
	char __iomem *clk_muxsel = NULL;
	int ret = 0;
	int default_clk_id = priv->default_clk_id[clk_id];

	clk_muxsel = bolero_clk_rsc_get_clk_muxsel(priv, clk_id);
	if (!clk_muxsel) {
		ret = -EINVAL;
		goto done;
	}

	if (enable) {
		if (priv->clk_cnt[clk_id] == 0) {
			/* mux0 default clk must run while switching the mux */
			ret = bolero_clk_rsc_mux0_clk_request(priv, default_clk_id,
							true);
			if (ret < 0)
				goto done;

			ret = clk_prepare_enable(priv->clk[clk_id]);
			if (ret < 0) {
				dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
					__func__, clk_id);
				goto err_clk;
			}
			if (priv->clk[clk_id + NPL_CLK_OFFSET]) {
				ret = clk_prepare_enable(
					priv->clk[clk_id + NPL_CLK_OFFSET]);
				if (ret < 0) {
					dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
						__func__,
						clk_id + NPL_CLK_OFFSET);
					goto err_npl_clk;
				}
			}
			/* Select the mux1 source, then drop the helper clk */
			iowrite32(0x1, clk_muxsel);
			bolero_clk_rsc_mux0_clk_request(priv, default_clk_id,
							false);
		}
		priv->clk_cnt[clk_id]++;
	} else {
		/* Clamp an unbalanced disable to zero, as in mux0 */
		if (priv->clk_cnt[clk_id] <= 0) {
			dev_err_ratelimited(priv->dev, "%s: clk_id: %d is already disabled\n",
				__func__, clk_id);
			priv->clk_cnt[clk_id] = 0;
			goto done;
		}
		priv->clk_cnt[clk_id]--;
		if (priv->clk_cnt[clk_id] == 0) {
			/* Switch the mux back to the default source before
			 * turning this clk off; skip the write if the
			 * default clk could not be enabled.
			 */
			ret = bolero_clk_rsc_mux0_clk_request(priv,
						default_clk_id, true);

			if (!ret)
				iowrite32(0x0, clk_muxsel);

			if (priv->clk[clk_id + NPL_CLK_OFFSET])
				clk_disable_unprepare(
					priv->clk[clk_id + NPL_CLK_OFFSET]);
			clk_disable_unprepare(priv->clk[clk_id]);

			if (!ret)
				bolero_clk_rsc_mux0_clk_request(priv,
						default_clk_id, false);
		}
	}
	return ret;

err_npl_clk:
	clk_disable_unprepare(priv->clk[clk_id]);

err_clk:
	bolero_clk_rsc_mux0_clk_request(priv, default_clk_id, false);
done:
	return ret;
}
310
/* Keep the VA macro clocked from a single source when VA and TX usecases
 * run concurrently: whenever TX_CORE_CLK is active, VA rides on it
 * (tracked in priv->va_tx_clk_cnt) and VA_CORE_CLK is released; when TX
 * goes away, VA is moved back to its own clk. Caller holds
 * priv->rsc_clk_lock. Returns 0 on success or a negative errno.
 */
static int bolero_clk_rsc_check_and_update_va_clk(struct bolero_clk_rsc *priv,
						  bool mux_switch,
						  int clk_id,
						  bool enable)
{
	int ret = 0;

	if (enable) {
		if (clk_id == VA_CORE_CLK && mux_switch) {
			/*
			 * Handle the following usecase scenarios during enable
			 * 1. VA only, Active clk is VA_CORE_CLK
			 * 2. record -> record + VA, Active clk is TX_CORE_CLK
			 */
			if (priv->clk_cnt[TX_CORE_CLK] == 0) {
				ret = bolero_clk_rsc_mux1_clk_request(priv,
						VA_CORE_CLK, enable);
				if (ret < 0)
					goto err;
			} else {
				ret = bolero_clk_rsc_mux0_clk_request(priv,
						TX_CORE_CLK, enable);
				if (ret < 0)
					goto err;
				priv->va_tx_clk_cnt++;
			}
		} else if ((priv->clk_cnt[TX_CORE_CLK] > 0) &&
			   (priv->clk_cnt[VA_CORE_CLK] > 0)) {
			/*
			 * Handle following concurrency scenario during enable
			 * 1. VA-> Record+VA, Increment TX CLK and Disable VA
			 * 2. VA-> Playback+VA, Increment TX CLK and Disable VA
			 */
			while (priv->clk_cnt[VA_CORE_CLK] > 0) {
				ret = bolero_clk_rsc_mux0_clk_request(priv,
						TX_CORE_CLK, true);
				if (ret < 0)
					goto err;

				bolero_clk_rsc_mux1_clk_request(priv,
						VA_CORE_CLK, false);
				priv->va_tx_clk_cnt++;
			}
		}
	} else {
		if (clk_id == VA_CORE_CLK && mux_switch) {
			/*
			 * Handle the following usecase scenarios during disable
			 * 1. VA only, disable VA_CORE_CLK
			 * 2. Record + VA -> Record, decrement TX CLK count
			 */
			if (priv->clk_cnt[VA_CORE_CLK]) {
				bolero_clk_rsc_mux1_clk_request(priv,
						VA_CORE_CLK, enable);
			} else if (priv->va_tx_clk_cnt) {
				bolero_clk_rsc_mux0_clk_request(priv,
						TX_CORE_CLK, enable);
				priv->va_tx_clk_cnt--;
			}
		} else if (priv->va_tx_clk_cnt == priv->clk_cnt[TX_CORE_CLK]) {
			/*
			 * Handle the following usecase scenarios during disable
			 * Record+VA-> VA: enable VA CLK, decrement TX CLK count
			 */
			while (priv->va_tx_clk_cnt) {
				ret = bolero_clk_rsc_mux1_clk_request(priv,
						VA_CORE_CLK, true);
				if (ret < 0)
					goto err;

				bolero_clk_rsc_mux0_clk_request(priv,
						TX_CORE_CLK, false);
				priv->va_tx_clk_cnt--;
			}
		}
	}

err:
	return ret;
}
391
392/**
393 * bolero_clk_rsc_fs_gen_request - request to enable/disable fs generation
394 * sequence
395 *
396 * @dev: Macro device pointer
397 * @enable: enable or disable flag
398 */
399void bolero_clk_rsc_fs_gen_request(struct device *dev, bool enable)
400{
401 int i;
402 struct regmap *regmap;
403 struct device *clk_dev = NULL;
404 struct bolero_clk_rsc *priv = NULL;
405
406 if (!dev) {
407 pr_err("%s: dev is null %d\n", __func__);
408 return;
409 }
410 clk_dev = bolero_get_rsc_clk_device_ptr(dev->parent);
411 if (!clk_dev) {
412 pr_err("%s: Invalid rsc clk device\n", __func__);
413 return;
414 }
415 priv = dev_get_drvdata(clk_dev);
416 if (!priv) {
417 pr_err("%s: Invalid rsc clk priviate data\n", __func__);
418 return;
419 }
420 regmap = dev_get_regmap(priv->dev->parent, NULL);
Karthikeyan Mani9aca5b12019-05-01 10:47:43 -0700421 if (!regmap) {
422 pr_err("%s: regmap is null\n", __func__);
423 return;
424 }
Vidyakumar Athota5d45f4c2019-03-10 22:35:07 -0700425 if (enable) {
426 if (priv->reg_seq_en_cnt++ == 0) {
427 for (i = 0; i < (priv->num_fs_reg * 2); i += 2) {
428 dev_dbg(priv->dev, "%s: Register: %d, value: %d\n",
429 __func__, priv->fs_gen_seq[i],
430 priv->fs_gen_seq[i + 1]);
431 regmap_update_bits(regmap,
432 priv->fs_gen_seq[i],
433 priv->fs_gen_seq[i + 1],
434 priv->fs_gen_seq[i + 1]);
435 }
436 }
437 } else {
438 if (priv->reg_seq_en_cnt <= 0) {
439 dev_err_ratelimited(priv->dev, "%s: req_seq_cnt: %d is already disabled\n",
440 __func__, priv->reg_seq_en_cnt);
441 priv->reg_seq_en_cnt = 0;
442 return;
443 }
444 if (--priv->reg_seq_en_cnt == 0) {
445 for (i = ((priv->num_fs_reg - 1) * 2); i >= 0; i -= 2) {
446 dev_dbg(priv->dev, "%s: Register: %d, value: %d\n",
447 __func__, priv->fs_gen_seq[i],
448 priv->fs_gen_seq[i + 1]);
449 regmap_update_bits(regmap, priv->fs_gen_seq[i],
450 priv->fs_gen_seq[i + 1], 0x0);
451 }
452 }
453 }
454}
455EXPORT_SYMBOL(bolero_clk_rsc_fs_gen_request);
456
457/**
458 * bolero_clk_rsc_request_clock - request for clock to
459 * enable/disable
460 *
461 * @dev: Macro device pointer.
462 * @default_clk_id: mux0 Core clock ID input.
463 * @clk_id_req: Core clock ID requested to enable/disable
464 * @enable: enable or disable clock flag
465 *
466 * Returns 0 on success or -EINVAL on error.
467 */
468int bolero_clk_rsc_request_clock(struct device *dev,
469 int default_clk_id,
470 int clk_id_req,
471 bool enable)
472{
473 int ret = 0;
474 struct device *clk_dev = NULL;
475 struct bolero_clk_rsc *priv = NULL;
476 bool mux_switch = false;
477
478 if (!dev) {
479 pr_err("%s: dev is null %d\n", __func__);
480 return -EINVAL;
481 }
482 if ((clk_id_req < 0 || clk_id_req >= MAX_CLK) &&
483 (default_clk_id < 0 || default_clk_id >= MAX_CLK)) {
484 pr_err("%s: Invalid clk_id_req: %d or default_clk_id: %d\n",
485 __func__, clk_id_req, default_clk_id);
486 return -EINVAL;
487 }
488 clk_dev = bolero_get_rsc_clk_device_ptr(dev->parent);
489 if (!clk_dev) {
490 pr_err("%s: Invalid rsc clk device\n", __func__);
491 return -EINVAL;
492 }
493 priv = dev_get_drvdata(clk_dev);
494 if (!priv) {
495 pr_err("%s: Invalid rsc clk priviate data\n", __func__);
496 return -EINVAL;
497 }
498
499 mutex_lock(&priv->rsc_clk_lock);
Aditya Bavanarif4a471d2019-02-19 17:57:12 +0530500 if (!priv->dev_up && enable) {
Vidyakumar Athota5d45f4c2019-03-10 22:35:07 -0700501 dev_err_ratelimited(priv->dev, "%s: SSR is in progress..\n",
502 __func__);
503 ret = -EINVAL;
504 goto err;
505 }
506 priv->default_clk_id[clk_id_req] = default_clk_id;
507 if (default_clk_id != clk_id_req)
508 mux_switch = true;
509
510 if (mux_switch) {
511 if (clk_id_req != VA_CORE_CLK) {
512 ret = bolero_clk_rsc_mux1_clk_request(priv, clk_id_req,
513 enable);
514 if (ret < 0)
515 goto err;
516 }
517 } else {
518 ret = bolero_clk_rsc_mux0_clk_request(priv, clk_id_req, enable);
519 if (ret < 0)
520 goto err;
521 }
522
523 ret = bolero_clk_rsc_check_and_update_va_clk(priv, mux_switch,
524 clk_id_req,
525 enable);
526 if (ret < 0)
527 goto err;
528
529 dev_dbg(priv->dev, "%s: clk_cnt: %d for requested clk: %d, enable: %d\n",
530 __func__, priv->clk_cnt[clk_id_req], clk_id_req,
531 enable);
532
533 mutex_unlock(&priv->rsc_clk_lock);
534
535 return 0;
536
537err:
538 mutex_unlock(&priv->rsc_clk_lock);
539 return ret;
540}
541EXPORT_SYMBOL(bolero_clk_rsc_request_clock);
542
543
544static int bolero_clk_rsc_probe(struct platform_device *pdev)
545{
546 int ret = 0, fs_gen_size, i, j;
547 const char **clk_name_array;
548 int clk_cnt;
549 struct clk *clk;
550 struct bolero_clk_rsc *priv = NULL;
551 u32 muxsel = 0;
552
553 priv = devm_kzalloc(&pdev->dev, sizeof(struct bolero_clk_rsc),
554 GFP_KERNEL);
555 if (!priv)
556 return -ENOMEM;
557
558 /* Get clk fs gen sequence from device tree */
559 if (!of_find_property(pdev->dev.of_node, "qcom,fs-gen-sequence",
560 &fs_gen_size)) {
561 dev_err(&pdev->dev, "%s: unable to find qcom,fs-gen-sequence property\n",
562 __func__);
563 ret = -EINVAL;
564 goto err;
565 }
566 priv->num_fs_reg = fs_gen_size/(2 * sizeof(u32));
567 priv->fs_gen_seq = devm_kzalloc(&pdev->dev, fs_gen_size, GFP_KERNEL);
568 if (!priv->fs_gen_seq) {
569 ret = -ENOMEM;
570 goto err;
571 }
572 dev_dbg(&pdev->dev, "%s: num_fs_reg %d\n", __func__, priv->num_fs_reg);
573 /* Parse fs-gen-sequence */
574 ret = of_property_read_u32_array(pdev->dev.of_node,
575 "qcom,fs-gen-sequence",
576 priv->fs_gen_seq,
577 priv->num_fs_reg * 2);
578 if (ret < 0) {
579 dev_err(&pdev->dev, "%s: unable to parse fs-gen-sequence, ret = %d\n",
580 __func__, ret);
581 goto err;
582 }
583
584 /* Get clk details from device tree */
585 clk_cnt = of_property_count_strings(pdev->dev.of_node, "clock-names");
586 if (clk_cnt <= 0 || clk_cnt > MAX_CLK) {
587 dev_err(&pdev->dev, "%s: Invalid number of clocks %d",
588 __func__, clk_cnt);
589 ret = -EINVAL;
590 goto err;
591 }
592 clk_name_array = devm_kzalloc(&pdev->dev, clk_cnt * sizeof(char *),
593 GFP_KERNEL);
Karthikeyan Mani9aca5b12019-05-01 10:47:43 -0700594 if (!clk_name_array) {
595 ret = -ENOMEM;
596 goto err;
597 }
Vidyakumar Athota5d45f4c2019-03-10 22:35:07 -0700598
599 ret = of_property_read_string_array(pdev->dev.of_node, "clock-names",
600 clk_name_array, clk_cnt);
601
602 for (i = 0; i < MAX_CLK; i++) {
603 priv->clk[i] = NULL;
604 for (j = 0; j < clk_cnt; j++) {
605 if (!strcmp(clk_src_name[i], clk_name_array[j])) {
606 clk = devm_clk_get(&pdev->dev, clk_src_name[i]);
607 if (IS_ERR(clk)) {
608 ret = PTR_ERR(clk);
609 dev_err(&pdev->dev, "%s: clk get failed for %s with ret %d\n",
610 __func__, clk_src_name[i], ret);
611 goto err;
612 }
613 priv->clk[i] = clk;
614 dev_dbg(&pdev->dev, "%s: clk get success for clk name %s\n",
615 __func__, clk_src_name[i]);
616 }
617 }
618 }
619 ret = of_property_read_u32(pdev->dev.of_node,
620 "qcom,rx_mclk_mode_muxsel", &muxsel);
621 if (ret) {
622 dev_dbg(&pdev->dev, "%s: could not find qcom,rx_mclk_mode_muxsel entry in dt\n",
623 __func__);
624 } else {
625 priv->rx_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
626 if (!priv->rx_clk_muxsel) {
627 dev_err(&pdev->dev, "%s: ioremap failed for rx muxsel\n",
628 __func__);
629 return -ENOMEM;
630 }
631 }
632 ret = of_property_read_u32(pdev->dev.of_node,
633 "qcom,wsa_mclk_mode_muxsel", &muxsel);
634 if (ret) {
635 dev_dbg(&pdev->dev, "%s: could not find qcom,wsa_mclk_mode_muxsel entry in dt\n",
636 __func__);
637 } else {
638 priv->wsa_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
639 if (!priv->wsa_clk_muxsel) {
640 dev_err(&pdev->dev, "%s: ioremap failed for wsa muxsel\n",
641 __func__);
642 return -ENOMEM;
643 }
644 }
645 ret = of_property_read_u32(pdev->dev.of_node,
646 "qcom,va_mclk_mode_muxsel", &muxsel);
647 if (ret) {
648 dev_dbg(&pdev->dev, "%s: could not find qcom,va_mclk_mode_muxsel entry in dt\n",
649 __func__);
650 } else {
651 priv->va_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
652 if (!priv->va_clk_muxsel) {
653 dev_err(&pdev->dev, "%s: ioremap failed for va muxsel\n",
654 __func__);
655 return -ENOMEM;
656 }
657 }
658
659 ret = bolero_register_res_clk(&pdev->dev, bolero_clk_rsc_cb);
660 if (ret < 0) {
661 dev_err(&pdev->dev, "%s: Failed to register cb %d",
662 __func__, ret);
663 goto err;
664 }
665 priv->dev = &pdev->dev;
666 priv->dev_up = true;
667 mutex_init(&priv->rsc_clk_lock);
668 dev_set_drvdata(&pdev->dev, priv);
669
670err:
671 return ret;
672}
673
674static int bolero_clk_rsc_remove(struct platform_device *pdev)
675{
676 struct bolero_clk_rsc *priv = dev_get_drvdata(&pdev->dev);
677
678 bolero_unregister_res_clk(&pdev->dev);
679 of_platform_depopulate(&pdev->dev);
680 if (!priv)
681 return -EINVAL;
682 mutex_destroy(&priv->rsc_clk_lock);
683
684 return 0;
685}
686
/* Device-tree match table for the clk resource manager node */
static const struct of_device_id bolero_clk_rsc_dt_match[] = {
	{.compatible = "qcom,bolero-clk-rsc-mngr"},
	{}
};
MODULE_DEVICE_TABLE(of, bolero_clk_rsc_dt_match);

static struct platform_driver bolero_clk_rsc_mgr = {
	.driver = {
		.name = "bolero-clk-rsc-mngr",
		.owner = THIS_MODULE,
		.of_match_table = bolero_clk_rsc_dt_match,
		/* unbinding via sysfs would leave clk refcounts dangling */
		.suppress_bind_attrs = true,
	},
	.probe = bolero_clk_rsc_probe,
	.remove = bolero_clk_rsc_remove,
};
703
/* Register the platform driver; called by the parent bolero module
 * rather than via module_init.
 */
int bolero_clk_rsc_mgr_init(void)
{
	return platform_driver_register(&bolero_clk_rsc_mgr);
}

/* Unregister the platform driver; counterpart of the init above */
void bolero_clk_rsc_mgr_exit(void)
{
	platform_driver_unregister(&bolero_clk_rsc_mgr);
}
MODULE_DESCRIPTION("Bolero clock resource manager driver");
MODULE_LICENSE("GPL v2");