// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2019, The Linux Foundation. All rights reserved.
 */

#include <linux/of_platform.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include "bolero-cdc.h"
#include "bolero-clk-rsc.h"

#define DRV_NAME "bolero-clk-rsc"
#define BOLERO_CLK_NAME_LENGTH 30
#define NPL_CLK_OFFSET (TX_NPL_CLK - TX_CORE_CLK)

static char clk_src_name[MAX_CLK][BOLERO_CLK_NAME_LENGTH] = {
	"tx_core_clk",
	"rx_core_clk",
	"wsa_core_clk",
	"va_core_clk",
	"tx_npl_clk",
	"rx_npl_clk",
	"wsa_npl_clk",
	"va_npl_clk",
};

struct bolero_clk_rsc {
	struct device *dev;
	struct mutex rsc_clk_lock;
	struct clk *clk[MAX_CLK];
	int clk_cnt[MAX_CLK];
	int reg_seq_en_cnt;
	int va_tx_clk_cnt;
	bool dev_up;
	u32 num_fs_reg;
	u32 *fs_gen_seq;
	int default_clk_id[MAX_CLK];
	struct regmap *regmap;
	char __iomem *rx_clk_muxsel;
	char __iomem *wsa_clk_muxsel;
	char __iomem *va_clk_muxsel;
};

static int bolero_clk_rsc_cb(struct device *dev, u16 event)
{
	struct bolero_clk_rsc *priv;

	if (!dev) {
		pr_err("%s: Invalid device pointer\n",
			__func__);
		return -EINVAL;
	}

	priv = dev_get_drvdata(dev);
	if (!priv) {
		pr_err("%s: Invalid clk rsc private data\n",
			__func__);
		return -EINVAL;
	}

	mutex_lock(&priv->rsc_clk_lock);
	if (event == BOLERO_MACRO_EVT_SSR_UP)
		priv->dev_up = true;
	else if (event == BOLERO_MACRO_EVT_SSR_DOWN)
		priv->dev_up = false;
	mutex_unlock(&priv->rsc_clk_lock);

	return 0;
}

static char __iomem *bolero_clk_rsc_get_clk_muxsel(struct bolero_clk_rsc *priv,
						   int clk_id)
{
	switch (clk_id) {
	case RX_CORE_CLK:
		return priv->rx_clk_muxsel;
	case WSA_CORE_CLK:
		return priv->wsa_clk_muxsel;
	case VA_CORE_CLK:
		return priv->va_clk_muxsel;
	case TX_CORE_CLK:
	default:
		dev_err_ratelimited(priv->dev, "%s: Invalid case\n", __func__);
		break;
	}

	return NULL;
}

static int bolero_clk_rsc_mux0_clk_request(struct bolero_clk_rsc *priv,
					   int clk_id,
					   bool enable)
{
	int ret = 0;

	if (enable) {
		/* Enable Requested Core clk */
		if (priv->clk_cnt[clk_id] == 0) {
			ret = clk_prepare_enable(priv->clk[clk_id]);
			if (ret < 0) {
				dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
						    __func__, clk_id);
				goto done;
			}
			if (priv->clk[clk_id + NPL_CLK_OFFSET]) {
				ret = clk_prepare_enable(
					priv->clk[clk_id + NPL_CLK_OFFSET]);
				if (ret < 0) {
					dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
							    __func__,
							    clk_id + NPL_CLK_OFFSET);
					goto err;
				}
			}
		}
		priv->clk_cnt[clk_id]++;
	} else {
		if (priv->clk_cnt[clk_id] <= 0) {
			dev_err_ratelimited(priv->dev, "%s: clk_id: %d is already disabled\n",
					    __func__, clk_id);
			priv->clk_cnt[clk_id] = 0;
			goto done;
		}
		priv->clk_cnt[clk_id]--;
		if (priv->clk_cnt[clk_id] == 0) {
			if (priv->clk[clk_id + NPL_CLK_OFFSET])
				clk_disable_unprepare(
					priv->clk[clk_id + NPL_CLK_OFFSET]);
			clk_disable_unprepare(priv->clk[clk_id]);
		}
	}
	return ret;

err:
	clk_disable_unprepare(priv->clk[clk_id]);
done:
	return ret;
}

static int bolero_clk_rsc_mux1_clk_request(struct bolero_clk_rsc *priv,
					   int clk_id,
					   bool enable)
{
	char __iomem *clk_muxsel = NULL;
	int ret = 0;
	int default_clk_id = priv->default_clk_id[clk_id];

	clk_muxsel = bolero_clk_rsc_get_clk_muxsel(priv, clk_id);
	if (!clk_muxsel) {
		ret = -EINVAL;
		goto done;
	}

	if (enable) {
		if (priv->clk_cnt[clk_id] == 0) {
			ret = bolero_clk_rsc_mux0_clk_request(priv, default_clk_id,
							      true);
			if (ret < 0)
				goto done;

			ret = clk_prepare_enable(priv->clk[clk_id]);
			if (ret < 0) {
				dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
						    __func__, clk_id);
				goto err_clk;
			}
			if (priv->clk[clk_id + NPL_CLK_OFFSET]) {
				ret = clk_prepare_enable(
					priv->clk[clk_id + NPL_CLK_OFFSET]);
				if (ret < 0) {
					dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
							    __func__,
							    clk_id + NPL_CLK_OFFSET);
					goto err_npl_clk;
				}
			}
			iowrite32(0x1, clk_muxsel);
			bolero_clk_rsc_mux0_clk_request(priv, default_clk_id,
							false);
		}
		priv->clk_cnt[clk_id]++;
	} else {
		if (priv->clk_cnt[clk_id] <= 0) {
			dev_err_ratelimited(priv->dev, "%s: clk_id: %d is already disabled\n",
					    __func__, clk_id);
			priv->clk_cnt[clk_id] = 0;
			goto done;
		}
		priv->clk_cnt[clk_id]--;
		if (priv->clk_cnt[clk_id] == 0) {
			bolero_clk_rsc_mux0_clk_request(priv, default_clk_id,
							true);

			iowrite32(0x0, clk_muxsel);
			if (priv->clk[clk_id + NPL_CLK_OFFSET])
				clk_disable_unprepare(
					priv->clk[clk_id + NPL_CLK_OFFSET]);
			clk_disable_unprepare(priv->clk[clk_id]);

			bolero_clk_rsc_mux0_clk_request(priv, default_clk_id,
							false);
		}
	}
	return ret;

err_npl_clk:
	clk_disable_unprepare(priv->clk[clk_id]);

err_clk:
	bolero_clk_rsc_mux0_clk_request(priv, default_clk_id, false);
done:
	return ret;
}

static int bolero_clk_rsc_check_and_update_va_clk(struct bolero_clk_rsc *priv,
						  bool mux_switch,
						  int clk_id,
						  bool enable)
{
	int ret = 0;

	if (enable) {
		if (clk_id == VA_CORE_CLK && mux_switch) {
			/*
			 * Handle the following usecase scenarios during enable
			 * 1. VA only, Active clk is VA_CORE_CLK
			 * 2. record -> record + VA, Active clk is TX_CORE_CLK
			 */
			if (priv->clk_cnt[TX_CORE_CLK] == 0) {
				ret = bolero_clk_rsc_mux1_clk_request(priv,
							VA_CORE_CLK, enable);
				if (ret < 0)
					goto err;
			} else {
				ret = bolero_clk_rsc_mux0_clk_request(priv,
							TX_CORE_CLK, enable);
				if (ret < 0)
					goto err;
				priv->va_tx_clk_cnt++;
			}
		} else if ((priv->clk_cnt[TX_CORE_CLK] > 0) &&
			   (priv->clk_cnt[VA_CORE_CLK] > 0)) {
			/*
			 * Handle following concurrency scenario during enable
			 * 1. VA-> Record+VA, Increment TX CLK and Disable VA
			 * 2. VA-> Playback+VA, Increment TX CLK and Disable VA
			 */
			while (priv->clk_cnt[VA_CORE_CLK] > 0) {
				ret = bolero_clk_rsc_mux0_clk_request(priv,
							TX_CORE_CLK, true);
				if (ret < 0)
					goto err;

				bolero_clk_rsc_mux1_clk_request(priv,
							VA_CORE_CLK, false);
				priv->va_tx_clk_cnt++;
			}
		}
	} else {
		if (clk_id == VA_CORE_CLK && mux_switch) {
			/*
			 * Handle the following usecase scenarios during disable
			 * 1. VA only, disable VA_CORE_CLK
			 * 2. Record + VA -> Record, decrement TX CLK count
			 */
			if (priv->clk_cnt[VA_CORE_CLK]) {
				bolero_clk_rsc_mux1_clk_request(priv,
							VA_CORE_CLK, enable);
			} else if (priv->va_tx_clk_cnt) {
				bolero_clk_rsc_mux0_clk_request(priv,
							TX_CORE_CLK, enable);
				priv->va_tx_clk_cnt--;
			}
		} else if (priv->va_tx_clk_cnt == priv->clk_cnt[TX_CORE_CLK]) {
			/*
			 * Handle the following usecase scenarios during disable
			 * Record+VA-> VA: enable VA CLK, decrement TX CLK count
			 */
			while (priv->va_tx_clk_cnt) {
				ret = bolero_clk_rsc_mux1_clk_request(priv,
							VA_CORE_CLK, true);
				if (ret < 0)
					goto err;

				bolero_clk_rsc_mux0_clk_request(priv,
							TX_CORE_CLK, false);
				priv->va_tx_clk_cnt--;
			}
		}
	}

err:
	return ret;
}

/**
 * bolero_clk_rsc_fs_gen_request - request to enable/disable fs generation
 * sequence
 *
 * @dev: Macro device pointer
 * @enable: enable or disable flag
 */
void bolero_clk_rsc_fs_gen_request(struct device *dev, bool enable)
{
	int i;
	struct regmap *regmap;
	struct device *clk_dev = NULL;
	struct bolero_clk_rsc *priv = NULL;

	if (!dev) {
		pr_err("%s: dev is null\n", __func__);
		return;
	}
	clk_dev = bolero_get_rsc_clk_device_ptr(dev->parent);
	if (!clk_dev) {
		pr_err("%s: Invalid rsc clk device\n", __func__);
		return;
	}
	priv = dev_get_drvdata(clk_dev);
	if (!priv) {
		pr_err("%s: Invalid rsc clk private data\n", __func__);
		return;
	}
	regmap = dev_get_regmap(priv->dev->parent, NULL);
	if (!regmap) {
		pr_err("%s: regmap is null\n", __func__);
		return;
	}
	if (enable) {
		if (priv->reg_seq_en_cnt++ == 0) {
			for (i = 0; i < (priv->num_fs_reg * 2); i += 2) {
				dev_dbg(priv->dev, "%s: Register: %d, value: %d\n",
					__func__, priv->fs_gen_seq[i],
					priv->fs_gen_seq[i + 1]);
				regmap_update_bits(regmap,
						   priv->fs_gen_seq[i],
						   priv->fs_gen_seq[i + 1],
						   priv->fs_gen_seq[i + 1]);
			}
		}
	} else {
		if (priv->reg_seq_en_cnt <= 0) {
			dev_err_ratelimited(priv->dev, "%s: reg_seq_en_cnt: %d is already disabled\n",
					    __func__, priv->reg_seq_en_cnt);
			priv->reg_seq_en_cnt = 0;
			return;
		}
		if (--priv->reg_seq_en_cnt == 0) {
			for (i = ((priv->num_fs_reg - 1) * 2); i >= 0; i -= 2) {
				dev_dbg(priv->dev, "%s: Register: %d, value: %d\n",
					__func__, priv->fs_gen_seq[i],
					priv->fs_gen_seq[i + 1]);
				regmap_update_bits(regmap, priv->fs_gen_seq[i],
						   priv->fs_gen_seq[i + 1], 0x0);
			}
		}
	}
}
EXPORT_SYMBOL(bolero_clk_rsc_fs_gen_request);
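
/*
 * Illustrative usage (a sketch; "macro_dev" is a placeholder, not an
 * identifier from this driver): a bolero macro driver would typically
 * bracket its use of the sample-rate (fs) generator with this call,
 * passing its own macro device so the parent rsc clk device can be
 * looked up:
 *
 *	bolero_clk_rsc_fs_gen_request(macro_dev, true);
 *	...program and run the macro...
 *	bolero_clk_rsc_fs_gen_request(macro_dev, false);
 */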

/**
 * bolero_clk_rsc_request_clock - request for clock to
 * enable/disable
 *
 * @dev: Macro device pointer.
 * @default_clk_id: mux0 Core clock ID input.
 * @clk_id_req: Core clock ID requested to enable/disable
 * @enable: enable or disable clock flag
 *
 * Returns 0 on success or -EINVAL on error.
 */
int bolero_clk_rsc_request_clock(struct device *dev,
				 int default_clk_id,
				 int clk_id_req,
				 bool enable)
{
	int ret = 0;
	struct device *clk_dev = NULL;
	struct bolero_clk_rsc *priv = NULL;
	bool mux_switch = false;

	if (!dev) {
		pr_err("%s: dev is null\n", __func__);
		return -EINVAL;
	}
	if ((clk_id_req < 0 || clk_id_req >= MAX_CLK) ||
	    (default_clk_id < 0 || default_clk_id >= MAX_CLK)) {
		pr_err("%s: Invalid clk_id_req: %d or default_clk_id: %d\n",
		       __func__, clk_id_req, default_clk_id);
		return -EINVAL;
	}
	clk_dev = bolero_get_rsc_clk_device_ptr(dev->parent);
	if (!clk_dev) {
		pr_err("%s: Invalid rsc clk device\n", __func__);
		return -EINVAL;
	}
	priv = dev_get_drvdata(clk_dev);
	if (!priv) {
		pr_err("%s: Invalid rsc clk private data\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&priv->rsc_clk_lock);
	if (!priv->dev_up) {
		dev_err_ratelimited(priv->dev, "%s: SSR is in progress..\n",
				    __func__);
		ret = -EINVAL;
		goto err;
	}
	priv->default_clk_id[clk_id_req] = default_clk_id;
	if (default_clk_id != clk_id_req)
		mux_switch = true;

	if (mux_switch) {
		if (clk_id_req != VA_CORE_CLK) {
			ret = bolero_clk_rsc_mux1_clk_request(priv, clk_id_req,
							      enable);
			if (ret < 0)
				goto err;
		}
	} else {
		ret = bolero_clk_rsc_mux0_clk_request(priv, clk_id_req, enable);
		if (ret < 0)
			goto err;
	}

	ret = bolero_clk_rsc_check_and_update_va_clk(priv, mux_switch,
						     clk_id_req,
						     enable);
	if (ret < 0)
		goto err;

	dev_dbg(priv->dev, "%s: clk_cnt: %d for requested clk: %d, enable: %d\n",
		__func__, priv->clk_cnt[clk_id_req], clk_id_req,
		enable);

	mutex_unlock(&priv->rsc_clk_lock);

	return 0;

err:
	mutex_unlock(&priv->rsc_clk_lock);
	return ret;
}
EXPORT_SYMBOL(bolero_clk_rsc_request_clock);
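
/*
 * Illustrative usage (a sketch; "va_dev" is a placeholder for a macro's
 * struct device, not an identifier from this driver): a macro passes the
 * mux0 default clock ID together with the clock it actually wants. When
 * the two differ, the mux1 path and the muxsel register come into play,
 * e.g. a VA macro defaulting to the TX core clock:
 *
 *	ret = bolero_clk_rsc_request_clock(va_dev, TX_CORE_CLK,
 *					   VA_CORE_CLK, true);
 *	if (ret < 0)
 *		return ret;
 *	...
 *	bolero_clk_rsc_request_clock(va_dev, TX_CORE_CLK, VA_CORE_CLK, false);
 */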


static int bolero_clk_rsc_probe(struct platform_device *pdev)
{
	int ret = 0, fs_gen_size, i, j;
	const char **clk_name_array;
	int clk_cnt;
	struct clk *clk;
	struct bolero_clk_rsc *priv = NULL;
	u32 muxsel = 0;

	priv = devm_kzalloc(&pdev->dev, sizeof(struct bolero_clk_rsc),
			    GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Get clk fs gen sequence from device tree */
	if (!of_find_property(pdev->dev.of_node, "qcom,fs-gen-sequence",
			      &fs_gen_size)) {
		dev_err(&pdev->dev, "%s: unable to find qcom,fs-gen-sequence property\n",
			__func__);
		ret = -EINVAL;
		goto err;
	}
	priv->num_fs_reg = fs_gen_size / (2 * sizeof(u32));
	priv->fs_gen_seq = devm_kzalloc(&pdev->dev, fs_gen_size, GFP_KERNEL);
	if (!priv->fs_gen_seq) {
		ret = -ENOMEM;
		goto err;
	}
	dev_dbg(&pdev->dev, "%s: num_fs_reg %d\n", __func__, priv->num_fs_reg);
	/* Parse fs-gen-sequence */
	ret = of_property_read_u32_array(pdev->dev.of_node,
					 "qcom,fs-gen-sequence",
					 priv->fs_gen_seq,
					 priv->num_fs_reg * 2);
	if (ret < 0) {
		dev_err(&pdev->dev, "%s: unable to parse fs-gen-sequence, ret = %d\n",
			__func__, ret);
		goto err;
	}

	/* Get clk details from device tree */
	clk_cnt = of_property_count_strings(pdev->dev.of_node, "clock-names");
	if (clk_cnt <= 0 || clk_cnt > MAX_CLK) {
		dev_err(&pdev->dev, "%s: Invalid number of clocks %d\n",
			__func__, clk_cnt);
		ret = -EINVAL;
		goto err;
	}
	clk_name_array = devm_kzalloc(&pdev->dev, clk_cnt * sizeof(char *),
				      GFP_KERNEL);
	if (!clk_name_array) {
		ret = -ENOMEM;
		goto err;
	}

	ret = of_property_read_string_array(pdev->dev.of_node, "clock-names",
					    clk_name_array, clk_cnt);

	for (i = 0; i < MAX_CLK; i++) {
		priv->clk[i] = NULL;
		for (j = 0; j < clk_cnt; j++) {
			if (!strcmp(clk_src_name[i], clk_name_array[j])) {
				clk = devm_clk_get(&pdev->dev, clk_src_name[i]);
				if (IS_ERR(clk)) {
					ret = PTR_ERR(clk);
					dev_err(&pdev->dev, "%s: clk get failed for %s with ret %d\n",
						__func__, clk_src_name[i], ret);
					goto err;
				}
				priv->clk[i] = clk;
				dev_dbg(&pdev->dev, "%s: clk get success for clk name %s\n",
					__func__, clk_src_name[i]);
			}
		}
	}
	ret = of_property_read_u32(pdev->dev.of_node,
				   "qcom,rx_mclk_mode_muxsel", &muxsel);
	if (ret) {
		dev_dbg(&pdev->dev, "%s: could not find qcom,rx_mclk_mode_muxsel entry in dt\n",
			__func__);
	} else {
		priv->rx_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
		if (!priv->rx_clk_muxsel) {
			dev_err(&pdev->dev, "%s: ioremap failed for rx muxsel\n",
				__func__);
			return -ENOMEM;
		}
	}
	ret = of_property_read_u32(pdev->dev.of_node,
				   "qcom,wsa_mclk_mode_muxsel", &muxsel);
	if (ret) {
		dev_dbg(&pdev->dev, "%s: could not find qcom,wsa_mclk_mode_muxsel entry in dt\n",
			__func__);
	} else {
		priv->wsa_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
		if (!priv->wsa_clk_muxsel) {
			dev_err(&pdev->dev, "%s: ioremap failed for wsa muxsel\n",
				__func__);
			return -ENOMEM;
		}
	}
	ret = of_property_read_u32(pdev->dev.of_node,
				   "qcom,va_mclk_mode_muxsel", &muxsel);
	if (ret) {
		dev_dbg(&pdev->dev, "%s: could not find qcom,va_mclk_mode_muxsel entry in dt\n",
			__func__);
	} else {
		priv->va_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
		if (!priv->va_clk_muxsel) {
			dev_err(&pdev->dev, "%s: ioremap failed for va muxsel\n",
				__func__);
			return -ENOMEM;
		}
	}

	ret = bolero_register_res_clk(&pdev->dev, bolero_clk_rsc_cb);
	if (ret < 0) {
		dev_err(&pdev->dev, "%s: Failed to register cb %d\n",
			__func__, ret);
		goto err;
	}
	priv->dev = &pdev->dev;
	priv->dev_up = true;
	mutex_init(&priv->rsc_clk_lock);
	dev_set_drvdata(&pdev->dev, priv);

err:
	return ret;
}
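
/*
 * Illustrative device tree node (a sketch; register addresses, phandles
 * and the fs-gen register/mask pairs are placeholders, not values from
 * any real board file):
 *
 *	bolero_clk_rsc: bolero-clk-rsc-mngr {
 *		compatible = "qcom,bolero-clk-rsc-mngr";
 *		qcom,fs-gen-sequence = <0x3000 0x1>, <0x3004 0x1>;
 *		clock-names = "tx_core_clk", "tx_npl_clk";
 *		clocks = <&clock_audio_tx_1 0>, <&clock_audio_tx_2 0>;
 *		qcom,rx_mclk_mode_muxsel = <0x62c25020>;
 *	};
 *
 * "clock-names" entries must match the names in clk_src_name[]; each
 * muxsel property, when present, supplies a register address that is
 * ioremapped and written by the mux1 request path.
 */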

static int bolero_clk_rsc_remove(struct platform_device *pdev)
{
	struct bolero_clk_rsc *priv = dev_get_drvdata(&pdev->dev);

	bolero_unregister_res_clk(&pdev->dev);
	of_platform_depopulate(&pdev->dev);
	if (!priv)
		return -EINVAL;
	mutex_destroy(&priv->rsc_clk_lock);

	return 0;
}

static const struct of_device_id bolero_clk_rsc_dt_match[] = {
	{.compatible = "qcom,bolero-clk-rsc-mngr"},
	{}
};
MODULE_DEVICE_TABLE(of, bolero_clk_rsc_dt_match);

static struct platform_driver bolero_clk_rsc_mgr = {
	.driver = {
		.name = "bolero-clk-rsc-mngr",
		.owner = THIS_MODULE,
		.of_match_table = bolero_clk_rsc_dt_match,
	},
	.probe = bolero_clk_rsc_probe,
	.remove = bolero_clk_rsc_remove,
};

int bolero_clk_rsc_mgr_init(void)
{
	return platform_driver_register(&bolero_clk_rsc_mgr);
}

void bolero_clk_rsc_mgr_exit(void)
{
	platform_driver_unregister(&bolero_clk_rsc_mgr);
}
MODULE_DESCRIPTION("Bolero clock resource manager driver");
MODULE_LICENSE("GPL v2");