blob: 7e28c5bfeb869cb5cf9136ec800f966844283554 [file] [log] [blame]
Vidyakumar Athota5d45f4c2019-03-10 22:35:07 -07001// SPDX-License-Identifier: GPL-2.0-only
2/*
Aditya Bavanari76901542019-12-30 18:20:47 +05303 * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
Vidyakumar Athota5d45f4c2019-03-10 22:35:07 -07004 */
5
6#include <linux/of_platform.h>
7#include <linux/module.h>
8#include <linux/io.h>
9#include <linux/init.h>
10#include <linux/platform_device.h>
11#include <linux/kernel.h>
12#include <linux/clk.h>
Meng Wang8ef0cc22019-05-08 15:12:56 +080013#include <linux/clk-provider.h>
Vidyakumar Athota5d45f4c2019-03-10 22:35:07 -070014#include "bolero-cdc.h"
15#include "bolero-clk-rsc.h"
16
17#define DRV_NAME "bolero-clk-rsc"
18#define BOLERO_CLK_NAME_LENGTH 30
19#define NPL_CLK_OFFSET (TX_NPL_CLK - TX_CORE_CLK)
20
21static char clk_src_name[MAX_CLK][BOLERO_CLK_NAME_LENGTH] = {
22 "tx_core_clk",
23 "rx_core_clk",
24 "wsa_core_clk",
25 "va_core_clk",
26 "tx_npl_clk",
27 "rx_npl_clk",
28 "wsa_npl_clk",
29 "va_npl_clk",
30};
31
/**
 * struct bolero_clk_rsc - private state of the bolero clock resource manager
 * @dev: device pointer of the clk rsc platform device
 * @rsc_clk_lock: serializes clock enable/disable requests and SSR state
 * @fs_gen_lock: serializes the fs generation register sequence
 * @clk: clock handles for all core/NPL clocks, indexed by clk id
 * @clk_cnt: per-clock enable reference count
 * @reg_seq_en_cnt: reference count for the fs-gen register sequence
 * @va_tx_clk_cnt: number of VA clock requests satisfied via TX_CORE_CLK
 * @dev_up: false while subsystem restart (SSR) is in progress
 * @num_fs_reg: number of register/value pairs in @fs_gen_seq
 * @fs_gen_seq: register/value pairs parsed from DT "qcom,fs-gen-sequence"
 * @default_clk_id: per-clock default (mux0) clock id, set on each request
 * @regmap: codec regmap; NOTE(review): never assigned in this file — verify
 *          whether this field is still needed
 * @rx_clk_muxsel: ioremapped RX MCLK mode GFMux select register
 * @wsa_clk_muxsel: ioremapped WSA MCLK mode GFMux select register
 * @va_clk_muxsel: ioremapped VA MCLK mode GFMux select register
 */
struct bolero_clk_rsc {
	struct device *dev;
	struct mutex rsc_clk_lock;
	struct mutex fs_gen_lock;
	struct clk *clk[MAX_CLK];
	int clk_cnt[MAX_CLK];
	int reg_seq_en_cnt;
	int va_tx_clk_cnt;
	bool dev_up;
	u32 num_fs_reg;
	u32 *fs_gen_seq;
	int default_clk_id[MAX_CLK];
	struct regmap *regmap;
	char __iomem *rx_clk_muxsel;
	char __iomem *wsa_clk_muxsel;
	char __iomem *va_clk_muxsel;
};
49
50static int bolero_clk_rsc_cb(struct device *dev, u16 event)
51{
52 struct bolero_clk_rsc *priv;
53
54 if (!dev) {
55 pr_err("%s: Invalid device pointer\n",
56 __func__);
57 return -EINVAL;
58 }
59
60 priv = dev_get_drvdata(dev);
61 if (!priv) {
62 pr_err("%s: Invalid clk rsc priviate data\n",
63 __func__);
64 return -EINVAL;
65 }
66
67 mutex_lock(&priv->rsc_clk_lock);
68 if (event == BOLERO_MACRO_EVT_SSR_UP)
69 priv->dev_up = true;
70 else if (event == BOLERO_MACRO_EVT_SSR_DOWN)
71 priv->dev_up = false;
72 mutex_unlock(&priv->rsc_clk_lock);
73
74 return 0;
75}
76
77static char __iomem *bolero_clk_rsc_get_clk_muxsel(struct bolero_clk_rsc *priv,
78 int clk_id)
79{
80 switch (clk_id) {
81 case RX_CORE_CLK:
82 return priv->rx_clk_muxsel;
83 case WSA_CORE_CLK:
84 return priv->wsa_clk_muxsel;
85 case VA_CORE_CLK:
86 return priv->va_clk_muxsel;
87 case TX_CORE_CLK:
88 default:
89 dev_err_ratelimited(priv->dev, "%s: Invalid case\n", __func__);
90 break;
91 }
92
93 return NULL;
94}
95
Meng Wang8ef0cc22019-05-08 15:12:56 +080096int bolero_rsc_clk_reset(struct device *dev, int clk_id)
97{
98 struct device *clk_dev = NULL;
99 struct bolero_clk_rsc *priv = NULL;
100 int count = 0;
101
102 if (!dev) {
103 pr_err("%s: dev is null %d\n", __func__);
104 return -EINVAL;
105 }
106
107 if (clk_id < 0 || clk_id >= MAX_CLK - NPL_CLK_OFFSET) {
108 pr_err("%s: Invalid clk_id: %d\n",
109 __func__, clk_id);
110 return -EINVAL;
111 }
112
113 clk_dev = bolero_get_rsc_clk_device_ptr(dev->parent);
114 if (!clk_dev) {
115 pr_err("%s: Invalid rsc clk device\n", __func__);
116 return -EINVAL;
117 }
118
119 priv = dev_get_drvdata(clk_dev);
120 if (!priv) {
121 pr_err("%s: Invalid rsc clk priviate data\n", __func__);
122 return -EINVAL;
123 }
124 mutex_lock(&priv->rsc_clk_lock);
125 while (__clk_is_enabled(priv->clk[clk_id])) {
126 clk_disable_unprepare(priv->clk[clk_id + NPL_CLK_OFFSET]);
127 clk_disable_unprepare(priv->clk[clk_id]);
128 count++;
129 }
130 dev_dbg(priv->dev,
131 "%s: clock reset after ssr, count %d\n", __func__, count);
Aditya Bavanarif500a1d2019-09-16 18:27:51 -0700132
133 trace_printk("%s: clock reset after ssr, count %d\n", __func__, count);
Meng Wang8ef0cc22019-05-08 15:12:56 +0800134 while (count--) {
135 clk_prepare_enable(priv->clk[clk_id]);
136 clk_prepare_enable(priv->clk[clk_id + NPL_CLK_OFFSET]);
137 }
138 mutex_unlock(&priv->rsc_clk_lock);
139 return 0;
140}
141EXPORT_SYMBOL(bolero_rsc_clk_reset);
142
Aditya Bavanaricfc65e82019-10-03 22:34:23 +0530143void bolero_clk_rsc_enable_all_clocks(struct device *dev, bool enable)
144{
145 struct device *clk_dev = NULL;
146 struct bolero_clk_rsc *priv = NULL;
147 int i = 0;
148
149 if (!dev) {
150 pr_err("%s: dev is null %d\n", __func__);
151 return;
152 }
153
154 clk_dev = bolero_get_rsc_clk_device_ptr(dev->parent);
155 if (!clk_dev) {
156 pr_err("%s: Invalid rsc clk device\n", __func__);
157 return;
158 }
159
160 priv = dev_get_drvdata(clk_dev);
161 if (!priv) {
162 pr_err("%s: Invalid rsc clk private data\n", __func__);
163 return;
164 }
165 mutex_lock(&priv->rsc_clk_lock);
166 for (i = 0; i < MAX_CLK - NPL_CLK_OFFSET; i++) {
167 if (enable) {
168 if (priv->clk[i])
169 clk_prepare_enable(priv->clk[i]);
170 if (priv->clk[i + NPL_CLK_OFFSET])
171 clk_prepare_enable(
172 priv->clk[i + NPL_CLK_OFFSET]);
173 } else {
174 if (priv->clk[i + NPL_CLK_OFFSET])
175 clk_disable_unprepare(
176 priv->clk[i + NPL_CLK_OFFSET]);
177 if (priv->clk[i])
178 clk_disable_unprepare(priv->clk[i]);
179 }
180 }
181 mutex_unlock(&priv->rsc_clk_lock);
182 return;
183}
184EXPORT_SYMBOL(bolero_clk_rsc_enable_all_clocks);
185
/*
 * bolero_clk_rsc_mux0_clk_request - reference-counted enable/disable of a
 * core clock (and its paired NPL clock) on the default mux0 path.
 * @priv: clk rsc private data
 * @clk_id: core clock id; NPL sibling is at clk_id + NPL_CLK_OFFSET
 * @enable: true to enable, false to disable
 *
 * Hardware is touched only on the 0 -> 1 and 1 -> 0 refcount transitions.
 * Callers in this file invoke this with priv->rsc_clk_lock held.
 *
 * Returns 0 on success or a negative errno from clk_prepare_enable().
 */
static int bolero_clk_rsc_mux0_clk_request(struct bolero_clk_rsc *priv,
					   int clk_id,
					   bool enable)
{
	int ret = 0;

	if (enable) {
		/* Enable Requested Core clk */
		if (priv->clk_cnt[clk_id] == 0) {
			ret = clk_prepare_enable(priv->clk[clk_id]);
			if (ret < 0) {
				dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
						    __func__, clk_id);
				goto done;
			}
			/* NPL sibling is optional; enable it only if present */
			if (priv->clk[clk_id + NPL_CLK_OFFSET]) {
				ret = clk_prepare_enable(
					priv->clk[clk_id + NPL_CLK_OFFSET]);
				if (ret < 0) {
					dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
							    __func__,
							    clk_id + NPL_CLK_OFFSET);
					goto err;
				}
			}
		}
		priv->clk_cnt[clk_id]++;
	} else {
		/* Unbalanced disable: clamp count to zero and bail out */
		if (priv->clk_cnt[clk_id] <= 0) {
			dev_err_ratelimited(priv->dev, "%s: clk_id: %d is already disabled\n",
					    __func__, clk_id);
			priv->clk_cnt[clk_id] = 0;
			goto done;
		}
		priv->clk_cnt[clk_id]--;
		if (priv->clk_cnt[clk_id] == 0) {
			/* Disable in reverse order: NPL clock before core */
			if (priv->clk[clk_id + NPL_CLK_OFFSET])
				clk_disable_unprepare(
					priv->clk[clk_id + NPL_CLK_OFFSET]);
			clk_disable_unprepare(priv->clk[clk_id]);
		}
	}
	return ret;

err:
	/* NPL enable failed: roll back the core clock enable */
	clk_disable_unprepare(priv->clk[clk_id]);
done:
	return ret;
}
235
/*
 * bolero_clk_rsc_mux1_clk_request - reference-counted enable/disable of a
 * core clock routed through the mux1 (GFMux) path.
 * @priv: clk rsc private data
 * @clk_id: requested core clock id
 * @enable: true to enable, false to disable
 *
 * On the 0 -> 1 transition the default (mux0) clock is enabled briefly so
 * the GFMux select register can be switched glitch-free, then released.
 * VA_CORE_CLK skips the mux0 bracketing and the muxsel write entirely
 * (see the workaround comments below).
 * Callers in this file invoke this with priv->rsc_clk_lock held.
 *
 * Returns 0 on success or a negative errno.
 */
static int bolero_clk_rsc_mux1_clk_request(struct bolero_clk_rsc *priv,
					   int clk_id,
					   bool enable)
{
	char __iomem *clk_muxsel = NULL;
	int ret = 0;
	int default_clk_id = priv->default_clk_id[clk_id];
	u32 muxsel = 0;

	clk_muxsel = bolero_clk_rsc_get_clk_muxsel(priv, clk_id);
	if (!clk_muxsel) {
		ret = -EINVAL;
		goto done;
	}

	if (enable) {
		if (priv->clk_cnt[clk_id] == 0) {
			/* Bring up the default clock to bracket the mux switch */
			if (clk_id != VA_CORE_CLK) {
				ret = bolero_clk_rsc_mux0_clk_request(priv,
								default_clk_id,
								true);
				if (ret < 0)
					goto done;
			}

			ret = clk_prepare_enable(priv->clk[clk_id]);
			if (ret < 0) {
				dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
						    __func__, clk_id);
				goto err_clk;
			}
			if (priv->clk[clk_id + NPL_CLK_OFFSET]) {
				ret = clk_prepare_enable(
					priv->clk[clk_id + NPL_CLK_OFFSET]);
				if (ret < 0) {
					dev_err_ratelimited(priv->dev, "%s:clk_id %d enable failed\n",
							    __func__,
							    clk_id + NPL_CLK_OFFSET);
					goto err_npl_clk;
				}
			}

			/*
			 * Temp SW workaround to address a glitch issue of
			 * VA GFMux instance responsible for switching from
			 * TX MCLK to VA MCLK. This configuration would be taken
			 * care in DSP itself
			 */
			if (clk_id != VA_CORE_CLK) {
				iowrite32(0x1, clk_muxsel);
				muxsel = ioread32(clk_muxsel);
				trace_printk("%s: muxsel value after enable: %d\n",
					     __func__, muxsel);
				/* Mux switched; default clock no longer needed */
				bolero_clk_rsc_mux0_clk_request(priv,
								default_clk_id,
								false);
			}
		}
		priv->clk_cnt[clk_id]++;
	} else {
		/* Unbalanced disable: clamp count to zero and bail out */
		if (priv->clk_cnt[clk_id] <= 0) {
			dev_err_ratelimited(priv->dev, "%s: clk_id: %d is already disabled\n",
					    __func__, clk_id);
			priv->clk_cnt[clk_id] = 0;
			goto done;
		}
		priv->clk_cnt[clk_id]--;
		if (priv->clk_cnt[clk_id] == 0) {
			/* Bracket the mux switch-back with the default clock */
			if (clk_id != VA_CORE_CLK) {
				ret = bolero_clk_rsc_mux0_clk_request(priv,
						default_clk_id, true);

				if (!ret) {
					/*
					 * Temp SW workaround to address a glitch issue
					 * of VA GFMux instance responsible for
					 * switching from TX MCLK to VA MCLK.
					 * This configuration would be taken
					 * care in DSP itself.
					 */
					iowrite32(0x0, clk_muxsel);
					muxsel = ioread32(clk_muxsel);
					trace_printk("%s: muxsel value after disable: %d\n",
						     __func__, muxsel);
				}
			}
			if (priv->clk[clk_id + NPL_CLK_OFFSET])
				clk_disable_unprepare(
					priv->clk[clk_id + NPL_CLK_OFFSET]);
			clk_disable_unprepare(priv->clk[clk_id]);

			if (clk_id != VA_CORE_CLK) {
				/* Release default clock only if it was enabled above */
				if (!ret)
					bolero_clk_rsc_mux0_clk_request(priv,
							default_clk_id, false);
			}
		}
	}
	return ret;

err_npl_clk:
	clk_disable_unprepare(priv->clk[clk_id]);

err_clk:
	if (clk_id != VA_CORE_CLK)
		bolero_clk_rsc_mux0_clk_request(priv, default_clk_id, false);
done:
	return ret;
}
345
/*
 * bolero_clk_rsc_check_and_update_va_clk - keep the VA macro clock source
 * consistent when VA and TX usecases run concurrently (VA can be fed from
 * either VA_CORE_CLK or TX_CORE_CLK).
 * @priv: clk rsc private data
 * @mux_switch: true when the request came in on the mux1 path
 * @clk_id: the clock id of the triggering request
 * @enable: direction of the triggering request
 *
 * priv->va_tx_clk_cnt tracks how many VA requests are currently being
 * satisfied by TX_CORE_CLK instead of VA_CORE_CLK.
 * Called with priv->rsc_clk_lock held (see bolero_clk_rsc_request_clock).
 *
 * Returns 0 on success or a negative errno.
 */
static int bolero_clk_rsc_check_and_update_va_clk(struct bolero_clk_rsc *priv,
						  bool mux_switch,
						  int clk_id,
						  bool enable)
{
	int ret = 0;

	if (enable) {
		if (clk_id == VA_CORE_CLK && mux_switch) {
			/*
			 * Handle the following usecase scenarios during enable
			 * 1. VA only, Active clk is VA_CORE_CLK
			 * 2. record -> record + VA, Active clk is TX_CORE_CLK
			 */
			if (priv->clk_cnt[TX_CORE_CLK] == 0) {
				ret = bolero_clk_rsc_mux1_clk_request(priv,
						VA_CORE_CLK, enable);
				if (ret < 0)
					goto err;
			} else {
				/* TX already running: piggyback VA on TX clock */
				ret = bolero_clk_rsc_mux0_clk_request(priv,
						TX_CORE_CLK, enable);
				if (ret < 0)
					goto err;
				priv->va_tx_clk_cnt++;
			}
		} else if ((priv->clk_cnt[TX_CORE_CLK] > 0) &&
			   (priv->clk_cnt[VA_CORE_CLK] > 0)) {
			/*
			 * Handle following concurrency scenario during enable
			 * 1. VA-> Record+VA, Increment TX CLK and Disable VA
			 * 2. VA-> Playback+VA, Increment TX CLK and Disable VA
			 */
			while (priv->clk_cnt[VA_CORE_CLK] > 0) {
				ret = bolero_clk_rsc_mux0_clk_request(priv,
						TX_CORE_CLK, true);
				if (ret < 0)
					goto err;

				bolero_clk_rsc_mux1_clk_request(priv,
						VA_CORE_CLK, false);
				priv->va_tx_clk_cnt++;
			}
		}
	} else {
		if (clk_id == VA_CORE_CLK && mux_switch) {
			/*
			 * Handle the following usecase scenarios during disable
			 * 1. VA only, disable VA_CORE_CLK
			 * 2. Record + VA -> Record, decrement TX CLK count
			 */
			if (priv->clk_cnt[VA_CORE_CLK]) {
				bolero_clk_rsc_mux1_clk_request(priv,
						VA_CORE_CLK, enable);
			} else if (priv->va_tx_clk_cnt) {
				bolero_clk_rsc_mux0_clk_request(priv,
						TX_CORE_CLK, enable);
				priv->va_tx_clk_cnt--;
			}
		} else if (priv->va_tx_clk_cnt == priv->clk_cnt[TX_CORE_CLK]) {
			/*
			 * Handle the following usecase scenarios during disable
			 * Record+VA-> VA: enable VA CLK, decrement TX CLK count
			 */
			while (priv->va_tx_clk_cnt) {
				ret = bolero_clk_rsc_mux1_clk_request(priv,
						VA_CORE_CLK, true);
				if (ret < 0)
					goto err;

				bolero_clk_rsc_mux0_clk_request(priv,
						TX_CORE_CLK, false);
				priv->va_tx_clk_cnt--;
			}
		}
	}

err:
	return ret;
}
426
427/**
428 * bolero_clk_rsc_fs_gen_request - request to enable/disable fs generation
429 * sequence
430 *
431 * @dev: Macro device pointer
432 * @enable: enable or disable flag
433 */
434void bolero_clk_rsc_fs_gen_request(struct device *dev, bool enable)
435{
436 int i;
437 struct regmap *regmap;
438 struct device *clk_dev = NULL;
439 struct bolero_clk_rsc *priv = NULL;
440
441 if (!dev) {
442 pr_err("%s: dev is null %d\n", __func__);
443 return;
444 }
445 clk_dev = bolero_get_rsc_clk_device_ptr(dev->parent);
446 if (!clk_dev) {
447 pr_err("%s: Invalid rsc clk device\n", __func__);
448 return;
449 }
450 priv = dev_get_drvdata(clk_dev);
451 if (!priv) {
452 pr_err("%s: Invalid rsc clk priviate data\n", __func__);
453 return;
454 }
455 regmap = dev_get_regmap(priv->dev->parent, NULL);
Karthikeyan Mani9aca5b12019-05-01 10:47:43 -0700456 if (!regmap) {
457 pr_err("%s: regmap is null\n", __func__);
458 return;
459 }
Aditya Bavanari7100fb82019-12-06 19:49:01 +0530460 mutex_lock(&priv->fs_gen_lock);
Vidyakumar Athota5d45f4c2019-03-10 22:35:07 -0700461 if (enable) {
462 if (priv->reg_seq_en_cnt++ == 0) {
463 for (i = 0; i < (priv->num_fs_reg * 2); i += 2) {
464 dev_dbg(priv->dev, "%s: Register: %d, value: %d\n",
465 __func__, priv->fs_gen_seq[i],
466 priv->fs_gen_seq[i + 1]);
467 regmap_update_bits(regmap,
468 priv->fs_gen_seq[i],
469 priv->fs_gen_seq[i + 1],
470 priv->fs_gen_seq[i + 1]);
471 }
472 }
473 } else {
474 if (priv->reg_seq_en_cnt <= 0) {
475 dev_err_ratelimited(priv->dev, "%s: req_seq_cnt: %d is already disabled\n",
476 __func__, priv->reg_seq_en_cnt);
477 priv->reg_seq_en_cnt = 0;
Aditya Bavanari7100fb82019-12-06 19:49:01 +0530478 mutex_unlock(&priv->fs_gen_lock);
Vidyakumar Athota5d45f4c2019-03-10 22:35:07 -0700479 return;
480 }
481 if (--priv->reg_seq_en_cnt == 0) {
482 for (i = ((priv->num_fs_reg - 1) * 2); i >= 0; i -= 2) {
483 dev_dbg(priv->dev, "%s: Register: %d, value: %d\n",
484 __func__, priv->fs_gen_seq[i],
485 priv->fs_gen_seq[i + 1]);
486 regmap_update_bits(regmap, priv->fs_gen_seq[i],
487 priv->fs_gen_seq[i + 1], 0x0);
488 }
489 }
490 }
Aditya Bavanari7100fb82019-12-06 19:49:01 +0530491 mutex_unlock(&priv->fs_gen_lock);
Vidyakumar Athota5d45f4c2019-03-10 22:35:07 -0700492}
493EXPORT_SYMBOL(bolero_clk_rsc_fs_gen_request);
494
495/**
496 * bolero_clk_rsc_request_clock - request for clock to
497 * enable/disable
498 *
499 * @dev: Macro device pointer.
500 * @default_clk_id: mux0 Core clock ID input.
501 * @clk_id_req: Core clock ID requested to enable/disable
502 * @enable: enable or disable clock flag
503 *
504 * Returns 0 on success or -EINVAL on error.
505 */
506int bolero_clk_rsc_request_clock(struct device *dev,
507 int default_clk_id,
508 int clk_id_req,
509 bool enable)
510{
511 int ret = 0;
512 struct device *clk_dev = NULL;
513 struct bolero_clk_rsc *priv = NULL;
514 bool mux_switch = false;
515
516 if (!dev) {
517 pr_err("%s: dev is null %d\n", __func__);
518 return -EINVAL;
519 }
520 if ((clk_id_req < 0 || clk_id_req >= MAX_CLK) &&
521 (default_clk_id < 0 || default_clk_id >= MAX_CLK)) {
522 pr_err("%s: Invalid clk_id_req: %d or default_clk_id: %d\n",
523 __func__, clk_id_req, default_clk_id);
524 return -EINVAL;
525 }
526 clk_dev = bolero_get_rsc_clk_device_ptr(dev->parent);
527 if (!clk_dev) {
528 pr_err("%s: Invalid rsc clk device\n", __func__);
529 return -EINVAL;
530 }
531 priv = dev_get_drvdata(clk_dev);
532 if (!priv) {
533 pr_err("%s: Invalid rsc clk priviate data\n", __func__);
534 return -EINVAL;
535 }
536
537 mutex_lock(&priv->rsc_clk_lock);
Aditya Bavanarif4a471d2019-02-19 17:57:12 +0530538 if (!priv->dev_up && enable) {
Vidyakumar Athota5d45f4c2019-03-10 22:35:07 -0700539 dev_err_ratelimited(priv->dev, "%s: SSR is in progress..\n",
540 __func__);
Aditya Bavanarif500a1d2019-09-16 18:27:51 -0700541 trace_printk("%s: SSR is in progress..\n", __func__);
Vidyakumar Athota5d45f4c2019-03-10 22:35:07 -0700542 ret = -EINVAL;
543 goto err;
544 }
545 priv->default_clk_id[clk_id_req] = default_clk_id;
546 if (default_clk_id != clk_id_req)
547 mux_switch = true;
548
549 if (mux_switch) {
550 if (clk_id_req != VA_CORE_CLK) {
551 ret = bolero_clk_rsc_mux1_clk_request(priv, clk_id_req,
552 enable);
553 if (ret < 0)
554 goto err;
555 }
556 } else {
557 ret = bolero_clk_rsc_mux0_clk_request(priv, clk_id_req, enable);
558 if (ret < 0)
559 goto err;
560 }
561
562 ret = bolero_clk_rsc_check_and_update_va_clk(priv, mux_switch,
563 clk_id_req,
564 enable);
565 if (ret < 0)
566 goto err;
567
568 dev_dbg(priv->dev, "%s: clk_cnt: %d for requested clk: %d, enable: %d\n",
569 __func__, priv->clk_cnt[clk_id_req], clk_id_req,
570 enable);
Aditya Bavanarif500a1d2019-09-16 18:27:51 -0700571 trace_printk("%s: clk_cnt: %d for requested clk: %d, enable: %d\n",
572 __func__, priv->clk_cnt[clk_id_req], clk_id_req,
573 enable);
Vidyakumar Athota5d45f4c2019-03-10 22:35:07 -0700574
575 mutex_unlock(&priv->rsc_clk_lock);
576
577 return 0;
578
579err:
580 mutex_unlock(&priv->rsc_clk_lock);
581 return ret;
582}
583EXPORT_SYMBOL(bolero_clk_rsc_request_clock);
584
585
586static int bolero_clk_rsc_probe(struct platform_device *pdev)
587{
588 int ret = 0, fs_gen_size, i, j;
589 const char **clk_name_array;
590 int clk_cnt;
591 struct clk *clk;
592 struct bolero_clk_rsc *priv = NULL;
593 u32 muxsel = 0;
594
595 priv = devm_kzalloc(&pdev->dev, sizeof(struct bolero_clk_rsc),
596 GFP_KERNEL);
597 if (!priv)
598 return -ENOMEM;
599
600 /* Get clk fs gen sequence from device tree */
601 if (!of_find_property(pdev->dev.of_node, "qcom,fs-gen-sequence",
602 &fs_gen_size)) {
603 dev_err(&pdev->dev, "%s: unable to find qcom,fs-gen-sequence property\n",
604 __func__);
605 ret = -EINVAL;
606 goto err;
607 }
608 priv->num_fs_reg = fs_gen_size/(2 * sizeof(u32));
609 priv->fs_gen_seq = devm_kzalloc(&pdev->dev, fs_gen_size, GFP_KERNEL);
610 if (!priv->fs_gen_seq) {
611 ret = -ENOMEM;
612 goto err;
613 }
614 dev_dbg(&pdev->dev, "%s: num_fs_reg %d\n", __func__, priv->num_fs_reg);
615 /* Parse fs-gen-sequence */
616 ret = of_property_read_u32_array(pdev->dev.of_node,
617 "qcom,fs-gen-sequence",
618 priv->fs_gen_seq,
619 priv->num_fs_reg * 2);
620 if (ret < 0) {
621 dev_err(&pdev->dev, "%s: unable to parse fs-gen-sequence, ret = %d\n",
622 __func__, ret);
623 goto err;
624 }
625
626 /* Get clk details from device tree */
627 clk_cnt = of_property_count_strings(pdev->dev.of_node, "clock-names");
628 if (clk_cnt <= 0 || clk_cnt > MAX_CLK) {
629 dev_err(&pdev->dev, "%s: Invalid number of clocks %d",
630 __func__, clk_cnt);
631 ret = -EINVAL;
632 goto err;
633 }
634 clk_name_array = devm_kzalloc(&pdev->dev, clk_cnt * sizeof(char *),
635 GFP_KERNEL);
Karthikeyan Mani9aca5b12019-05-01 10:47:43 -0700636 if (!clk_name_array) {
637 ret = -ENOMEM;
638 goto err;
639 }
Vidyakumar Athota5d45f4c2019-03-10 22:35:07 -0700640
641 ret = of_property_read_string_array(pdev->dev.of_node, "clock-names",
642 clk_name_array, clk_cnt);
643
644 for (i = 0; i < MAX_CLK; i++) {
645 priv->clk[i] = NULL;
646 for (j = 0; j < clk_cnt; j++) {
647 if (!strcmp(clk_src_name[i], clk_name_array[j])) {
648 clk = devm_clk_get(&pdev->dev, clk_src_name[i]);
649 if (IS_ERR(clk)) {
650 ret = PTR_ERR(clk);
651 dev_err(&pdev->dev, "%s: clk get failed for %s with ret %d\n",
652 __func__, clk_src_name[i], ret);
653 goto err;
654 }
655 priv->clk[i] = clk;
656 dev_dbg(&pdev->dev, "%s: clk get success for clk name %s\n",
657 __func__, clk_src_name[i]);
658 }
659 }
660 }
661 ret = of_property_read_u32(pdev->dev.of_node,
662 "qcom,rx_mclk_mode_muxsel", &muxsel);
663 if (ret) {
664 dev_dbg(&pdev->dev, "%s: could not find qcom,rx_mclk_mode_muxsel entry in dt\n",
665 __func__);
666 } else {
667 priv->rx_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
668 if (!priv->rx_clk_muxsel) {
669 dev_err(&pdev->dev, "%s: ioremap failed for rx muxsel\n",
670 __func__);
671 return -ENOMEM;
672 }
673 }
674 ret = of_property_read_u32(pdev->dev.of_node,
675 "qcom,wsa_mclk_mode_muxsel", &muxsel);
676 if (ret) {
677 dev_dbg(&pdev->dev, "%s: could not find qcom,wsa_mclk_mode_muxsel entry in dt\n",
678 __func__);
679 } else {
680 priv->wsa_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
681 if (!priv->wsa_clk_muxsel) {
682 dev_err(&pdev->dev, "%s: ioremap failed for wsa muxsel\n",
683 __func__);
684 return -ENOMEM;
685 }
686 }
687 ret = of_property_read_u32(pdev->dev.of_node,
688 "qcom,va_mclk_mode_muxsel", &muxsel);
689 if (ret) {
690 dev_dbg(&pdev->dev, "%s: could not find qcom,va_mclk_mode_muxsel entry in dt\n",
691 __func__);
692 } else {
693 priv->va_clk_muxsel = devm_ioremap(&pdev->dev, muxsel, 0x4);
694 if (!priv->va_clk_muxsel) {
695 dev_err(&pdev->dev, "%s: ioremap failed for va muxsel\n",
696 __func__);
697 return -ENOMEM;
698 }
699 }
700
701 ret = bolero_register_res_clk(&pdev->dev, bolero_clk_rsc_cb);
702 if (ret < 0) {
703 dev_err(&pdev->dev, "%s: Failed to register cb %d",
704 __func__, ret);
705 goto err;
706 }
707 priv->dev = &pdev->dev;
708 priv->dev_up = true;
709 mutex_init(&priv->rsc_clk_lock);
Aditya Bavanari7100fb82019-12-06 19:49:01 +0530710 mutex_init(&priv->fs_gen_lock);
Vidyakumar Athota5d45f4c2019-03-10 22:35:07 -0700711 dev_set_drvdata(&pdev->dev, priv);
712
713err:
714 return ret;
715}
716
717static int bolero_clk_rsc_remove(struct platform_device *pdev)
718{
719 struct bolero_clk_rsc *priv = dev_get_drvdata(&pdev->dev);
720
721 bolero_unregister_res_clk(&pdev->dev);
722 of_platform_depopulate(&pdev->dev);
723 if (!priv)
724 return -EINVAL;
725 mutex_destroy(&priv->rsc_clk_lock);
Aditya Bavanari7100fb82019-12-06 19:49:01 +0530726 mutex_destroy(&priv->fs_gen_lock);
Vidyakumar Athota5d45f4c2019-03-10 22:35:07 -0700727
728 return 0;
729}
730
/* Device tree compatible match table for this driver */
static const struct of_device_id bolero_clk_rsc_dt_match[] = {
	{.compatible = "qcom,bolero-clk-rsc-mngr"},
	{}
};
MODULE_DEVICE_TABLE(of, bolero_clk_rsc_dt_match);
736
/* Platform driver for the bolero clock resource manager */
static struct platform_driver bolero_clk_rsc_mgr = {
	.driver = {
		.name = "bolero-clk-rsc-mngr",
		.owner = THIS_MODULE,
		.of_match_table = bolero_clk_rsc_dt_match,
		/* Prevent manual unbind via sysfs while clocks may be held */
		.suppress_bind_attrs = true,
	},
	.probe = bolero_clk_rsc_probe,
	.remove = bolero_clk_rsc_remove,
};
747
/* Register the clock resource manager driver; called from the bolero core */
int bolero_clk_rsc_mgr_init(void)
{
	return platform_driver_register(&bolero_clk_rsc_mgr);
}
752
/* Unregister the clock resource manager driver; called from the bolero core */
void bolero_clk_rsc_mgr_exit(void)
{
	platform_driver_unregister(&bolero_clk_rsc_mgr);
}
757MODULE_DESCRIPTION("Bolero clock resource manager driver");
758MODULE_LICENSE("GPL v2");