// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
 */

#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <soc/soundwire.h>
#include <soc/swr-common.h>
#include <linux/regmap.h>
#include <dsp/msm-audio-event-notify.h>
#include "swrm_registers.h"
#include "swr-mstr-ctrl.h"

#define SWR_NUM_PORTS    4 /* TODO - Get this info from DT */

#define SWRM_FRAME_SYNC_SEL    4000 /* 4KHz */
#define SWRM_FRAME_SYNC_SEL_NATIVE 3675 /* 3.675KHz */
#define SWRM_SYSTEM_RESUME_TIMEOUT_MS 700
#define SWRM_SYS_SUSPEND_WAIT 1

#define SWRM_DSD_PARAMS_PORT 4

#define SWR_BROADCAST_CMD_ID            0x0F
#define SWR_AUTO_SUSPEND_DELAY          1 /* delay in sec */
#define SWR_DEV_ID_MASK			0xFFFFFFFFFFFF
#define SWR_REG_VAL_PACK(data, dev, id, reg)	\
			((reg) | ((id) << 16) | ((dev) << 20) | ((data) << 24))
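
/*
 * The command word packed above is laid out, per my reading of the macro
 * (not taken from the HW spec), as: bits [15:0] slave register address,
 * [19:16] command id, [23:20] slave device number, [31:24] data byte.
 */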

#define SWR_INVALID_PARAM 0xFF
#define SWR_HSTOP_MAX_VAL 0xF
#define SWR_HSTART_MIN_VAL 0x0

#define ERR_AUTO_SUSPEND_TIMER_VAL 0x1

#define SWRM_INTERRUPT_STATUS_MASK 0x1FDFD
#define SWRM_LINK_STATUS_RETRY_CNT 100

#define SWRM_ROW_48 48
#define SWRM_ROW_50 50
#define SWRM_ROW_64 64
#define SWRM_COL_02 02
#define SWRM_COL_16 16

/* pm runtime auto suspend timer in msecs */
static int auto_suspend_timer = SWR_AUTO_SUSPEND_DELAY * 1000;
module_param(auto_suspend_timer, int, 0664);
MODULE_PARM_DESC(auto_suspend_timer, "timer for auto suspend");

enum {
	SWR_NOT_PRESENT, /* Device is detached/not present on the bus */
	SWR_ATTACHED_OK, /* Device is attached */
	SWR_ALERT, /* Device alerts master for any interrupts */
	SWR_RESERVED, /* Reserved */
};

enum {
	MASTER_ID_WSA = 1,
	MASTER_ID_RX,
	MASTER_ID_TX
};

enum {
	ENABLE_PENDING,
	DISABLE_PENDING
};

enum {
	LPASS_HW_CORE,
	LPASS_AUDIO_CORE,
};

#define TRUE 1
#define FALSE 0

#define SWRM_MAX_PORT_REG 120
#define SWRM_MAX_INIT_REG 11

#define MAX_FIFO_RD_FAIL_RETRY 3

static bool swrm_lock_sleep(struct swr_mstr_ctrl *swrm);
static void swrm_unlock_sleep(struct swr_mstr_ctrl *swrm);
static u32 swr_master_read(struct swr_mstr_ctrl *swrm, unsigned int reg_addr);
static void swr_master_write(struct swr_mstr_ctrl *swrm, u16 reg_addr, u32 val);

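/*
 * Map the mclk/bus-clock ratio to the divider code programmed into the
 * frame-control bank register. As used below the encoding is:
 * ratio 1 -> 0, 2 -> 1, 4 -> 2, 8 -> 3, 16 -> 4, 32 -> 5; any other
 * ratio falls back to 0, i.e. no division.
 */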
static u8 swrm_get_clk_div(int mclk_freq, int bus_clk_freq)
{
	int clk_div = 0;
	u8 div_val = 0;

	if (!mclk_freq || !bus_clk_freq)
		return 0;

	clk_div = (mclk_freq / bus_clk_freq);

	switch (clk_div) {
	case 32:
		div_val = 5;
		break;
	case 16:
		div_val = 4;
		break;
	case 8:
		div_val = 3;
		break;
	case 4:
		div_val = 2;
		break;
	case 2:
		div_val = 1;
		break;
	case 1:
	default:
		div_val = 0;
		break;
	}

	return div_val;
}

static bool swrm_is_msm_variant(int val)
{
	return (val == SWRM_VERSION_1_3);
}

#ifdef CONFIG_DEBUG_FS
static int swrm_debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

static int get_parameters(char *buf, u32 *param1, int num_of_par)
{
	char *token;
	int base, cnt;

	token = strsep(&buf, " ");
	for (cnt = 0; cnt < num_of_par; cnt++) {
		if (token) {
			if ((token[1] == 'x') || (token[1] == 'X'))
				base = 16;
			else
				base = 10;

			if (kstrtou32(token, base, &param1[cnt]) != 0)
				return -EINVAL;

			token = strsep(&buf, " ");
		} else
			return -EINVAL;
	}
	return 0;
}

static ssize_t swrm_reg_show(struct swr_mstr_ctrl *swrm, char __user *ubuf,
				size_t count, loff_t *ppos)
{
	int i, reg_val, len;
	ssize_t total = 0;
	char tmp_buf[SWR_MSTR_MAX_BUF_LEN];
	int rem = 0;

	if (!ubuf || !ppos)
		return 0;

	i = ((int) *ppos + SWR_MSTR_START_REG_ADDR);
	rem = i%4;

	if (rem)
		i = (i - rem);

	for (; i <= SWR_MSTR_MAX_REG_ADDR; i += 4) {
		usleep_range(100, 150);
		reg_val = swr_master_read(swrm, i);
		len = snprintf(tmp_buf, 25, "0x%.3x: 0x%.2x\n", i, reg_val);
		if (len < 0) {
			pr_err("%s: fail to fill the buffer\n", __func__);
			total = -EFAULT;
			goto copy_err;
		}
		if ((total + len) >= count - 1)
			break;
		if (copy_to_user((ubuf + total), tmp_buf, len)) {
			pr_err("%s: fail to copy reg dump\n", __func__);
			total = -EFAULT;
			goto copy_err;
		}
		*ppos += len;
		total += len;
	}

copy_err:
	return total;
}

static ssize_t swrm_debug_reg_dump(struct file *file, char __user *ubuf,
				size_t count, loff_t *ppos)
{
	struct swr_mstr_ctrl *swrm;

	if (!count || !file || !ppos || !ubuf)
		return -EINVAL;

	swrm = file->private_data;
	if (!swrm)
		return -EINVAL;

	if (*ppos < 0)
		return -EINVAL;

	return swrm_reg_show(swrm, ubuf, count, ppos);
}

static ssize_t swrm_debug_read(struct file *file, char __user *ubuf,
				size_t count, loff_t *ppos)
{
	char lbuf[SWR_MSTR_RD_BUF_LEN];
	struct swr_mstr_ctrl *swrm = NULL;

	if (!count || !file || !ppos || !ubuf)
		return -EINVAL;

	swrm = file->private_data;
	if (!swrm)
		return -EINVAL;

	if (*ppos < 0)
		return -EINVAL;

	snprintf(lbuf, sizeof(lbuf), "0x%x\n", swrm->read_data);

	return simple_read_from_buffer(ubuf, count, ppos, lbuf,
					strnlen(lbuf, 7));
}

static ssize_t swrm_debug_peek_write(struct file *file, const char __user *ubuf,
					size_t count, loff_t *ppos)
{
	char lbuf[SWR_MSTR_RD_BUF_LEN];
	int rc;
	u32 param[5];
	struct swr_mstr_ctrl *swrm = NULL;

	if (!count || !file || !ppos || !ubuf)
		return -EINVAL;

	swrm = file->private_data;
	if (!swrm)
		return -EINVAL;

	if (*ppos < 0)
		return -EINVAL;

	if (count > sizeof(lbuf) - 1)
		return -EINVAL;

	rc = copy_from_user(lbuf, ubuf, count);
	if (rc)
		return -EFAULT;

	lbuf[count] = '\0';
	rc = get_parameters(lbuf, param, 1);
	if ((param[0] <= SWR_MSTR_MAX_REG_ADDR) && (rc == 0))
		swrm->read_data = swr_master_read(swrm, param[0]);
	else
		rc = -EINVAL;

	if (rc == 0)
		rc = count;
	else
		dev_err(swrm->dev, "%s: rc = %d\n", __func__, rc);

	return rc;
}

static ssize_t swrm_debug_write(struct file *file,
	const char __user *ubuf, size_t count, loff_t *ppos)
{
	char lbuf[SWR_MSTR_WR_BUF_LEN];
	int rc;
	u32 param[5];
	struct swr_mstr_ctrl *swrm;

	if (!file || !ppos || !ubuf)
		return -EINVAL;

	swrm = file->private_data;
	if (!swrm)
		return -EINVAL;

	if (count > sizeof(lbuf) - 1)
		return -EINVAL;

	rc = copy_from_user(lbuf, ubuf, count);
	if (rc)
		return -EFAULT;

	lbuf[count] = '\0';
	rc = get_parameters(lbuf, param, 2);
	if ((param[0] <= SWR_MSTR_MAX_REG_ADDR) &&
		(param[1] <= 0xFFFFFFFF) &&
		(rc == 0))
		swr_master_write(swrm, param[0], param[1]);
	else
		rc = -EINVAL;

	if (rc == 0)
		rc = count;
	else
		pr_err("%s: rc = %d\n", __func__, rc);

	return rc;
}

static const struct file_operations swrm_debug_read_ops = {
	.open = swrm_debug_open,
	.write = swrm_debug_peek_write,
	.read = swrm_debug_read,
};

static const struct file_operations swrm_debug_write_ops = {
	.open = swrm_debug_open,
	.write = swrm_debug_write,
};

static const struct file_operations swrm_debug_dump_ops = {
	.open = swrm_debug_open,
	.read = swrm_debug_reg_dump,
};
#endif

static void swrm_reg_dump(struct swr_mstr_ctrl *swrm,
			  u32 *reg, u32 *val, int len, const char* func)
{
	int i = 0;

	for (i = 0; i < len; i++)
		dev_dbg(swrm->dev, "%s: reg = 0x%x val = 0x%x\n",
			func, reg[i], val[i]);
}

static bool is_swr_clk_needed(struct swr_mstr_ctrl *swrm)
{
	return ((swrm->version <= SWRM_VERSION_1_5_1) ? true : false);
}

static int swrm_request_hw_vote(struct swr_mstr_ctrl *swrm,
				int core_type, bool enable)
{
	int ret = 0;

	mutex_lock(&swrm->devlock);
	if (core_type == LPASS_HW_CORE) {
		if (swrm->lpass_core_hw_vote) {
			if (enable) {
				if (!swrm->dev_up) {
					dev_dbg(swrm->dev, "%s: device is down or SSR state\n",
						__func__);
					trace_printk("%s: device is down or SSR state\n",
						__func__);
					mutex_unlock(&swrm->devlock);
					return -ENODEV;
				}
				if (++swrm->hw_core_clk_en == 1) {
					ret =
					   clk_prepare_enable(
						swrm->lpass_core_hw_vote);
					if (ret < 0) {
						dev_err(swrm->dev,
							"%s:lpass core hw enable failed\n",
							__func__);
						--swrm->hw_core_clk_en;
					}
				}
			} else {
				--swrm->hw_core_clk_en;
				if (swrm->hw_core_clk_en < 0)
					swrm->hw_core_clk_en = 0;
				else if (swrm->hw_core_clk_en == 0)
					clk_disable_unprepare(
						swrm->lpass_core_hw_vote);
			}
		}
	}
	if (core_type == LPASS_AUDIO_CORE) {
		if (swrm->lpass_core_audio) {
			if (enable) {
				if (!swrm->dev_up) {
					dev_dbg(swrm->dev, "%s: device is down or SSR state\n",
						__func__);
					trace_printk("%s: device is down or SSR state\n",
						__func__);
					mutex_unlock(&swrm->devlock);
					return -ENODEV;
				}
				if (++swrm->aud_core_clk_en == 1) {
					ret =
					   clk_prepare_enable(
						swrm->lpass_core_audio);
					if (ret < 0) {
						dev_err(swrm->dev,
							"%s:lpass audio hw enable failed\n",
							__func__);
						--swrm->aud_core_clk_en;
					}
				}
			} else {
				--swrm->aud_core_clk_en;
				if (swrm->aud_core_clk_en < 0)
					swrm->aud_core_clk_en = 0;
				else if (swrm->aud_core_clk_en == 0)
					clk_disable_unprepare(
						swrm->lpass_core_audio);
			}
		}
	}

	mutex_unlock(&swrm->devlock);
	dev_dbg(swrm->dev, "%s: hw_clk_en: %d audio_core_clk_en: %d\n",
		__func__, swrm->hw_core_clk_en, swrm->aud_core_clk_en);
	trace_printk("%s: hw_clk_en: %d audio_core_clk_en: %d\n",
		__func__, swrm->hw_core_clk_en, swrm->aud_core_clk_en);
	return ret;
}

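/*
 * SSP period = (2 * bus_clk) / (rows * cols * frame_sync). With the
 * defaults assumed here (9.6 MHz bus clock, 50x16 frame, 4 kHz frame
 * sync) this works out to 6. Invalid input returns 1 rather than 0 so
 * the (ssp_period - 1) value written by callers does not underflow.
 */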
static int swrm_get_ssp_period(struct swr_mstr_ctrl *swrm,
				int row, int col,
				int frame_sync)
{
	if (!swrm || !row || !col || !frame_sync)
		return 1;

	return ((swrm->bus_clk * 2) / ((row * col) * frame_sync));
}

static int swrm_core_vote_request(struct swr_mstr_ctrl *swrm)
{
	int ret = 0;

	if (!swrm->handle)
		return -EINVAL;

	mutex_lock(&swrm->clklock);
	if (!swrm->dev_up) {
		ret = -ENODEV;
		goto exit;
	}
	if (swrm->core_vote) {
		ret = swrm->core_vote(swrm->handle, true);
		if (ret)
			dev_err_ratelimited(swrm->dev,
				"%s: core vote request failed\n", __func__);
	}
exit:
	mutex_unlock(&swrm->clklock);

	return ret;
}

static int swrm_clk_request(struct swr_mstr_ctrl *swrm, bool enable)
{
	int ret = 0;

	if (!swrm->clk || !swrm->handle)
		return -EINVAL;

	mutex_lock(&swrm->clklock);
	if (enable) {
		if (!swrm->dev_up) {
			ret = -ENODEV;
			goto exit;
		}
		if (is_swr_clk_needed(swrm)) {
			if (swrm->core_vote) {
				ret = swrm->core_vote(swrm->handle, true);
				if (ret) {
					dev_err_ratelimited(swrm->dev,
						"%s: core vote request failed\n",
						__func__);
					goto exit;
				}
			}
		}
		swrm->clk_ref_count++;
		if (swrm->clk_ref_count == 1) {
			trace_printk("%s: clock enable count %d",
				__func__, swrm->clk_ref_count);
			ret = swrm->clk(swrm->handle, true);
			if (ret) {
				dev_err_ratelimited(swrm->dev,
					"%s: clock enable req failed",
					__func__);
				--swrm->clk_ref_count;
			}
		}
	} else if (--swrm->clk_ref_count == 0) {
		trace_printk("%s: clock disable count %d",
			__func__, swrm->clk_ref_count);
		swrm->clk(swrm->handle, false);
		complete(&swrm->clk_off_complete);
	}
	if (swrm->clk_ref_count < 0) {
		dev_err(swrm->dev, "%s: swrm clk count mismatch\n", __func__);
		swrm->clk_ref_count = 0;
	}

exit:
	mutex_unlock(&swrm->clklock);
	return ret;
}

static int swrm_ahb_write(struct swr_mstr_ctrl *swrm,
			  u16 reg, u32 *value)
{
	u32 temp = (u32)(*value);
	int ret = 0;

	mutex_lock(&swrm->devlock);
	if (!swrm->dev_up)
		goto err;

	if (is_swr_clk_needed(swrm)) {
		ret = swrm_clk_request(swrm, TRUE);
		if (ret) {
			dev_err_ratelimited(swrm->dev,
					    "%s: clock request failed\n",
					    __func__);
			goto err;
		}
	} else if (swrm_core_vote_request(swrm)) {
		goto err;
	}

	iowrite32(temp, swrm->swrm_dig_base + reg);
	if (is_swr_clk_needed(swrm))
		swrm_clk_request(swrm, FALSE);
err:
	mutex_unlock(&swrm->devlock);
	return ret;
}

static int swrm_ahb_read(struct swr_mstr_ctrl *swrm,
			 u16 reg, u32 *value)
{
	u32 temp = 0;
	int ret = 0;

	mutex_lock(&swrm->devlock);
	if (!swrm->dev_up)
		goto err;

	if (is_swr_clk_needed(swrm)) {
		ret = swrm_clk_request(swrm, TRUE);
		if (ret) {
			dev_err_ratelimited(swrm->dev, "%s: clock request failed\n",
					    __func__);
			goto err;
		}
	} else if (swrm_core_vote_request(swrm)) {
		goto err;
	}

	temp = ioread32(swrm->swrm_dig_base + reg);
	*value = temp;
	if (is_swr_clk_needed(swrm))
		swrm_clk_request(swrm, FALSE);
err:
	mutex_unlock(&swrm->devlock);
	return ret;
}

static u32 swr_master_read(struct swr_mstr_ctrl *swrm, unsigned int reg_addr)
{
	u32 val = 0;

	if (swrm->read)
		val = swrm->read(swrm->handle, reg_addr);
	else
		swrm_ahb_read(swrm, reg_addr, &val);
	return val;
}

static void swr_master_write(struct swr_mstr_ctrl *swrm, u16 reg_addr, u32 val)
{
	if (swrm->write)
		swrm->write(swrm->handle, reg_addr, val);
	else
		swrm_ahb_write(swrm, reg_addr, &val);
}

static int swr_master_bulk_write(struct swr_mstr_ctrl *swrm, u32 *reg_addr,
				u32 *val, unsigned int length)
{
	int i = 0;

	if (swrm->bulk_write)
		swrm->bulk_write(swrm->handle, reg_addr, val, length);
	else {
		mutex_lock(&swrm->iolock);
		for (i = 0; i < length; i++) {
			/* wait for FIFO WR command to complete to avoid overflow */
			/*
			 * Reduce sleep from 100us to 10us to meet KPIs
			 * This still meets the hardware spec
			 */
			usleep_range(10, 12);
			swr_master_write(swrm, reg_addr[i], val[i]);
		}
		mutex_unlock(&swrm->iolock);
	}
	return 0;
}

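/*
 * Poll bit 0 of SWRM_COMP_STATUS until it matches the requested link
 * state, retrying up to SWRM_LINK_STATUS_RETRY_CNT times (roughly 50 ms
 * at ~500 us per attempt). Controllers at or below v1.5.1 do not expose
 * this status, so they are simply assumed to be in the requested state.
 */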
static bool swrm_check_link_status(struct swr_mstr_ctrl *swrm, bool active)
{
	int retry = SWRM_LINK_STATUS_RETRY_CNT;
	int ret = false;
	int status = active ? 0x1 : 0x0;
	int comp_sts = 0x0;

	if ((swrm->version <= SWRM_VERSION_1_5_1))
		return true;

	do {
		comp_sts = swr_master_read(swrm, SWRM_COMP_STATUS) & 0x01;
		/* check comp status and status requested met */
		if ((comp_sts && status) || (!comp_sts && !status)) {
			ret = true;
			break;
		}
		retry--;
		usleep_range(500, 510);
	} while (retry);

	if (retry == 0)
		dev_err(swrm->dev, "%s: link status not %s\n", __func__,
			active ? "connected" : "disconnected");

	return ret;
}

static bool swrm_is_port_en(struct swr_master *mstr)
{
	return !!(mstr->num_port);
}

static void copy_port_tables(struct swr_mstr_ctrl *swrm,
			     struct port_params *params)
{
	u8 i;
	struct port_params *config = params;

	for (i = 0; i < SWR_MSTR_PORT_LEN; i++) {
		/* wsa uses single frame structure for all configurations */
		if (!swrm->mport_cfg[i].port_en)
			continue;
		swrm->mport_cfg[i].sinterval = config[i].si;
		swrm->mport_cfg[i].offset1 = config[i].off1;
		swrm->mport_cfg[i].offset2 = config[i].off2;
		swrm->mport_cfg[i].hstart = config[i].hstart;
		swrm->mport_cfg[i].hstop = config[i].hstop;
		swrm->mport_cfg[i].blk_pack_mode = config[i].bp_mode;
		swrm->mport_cfg[i].blk_grp_count = config[i].bgp_ctrl;
		swrm->mport_cfg[i].word_length = config[i].wd_len;
		swrm->mport_cfg[i].lane_ctrl = config[i].lane_ctrl;
	}
}
static int swrm_get_port_config(struct swr_mstr_ctrl *swrm)
{
	struct port_params *params;
	u32 usecase = 0;

	/* TODO - Send usecase information to avoid checking for master_id */
	if (swrm->mport_cfg[SWRM_DSD_PARAMS_PORT].port_en &&
				(swrm->master_id == MASTER_ID_RX))
		usecase = 1;

	params = swrm->port_param[usecase];
	copy_port_tables(swrm, params);

	return 0;
}

static int swrm_get_master_port(struct swr_mstr_ctrl *swrm, u8 *mstr_port_id,
					u8 *mstr_ch_mask, u8 mstr_prt_type,
					u8 slv_port_id)
{
	int i, j;
	*mstr_port_id = 0;

	for (i = 1; i <= swrm->num_ports; i++) {
		for (j = 0; j < SWR_MAX_CH_PER_PORT; j++) {
			if (swrm->port_mapping[i][j].port_type == mstr_prt_type)
				goto found;
		}
	}
found:
	if (i > swrm->num_ports || j == SWR_MAX_CH_PER_PORT) {
		dev_err(swrm->dev, "%s: port type not supported by master\n",
			__func__);
		return -EINVAL;
	}
	/* id 0 corresponds to master port 1 */
	*mstr_port_id = i - 1;
	*mstr_ch_mask = swrm->port_mapping[i][j].ch_mask;

	return 0;

}

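/*
 * Build a FIFO command word for the given slave register access. For
 * non-broadcast commands the caller's rolling command id is advanced
 * (cycling through 0..14) so that read responses can later be matched
 * against rcmd_id in swrm_cmd_fifo_rd_cmd().
 */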
static u32 swrm_get_packed_reg_val(u8 *cmd_id, u8 cmd_data,
				   u8 dev_addr, u16 reg_addr)
{
	u32 val;
	u8 id = *cmd_id;

	if (id != SWR_BROADCAST_CMD_ID) {
		if (id < 14)
			id += 1;
		else
			id = 0;
		*cmd_id = id;
	}
	val = SWR_REG_VAL_PACK(cmd_data, dev_addr, id, reg_addr);

	return val;
}

static int swrm_cmd_fifo_rd_cmd(struct swr_mstr_ctrl *swrm, int *cmd_data,
				 u8 dev_addr, u8 cmd_id, u16 reg_addr,
				 u32 len)
{
	u32 val;
	u32 retry_attempt = 0;

	mutex_lock(&swrm->iolock);
	val = swrm_get_packed_reg_val(&swrm->rcmd_id, len, dev_addr, reg_addr);
	if (swrm->read) {
		/* skip delay if read is handled in platform driver */
		swr_master_write(swrm, SWRM_CMD_FIFO_RD_CMD, val);
	} else {
		/* wait for FIFO RD to complete to avoid overflow */
		usleep_range(100, 105);
		swr_master_write(swrm, SWRM_CMD_FIFO_RD_CMD, val);
		/* wait for FIFO RD CMD complete to avoid overflow */
		usleep_range(250, 255);
	}
retry_read:
	*cmd_data = swr_master_read(swrm, SWRM_CMD_FIFO_RD_FIFO_ADDR);
	dev_dbg(swrm->dev, "%s: reg: 0x%x, cmd_id: 0x%x, rcmd_id: 0x%x, \
		dev_num: 0x%x, cmd_data: 0x%x\n", __func__, reg_addr,
		cmd_id, swrm->rcmd_id, dev_addr, *cmd_data);
	if ((((*cmd_data) & 0xF00) >> 8) != swrm->rcmd_id) {
		if (retry_attempt < MAX_FIFO_RD_FAIL_RETRY) {
			/* wait 500 us before retry on fifo read failure */
			usleep_range(500, 505);
			if (retry_attempt == (MAX_FIFO_RD_FAIL_RETRY - 1)) {
				swr_master_write(swrm, SWRM_CMD_FIFO_CMD, 0x1);
				swr_master_write(swrm, SWRM_CMD_FIFO_RD_CMD, val);
			}
			retry_attempt++;
			goto retry_read;
		} else {
			dev_err_ratelimited(swrm->dev, "%s: reg: 0x%x, cmd_id: 0x%x, \
				rcmd_id: 0x%x, dev_num: 0x%x, cmd_data: 0x%x\n",
				__func__, reg_addr, cmd_id, swrm->rcmd_id,
				dev_addr, *cmd_data);

			dev_err_ratelimited(swrm->dev,
				"%s: failed to read fifo\n", __func__);
		}
	}
	mutex_unlock(&swrm->iolock);

	return 0;
}

static int swrm_cmd_fifo_wr_cmd(struct swr_mstr_ctrl *swrm, u8 cmd_data,
				 u8 dev_addr, u8 cmd_id, u16 reg_addr)
{
	u32 val;
	int ret = 0;

	mutex_lock(&swrm->iolock);
	if (!cmd_id)
		val = swrm_get_packed_reg_val(&swrm->wcmd_id, cmd_data,
					      dev_addr, reg_addr);
	else
		val = swrm_get_packed_reg_val(&cmd_id, cmd_data,
					      dev_addr, reg_addr);
	dev_dbg(swrm->dev, "%s: reg: 0x%x, cmd_id: 0x%x, wcmd_id: 0x%x, \
		dev_num: 0x%x, cmd_data: 0x%x\n", __func__,
		reg_addr, cmd_id, swrm->wcmd_id, dev_addr, cmd_data);
	swr_master_write(swrm, SWRM_CMD_FIFO_WR_CMD, val);
	/*
	 * wait for FIFO WR command to complete to avoid overflow
	 * skip delay if write is handled in platform driver.
	 */
	if (!swrm->write)
		usleep_range(150, 155);
	if (cmd_id == 0xF) {
		/*
		 * sleep for 10ms for MSM soundwire variant to allow broadcast
		 * command to complete.
		 */
		if (swrm_is_msm_variant(swrm->version))
			usleep_range(10000, 10100);
		else
			wait_for_completion_timeout(&swrm->broadcast,
						    (2 * HZ/10));
	}
	mutex_unlock(&swrm->iolock);
	return ret;
}

static int swrm_read(struct swr_master *master, u8 dev_num, u16 reg_addr,
		     void *buf, u32 len)
{
	struct swr_mstr_ctrl *swrm = swr_get_ctrl_data(master);
	int ret = 0;
	int val;
	u8 *reg_val = (u8 *)buf;

	if (!swrm) {
		dev_err(&master->dev, "%s: swrm is NULL\n", __func__);
		return -EINVAL;
	}
	if (!dev_num) {
		dev_err(&master->dev, "%s: invalid slave dev num\n", __func__);
		return -EINVAL;
	}
	mutex_lock(&swrm->devlock);
	if (!swrm->dev_up) {
		mutex_unlock(&swrm->devlock);
		return 0;
	}
	mutex_unlock(&swrm->devlock);

	pm_runtime_get_sync(swrm->dev);
	ret = swrm_cmd_fifo_rd_cmd(swrm, &val, dev_num, 0, reg_addr, len);

	if (!ret)
		*reg_val = (u8)val;

	pm_runtime_put_autosuspend(swrm->dev);
	pm_runtime_mark_last_busy(swrm->dev);
	return ret;
}

static int swrm_write(struct swr_master *master, u8 dev_num, u16 reg_addr,
		      const void *buf)
{
	struct swr_mstr_ctrl *swrm = swr_get_ctrl_data(master);
	int ret = 0;
	u8 reg_val = *(u8 *)buf;

	if (!swrm) {
		dev_err(&master->dev, "%s: swrm is NULL\n", __func__);
		return -EINVAL;
	}
	if (!dev_num) {
		dev_err(&master->dev, "%s: invalid slave dev num\n", __func__);
		return -EINVAL;
	}
	mutex_lock(&swrm->devlock);
	if (!swrm->dev_up) {
		mutex_unlock(&swrm->devlock);
		return 0;
	}
	mutex_unlock(&swrm->devlock);

	pm_runtime_get_sync(swrm->dev);
	ret = swrm_cmd_fifo_wr_cmd(swrm, reg_val, dev_num, 0, reg_addr);

	pm_runtime_put_autosuspend(swrm->dev);
	pm_runtime_mark_last_busy(swrm->dev);
	return ret;
}

static int swrm_bulk_write(struct swr_master *master, u8 dev_num, void *reg,
			   const void *buf, size_t len)
{
	struct swr_mstr_ctrl *swrm = swr_get_ctrl_data(master);
	int ret = 0;
	int i;
	u32 *val;
	u32 *swr_fifo_reg;

	if (!swrm || !swrm->handle) {
		dev_err(&master->dev, "%s: swrm is NULL\n", __func__);
		return -EINVAL;
	}
	if (len <= 0)
		return -EINVAL;
	mutex_lock(&swrm->devlock);
	if (!swrm->dev_up) {
		mutex_unlock(&swrm->devlock);
		return 0;
	}
	mutex_unlock(&swrm->devlock);

	pm_runtime_get_sync(swrm->dev);
	if (dev_num) {
		swr_fifo_reg = kcalloc(len, sizeof(u32), GFP_KERNEL);
		if (!swr_fifo_reg) {
			ret = -ENOMEM;
			goto err;
		}
		val = kcalloc(len, sizeof(u32), GFP_KERNEL);
		if (!val) {
			ret = -ENOMEM;
			goto mem_fail;
		}

		for (i = 0; i < len; i++) {
			val[i] = swrm_get_packed_reg_val(&swrm->wcmd_id,
							 ((u8 *)buf)[i],
							 dev_num,
							 ((u16 *)reg)[i]);
			swr_fifo_reg[i] = SWRM_CMD_FIFO_WR_CMD;
		}
		ret = swr_master_bulk_write(swrm, swr_fifo_reg, val, len);
		if (ret) {
			dev_err(&master->dev, "%s: bulk write failed\n",
				__func__);
			ret = -EINVAL;
		}
	} else {
		dev_err(&master->dev,
			"%s: No support of Bulk write for master regs\n",
			__func__);
		ret = -EINVAL;
		goto err;
	}
	kfree(val);
mem_fail:
	kfree(swr_fifo_reg);
err:
	pm_runtime_put_autosuspend(swrm->dev);
	pm_runtime_mark_last_busy(swrm->dev);
	return ret;
}

static u8 get_inactive_bank_num(struct swr_mstr_ctrl *swrm)
{
	return (swr_master_read(swrm, SWRM_MCP_STATUS) &
		SWRM_MCP_STATUS_BANK_NUM_MASK) ? 0 : 1;
}

static void enable_bank_switch(struct swr_mstr_ctrl *swrm, u8 bank,
				u8 row, u8 col)
{
	swrm_cmd_fifo_wr_cmd(swrm, ((row << 3) | col), 0xF, 0xF,
			SWRS_SCP_FRAME_CTRL_BANK(bank));
}

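/*
 * Reprogram the inactive bank with the frame shape matching the new
 * mclk (64x16 with the native frame-sync rate for MCLK_FREQ_NATIVE,
 * 50x2 otherwise) and then request a bank switch so the new geometry
 * takes effect on the next bank change.
 */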
static void swrm_switch_frame_shape(struct swr_mstr_ctrl *swrm, int mclk_freq)
{
	u8 bank;
	u32 n_row, n_col;
	u32 value = 0;
	u32 row = 0, col = 0;
	u8 ssp_period = 0;
	int frame_sync = SWRM_FRAME_SYNC_SEL;

	if (mclk_freq == MCLK_FREQ_NATIVE) {
		n_col = SWR_MAX_COL;
		col = SWRM_COL_16;
		n_row = SWR_ROW_64;
		row = SWRM_ROW_64;
		frame_sync = SWRM_FRAME_SYNC_SEL_NATIVE;
	} else {
		n_col = SWR_MIN_COL;
		col = SWRM_COL_02;
		n_row = SWR_ROW_50;
		row = SWRM_ROW_50;
		frame_sync = SWRM_FRAME_SYNC_SEL;
	}

	bank = get_inactive_bank_num(swrm);
	ssp_period = swrm_get_ssp_period(swrm, row, col, frame_sync);
	dev_dbg(swrm->dev, "%s: ssp_period: %d\n", __func__, ssp_period);
	value = ((n_row << SWRM_MCP_FRAME_CTRL_BANK_ROW_CTRL_SHFT) |
		  (n_col << SWRM_MCP_FRAME_CTRL_BANK_COL_CTRL_SHFT) |
		  ((ssp_period - 1) << SWRM_MCP_FRAME_CTRL_BANK_SSP_PERIOD_SHFT));
	swr_master_write(swrm, SWRM_MCP_FRAME_CTRL_BANK_ADDR(bank), value);
	enable_bank_switch(swrm, bank, n_row, n_col);
}

static struct swr_port_info *swrm_get_port_req(struct swrm_mports *mport,
						u8 slv_port, u8 dev_num)
{
	struct swr_port_info *port_req = NULL;

	list_for_each_entry(port_req, &mport->port_req_list, list) {
		/* Store dev_id instead of dev_num if enumeration is changed run_time */
		if ((port_req->slave_port_id == slv_port)
			&& (port_req->dev_num == dev_num))
			return port_req;
	}
	return NULL;
}

static bool swrm_remove_from_group(struct swr_master *master)
{
	struct swr_device *swr_dev;
	struct swr_mstr_ctrl *swrm = swr_get_ctrl_data(master);
	bool is_removed = false;

	if (!swrm)
		goto end;

	mutex_lock(&swrm->mlock);
	if ((swrm->num_rx_chs > 1) &&
	    (swrm->num_rx_chs == swrm->num_cfg_devs)) {
		list_for_each_entry(swr_dev, &master->devices,
				dev_list) {
			swr_dev->group_id = SWR_GROUP_NONE;
			master->gr_sid = 0;
		}
		is_removed = true;
	}
	mutex_unlock(&swrm->mlock);

end:
	return is_removed;
}

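/*
 * Round the requested aggregate bandwidth up to the nearest supported
 * bus clock (0.6/1.2/2.4/4.8/9.6 MHz when running from the 9.6 MHz
 * mclk, the native 11.2896 MHz rate otherwise). A zero request simply
 * returns mclk_freq unchanged.
 */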
Sudheer Papothiae5c3632019-11-27 06:52:06 +05301044int swrm_get_clk_div_rate(int mclk_freq, int bus_clk_freq)
1045{
1046 if (!bus_clk_freq)
1047 return mclk_freq;
1048
1049 if (mclk_freq == SWR_CLK_RATE_9P6MHZ) {
1050 if (bus_clk_freq <= SWR_CLK_RATE_0P6MHZ)
1051 bus_clk_freq = SWR_CLK_RATE_0P6MHZ;
1052 else if (bus_clk_freq <= SWR_CLK_RATE_1P2MHZ)
1053 bus_clk_freq = SWR_CLK_RATE_1P2MHZ;
1054 else if (bus_clk_freq <= SWR_CLK_RATE_2P4MHZ)
1055 bus_clk_freq = SWR_CLK_RATE_2P4MHZ;
1056 else if(bus_clk_freq <= SWR_CLK_RATE_4P8MHZ)
1057 bus_clk_freq = SWR_CLK_RATE_4P8MHZ;
1058 else if(bus_clk_freq <= SWR_CLK_RATE_9P6MHZ)
1059 bus_clk_freq = SWR_CLK_RATE_9P6MHZ;
1060 } else if (mclk_freq == SWR_CLK_RATE_11P2896MHZ)
1061 bus_clk_freq = SWR_CLK_RATE_11P2896MHZ;
1062
1063 return bus_clk_freq;
1064}
1065
1066static int swrm_update_bus_clk(struct swr_mstr_ctrl *swrm)
1067{
1068 int ret = 0;
1069 int agg_clk = 0;
1070 int i;
1071
1072 for (i = 0; i < SWR_MSTR_PORT_LEN; i++)
1073 agg_clk += swrm->mport_cfg[i].ch_rate;
1074
1075 if (agg_clk)
1076 swrm->bus_clk = swrm_get_clk_div_rate(swrm->mclk_freq,
1077 agg_clk);
1078 else
1079 swrm->bus_clk = swrm->mclk_freq;
1080
1081 dev_dbg(swrm->dev, "%s: all_port_clk: %d, bus_clk: %d\n",
1082 __func__, agg_clk, swrm->bus_clk);
1083
1084 return ret;
1085}
1086
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301087static void swrm_disable_ports(struct swr_master *master,
1088 u8 bank)
1089{
1090 u32 value;
1091 struct swr_port_info *port_req;
1092 int i;
1093 struct swrm_mports *mport;
1094 struct swr_mstr_ctrl *swrm = swr_get_ctrl_data(master);
1095
1096 if (!swrm) {
1097 pr_err("%s: swrm is null\n", __func__);
1098 return;
1099 }
1100
1101 dev_dbg(swrm->dev, "%s: master num_port: %d\n", __func__,
1102 master->num_port);
1103
1104
1105 for (i = 0; i < SWR_MSTR_PORT_LEN ; i++) {
1106
1107 mport = &(swrm->mport_cfg[i]);
1108 if (!mport->port_en)
1109 continue;
1110
1111 list_for_each_entry(port_req, &mport->port_req_list, list) {
1112 /* skip ports with no change req's*/
1113 if (port_req->req_ch == port_req->ch_en)
1114 continue;
1115
1116 swrm_cmd_fifo_wr_cmd(swrm, port_req->req_ch,
1117 port_req->dev_num, 0x00,
1118 SWRS_DP_CHANNEL_ENABLE_BANK(port_req->slave_port_id,
1119 bank));
1120 dev_dbg(swrm->dev, "%s: mport :%d, reg: 0x%x\n",
1121 __func__, i,
1122 (SWRM_DP_PORT_CTRL_BANK(i + 1, bank)));
1123 }
1124 value = ((mport->req_ch)
1125 << SWRM_DP_PORT_CTRL_EN_CHAN_SHFT);
1126 value |= ((mport->offset2)
1127 << SWRM_DP_PORT_CTRL_OFFSET2_SHFT);
1128 value |= ((mport->offset1)
1129 << SWRM_DP_PORT_CTRL_OFFSET1_SHFT);
1130 value |= mport->sinterval;
1131
1132 swr_master_write(swrm,
1133 SWRM_DP_PORT_CTRL_BANK(i+1, bank),
1134 value);
1135 dev_dbg(swrm->dev, "%s: mport :%d, reg: 0x%x, val: 0x%x\n",
1136 __func__, i,
1137 (SWRM_DP_PORT_CTRL_BANK(i+1, bank)), value);
1138 }
1139}
1140
1141static void swrm_cleanup_disabled_port_reqs(struct swr_master *master)
1142{
1143 struct swr_port_info *port_req, *next;
1144 int i;
1145 struct swrm_mports *mport;
1146 struct swr_mstr_ctrl *swrm = swr_get_ctrl_data(master);
1147
1148 if (!swrm) {
1149 pr_err("%s: swrm is null\n", __func__);
1150 return;
1151 }
1152 dev_dbg(swrm->dev, "%s: master num_port: %d\n", __func__,
1153 master->num_port);
1154
1155 for (i = 0; i < SWR_MSTR_PORT_LEN; i++) {
1156 mport = &(swrm->mport_cfg[i]);
1157 list_for_each_entry_safe(port_req, next,
1158 &mport->port_req_list, list) {
1159 /* skip ports without new ch req */
1160 if (port_req->ch_en == port_req->req_ch)
1161 continue;
1162
1163 /* remove new ch req's*/
Ramprasad Katkamc8d52a12018-08-31 02:30:00 +05301164 port_req->ch_en = port_req->req_ch;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301165
1166 /* If no streams enabled on port, remove the port req */
1167 if (port_req->ch_en == 0) {
1168 list_del(&port_req->list);
1169 kfree(port_req);
1170 }
1171 }
1172 /* remove new ch req's on mport*/
Ramprasad Katkamc8d52a12018-08-31 02:30:00 +05301173 mport->ch_en = mport->req_ch;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301174
1175 if (!(mport->ch_en)) {
1176 mport->port_en = false;
1177 master->port_en_mask &= ~i;
1178 }
1179 }
1180}
1181static void swrm_copy_data_port_config(struct swr_master *master, u8 bank)
1182{
1183 u32 value, slv_id;
1184 struct swr_port_info *port_req;
1185 int i;
1186 struct swrm_mports *mport;
1187 u32 reg[SWRM_MAX_PORT_REG];
1188 u32 val[SWRM_MAX_PORT_REG];
1189 int len = 0;
Ramprasad Katkam14f47cc2018-07-25 17:20:18 +05301190 u8 hparams;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301191 struct swr_mstr_ctrl *swrm = swr_get_ctrl_data(master);
1192
1193 if (!swrm) {
1194 pr_err("%s: swrm is null\n", __func__);
1195 return;
1196 }
1197
1198 dev_dbg(swrm->dev, "%s: master num_port: %d\n", __func__,
1199 master->num_port);
1200
1201 for (i = 0; i < SWR_MSTR_PORT_LEN; i++) {
1202 mport = &(swrm->mport_cfg[i]);
1203 if (!mport->port_en)
1204 continue;
1205
1206 list_for_each_entry(port_req, &mport->port_req_list, list) {
1207 slv_id = port_req->slave_port_id;
1208 reg[len] = SWRM_CMD_FIFO_WR_CMD;
1209 val[len++] = SWR_REG_VAL_PACK(port_req->req_ch,
1210 port_req->dev_num, 0x00,
1211 SWRS_DP_CHANNEL_ENABLE_BANK(slv_id,
1212 bank));
1213
1214 reg[len] = SWRM_CMD_FIFO_WR_CMD;
1215 val[len++] = SWR_REG_VAL_PACK(mport->sinterval,
1216 port_req->dev_num, 0x00,
1217 SWRS_DP_SAMPLE_CONTROL_1_BANK(slv_id,
1218 bank));
1219
1220 reg[len] = SWRM_CMD_FIFO_WR_CMD;
1221 val[len++] = SWR_REG_VAL_PACK(mport->offset1,
1222 port_req->dev_num, 0x00,
1223 SWRS_DP_OFFSET_CONTROL_1_BANK(slv_id,
1224 bank));
1225
Ramprasad Katkam14f47cc2018-07-25 17:20:18 +05301226 if (mport->offset2 != SWR_INVALID_PARAM) {
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301227 reg[len] = SWRM_CMD_FIFO_WR_CMD;
1228 val[len++] = SWR_REG_VAL_PACK(mport->offset2,
1229 port_req->dev_num, 0x00,
1230 SWRS_DP_OFFSET_CONTROL_2_BANK(
1231 slv_id, bank));
1232 }
Ramprasad Katkam14f47cc2018-07-25 17:20:18 +05301233 if (mport->hstart != SWR_INVALID_PARAM
1234 && mport->hstop != SWR_INVALID_PARAM) {
1235 hparams = (mport->hstart << 4) | mport->hstop;
1236
1237 reg[len] = SWRM_CMD_FIFO_WR_CMD;
1238 val[len++] = SWR_REG_VAL_PACK(hparams,
1239 port_req->dev_num, 0x00,
1240 SWRS_DP_HCONTROL_BANK(slv_id,
1241 bank));
1242 }
1243 if (mport->word_length != SWR_INVALID_PARAM) {
1244 reg[len] = SWRM_CMD_FIFO_WR_CMD;
1245 val[len++] =
1246 SWR_REG_VAL_PACK(mport->word_length,
1247 port_req->dev_num, 0x00,
1248 SWRS_DP_BLOCK_CONTROL_1(slv_id));
1249 }
Ramprasad Katkam2a0996b2018-09-25 20:13:30 +05301250 if (mport->blk_pack_mode != SWR_INVALID_PARAM
1251 && swrm->master_id != MASTER_ID_WSA) {
Ramprasad Katkam14f47cc2018-07-25 17:20:18 +05301252 reg[len] = SWRM_CMD_FIFO_WR_CMD;
1253 val[len++] =
1254 SWR_REG_VAL_PACK(mport->blk_pack_mode,
1255 port_req->dev_num, 0x00,
1256 SWRS_DP_BLOCK_CONTROL_3_BANK(slv_id,
1257 bank));
1258 }
1259 if (mport->blk_grp_count != SWR_INVALID_PARAM) {
1260 reg[len] = SWRM_CMD_FIFO_WR_CMD;
1261 val[len++] =
1262 SWR_REG_VAL_PACK(mport->blk_grp_count,
1263 port_req->dev_num, 0x00,
1264 SWRS_DP_BLOCK_CONTROL_2_BANK(slv_id,
1265 bank));
1266 }
1267 if (mport->lane_ctrl != SWR_INVALID_PARAM) {
1268 reg[len] = SWRM_CMD_FIFO_WR_CMD;
1269 val[len++] =
1270 SWR_REG_VAL_PACK(mport->lane_ctrl,
1271 port_req->dev_num, 0x00,
1272 SWRS_DP_LANE_CONTROL_BANK(slv_id,
1273 bank));
1274 }
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301275 port_req->ch_en = port_req->req_ch;
1276 }
1277 value = ((mport->req_ch)
1278 << SWRM_DP_PORT_CTRL_EN_CHAN_SHFT);
Ramprasad Katkam2a0996b2018-09-25 20:13:30 +05301279
1280 if (mport->offset2 != SWR_INVALID_PARAM)
1281 value |= ((mport->offset2)
1282 << SWRM_DP_PORT_CTRL_OFFSET2_SHFT);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301283 value |= ((mport->offset1)
1284 << SWRM_DP_PORT_CTRL_OFFSET1_SHFT);
1285 value |= mport->sinterval;
1286
1287
1288 reg[len] = SWRM_DP_PORT_CTRL_BANK(i + 1, bank);
1289 val[len++] = value;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301290 dev_dbg(swrm->dev, "%s: mport :%d, reg: 0x%x, val: 0x%x\n",
1291 __func__, i,
1292 (SWRM_DP_PORT_CTRL_BANK(i + 1, bank)), value);
1293
Ramprasad Katkam14f47cc2018-07-25 17:20:18 +05301294 if (mport->lane_ctrl != SWR_INVALID_PARAM) {
1295 reg[len] = SWRM_DP_PORT_CTRL_2_BANK(i + 1, bank);
1296 val[len++] = mport->lane_ctrl;
1297 }
1298 if (mport->word_length != SWR_INVALID_PARAM) {
1299 reg[len] = SWRM_DP_BLOCK_CTRL_1(i + 1);
1300 val[len++] = mport->word_length;
1301 }
1302
1303 if (mport->blk_grp_count != SWR_INVALID_PARAM) {
1304 reg[len] = SWRM_DP_BLOCK_CTRL2_BANK(i + 1, bank);
1305 val[len++] = mport->blk_grp_count;
1306 }
1307 if (mport->hstart != SWR_INVALID_PARAM
1308 && mport->hstop != SWR_INVALID_PARAM) {
1309 reg[len] = SWRM_DP_PORT_HCTRL_BANK(i + 1, bank);
Laxminath Kasame30eef72018-11-05 17:40:09 +05301310 hparams = (mport->hstop << 4) | mport->hstart;
Ramprasad Katkam14f47cc2018-07-25 17:20:18 +05301311 val[len++] = hparams;
Laxminath Kasam990c70b2018-11-09 23:15:09 +05301312 } else {
1313 reg[len] = SWRM_DP_PORT_HCTRL_BANK(i + 1, bank);
1314 hparams = (SWR_HSTOP_MAX_VAL << 4) | SWR_HSTART_MIN_VAL;
1315 val[len++] = hparams;
Ramprasad Katkam14f47cc2018-07-25 17:20:18 +05301316 }
1317 if (mport->blk_pack_mode != SWR_INVALID_PARAM) {
1318 reg[len] = SWRM_DP_BLOCK_CTRL3_BANK(i + 1, bank);
1319 val[len++] = mport->blk_pack_mode;
1320 }
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301321 mport->ch_en = mport->req_ch;
1322
1323 }
Sudheer Papothi0016db12019-06-11 04:42:38 +05301324 swrm_reg_dump(swrm, reg, val, len, __func__);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301325 swr_master_bulk_write(swrm, reg, val, len);
1326}
1327
1328static void swrm_apply_port_config(struct swr_master *master)
1329{
1330 u8 bank;
1331 struct swr_mstr_ctrl *swrm = swr_get_ctrl_data(master);
1332
1333 if (!swrm) {
1334 pr_err("%s: Invalid handle to swr controller\n",
1335 __func__);
1336 return;
1337 }
1338
1339 bank = get_inactive_bank_num(swrm);
1340 dev_dbg(swrm->dev, "%s: enter bank: %d master_ports: %d\n",
1341 __func__, bank, master->num_port);
1342
Vatsal Bucha80995e62020-02-20 14:35:44 +05301343
1344 swrm_cmd_fifo_wr_cmd(swrm, 0x01, 0xF, 0x00,
1345 SWRS_SCP_HOST_CLK_DIV2_CTL_BANK(bank));
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301346
1347 swrm_copy_data_port_config(master, bank);
1348}
1349
1350static int swrm_slvdev_datapath_control(struct swr_master *master, bool enable)
1351{
1352 u8 bank;
Sudheer Papothiae5c3632019-11-27 06:52:06 +05301353 u32 value = 0, n_row = 0, n_col = 0;
Sudheer Papothiac0ae1c2019-10-17 05:38:40 +05301354 u32 row = 0, col = 0;
Sudheer Papothiae5c3632019-11-27 06:52:06 +05301355 int bus_clk_div_factor;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301356 int ret;
Sudheer Papothiac0ae1c2019-10-17 05:38:40 +05301357 u8 ssp_period = 0;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301358 struct swr_mstr_ctrl *swrm = swr_get_ctrl_data(master);
1359 int mask = (SWRM_MCP_FRAME_CTRL_BANK_ROW_CTRL_BMSK |
1360 SWRM_MCP_FRAME_CTRL_BANK_COL_CTRL_BMSK |
Sudheer Papothiae5c3632019-11-27 06:52:06 +05301361 SWRM_MCP_FRAME_CTRL_BANK_CLK_DIV_VALUE_BMSK |
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301362 SWRM_MCP_FRAME_CTRL_BANK_SSP_PERIOD_BMSK);
1363 u8 inactive_bank;
Sudheer Papothi8a8b12b2019-11-15 23:06:41 +05301364 int frame_sync = SWRM_FRAME_SYNC_SEL;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301365
1366 if (!swrm) {
1367 pr_err("%s: swrm is null\n", __func__);
1368 return -EFAULT;
1369 }
Ramprasad Katkam7e354782018-11-21 15:52:54 +05301370
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301371 mutex_lock(&swrm->mlock);
1372
Ramprasad Katkam979b7c92019-05-17 15:31:21 +05301373 /*
1374 * During disable if master is already down, which implies an ssr/pdr
1375 * scenario, just mark ports as disabled and exit
1376 */
1377 if (swrm->state == SWR_MSTR_SSR && !enable) {
1378 if (!test_bit(DISABLE_PENDING, &swrm->port_req_pending)) {
1379 dev_dbg(swrm->dev, "%s:No pending disconn port req\n",
1380 __func__);
1381 goto exit;
1382 }
1383 clear_bit(DISABLE_PENDING, &swrm->port_req_pending);
1384 swrm_cleanup_disabled_port_reqs(master);
1385 if (!swrm_is_port_en(master)) {
1386 dev_dbg(&master->dev, "%s: pm_runtime auto suspend triggered\n",
1387 __func__);
1388 pm_runtime_mark_last_busy(swrm->dev);
1389 pm_runtime_put_autosuspend(swrm->dev);
1390 }
1391 goto exit;
1392 }
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301393 bank = get_inactive_bank_num(swrm);
1394
1395 if (enable) {
Ramprasad Katkamcab8d722018-09-28 15:54:06 +05301396 if (!test_bit(ENABLE_PENDING, &swrm->port_req_pending)) {
1397 dev_dbg(swrm->dev, "%s:No pending connect port req\n",
1398 __func__);
1399 goto exit;
1400 }
1401 clear_bit(ENABLE_PENDING, &swrm->port_req_pending);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301402 ret = swrm_get_port_config(swrm);
1403 if (ret) {
1404 /* cannot accommodate ports */
1405 swrm_cleanup_disabled_port_reqs(master);
1406 mutex_unlock(&swrm->mlock);
1407 return -EINVAL;
1408 }
Ramprasad Katkam18bc8e22018-10-25 15:04:24 +05301409 swr_master_write(swrm, SWR_MSTR_RX_SWRM_CPU_INTERRUPT_EN,
Ramprasad Katkam7e354782018-11-21 15:52:54 +05301410 SWRM_INTERRUPT_STATUS_MASK);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301411 /* apply the new port config*/
1412 swrm_apply_port_config(master);
1413 } else {
Ramprasad Katkamcab8d722018-09-28 15:54:06 +05301414 if (!test_bit(DISABLE_PENDING, &swrm->port_req_pending)) {
1415 dev_dbg(swrm->dev, "%s:No pending disconn port req\n",
1416 __func__);
1417 goto exit;
1418 }
1419 clear_bit(DISABLE_PENDING, &swrm->port_req_pending);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301420 swrm_disable_ports(master, bank);
1421 }
Sudheer Papothiae5c3632019-11-27 06:52:06 +05301422 dev_dbg(swrm->dev, "%s: enable: %d, cfg_devs: %d freq %d\n",
1423 __func__, enable, swrm->num_cfg_devs, swrm->mclk_freq);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301424
1425 if (enable) {
Ramprasad Katkam14f47cc2018-07-25 17:20:18 +05301426 /* set col = 16 */
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301427 n_col = SWR_MAX_COL;
Sudheer Papothiac0ae1c2019-10-17 05:38:40 +05301428 col = SWRM_COL_16;
Sudheer Papothiae5c3632019-11-27 06:52:06 +05301429 if (swrm->bus_clk == MCLK_FREQ_LP) {
1430 n_col = SWR_MIN_COL;
1431 col = SWRM_COL_02;
1432 }
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301433 } else {
1434 /*
Ramprasad Katkam14f47cc2018-07-25 17:20:18 +05301435 * Do not change to col = 2 if there are still active ports
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301436 */
Sudheer Papothiac0ae1c2019-10-17 05:38:40 +05301437 if (!master->num_port) {
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301438 n_col = SWR_MIN_COL;
Sudheer Papothiac0ae1c2019-10-17 05:38:40 +05301439 col = SWRM_COL_02;
1440 } else {
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301441 n_col = SWR_MAX_COL;
Sudheer Papothiac0ae1c2019-10-17 05:38:40 +05301442 col = SWRM_COL_16;
1443 }
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301444 }
Ramprasad Katkam14f47cc2018-07-25 17:20:18 +05301445 /* Use default 50 * x, frame shape. Change based on mclk */
Laxminath Kasamb0f27cd2018-09-06 12:17:11 +05301446 if (swrm->mclk_freq == MCLK_FREQ_NATIVE) {
Sudheer Papothiae5c3632019-11-27 06:52:06 +05301447 dev_dbg(swrm->dev, "setting 64 x %d frameshape\n", col);
Laxminath Kasamb0f27cd2018-09-06 12:17:11 +05301448 n_row = SWR_ROW_64;
Sudheer Papothiac0ae1c2019-10-17 05:38:40 +05301449 row = SWRM_ROW_64;
Sudheer Papothi8a8b12b2019-11-15 23:06:41 +05301450 frame_sync = SWRM_FRAME_SYNC_SEL_NATIVE;
Laxminath Kasamb0f27cd2018-09-06 12:17:11 +05301451 } else {
Sudheer Papothiae5c3632019-11-27 06:52:06 +05301452 dev_dbg(swrm->dev, "setting 50 x %d frameshape\n", col);
Laxminath Kasamb0f27cd2018-09-06 12:17:11 +05301453 n_row = SWR_ROW_50;
Sudheer Papothiac0ae1c2019-10-17 05:38:40 +05301454 row = SWRM_ROW_50;
Sudheer Papothi8a8b12b2019-11-15 23:06:41 +05301455 frame_sync = SWRM_FRAME_SYNC_SEL;
Laxminath Kasamb0f27cd2018-09-06 12:17:11 +05301456 }
Sudheer Papothi8a8b12b2019-11-15 23:06:41 +05301457 ssp_period = swrm_get_ssp_period(swrm, row, col, frame_sync);
Sudheer Papothiae5c3632019-11-27 06:52:06 +05301458 bus_clk_div_factor = swrm_get_clk_div(swrm->mclk_freq, swrm->bus_clk);
1459 dev_dbg(swrm->dev, "%s: ssp_period: %d, bus_clk_div:%d \n", __func__,
1460 ssp_period, bus_clk_div_factor);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301461 value = swr_master_read(swrm, SWRM_MCP_FRAME_CTRL_BANK_ADDR(bank));
1462 value &= (~mask);
Ramprasad Katkam14f47cc2018-07-25 17:20:18 +05301463 value |= ((n_row << SWRM_MCP_FRAME_CTRL_BANK_ROW_CTRL_SHFT) |
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301464 (n_col << SWRM_MCP_FRAME_CTRL_BANK_COL_CTRL_SHFT) |
Sudheer Papothiae5c3632019-11-27 06:52:06 +05301465 (bus_clk_div_factor <<
1466 SWRM_MCP_FRAME_CTRL_BANK_CLK_DIV_VALUE_SHFT) |
Sudheer Papothiac0ae1c2019-10-17 05:38:40 +05301467 ((ssp_period - 1) << SWRM_MCP_FRAME_CTRL_BANK_SSP_PERIOD_SHFT));
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301468 swr_master_write(swrm, SWRM_MCP_FRAME_CTRL_BANK_ADDR(bank), value);
1469
1470 dev_dbg(swrm->dev, "%s: regaddr: 0x%x, value: 0x%x\n", __func__,
1471 SWRM_MCP_FRAME_CTRL_BANK_ADDR(bank), value);
1472
Ramprasad Katkam14f47cc2018-07-25 17:20:18 +05301473 enable_bank_switch(swrm, bank, n_row, n_col);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301474 inactive_bank = bank ? 0 : 1;
1475
1476 if (enable)
1477 swrm_copy_data_port_config(master, inactive_bank);
1478 else {
1479 swrm_disable_ports(master, inactive_bank);
1480 swrm_cleanup_disabled_port_reqs(master);
Ramprasad Katkam7cb4ff62018-09-12 04:00:26 +05301481 }
1482 if (!swrm_is_port_en(master)) {
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301483 dev_dbg(&master->dev, "%s: pm_runtime auto suspend triggered\n",
1484 __func__);
1485 pm_runtime_mark_last_busy(swrm->dev);
1486 pm_runtime_put_autosuspend(swrm->dev);
1487 }
Ramprasad Katkamcab8d722018-09-28 15:54:06 +05301488exit:
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301489 mutex_unlock(&swrm->mlock);
1490return 0;
1491}
1492
1493static int swrm_connect_port(struct swr_master *master,
1494 struct swr_params *portinfo)
1495{
1496 int i;
1497 struct swr_port_info *port_req;
1498 int ret = 0;
1499 struct swr_mstr_ctrl *swrm = swr_get_ctrl_data(master);
1500 struct swrm_mports *mport;
1501 u8 mstr_port_id, mstr_ch_msk;
1502
1503 dev_dbg(&master->dev, "%s: enter\n", __func__);
1504 if (!portinfo)
1505 return -EINVAL;
1506
1507 if (!swrm) {
1508 dev_err(&master->dev,
1509 "%s: Invalid handle to swr controller\n",
1510 __func__);
1511 return -EINVAL;
1512 }
1513
1514 mutex_lock(&swrm->mlock);
Ramprasad Katkam2a799b42018-10-04 20:23:28 +05301515 mutex_lock(&swrm->devlock);
1516 if (!swrm->dev_up) {
1517 mutex_unlock(&swrm->devlock);
1518 mutex_unlock(&swrm->mlock);
1519 return -EINVAL;
1520 }
1521 mutex_unlock(&swrm->devlock);
Ramprasad Katkam7cb4ff62018-09-12 04:00:26 +05301522 if (!swrm_is_port_en(master))
1523 pm_runtime_get_sync(swrm->dev);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301524
1525 for (i = 0; i < portinfo->num_port; i++) {
1526 ret = swrm_get_master_port(swrm, &mstr_port_id, &mstr_ch_msk,
1527 portinfo->port_type[i],
1528 portinfo->port_id[i]);
1529 if (ret) {
1530 dev_err(&master->dev,
1531 "%s: mstr portid for slv port %d not found\n",
1532 __func__, portinfo->port_id[i]);
1533 goto port_fail;
1534 }
1535
1536 mport = &(swrm->mport_cfg[mstr_port_id]);
1537 /* get port req */
1538 port_req = swrm_get_port_req(mport, portinfo->port_id[i],
1539 portinfo->dev_num);
1540 if (!port_req) {
1541 dev_dbg(&master->dev, "%s: new req:port id %d dev %d\n",
1542 __func__, portinfo->port_id[i],
1543 portinfo->dev_num);
1544 port_req = kzalloc(sizeof(struct swr_port_info),
1545 GFP_KERNEL);
1546 if (!port_req) {
1547 ret = -ENOMEM;
1548 goto mem_fail;
1549 }
1550 port_req->dev_num = portinfo->dev_num;
1551 port_req->slave_port_id = portinfo->port_id[i];
1552 port_req->num_ch = portinfo->num_ch[i];
1553 port_req->ch_rate = portinfo->ch_rate[i];
1554 port_req->ch_en = 0;
1555 port_req->master_port_id = mstr_port_id;
1556 list_add(&port_req->list, &mport->port_req_list);
1557 }
1558 port_req->req_ch |= portinfo->ch_en[i];
1559
1560 dev_dbg(&master->dev,
1561 "%s: mstr port %d, slv port %d ch_rate %d num_ch %d\n",
1562 __func__, port_req->master_port_id,
1563 port_req->slave_port_id, port_req->ch_rate,
1564 port_req->num_ch);
1565 /* Put the port req on master port */
1566 mport = &(swrm->mport_cfg[mstr_port_id]);
1567 mport->port_en = true;
1568 mport->req_ch |= mstr_ch_msk;
1569 master->port_en_mask |= (1 << mstr_port_id);
Sudheer Papothiae5c3632019-11-27 06:52:06 +05301570 if (swrm->clk_stop_mode0_supp &&
1571 (mport->ch_rate < portinfo->ch_rate[i])) {
1572 mport->ch_rate = portinfo->ch_rate[i];
1573 swrm_update_bus_clk(swrm);
1574 }
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301575 }
1576 master->num_port += portinfo->num_port;
Ramprasad Katkamcab8d722018-09-28 15:54:06 +05301577 set_bit(ENABLE_PENDING, &swrm->port_req_pending);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301578 swr_port_response(master, portinfo->tid);
1579
1580 mutex_unlock(&swrm->mlock);
1581 return 0;
1582
1583port_fail:
1584mem_fail:
1585 /* cleanup port reqs in error condition */
1586 swrm_cleanup_disabled_port_reqs(master);
1587 mutex_unlock(&swrm->mlock);
1588 return ret;
1589}
1590
1591static int swrm_disconnect_port(struct swr_master *master,
1592 struct swr_params *portinfo)
1593{
1594 int i, ret = 0;
1595 struct swr_port_info *port_req;
1596 struct swrm_mports *mport;
1597 struct swr_mstr_ctrl *swrm = swr_get_ctrl_data(master);
1598 u8 mstr_port_id, mstr_ch_mask;
1599
1600 if (!swrm) {
1601 dev_err(&master->dev,
1602 "%s: Invalid handle to swr controller\n",
1603 __func__);
1604 return -EINVAL;
1605 }
1606
1607 if (!portinfo) {
1608 dev_err(&master->dev, "%s: portinfo is NULL\n", __func__);
1609 return -EINVAL;
1610 }
1611 mutex_lock(&swrm->mlock);
1612
1613 for (i = 0; i < portinfo->num_port; i++) {
1614
1615 ret = swrm_get_master_port(swrm, &mstr_port_id, &mstr_ch_mask,
1616 portinfo->port_type[i], portinfo->port_id[i]);
1617 if (ret) {
1618 dev_err(&master->dev,
1619 "%s: mstr portid for slv port %d not found\n",
1620 __func__, portinfo->port_id[i]);
1621 mutex_unlock(&swrm->mlock);
1622 return -EINVAL;
1623 }
1624 mport = &(swrm->mport_cfg[mstr_port_id]);
1625 /* get port req */
1626 port_req = swrm_get_port_req(mport, portinfo->port_id[i],
1627 portinfo->dev_num);
1628
1629 if (!port_req) {
1630 			dev_err(&master->dev, "%s: port not enabled: port %d\n",
1631 __func__, portinfo->port_id[i]);
Ramprasad Katkam86c45e02018-10-16 19:31:51 +05301632 mutex_unlock(&swrm->mlock);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301633 return -EINVAL;
1634 }
1635 port_req->req_ch &= ~portinfo->ch_en[i];
1636 mport->req_ch &= ~mstr_ch_mask;
Sudheer Papothiae5c3632019-11-27 06:52:06 +05301637 if (swrm->clk_stop_mode0_supp && !mport->req_ch) {
1638 mport->ch_rate = 0;
1639 swrm_update_bus_clk(swrm);
1640 }
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301641 }
1642 master->num_port -= portinfo->num_port;
Ramprasad Katkamcab8d722018-09-28 15:54:06 +05301643 set_bit(DISABLE_PENDING, &swrm->port_req_pending);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301644 swr_port_response(master, portinfo->tid);
1645 mutex_unlock(&swrm->mlock);
1646
1647 return 0;
1648}
1649
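/*
 * Scan the two status bits per device and return the first device
 * found in the SWR_ALERT state via devnum.
 */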
Ramprasad Katkam1f221262018-08-23 15:01:22 +05301650static int swrm_find_alert_slave(struct swr_mstr_ctrl *swrm,
1651 int status, u8 *devnum)
1652{
1653 int i;
1654 bool found = false;
1655
1656 for (i = 0; i < (swrm->master.num_dev + 1); i++) {
1657 if ((status & SWRM_MCP_SLV_STATUS_MASK) == SWR_ALERT) {
1658 *devnum = i;
1659 found = true;
1660 break;
1661 }
1662 status >>= 2;
1663 }
1664 if (found)
1665 return 0;
1666 else
1667 return -EINVAL;
1668}
1669
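/*
 * Re-enable the host interrupt mask on every attached slave device
 * by writing SWRS_SCP_INT_STATUS_MASK_1 over the command FIFO.
 */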
Sudheer Papothi07d5afc2019-07-17 06:25:45 +05301670static void swrm_enable_slave_irq(struct swr_mstr_ctrl *swrm)
1671{
1672 int i;
1673 int status = 0;
1674
1675 status = swr_master_read(swrm, SWRM_MCP_SLV_STATUS);
1676 if (!status) {
1677 		dev_dbg_ratelimited(swrm->dev, "%s: slave status is 0x%x\n",
1678 __func__, status);
1679 return;
1680 }
1681 dev_dbg(swrm->dev, "%s: slave status: 0x%x\n", __func__, status);
1682 for (i = 0; i < (swrm->master.num_dev + 1); i++) {
1683 if (status & SWRM_MCP_SLV_STATUS_MASK)
1684 swrm_cmd_fifo_wr_cmd(swrm, 0x4, i, 0x0,
1685 SWRS_SCP_INT_STATUS_MASK_1);
1686 status >>= 2;
1687 }
1688}
1689
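/*
 * Compare the new slave status word with the cached slave_status
 * (two bits per device) and return the new state of the first
 * device whose status changed; its index is returned in devnum.
 */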
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301690static int swrm_check_slave_change_status(struct swr_mstr_ctrl *swrm,
1691 int status, u8 *devnum)
1692{
1693 int i;
1694 int new_sts = status;
1695 int ret = SWR_NOT_PRESENT;
1696
1697 if (status != swrm->slave_status) {
1698 for (i = 0; i < (swrm->master.num_dev + 1); i++) {
1699 if ((status & SWRM_MCP_SLV_STATUS_MASK) !=
1700 (swrm->slave_status & SWRM_MCP_SLV_STATUS_MASK)) {
1701 ret = (status & SWRM_MCP_SLV_STATUS_MASK);
1702 *devnum = i;
1703 break;
1704 }
1705 status >>= 2;
1706 swrm->slave_status >>= 2;
1707 }
1708 swrm->slave_status = new_sts;
1709 }
1710 return ret;
1711}
1712
1713static irqreturn_t swr_mstr_interrupt(int irq, void *dev)
1714{
1715 struct swr_mstr_ctrl *swrm = dev;
Ramprasad Katkam7e354782018-11-21 15:52:54 +05301716 u32 value, intr_sts, intr_sts_masked;
Ramprasad Katkam1f221262018-08-23 15:01:22 +05301717 u32 temp = 0;
1718 u32 status, chg_sts, i;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301719 u8 devnum = 0;
1720 int ret = IRQ_HANDLED;
1721 struct swr_device *swr_dev;
1722 struct swr_master *mstr = &swrm->master;
1723
Aditya Bavanarif500a1d2019-09-16 18:27:51 -07001724 trace_printk("%s enter\n", __func__);
Ramprasad Katkam57349872018-11-11 18:34:57 +05301725 if (unlikely(swrm_lock_sleep(swrm) == false)) {
1726 dev_err(swrm->dev, "%s Failed to hold suspend\n", __func__);
1727 return IRQ_NONE;
1728 }
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301729
1730 mutex_lock(&swrm->reslock);
Aditya Bavanarif4a471d2019-02-19 17:57:12 +05301731 if (swrm_clk_request(swrm, true)) {
Ramprasad Katkam14efed62019-03-07 13:16:50 +05301732 dev_err_ratelimited(swrm->dev, "%s:clk request failed\n",
1733 __func__);
Aditya Bavanarif4a471d2019-02-19 17:57:12 +05301734 mutex_unlock(&swrm->reslock);
1735 goto exit;
1736 }
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301737 mutex_unlock(&swrm->reslock);
1738
1739 intr_sts = swr_master_read(swrm, SWRM_INTERRUPT_STATUS);
Ramprasad Katkam7e354782018-11-21 15:52:54 +05301740 intr_sts_masked = intr_sts & swrm->intr_mask;
Aditya Bavanarif500a1d2019-09-16 18:27:51 -07001741
1742 trace_printk("%s: status: 0x%x \n", __func__, intr_sts_masked);
Ramprasad Katkam83303512018-10-11 17:34:22 +05301743handle_irq:
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301744 for (i = 0; i < SWRM_INTERRUPT_MAX; i++) {
Ramprasad Katkam7e354782018-11-21 15:52:54 +05301745 value = intr_sts_masked & (1 << i);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301746 if (!value)
1747 continue;
1748
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301749 switch (value) {
1750 case SWRM_INTERRUPT_STATUS_SLAVE_PEND_IRQ:
1751 dev_dbg(swrm->dev, "Trigger irq to slave device\n");
1752 status = swr_master_read(swrm, SWRM_MCP_SLV_STATUS);
Ramprasad Katkam1f221262018-08-23 15:01:22 +05301753 ret = swrm_find_alert_slave(swrm, status, &devnum);
1754 if (ret) {
Ramprasad Katkam18bc8e22018-10-25 15:04:24 +05301755 dev_err_ratelimited(swrm->dev,
1756 					"no slave alert found, spurious interrupt\n");
Ramprasad Katkam48b49b22018-10-01 20:12:46 +05301757 break;
Ramprasad Katkam1f221262018-08-23 15:01:22 +05301758 }
Ramprasad Katkam1f221262018-08-23 15:01:22 +05301759 swrm_cmd_fifo_rd_cmd(swrm, &temp, devnum, 0x0,
1760 SWRS_SCP_INT_STATUS_CLEAR_1, 1);
1761 swrm_cmd_fifo_wr_cmd(swrm, 0x4, devnum, 0x0,
1762 SWRS_SCP_INT_STATUS_CLEAR_1);
1763 swrm_cmd_fifo_wr_cmd(swrm, 0x0, devnum, 0x0,
1764 SWRS_SCP_INT_STATUS_CLEAR_1);
Ramprasad Katkam62d6d762018-09-20 17:50:28 +05301765
1766
1767 list_for_each_entry(swr_dev, &mstr->devices, dev_list) {
1768 if (swr_dev->dev_num != devnum)
1769 continue;
1770 if (swr_dev->slave_irq) {
1771 do {
Ramprasad Katkam2586a4b2019-03-18 16:53:39 +05301772 swr_dev->slave_irq_pending = 0;
Ramprasad Katkam62d6d762018-09-20 17:50:28 +05301773 handle_nested_irq(
1774 irq_find_mapping(
1775 swr_dev->slave_irq, 0));
1776 } while (swr_dev->slave_irq_pending);
1777 }
1778
1779 }
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301780 break;
1781 case SWRM_INTERRUPT_STATUS_NEW_SLAVE_ATTACHED:
1782 dev_dbg(swrm->dev, "SWR new slave attached\n");
1783 break;
1784 case SWRM_INTERRUPT_STATUS_CHANGE_ENUM_SLAVE_STATUS:
1785 status = swr_master_read(swrm, SWRM_MCP_SLV_STATUS);
1786 if (status == swrm->slave_status) {
1787 dev_dbg(swrm->dev,
1788 "%s: No change in slave status: %d\n",
1789 __func__, status);
1790 break;
1791 }
1792 chg_sts = swrm_check_slave_change_status(swrm, status,
1793 &devnum);
1794 switch (chg_sts) {
1795 case SWR_NOT_PRESENT:
1796 dev_dbg(swrm->dev, "device %d got detached\n",
1797 devnum);
1798 break;
1799 case SWR_ATTACHED_OK:
1800 dev_dbg(swrm->dev, "device %d got attached\n",
1801 devnum);
Ramprasad Katkamdebe8932018-09-25 18:08:18 +05301802 			/* enable host irq from slave device */
1803 swrm_cmd_fifo_wr_cmd(swrm, 0xFF, devnum, 0x0,
1804 SWRS_SCP_INT_STATUS_CLEAR_1);
1805 swrm_cmd_fifo_wr_cmd(swrm, 0x4, devnum, 0x0,
1806 SWRS_SCP_INT_STATUS_MASK_1);
1807
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301808 break;
1809 case SWR_ALERT:
1810 dev_dbg(swrm->dev,
1811 "device %d has pending interrupt\n",
1812 devnum);
1813 break;
1814 }
1815 break;
1816 case SWRM_INTERRUPT_STATUS_MASTER_CLASH_DET:
1817 dev_err_ratelimited(swrm->dev,
1818 				"SWR bus clash detected\n");
1819 break;
1820 case SWRM_INTERRUPT_STATUS_RD_FIFO_OVERFLOW:
1821 dev_dbg(swrm->dev, "SWR read FIFO overflow\n");
1822 break;
1823 case SWRM_INTERRUPT_STATUS_RD_FIFO_UNDERFLOW:
1824 dev_dbg(swrm->dev, "SWR read FIFO underflow\n");
1825 break;
1826 case SWRM_INTERRUPT_STATUS_WR_CMD_FIFO_OVERFLOW:
1827 dev_dbg(swrm->dev, "SWR write FIFO overflow\n");
1828 break;
1829 case SWRM_INTERRUPT_STATUS_CMD_ERROR:
1830 value = swr_master_read(swrm, SWRM_CMD_FIFO_STATUS);
1831 dev_err_ratelimited(swrm->dev,
1832 "SWR CMD error, fifo status 0x%x, flushing fifo\n",
1833 value);
1834 swr_master_write(swrm, SWRM_CMD_FIFO_CMD, 0x1);
1835 break;
1836 case SWRM_INTERRUPT_STATUS_DOUT_PORT_COLLISION:
Ramprasad Katkam18bc8e22018-10-25 15:04:24 +05301837 dev_err_ratelimited(swrm->dev, "SWR Port collision detected\n");
Ramprasad Katkam7e354782018-11-21 15:52:54 +05301838 swrm->intr_mask &= ~SWRM_INTERRUPT_STATUS_DOUT_PORT_COLLISION;
Ramprasad Katkam18bc8e22018-10-25 15:04:24 +05301839 swr_master_write(swrm,
Ramprasad Katkam7e354782018-11-21 15:52:54 +05301840 SWR_MSTR_RX_SWRM_CPU_INTERRUPT_EN, swrm->intr_mask);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301841 break;
1842 case SWRM_INTERRUPT_STATUS_READ_EN_RD_VALID_MISMATCH:
1843 dev_dbg(swrm->dev, "SWR read enable valid mismatch\n");
Ramprasad Katkam7e354782018-11-21 15:52:54 +05301844 swrm->intr_mask &=
Ramprasad Katkam18bc8e22018-10-25 15:04:24 +05301845 ~SWRM_INTERRUPT_STATUS_READ_EN_RD_VALID_MISMATCH;
1846 swr_master_write(swrm,
Ramprasad Katkam7e354782018-11-21 15:52:54 +05301847 SWR_MSTR_RX_SWRM_CPU_INTERRUPT_EN, swrm->intr_mask);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301848 break;
1849 case SWRM_INTERRUPT_STATUS_SPECIAL_CMD_ID_FINISHED:
1850 complete(&swrm->broadcast);
1851 dev_dbg(swrm->dev, "SWR cmd id finished\n");
1852 break;
1853 case SWRM_INTERRUPT_STATUS_NEW_SLAVE_AUTO_ENUM_FINISHED:
1854 break;
1855 case SWRM_INTERRUPT_STATUS_AUTO_ENUM_FAILED:
1856 break;
1857 case SWRM_INTERRUPT_STATUS_AUTO_ENUM_TABLE_IS_FULL:
1858 break;
1859 case SWRM_INTERRUPT_STATUS_BUS_RESET_FINISHED:
1860 complete(&swrm->reset);
1861 break;
1862 case SWRM_INTERRUPT_STATUS_CLK_STOP_FINISHED:
1863 break;
1864 default:
1865 dev_err_ratelimited(swrm->dev,
1866 "SWR unknown interrupt\n");
1867 ret = IRQ_NONE;
1868 break;
1869 }
1870 }
Ramprasad Katkam1f221262018-08-23 15:01:22 +05301871 swr_master_write(swrm, SWRM_INTERRUPT_CLEAR, intr_sts);
1872 swr_master_write(swrm, SWRM_INTERRUPT_CLEAR, 0x0);
Ramprasad Katkam83303512018-10-11 17:34:22 +05301873
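	/* Check for interrupts raised while the handler was running */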
1874 intr_sts = swr_master_read(swrm, SWRM_INTERRUPT_STATUS);
Ramprasad Katkam7e354782018-11-21 15:52:54 +05301875 intr_sts_masked = intr_sts & swrm->intr_mask;
Ramprasad Katkam83303512018-10-11 17:34:22 +05301876
Ramprasad Katkam7e354782018-11-21 15:52:54 +05301877 if (intr_sts_masked) {
Ramprasad Katkam83303512018-10-11 17:34:22 +05301878 dev_dbg(swrm->dev, "%s: new interrupt received\n", __func__);
1879 goto handle_irq;
1880 }
1881
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301882 mutex_lock(&swrm->reslock);
1883 swrm_clk_request(swrm, false);
1884 mutex_unlock(&swrm->reslock);
Aditya Bavanarif4a471d2019-02-19 17:57:12 +05301885exit:
Ramprasad Katkam57349872018-11-11 18:34:57 +05301886 swrm_unlock_sleep(swrm);
Aditya Bavanarif500a1d2019-09-16 18:27:51 -07001887 trace_printk("%s exit\n", __func__);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301888 return ret;
1889}
1890
Sudheer Papothid19d0c52019-02-23 05:41:39 +05301891static irqreturn_t swr_mstr_interrupt_v2(int irq, void *dev)
1892{
1893 struct swr_mstr_ctrl *swrm = dev;
1894 u32 value, intr_sts, intr_sts_masked;
1895 u32 temp = 0;
1896 u32 status, chg_sts, i;
1897 u8 devnum = 0;
1898 int ret = IRQ_HANDLED;
1899 struct swr_device *swr_dev;
1900 struct swr_master *mstr = &swrm->master;
1901
Aditya Bavanarif500a1d2019-09-16 18:27:51 -07001902 trace_printk("%s enter\n", __func__);
Sudheer Papothid19d0c52019-02-23 05:41:39 +05301903 if (unlikely(swrm_lock_sleep(swrm) == false)) {
1904 dev_err(swrm->dev, "%s Failed to hold suspend\n", __func__);
1905 return IRQ_NONE;
1906 }
1907
Aditya Bavanarieb044612019-12-22 17:14:15 +05301908 mutex_lock(&swrm->ssr_lock);
Sudheer Papothid19d0c52019-02-23 05:41:39 +05301909 mutex_lock(&swrm->reslock);
Sudheer Papothi384addd2019-06-14 02:26:52 +05301910 if (swrm_request_hw_vote(swrm, LPASS_HW_CORE, true)) {
1911 ret = IRQ_NONE;
1912 goto exit;
1913 }
1914 if (swrm_request_hw_vote(swrm, LPASS_AUDIO_CORE, true)) {
1915 ret = IRQ_NONE;
Sudheer Papothi06f43412019-07-09 03:32:54 +05301916 goto err_audio_hw_vote;
Karthikeyan Mani035c50b2019-05-02 13:35:01 -07001917 }
Karthikeyan Mani4bee1db2019-09-18 17:58:41 -07001918 ret = swrm_clk_request(swrm, true);
1919 if (ret) {
1920 		dev_err(swrm->dev, "%s: swrm clk failed\n", __func__);
1921 ret = IRQ_NONE;
1922 goto err_audio_core_vote;
1923 }
Sudheer Papothid19d0c52019-02-23 05:41:39 +05301924 mutex_unlock(&swrm->reslock);
1925
1926 intr_sts = swr_master_read(swrm, SWRM_INTERRUPT_STATUS);
1927 intr_sts_masked = intr_sts & swrm->intr_mask;
Sudheer Papothi06f43412019-07-09 03:32:54 +05301928
1929 dev_dbg(swrm->dev, "%s: status: 0x%x \n", __func__, intr_sts_masked);
Aditya Bavanarif500a1d2019-09-16 18:27:51 -07001930 trace_printk("%s: status: 0x%x \n", __func__, intr_sts_masked);
Sudheer Papothid19d0c52019-02-23 05:41:39 +05301931handle_irq:
1932 for (i = 0; i < SWRM_INTERRUPT_MAX; i++) {
1933 value = intr_sts_masked & (1 << i);
1934 if (!value)
1935 continue;
1936
1937 switch (value) {
1938 case SWRM_INTERRUPT_STATUS_SLAVE_PEND_IRQ:
1939 dev_dbg(swrm->dev, "%s: Trigger irq to slave device\n",
1940 __func__);
1941 status = swr_master_read(swrm, SWRM_MCP_SLV_STATUS);
1942 ret = swrm_find_alert_slave(swrm, status, &devnum);
1943 if (ret) {
1944 dev_err_ratelimited(swrm->dev,
1945 					"%s: no slave alert found, spurious interrupt\n",
1946 __func__);
1947 break;
1948 }
1949 swrm_cmd_fifo_rd_cmd(swrm, &temp, devnum, 0x0,
1950 SWRS_SCP_INT_STATUS_CLEAR_1, 1);
1951 swrm_cmd_fifo_wr_cmd(swrm, 0x4, devnum, 0x0,
1952 SWRS_SCP_INT_STATUS_CLEAR_1);
1953 swrm_cmd_fifo_wr_cmd(swrm, 0x0, devnum, 0x0,
1954 SWRS_SCP_INT_STATUS_CLEAR_1);
1955
1956
1957 list_for_each_entry(swr_dev, &mstr->devices, dev_list) {
1958 if (swr_dev->dev_num != devnum)
1959 continue;
1960 if (swr_dev->slave_irq) {
1961 do {
Meng Wang31a7ef12019-12-18 10:36:29 +08001962 swr_dev->slave_irq_pending = 0;
Sudheer Papothid19d0c52019-02-23 05:41:39 +05301963 handle_nested_irq(
1964 irq_find_mapping(
1965 swr_dev->slave_irq, 0));
1966 } while (swr_dev->slave_irq_pending);
1967 }
1968
1969 }
1970 break;
1971 case SWRM_INTERRUPT_STATUS_NEW_SLAVE_ATTACHED:
1972 dev_dbg(swrm->dev, "%s: SWR new slave attached\n",
1973 __func__);
1974 break;
1975 case SWRM_INTERRUPT_STATUS_CHANGE_ENUM_SLAVE_STATUS:
1976 status = swr_master_read(swrm, SWRM_MCP_SLV_STATUS);
Laxminath Kasam44cedb82019-11-20 17:37:23 +05301977 swrm_enable_slave_irq(swrm);
Sudheer Papothid19d0c52019-02-23 05:41:39 +05301978 if (status == swrm->slave_status) {
1979 dev_dbg(swrm->dev,
1980 "%s: No change in slave status: %d\n",
1981 __func__, status);
1982 break;
1983 }
1984 chg_sts = swrm_check_slave_change_status(swrm, status,
1985 &devnum);
1986 switch (chg_sts) {
1987 case SWR_NOT_PRESENT:
1988 dev_dbg(swrm->dev,
1989 "%s: device %d got detached\n",
1990 __func__, devnum);
1991 break;
1992 case SWR_ATTACHED_OK:
1993 dev_dbg(swrm->dev,
1994 "%s: device %d got attached\n",
1995 __func__, devnum);
1996 				/* enable host irq from slave device */
1997 swrm_cmd_fifo_wr_cmd(swrm, 0xFF, devnum, 0x0,
1998 SWRS_SCP_INT_STATUS_CLEAR_1);
1999 swrm_cmd_fifo_wr_cmd(swrm, 0x4, devnum, 0x0,
2000 SWRS_SCP_INT_STATUS_MASK_1);
2001
2002 break;
2003 case SWR_ALERT:
2004 dev_dbg(swrm->dev,
2005 "%s: device %d has pending interrupt\n",
2006 __func__, devnum);
2007 break;
2008 }
2009 break;
2010 case SWRM_INTERRUPT_STATUS_MASTER_CLASH_DET:
2011 dev_err_ratelimited(swrm->dev,
2012 				"%s: SWR bus clash detected\n",
2013 __func__);
2014 break;
2015 case SWRM_INTERRUPT_STATUS_RD_FIFO_OVERFLOW:
2016 dev_dbg(swrm->dev, "%s: SWR read FIFO overflow\n",
2017 __func__);
2018 break;
2019 case SWRM_INTERRUPT_STATUS_RD_FIFO_UNDERFLOW:
2020 dev_dbg(swrm->dev, "%s: SWR read FIFO underflow\n",
2021 __func__);
2022 break;
2023 case SWRM_INTERRUPT_STATUS_WR_CMD_FIFO_OVERFLOW:
2024 dev_dbg(swrm->dev, "%s: SWR write FIFO overflow\n",
2025 __func__);
Sudheer Papothiac0ae1c2019-10-17 05:38:40 +05302026 swr_master_write(swrm, SWRM_CMD_FIFO_CMD, 0x1);
Sudheer Papothid19d0c52019-02-23 05:41:39 +05302027 break;
2028 case SWRM_INTERRUPT_STATUS_CMD_ERROR:
2029 value = swr_master_read(swrm, SWRM_CMD_FIFO_STATUS);
2030 dev_err_ratelimited(swrm->dev,
2031 "%s: SWR CMD error, fifo status 0x%x, flushing fifo\n",
2032 __func__, value);
2033 swr_master_write(swrm, SWRM_CMD_FIFO_CMD, 0x1);
2034 break;
2035 case SWRM_INTERRUPT_STATUS_DOUT_PORT_COLLISION:
2036 dev_err_ratelimited(swrm->dev,
2037 "%s: SWR Port collision detected\n",
2038 __func__);
2039 swrm->intr_mask &= ~SWRM_INTERRUPT_STATUS_DOUT_PORT_COLLISION;
2040 swr_master_write(swrm,
2041 SWR_MSTR_RX_SWRM_CPU_INTERRUPT_EN, swrm->intr_mask);
2042 break;
2043 case SWRM_INTERRUPT_STATUS_READ_EN_RD_VALID_MISMATCH:
2044 dev_dbg(swrm->dev,
2045 "%s: SWR read enable valid mismatch\n",
2046 __func__);
2047 swrm->intr_mask &=
2048 ~SWRM_INTERRUPT_STATUS_READ_EN_RD_VALID_MISMATCH;
2049 swr_master_write(swrm,
2050 SWR_MSTR_RX_SWRM_CPU_INTERRUPT_EN, swrm->intr_mask);
2051 break;
2052 case SWRM_INTERRUPT_STATUS_SPECIAL_CMD_ID_FINISHED:
2053 complete(&swrm->broadcast);
2054 dev_dbg(swrm->dev, "%s: SWR cmd id finished\n",
2055 __func__);
2056 break;
2057 case SWRM_INTERRUPT_STATUS_AUTO_ENUM_FAILED_V2:
2058 break;
2059 case SWRM_INTERRUPT_STATUS_AUTO_ENUM_TABLE_IS_FULL_V2:
2060 break;
2061 case SWRM_INTERRUPT_STATUS_BUS_RESET_FINISHED_V2:
Laxminath Kasame2291972019-11-08 14:51:59 +05302062 swrm_check_link_status(swrm, 0x1);
Sudheer Papothid19d0c52019-02-23 05:41:39 +05302063 break;
2064 case SWRM_INTERRUPT_STATUS_CLK_STOP_FINISHED_V2:
2065 break;
2066 case SWRM_INTERRUPT_STATUS_EXT_CLK_STOP_WAKEUP:
2067 if (swrm->state == SWR_MSTR_UP)
2068 dev_dbg(swrm->dev,
2069 "%s:SWR Master is already up\n",
2070 __func__);
2071 else
2072 dev_err_ratelimited(swrm->dev,
2073 					"%s: SWR woke up during clock stop\n",
2074 __func__);
Sudheer Papothi07d5afc2019-07-17 06:25:45 +05302075 			/* The slave device may have been reset
2076 			 * and its interrupt missed, so re-enable
2077 			 * the host IRQ and process any pending
2078 			 * slave interrupts.
2079 			 */
2080 swrm_enable_slave_irq(swrm);
Sudheer Papothid19d0c52019-02-23 05:41:39 +05302081 break;
2082 default:
2083 dev_err_ratelimited(swrm->dev,
2084 "%s: SWR unknown interrupt value: %d\n",
2085 __func__, value);
2086 ret = IRQ_NONE;
2087 break;
2088 }
2089 }
2090 swr_master_write(swrm, SWRM_INTERRUPT_CLEAR, intr_sts);
2091 swr_master_write(swrm, SWRM_INTERRUPT_CLEAR, 0x0);
2092
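	/* Check for interrupts raised while the handler was running */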
2093 intr_sts = swr_master_read(swrm, SWRM_INTERRUPT_STATUS);
2094 intr_sts_masked = intr_sts & swrm->intr_mask;
2095
2096 if (intr_sts_masked) {
Sudheer Papothi07d5afc2019-07-17 06:25:45 +05302097 dev_dbg(swrm->dev, "%s: new interrupt received 0x%x\n",
2098 __func__, intr_sts_masked);
Sudheer Papothid19d0c52019-02-23 05:41:39 +05302099 goto handle_irq;
2100 }
2101
2102 mutex_lock(&swrm->reslock);
2103 swrm_clk_request(swrm, false);
Karthikeyan Mani4bee1db2019-09-18 17:58:41 -07002104err_audio_core_vote:
Sudheer Papothi384addd2019-06-14 02:26:52 +05302105 swrm_request_hw_vote(swrm, LPASS_AUDIO_CORE, false);
Sudheer Papothi06f43412019-07-09 03:32:54 +05302106
2107err_audio_hw_vote:
Sudheer Papothi384addd2019-06-14 02:26:52 +05302108 swrm_request_hw_vote(swrm, LPASS_HW_CORE, false);
Karthikeyan Mani035c50b2019-05-02 13:35:01 -07002109exit:
Sudheer Papothid19d0c52019-02-23 05:41:39 +05302110 mutex_unlock(&swrm->reslock);
Aditya Bavanarieb044612019-12-22 17:14:15 +05302111 mutex_unlock(&swrm->ssr_lock);
Sudheer Papothid19d0c52019-02-23 05:41:39 +05302112 swrm_unlock_sleep(swrm);
Aditya Bavanarif500a1d2019-09-16 18:27:51 -07002113 trace_printk("%s exit\n", __func__);
Sudheer Papothid19d0c52019-02-23 05:41:39 +05302114 return ret;
2115}
2116
Aditya Bavanaric034fad2018-11-12 22:55:11 +05302117static irqreturn_t swrm_wakeup_interrupt(int irq, void *dev)
2118{
2119 struct swr_mstr_ctrl *swrm = dev;
2120 int ret = IRQ_HANDLED;
2121
2122 if (!swrm || !(swrm->dev)) {
2123 pr_err("%s: swrm or dev is null\n", __func__);
2124 return IRQ_NONE;
2125 }
Vatsal Bucha8bcaeab2019-10-31 11:45:36 +05302126
Aditya Bavanarif500a1d2019-09-16 18:27:51 -07002127 trace_printk("%s enter\n", __func__);
Aditya Bavanaric034fad2018-11-12 22:55:11 +05302128 mutex_lock(&swrm->devlock);
2129 if (!swrm->dev_up) {
Vatsal Bucha8bcaeab2019-10-31 11:45:36 +05302130 if (swrm->wake_irq > 0) {
2131 if (unlikely(!irq_get_irq_data(swrm->wake_irq))) {
2132 pr_err("%s: irq data is NULL\n", __func__);
2133 mutex_unlock(&swrm->devlock);
2134 return IRQ_NONE;
2135 }
2136 mutex_lock(&swrm->irq_lock);
2137 if (!irqd_irq_disabled(
2138 irq_get_irq_data(swrm->wake_irq)))
2139 disable_irq_nosync(swrm->wake_irq);
2140 mutex_unlock(&swrm->irq_lock);
2141 }
Aditya Bavanaric034fad2018-11-12 22:55:11 +05302142 mutex_unlock(&swrm->devlock);
2143 return ret;
2144 }
2145 mutex_unlock(&swrm->devlock);
Ramprasad Katkam44b7a962018-12-20 15:08:44 +05302146 if (unlikely(swrm_lock_sleep(swrm) == false)) {
2147 dev_err(swrm->dev, "%s Failed to hold suspend\n", __func__);
2148 goto exit;
2149 }
Vatsal Bucha8bcaeab2019-10-31 11:45:36 +05302150 if (swrm->wake_irq > 0) {
2151 if (unlikely(!irq_get_irq_data(swrm->wake_irq))) {
2152 			pr_err("%s: irq data is NULL\n", __func__);
			swrm_unlock_sleep(swrm);
2153 			return IRQ_NONE;
2154 }
2155 mutex_lock(&swrm->irq_lock);
2156 if (!irqd_irq_disabled(
2157 irq_get_irq_data(swrm->wake_irq)))
2158 disable_irq_nosync(swrm->wake_irq);
2159 mutex_unlock(&swrm->irq_lock);
2160 }
Aditya Bavanaric034fad2018-11-12 22:55:11 +05302161 pm_runtime_get_sync(swrm->dev);
2162 pm_runtime_mark_last_busy(swrm->dev);
2163 pm_runtime_put_autosuspend(swrm->dev);
Ramprasad Katkam44b7a962018-12-20 15:08:44 +05302164 swrm_unlock_sleep(swrm);
2165exit:
Aditya Bavanarif500a1d2019-09-16 18:27:51 -07002166 trace_printk("%s exit\n", __func__);
Aditya Bavanaric034fad2018-11-12 22:55:11 +05302167 return ret;
2168}
2169
Laxminath Kasamf0128ef2018-08-31 15:15:09 +05302170static void swrm_wakeup_work(struct work_struct *work)
2171{
2172 struct swr_mstr_ctrl *swrm;
2173
2174 swrm = container_of(work, struct swr_mstr_ctrl,
2175 wakeup_work);
2176 if (!swrm || !(swrm->dev)) {
2177 pr_err("%s: swrm or dev is null\n", __func__);
2178 return;
2179 }
Ramprasad Katkam2a799b42018-10-04 20:23:28 +05302180
Aditya Bavanarif500a1d2019-09-16 18:27:51 -07002181 trace_printk("%s enter\n", __func__);
Ramprasad Katkam2a799b42018-10-04 20:23:28 +05302182 mutex_lock(&swrm->devlock);
2183 if (!swrm->dev_up) {
2184 mutex_unlock(&swrm->devlock);
Ramprasad Katkam57349872018-11-11 18:34:57 +05302185 goto exit;
Ramprasad Katkam2a799b42018-10-04 20:23:28 +05302186 }
2187 mutex_unlock(&swrm->devlock);
Ramprasad Katkam57349872018-11-11 18:34:57 +05302188 if (unlikely(swrm_lock_sleep(swrm) == false)) {
2189 dev_err(swrm->dev, "%s Failed to hold suspend\n", __func__);
2190 goto exit;
2191 }
Laxminath Kasamf0128ef2018-08-31 15:15:09 +05302192 pm_runtime_get_sync(swrm->dev);
Laxminath Kasamf0128ef2018-08-31 15:15:09 +05302193 pm_runtime_mark_last_busy(swrm->dev);
2194 pm_runtime_put_autosuspend(swrm->dev);
Ramprasad Katkam57349872018-11-11 18:34:57 +05302195 swrm_unlock_sleep(swrm);
2196exit:
Aditya Bavanarif500a1d2019-09-16 18:27:51 -07002197 trace_printk("%s exit\n", __func__);
Ramprasad Katkam57349872018-11-11 18:34:57 +05302198 pm_relax(swrm->dev);
Laxminath Kasamf0128ef2018-08-31 15:15:09 +05302199}
2200
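/*
 * Each device occupies two bits in SWRM_MCP_SLV_STATUS; extract and
 * return the status field for the given device number.
 */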
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302201static int swrm_get_device_status(struct swr_mstr_ctrl *swrm, u8 devnum)
2202{
2203 u32 val;
2204
2205 swrm->slave_status = swr_master_read(swrm, SWRM_MCP_SLV_STATUS);
2206 val = (swrm->slave_status >> (devnum * 2));
2207 val &= SWRM_MCP_SLV_STATUS_MASK;
2208 return val;
2209}
2210
2211static int swrm_get_logical_dev_num(struct swr_master *mstr, u64 dev_id,
2212 u8 *dev_num)
2213{
2214 int i;
2215 u64 id = 0;
2216 int ret = -EINVAL;
2217 struct swr_mstr_ctrl *swrm = swr_get_ctrl_data(mstr);
2218 struct swr_device *swr_dev;
2219 u32 num_dev = 0;
2220
2221 if (!swrm) {
2222 pr_err("%s: Invalid handle to swr controller\n",
2223 __func__);
2224 return ret;
2225 }
2226 if (swrm->num_dev)
2227 num_dev = swrm->num_dev;
2228 else
2229 num_dev = mstr->num_dev;
Ramprasad Katkam2a799b42018-10-04 20:23:28 +05302230
2231 mutex_lock(&swrm->devlock);
2232 if (!swrm->dev_up) {
2233 mutex_unlock(&swrm->devlock);
2234 return ret;
2235 }
2236 mutex_unlock(&swrm->devlock);
2237
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302238 pm_runtime_get_sync(swrm->dev);
2239 for (i = 1; i < (num_dev + 1); i++) {
2240 id = ((u64)(swr_master_read(swrm,
2241 SWRM_ENUMERATOR_SLAVE_DEV_ID_2(i))) << 32);
2242 id |= swr_master_read(swrm,
2243 SWRM_ENUMERATOR_SLAVE_DEV_ID_1(i));
Ramprasad Katkam1f221262018-08-23 15:01:22 +05302244
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302245 /*
2246 * As pm_runtime_get_sync() brings all slaves out of reset
2247 * update logical device number for all slaves.
2248 */
2249 list_for_each_entry(swr_dev, &mstr->devices, dev_list) {
2250 if (swr_dev->addr == (id & SWR_DEV_ID_MASK)) {
2251 u32 status = swrm_get_device_status(swrm, i);
2252
2253 if ((status == 0x01) || (status == 0x02)) {
2254 swr_dev->dev_num = i;
2255 if ((id & SWR_DEV_ID_MASK) == dev_id) {
2256 *dev_num = i;
2257 ret = 0;
2258 }
2259 dev_dbg(swrm->dev,
Xiao Lid8bb93c2020-01-07 12:59:05 +08002260 "%s: devnum %d is assigned for dev addr %llx\n",
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302261 __func__, i, swr_dev->addr);
2262 }
2263 }
2264 }
2265 }
2266 if (ret)
2267 dev_err(swrm->dev, "%s: device 0x%llx is not ready\n",
2268 __func__, dev_id);
2269
2270 pm_runtime_mark_last_busy(swrm->dev);
2271 pm_runtime_put_autosuspend(swrm->dev);
2272 return ret;
2273}
Sudheer Papothi6abd2de2018-09-05 05:57:04 +05302274
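/*
 * Vote for the LPASS core/audio hardware and take a runtime PM
 * reference on behalf of a slave device.
 */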
2275static void swrm_device_wakeup_vote(struct swr_master *mstr)
2276{
2277 struct swr_mstr_ctrl *swrm = swr_get_ctrl_data(mstr);
2278
2279 if (!swrm) {
2280 pr_err("%s: Invalid handle to swr controller\n",
2281 __func__);
2282 return;
2283 }
Ramprasad Katkam57349872018-11-11 18:34:57 +05302284 if (unlikely(swrm_lock_sleep(swrm) == false)) {
2285 dev_err(swrm->dev, "%s Failed to hold suspend\n", __func__);
2286 return;
2287 }
Aditya Bavanarieb044612019-12-22 17:14:15 +05302288 mutex_lock(&swrm->reslock);
2289 if (swrm_request_hw_vote(swrm, LPASS_HW_CORE, true))
2290 dev_err(swrm->dev, "%s:lpass core hw enable failed\n",
2291 __func__);
2292 if (swrm_request_hw_vote(swrm, LPASS_AUDIO_CORE, true))
2293 dev_err(swrm->dev, "%s:lpass audio hw enable failed\n",
2294 __func__);
2295 mutex_unlock(&swrm->reslock);
2296
Sudheer Papothi6abd2de2018-09-05 05:57:04 +05302297 pm_runtime_get_sync(swrm->dev);
2298}
2299
2300static void swrm_device_wakeup_unvote(struct swr_master *mstr)
2301{
2302 struct swr_mstr_ctrl *swrm = swr_get_ctrl_data(mstr);
2303
2304 if (!swrm) {
2305 pr_err("%s: Invalid handle to swr controller\n",
2306 __func__);
2307 return;
2308 }
2309 pm_runtime_mark_last_busy(swrm->dev);
2310 pm_runtime_put_autosuspend(swrm->dev);
Sudheer Papothi384addd2019-06-14 02:26:52 +05302311
Aditya Bavanarieb044612019-12-22 17:14:15 +05302312 mutex_lock(&swrm->reslock);
2313 swrm_request_hw_vote(swrm, LPASS_AUDIO_CORE, false);
2314 swrm_request_hw_vote(swrm, LPASS_HW_CORE, false);
2315 mutex_unlock(&swrm->reslock);
Sudheer Papothi384addd2019-06-14 02:26:52 +05302316
Ramprasad Katkam57349872018-11-11 18:34:57 +05302317 swrm_unlock_sleep(swrm);
Sudheer Papothi6abd2de2018-09-05 05:57:04 +05302318}
2319
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302320static int swrm_master_init(struct swr_mstr_ctrl *swrm)
2321{
2322 int ret = 0;
2323 u32 val;
Ramprasad Katkam14f47cc2018-07-25 17:20:18 +05302324 u8 row_ctrl = SWR_ROW_50;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302325 u8 col_ctrl = SWR_MIN_COL;
2326 u8 ssp_period = 1;
2327 u8 retry_cmd_num = 3;
2328 u32 reg[SWRM_MAX_INIT_REG];
2329 u32 value[SWRM_MAX_INIT_REG];
Laxminath Kasamc7bfab92019-08-27 16:19:14 +05302330 u32 temp = 0;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302331 int len = 0;
2332
Sudheer Papothiac0ae1c2019-10-17 05:38:40 +05302333 ssp_period = swrm_get_ssp_period(swrm, SWRM_ROW_50,
2334 SWRM_COL_02, SWRM_FRAME_SYNC_SEL);
2335 dev_dbg(swrm->dev, "%s: ssp_period: %d\n", __func__, ssp_period);
2336
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302337 /* Clear Rows and Cols */
2338 val = ((row_ctrl << SWRM_MCP_FRAME_CTRL_BANK_ROW_CTRL_SHFT) |
2339 (col_ctrl << SWRM_MCP_FRAME_CTRL_BANK_COL_CTRL_SHFT) |
Sudheer Papothiac0ae1c2019-10-17 05:38:40 +05302340 ((ssp_period - 1) << SWRM_MCP_FRAME_CTRL_BANK_SSP_PERIOD_SHFT));
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302341
2342 reg[len] = SWRM_MCP_FRAME_CTRL_BANK_ADDR(0);
2343 value[len++] = val;
2344
2345 /* Set Auto enumeration flag */
2346 reg[len] = SWRM_ENUMERATOR_CFG_ADDR;
2347 value[len++] = 1;
2348
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302349 /* Configure No pings */
2350 val = swr_master_read(swrm, SWRM_MCP_CFG_ADDR);
2351 val &= ~SWRM_MCP_CFG_MAX_NUM_OF_CMD_NO_PINGS_BMSK;
2352 val |= (0x1f << SWRM_MCP_CFG_MAX_NUM_OF_CMD_NO_PINGS_SHFT);
2353 reg[len] = SWRM_MCP_CFG_ADDR;
2354 value[len++] = val;
2355
2356 /* Configure number of retries of a read/write cmd */
2357 val = (retry_cmd_num << SWRM_CMD_FIFO_CFG_NUM_OF_CMD_RETRY_SHFT);
2358 reg[len] = SWRM_CMD_FIFO_CFG_ADDR;
2359 value[len++] = val;
2360
Ramprasad Katkam1f221262018-08-23 15:01:22 +05302361 reg[len] = SWRM_MCP_BUS_CTRL_ADDR;
2362 value[len++] = 0x2;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302363
Ramprasad Katkam83303512018-10-11 17:34:22 +05302364 /* Set IRQ to PULSE */
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302365 reg[len] = SWRM_COMP_CFG_ADDR;
Ramprasad Katkam83303512018-10-11 17:34:22 +05302366 value[len++] = 0x02;
2367
2368 reg[len] = SWRM_COMP_CFG_ADDR;
2369 value[len++] = 0x03;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302370
2371 reg[len] = SWRM_INTERRUPT_CLEAR;
Ramprasad Katkam1f221262018-08-23 15:01:22 +05302372 value[len++] = 0xFFFFFFFF;
2373
Ramprasad Katkam7e354782018-11-21 15:52:54 +05302374 swrm->intr_mask = SWRM_INTERRUPT_STATUS_MASK;
Ramprasad Katkam1f221262018-08-23 15:01:22 +05302375 /* Mask soundwire interrupts */
2376 reg[len] = SWRM_INTERRUPT_MASK_ADDR;
Ramprasad Katkam7e354782018-11-21 15:52:54 +05302377 value[len++] = swrm->intr_mask;
Ramprasad Katkam1f221262018-08-23 15:01:22 +05302378
2379 reg[len] = SWR_MSTR_RX_SWRM_CPU_INTERRUPT_EN;
Ramprasad Katkam7e354782018-11-21 15:52:54 +05302380 value[len++] = swrm->intr_mask;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302381
2382 swr_master_bulk_write(swrm, reg, value, len);
2383
Laxminath Kasamcafe0732019-11-20 17:31:58 +05302384 if (!swrm_check_link_status(swrm, 0x1)) {
2385 dev_err(swrm->dev,
2386 "%s: swr link failed to connect\n",
2387 __func__);
2388 return -EINVAL;
2389 }
Sudheer Papothi63f48152018-11-15 01:08:03 +05302390 /*
2391 * For SWR master version 1.5.1, continue
2392 	 * execution on command ignore.
2393 */
Laxminath Kasamc7bfab92019-08-27 16:19:14 +05302394 /* Execute it for versions >= 1.5.1 */
2395 if (swrm->version >= SWRM_VERSION_1_5_1)
Sudheer Papothi63f48152018-11-15 01:08:03 +05302396 swr_master_write(swrm, SWRM_CMD_FIFO_CFG_ADDR,
2397 (swr_master_read(swrm,
2398 SWRM_CMD_FIFO_CFG_ADDR) | 0x80000000));
2399
Laxminath Kasamc7bfab92019-08-27 16:19:14 +05302400 /* SW workaround to gate hw_ctl for SWR version >=1.6 */
2401 if (swrm->version >= SWRM_VERSION_1_6) {
2402 if (swrm->swrm_hctl_reg) {
2403 temp = ioread32(swrm->swrm_hctl_reg);
2404 temp &= 0xFFFFFFFD;
2405 iowrite32(temp, swrm->swrm_hctl_reg);
2406 }
2407 }
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302408 return ret;
2409}
2410
Ramprasad Katkam68765ab2018-08-30 11:46:32 +05302411static int swrm_event_notify(struct notifier_block *self,
Laxminath Kasamf0128ef2018-08-31 15:15:09 +05302412 unsigned long action, void *data)
Ramprasad Katkam68765ab2018-08-30 11:46:32 +05302413{
2414 struct swr_mstr_ctrl *swrm = container_of(self, struct swr_mstr_ctrl,
Laxminath Kasamf0128ef2018-08-31 15:15:09 +05302415 event_notifier);
2416
2417 if (!swrm || !(swrm->dev)) {
2418 pr_err("%s: swrm or dev is NULL\n", __func__);
Ramprasad Katkam68765ab2018-08-30 11:46:32 +05302419 return -EINVAL;
2420 }
Laxminath Kasamf0128ef2018-08-31 15:15:09 +05302421 switch (action) {
2422 case MSM_AUD_DC_EVENT:
2423 schedule_work(&(swrm->dc_presence_work));
2424 break;
2425 case SWR_WAKE_IRQ_EVENT:
Aditya Bavanaric034fad2018-11-12 22:55:11 +05302426 if (swrm->ipc_wakeup && !swrm->ipc_wakeup_triggered) {
2427 swrm->ipc_wakeup_triggered = true;
Ramprasad Katkam57349872018-11-11 18:34:57 +05302428 pm_stay_awake(swrm->dev);
Laxminath Kasamf0128ef2018-08-31 15:15:09 +05302429 schedule_work(&swrm->wakeup_work);
Ramprasad Katkamcd61c6e2018-09-18 13:22:58 +05302430 }
Laxminath Kasamf0128ef2018-08-31 15:15:09 +05302431 break;
2432 default:
2433 dev_err(swrm->dev, "%s: invalid event type: %lu\n",
Ramprasad Katkam68765ab2018-08-30 11:46:32 +05302434 __func__, action);
2435 return -EINVAL;
2436 }
2437
Ramprasad Katkam68765ab2018-08-30 11:46:32 +05302438 return 0;
2439}
2440
2441static void swrm_notify_work_fn(struct work_struct *work)
2442{
2443 struct swr_mstr_ctrl *swrm = container_of(work, struct swr_mstr_ctrl,
Laxminath Kasamf0128ef2018-08-31 15:15:09 +05302444 dc_presence_work);
2445
2446 if (!swrm || !swrm->pdev) {
2447 pr_err("%s: swrm or pdev is NULL\n", __func__);
2448 return;
2449 }
Ramprasad Katkam68765ab2018-08-30 11:46:32 +05302450 swrm_wcd_notify(swrm->pdev, SWR_DEVICE_DOWN, NULL);
2451}
2452
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302453static int swrm_probe(struct platform_device *pdev)
2454{
2455 struct swr_mstr_ctrl *swrm;
2456 struct swr_ctrl_platform_data *pdata;
Laxminath Kasamc7bfab92019-08-27 16:19:14 +05302457 u32 i, num_ports, port_num, port_type, ch_mask, swrm_hctl_reg = 0;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302458 u32 *temp, map_size, map_length, ch_iter = 0, old_port_num = 0;
2459 int ret = 0;
Sudheer Papothi66d6fd12019-03-27 17:34:48 +05302460 struct clk *lpass_core_hw_vote = NULL;
Sudheer Papothi384addd2019-06-14 02:26:52 +05302461 struct clk *lpass_core_audio = NULL;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302462
2463 /* Allocate soundwire master driver structure */
2464 swrm = devm_kzalloc(&pdev->dev, sizeof(struct swr_mstr_ctrl),
2465 GFP_KERNEL);
2466 if (!swrm) {
2467 ret = -ENOMEM;
2468 goto err_memory_fail;
2469 }
Laxminath Kasamf0128ef2018-08-31 15:15:09 +05302470 swrm->pdev = pdev;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302471 swrm->dev = &pdev->dev;
2472 platform_set_drvdata(pdev, swrm);
2473 swr_set_ctrl_data(&swrm->master, swrm);
2474 pdata = dev_get_platdata(&pdev->dev);
2475 if (!pdata) {
2476 dev_err(&pdev->dev, "%s: pdata from parent is NULL\n",
2477 __func__);
2478 ret = -EINVAL;
2479 goto err_pdata_fail;
2480 }
2481 swrm->handle = (void *)pdata->handle;
2482 if (!swrm->handle) {
2483 dev_err(&pdev->dev, "%s: swrm->handle is NULL\n",
2484 __func__);
2485 ret = -EINVAL;
2486 goto err_pdata_fail;
2487 }
Ramprasad Katkam14f47cc2018-07-25 17:20:18 +05302488 ret = of_property_read_u32(pdev->dev.of_node, "qcom,swr_master_id",
2489 &swrm->master_id);
2490 if (ret) {
2491 dev_err(&pdev->dev, "%s: failed to get master id\n", __func__);
2492 goto err_pdata_fail;
2493 }
Laxminath Kasamfbcaf322018-07-18 00:38:14 +05302494 if (!(of_property_read_u32(pdev->dev.of_node,
2495 "swrm-io-base", &swrm->swrm_base_reg)))
2496 ret = of_property_read_u32(pdev->dev.of_node,
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302497 "swrm-io-base", &swrm->swrm_base_reg);
2498 if (!swrm->swrm_base_reg) {
2499 swrm->read = pdata->read;
2500 if (!swrm->read) {
2501 dev_err(&pdev->dev, "%s: swrm->read is NULL\n",
2502 __func__);
2503 ret = -EINVAL;
2504 goto err_pdata_fail;
2505 }
2506 swrm->write = pdata->write;
2507 if (!swrm->write) {
2508 dev_err(&pdev->dev, "%s: swrm->write is NULL\n",
2509 __func__);
2510 ret = -EINVAL;
2511 goto err_pdata_fail;
2512 }
2513 swrm->bulk_write = pdata->bulk_write;
2514 if (!swrm->bulk_write) {
2515 dev_err(&pdev->dev, "%s: swrm->bulk_write is NULL\n",
2516 __func__);
2517 ret = -EINVAL;
2518 goto err_pdata_fail;
2519 }
2520 } else {
2521 swrm->swrm_dig_base = devm_ioremap(&pdev->dev,
2522 swrm->swrm_base_reg, SWRM_MAX_REGISTER);
2523 }
2524
Karthikeyan Mani1d750fe2019-09-06 14:36:09 -07002525 swrm->core_vote = pdata->core_vote;
Laxminath Kasamc7bfab92019-08-27 16:19:14 +05302526 if (!(of_property_read_u32(pdev->dev.of_node,
2527 "qcom,swrm-hctl-reg", &swrm_hctl_reg)))
2528 swrm->swrm_hctl_reg = devm_ioremap(&pdev->dev,
2529 swrm_hctl_reg, 0x4);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302530 swrm->clk = pdata->clk;
2531 if (!swrm->clk) {
2532 dev_err(&pdev->dev, "%s: swrm->clk is NULL\n",
2533 __func__);
2534 ret = -EINVAL;
2535 goto err_pdata_fail;
2536 }
Ramprasad Katkam14f47cc2018-07-25 17:20:18 +05302537 if (of_property_read_u32(pdev->dev.of_node,
2538 "qcom,swr-clock-stop-mode0",
2539 &swrm->clk_stop_mode0_supp)) {
2540 swrm->clk_stop_mode0_supp = FALSE;
2541 }
Ramprasad Katkam57349872018-11-11 18:34:57 +05302542
2543 ret = of_property_read_u32(swrm->dev->of_node, "qcom,swr-num-dev",
2544 &swrm->num_dev);
2545 if (ret) {
2546 dev_dbg(&pdev->dev, "%s: Looking up %s property failed\n",
2547 __func__, "qcom,swr-num-dev");
2548 } else {
Sudheer Papothiae5c3632019-11-27 06:52:06 +05302549 if (swrm->num_dev > SWRM_NUM_AUTO_ENUM_SLAVES) {
Ramprasad Katkam57349872018-11-11 18:34:57 +05302550 dev_err(&pdev->dev, "%s: num_dev %d > max limit %d\n",
Sudheer Papothiae5c3632019-11-27 06:52:06 +05302551 __func__, swrm->num_dev,
2552 SWRM_NUM_AUTO_ENUM_SLAVES);
Ramprasad Katkam57349872018-11-11 18:34:57 +05302553 ret = -EINVAL;
2554 goto err_pdata_fail;
2555 }
2556 }
2557
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302558 /* Parse soundwire port mapping */
2559 ret = of_property_read_u32(pdev->dev.of_node, "qcom,swr-num-ports",
2560 &num_ports);
2561 if (ret) {
2562 dev_err(swrm->dev, "%s: Failed to get num_ports\n", __func__);
2563 goto err_pdata_fail;
2564 }
2565 swrm->num_ports = num_ports;
2566
2567 if (!of_find_property(pdev->dev.of_node, "qcom,swr-port-mapping",
2568 &map_size)) {
2569 		dev_err(swrm->dev, "missing port mapping\n");
		ret = -EINVAL;
2570 		goto err_pdata_fail;
2571 }
2572
2573 map_length = map_size / (3 * sizeof(u32));
2574 if (num_ports > SWR_MSTR_PORT_LEN) {
2575 dev_err(&pdev->dev, "%s:invalid number of swr ports\n",
2576 __func__);
2577 ret = -EINVAL;
2578 goto err_pdata_fail;
2579 }
2580 temp = devm_kzalloc(&pdev->dev, map_size, GFP_KERNEL);
2581
2582 if (!temp) {
2583 ret = -ENOMEM;
2584 goto err_pdata_fail;
2585 }
2586 ret = of_property_read_u32_array(pdev->dev.of_node,
2587 "qcom,swr-port-mapping", temp, 3 * map_length);
2588 if (ret) {
2589 dev_err(swrm->dev, "%s: Failed to read port mapping\n",
2590 __func__);
2591 goto err_pdata_fail;
2592 }
2593
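	/*
	 * Each port-mapping entry is a <port number, port type, channel mask>
	 * triplet; entries sharing a port number fill successive channel
	 * slots of that master port.
	 */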
2594 for (i = 0; i < map_length; i++) {
2595 port_num = temp[3 * i];
2596 port_type = temp[3 * i + 1];
2597 ch_mask = temp[3 * i + 2];
2598
2599 if (port_num != old_port_num)
2600 ch_iter = 0;
2601 swrm->port_mapping[port_num][ch_iter].port_type = port_type;
2602 swrm->port_mapping[port_num][ch_iter++].ch_mask = ch_mask;
2603 old_port_num = port_num;
2604 }
2605 devm_kfree(&pdev->dev, temp);
2606
2607 swrm->reg_irq = pdata->reg_irq;
2608 swrm->master.read = swrm_read;
2609 swrm->master.write = swrm_write;
2610 swrm->master.bulk_write = swrm_bulk_write;
2611 swrm->master.get_logical_dev_num = swrm_get_logical_dev_num;
2612 swrm->master.connect_port = swrm_connect_port;
2613 swrm->master.disconnect_port = swrm_disconnect_port;
2614 swrm->master.slvdev_datapath_control = swrm_slvdev_datapath_control;
2615 swrm->master.remove_from_group = swrm_remove_from_group;
Sudheer Papothi6abd2de2018-09-05 05:57:04 +05302616 swrm->master.device_wakeup_vote = swrm_device_wakeup_vote;
2617 swrm->master.device_wakeup_unvote = swrm_device_wakeup_unvote;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302618 swrm->master.dev.parent = &pdev->dev;
2619 swrm->master.dev.of_node = pdev->dev.of_node;
2620 swrm->master.num_port = 0;
2621 swrm->rcmd_id = 0;
2622 swrm->wcmd_id = 0;
2623 swrm->slave_status = 0;
2624 swrm->num_rx_chs = 0;
2625 swrm->clk_ref_count = 0;
Vatsal Buchadf38c3e2019-03-11 17:10:23 +05302626 swrm->swr_irq_wakeup_capable = 0;
Laxminath Kasamb0f27cd2018-09-06 12:17:11 +05302627 swrm->mclk_freq = MCLK_FREQ;
Sudheer Papothiac0ae1c2019-10-17 05:38:40 +05302628 swrm->bus_clk = MCLK_FREQ;
Laxminath Kasam1df09a82018-09-20 18:57:49 +05302629 swrm->dev_up = true;
Ramprasad Katkam2a799b42018-10-04 20:23:28 +05302630 swrm->state = SWR_MSTR_UP;
Aditya Bavanaric034fad2018-11-12 22:55:11 +05302631 swrm->ipc_wakeup = false;
2632 swrm->ipc_wakeup_triggered = false;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302633 init_completion(&swrm->reset);
2634 init_completion(&swrm->broadcast);
Ramprasad Katkam6bce2e72018-10-10 19:20:13 +05302635 init_completion(&swrm->clk_off_complete);
Vatsal Bucha8bcaeab2019-10-31 11:45:36 +05302636 mutex_init(&swrm->irq_lock);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302637 mutex_init(&swrm->mlock);
2638 mutex_init(&swrm->reslock);
2639 mutex_init(&swrm->force_down_lock);
Ramprasad Katkam1f221262018-08-23 15:01:22 +05302640 mutex_init(&swrm->iolock);
Ramprasad Katkam6bce2e72018-10-10 19:20:13 +05302641 mutex_init(&swrm->clklock);
Laxminath Kasam1df09a82018-09-20 18:57:49 +05302642 mutex_init(&swrm->devlock);
Ramprasad Katkam57349872018-11-11 18:34:57 +05302643 mutex_init(&swrm->pm_lock);
Aditya Bavanarieb044612019-12-22 17:14:15 +05302644 mutex_init(&swrm->ssr_lock);
Ramprasad Katkam57349872018-11-11 18:34:57 +05302645 swrm->wlock_holders = 0;
2646 swrm->pm_state = SWRM_PM_SLEEPABLE;
2647 init_waitqueue_head(&swrm->pm_wq);
2648 pm_qos_add_request(&swrm->pm_qos_req,
2649 PM_QOS_CPU_DMA_LATENCY,
2650 PM_QOS_DEFAULT_VALUE);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302651
2652 for (i = 0 ; i < SWR_MSTR_PORT_LEN; i++)
2653 INIT_LIST_HEAD(&swrm->mport_cfg[i].port_req_list);
2654
Sudheer Papothi06f43412019-07-09 03:32:54 +05302655 /* Register LPASS core hw vote */
2656 lpass_core_hw_vote = devm_clk_get(&pdev->dev, "lpass_core_hw_vote");
2657 if (IS_ERR(lpass_core_hw_vote)) {
2658 ret = PTR_ERR(lpass_core_hw_vote);
2659 dev_dbg(&pdev->dev, "%s: clk get %s failed %d\n",
2660 __func__, "lpass_core_hw_vote", ret);
2661 lpass_core_hw_vote = NULL;
2662 ret = 0;
2663 }
2664 swrm->lpass_core_hw_vote = lpass_core_hw_vote;
2665
2666 /* Register LPASS audio core vote */
2667 lpass_core_audio = devm_clk_get(&pdev->dev, "lpass_audio_hw_vote");
2668 if (IS_ERR(lpass_core_audio)) {
2669 ret = PTR_ERR(lpass_core_audio);
2670 dev_dbg(&pdev->dev, "%s: clk get %s failed %d\n",
2671 __func__, "lpass_core_audio", ret);
2672 lpass_core_audio = NULL;
2673 ret = 0;
2674 }
2675 swrm->lpass_core_audio = lpass_core_audio;
2676
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302677 if (swrm->reg_irq) {
2678 ret = swrm->reg_irq(swrm->handle, swr_mstr_interrupt, swrm,
2679 SWR_IRQ_REGISTER);
2680 if (ret) {
2681 dev_err(&pdev->dev, "%s: IRQ register failed ret %d\n",
2682 __func__, ret);
2683 goto err_irq_fail;
2684 }
2685 } else {
2686 swrm->irq = platform_get_irq_byname(pdev, "swr_master_irq");
2687 if (swrm->irq < 0) {
2688 			dev_err(swrm->dev, "%s() error getting irq handle: %d\n",
2689 				__func__, swrm->irq);
			ret = swrm->irq;
Laxminath Kasamfbcaf322018-07-18 00:38:14 +05302690 			goto err_irq_fail;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302691 }
2692
2693 ret = request_threaded_irq(swrm->irq, NULL,
Sudheer Papothid19d0c52019-02-23 05:41:39 +05302694 swr_mstr_interrupt_v2,
Ramprasad Katkam83303512018-10-11 17:34:22 +05302695 IRQF_TRIGGER_RISING | IRQF_ONESHOT,
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302696 "swr_master_irq", swrm);
2697 if (ret) {
2698 dev_err(swrm->dev, "%s: Failed to request irq %d\n",
2699 __func__, ret);
2700 goto err_irq_fail;
2701 }
2702
2703 }
Vatsal Buchadf38c3e2019-03-11 17:10:23 +05302704 	/* Make inband tx interrupts wakeup capable for slave irq */
2705 ret = of_property_read_u32(pdev->dev.of_node,
2706 "qcom,swr-mstr-irq-wakeup-capable",
2707 &swrm->swr_irq_wakeup_capable);
2708 if (ret)
2709 dev_dbg(swrm->dev, "%s: swrm irq wakeup capable not defined\n",
2710 __func__);
2711 if (swrm->swr_irq_wakeup_capable)
2712 irq_set_irq_wake(swrm->irq, 1);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302713 ret = swr_register_master(&swrm->master);
2714 if (ret) {
2715 dev_err(&pdev->dev, "%s: error adding swr master\n", __func__);
2716 goto err_mstr_fail;
2717 }
2718
2719 /* Add devices registered with board-info as the
2720 * controller will be up now
2721 */
2722 swr_master_add_boarddevices(&swrm->master);
2723 mutex_lock(&swrm->mlock);
2724 swrm_clk_request(swrm, true);
Laxminath Kasam4696fff2019-11-26 16:07:11 +05302725 swrm->version = swr_master_read(swrm, SWRM_COMP_HW_VERSION);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302726 ret = swrm_master_init(swrm);
2727 if (ret < 0) {
2728 dev_err(&pdev->dev,
2729 			"%s: Error in master initialization, err %d\n",
2730 __func__, ret);
2731 mutex_unlock(&swrm->mlock);
Laxminath Kasamec8c9092019-12-17 13:12:58 +05302732 goto err_mstr_init_fail;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302733 }
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302734
2735 mutex_unlock(&swrm->mlock);
Laxminath Kasamf0128ef2018-08-31 15:15:09 +05302736 INIT_WORK(&swrm->wakeup_work, swrm_wakeup_work);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302737
2738 if (pdev->dev.of_node)
2739 of_register_swr_devices(&swrm->master);
2740
Sudheer Papothi96c842a2019-08-29 12:11:21 +05302741#ifdef CONFIG_DEBUG_FS
2742 swrm->debugfs_swrm_dent = debugfs_create_dir(dev_name(&pdev->dev), 0);
2743 if (!IS_ERR(swrm->debugfs_swrm_dent)) {
2744 swrm->debugfs_peek = debugfs_create_file("swrm_peek",
2745 S_IFREG | 0444, swrm->debugfs_swrm_dent,
2746 (void *) swrm, &swrm_debug_read_ops);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302747
Sudheer Papothi96c842a2019-08-29 12:11:21 +05302748 swrm->debugfs_poke = debugfs_create_file("swrm_poke",
2749 S_IFREG | 0444, swrm->debugfs_swrm_dent,
2750 (void *) swrm, &swrm_debug_write_ops);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302751
Sudheer Papothi96c842a2019-08-29 12:11:21 +05302752 swrm->debugfs_reg_dump = debugfs_create_file("swrm_reg_dump",
2753 S_IFREG | 0444, swrm->debugfs_swrm_dent,
2754 (void *) swrm,
2755 &swrm_debug_dump_ops);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302756 }
Sudheer Papothi96c842a2019-08-29 12:11:21 +05302757#endif
Vatsal Buchadf38c3e2019-03-11 17:10:23 +05302758 ret = device_init_wakeup(swrm->dev, true);
2759 if (ret) {
2760 dev_err(swrm->dev, "Device wakeup init failed: %d\n", ret);
2761 goto err_irq_wakeup_fail;
2762 }
2763
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302764 pm_runtime_set_autosuspend_delay(&pdev->dev, auto_suspend_timer);
2765 pm_runtime_use_autosuspend(&pdev->dev);
2766 pm_runtime_set_active(&pdev->dev);
2767 pm_runtime_enable(&pdev->dev);
2768 pm_runtime_mark_last_busy(&pdev->dev);
2769
Ramprasad Katkam68765ab2018-08-30 11:46:32 +05302770 INIT_WORK(&swrm->dc_presence_work, swrm_notify_work_fn);
2771 swrm->event_notifier.notifier_call = swrm_event_notify;
2772 msm_aud_evt_register_client(&swrm->event_notifier);
2773
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302774 return 0;
Vatsal Buchadf38c3e2019-03-11 17:10:23 +05302775err_irq_wakeup_fail:
2776 device_init_wakeup(swrm->dev, false);
Laxminath Kasamec8c9092019-12-17 13:12:58 +05302777err_mstr_init_fail:
2778 swr_unregister_master(&swrm->master);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302779err_mstr_fail:
2780 if (swrm->reg_irq)
2781 swrm->reg_irq(swrm->handle, swr_mstr_interrupt,
2782 swrm, SWR_IRQ_FREE);
2783 else if (swrm->irq)
2784 free_irq(swrm->irq, swrm);
2785err_irq_fail:
Vatsal Bucha8bcaeab2019-10-31 11:45:36 +05302786 mutex_destroy(&swrm->irq_lock);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302787 mutex_destroy(&swrm->mlock);
2788 mutex_destroy(&swrm->reslock);
2789 mutex_destroy(&swrm->force_down_lock);
Ramprasad Katkam1f221262018-08-23 15:01:22 +05302790 mutex_destroy(&swrm->iolock);
Ramprasad Katkam6bce2e72018-10-10 19:20:13 +05302791 mutex_destroy(&swrm->clklock);
Ramprasad Katkam57349872018-11-11 18:34:57 +05302792 mutex_destroy(&swrm->pm_lock);
Aditya Bavanarieb044612019-12-22 17:14:15 +05302793 mutex_destroy(&swrm->ssr_lock);
Ramprasad Katkam57349872018-11-11 18:34:57 +05302794 pm_qos_remove_request(&swrm->pm_qos_req);
2795
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302796err_pdata_fail:
2797err_memory_fail:
2798 return ret;
2799}
2800
2801static int swrm_remove(struct platform_device *pdev)
2802{
2803 struct swr_mstr_ctrl *swrm = platform_get_drvdata(pdev);
2804
2805 if (swrm->reg_irq)
2806 swrm->reg_irq(swrm->handle, swr_mstr_interrupt,
2807 swrm, SWR_IRQ_FREE);
2808 else if (swrm->irq)
2809 free_irq(swrm->irq, swrm);
Aditya Bavanaric034fad2018-11-12 22:55:11 +05302810 else if (swrm->wake_irq > 0)
2811 free_irq(swrm->wake_irq, swrm);
Vatsal Buchadf38c3e2019-03-11 17:10:23 +05302812 if (swrm->swr_irq_wakeup_capable)
2813 irq_set_irq_wake(swrm->irq, 0);
Ramprasad Katkam57349872018-11-11 18:34:57 +05302814 cancel_work_sync(&swrm->wakeup_work);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302815 pm_runtime_disable(&pdev->dev);
2816 pm_runtime_set_suspended(&pdev->dev);
2817 swr_unregister_master(&swrm->master);
Ramprasad Katkam68765ab2018-08-30 11:46:32 +05302818 msm_aud_evt_unregister_client(&swrm->event_notifier);
Vatsal Buchadf38c3e2019-03-11 17:10:23 +05302819 device_init_wakeup(swrm->dev, false);
Vatsal Bucha8bcaeab2019-10-31 11:45:36 +05302820 mutex_destroy(&swrm->irq_lock);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302821 mutex_destroy(&swrm->mlock);
2822 mutex_destroy(&swrm->reslock);
Ramprasad Katkam6bce2e72018-10-10 19:20:13 +05302823 mutex_destroy(&swrm->iolock);
2824 mutex_destroy(&swrm->clklock);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302825 mutex_destroy(&swrm->force_down_lock);
Ramprasad Katkam57349872018-11-11 18:34:57 +05302826 mutex_destroy(&swrm->pm_lock);
Aditya Bavanarieb044612019-12-22 17:14:15 +05302827 mutex_destroy(&swrm->ssr_lock);
Ramprasad Katkam57349872018-11-11 18:34:57 +05302828 pm_qos_remove_request(&swrm->pm_qos_req);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302829 devm_kfree(&pdev->dev, swrm);
2830 return 0;
2831}
2832
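/*
 * Restore the default interrupt mask and set the bus clock pause
 * bit in MCP_CFG.
 */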
2833static int swrm_clk_pause(struct swr_mstr_ctrl *swrm)
2834{
2835 u32 val;
2836
2837 dev_dbg(swrm->dev, "%s: state: %d\n", __func__, swrm->state);
2838 swr_master_write(swrm, SWRM_INTERRUPT_MASK_ADDR, 0x1FDFD);
2839 val = swr_master_read(swrm, SWRM_MCP_CFG_ADDR);
2840 val |= SWRM_MCP_CFG_BUS_CLK_PAUSE_BMSK;
2841 swr_master_write(swrm, SWRM_MCP_CFG_ADDR, val);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302842
2843 return 0;
2844}
2845
2846#ifdef CONFIG_PM
2847static int swrm_runtime_resume(struct device *dev)
2848{
2849 struct platform_device *pdev = to_platform_device(dev);
2850 struct swr_mstr_ctrl *swrm = platform_get_drvdata(pdev);
2851 int ret = 0;
Vatsal Buchae50b5002019-09-19 14:32:20 +05302852 bool swrm_clk_req_err = false;
Sudheer Papothi384addd2019-06-14 02:26:52 +05302853 bool hw_core_err = false;
2854 bool aud_core_err = false;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302855 struct swr_master *mstr = &swrm->master;
2856 struct swr_device *swr_dev;
2857
2858 dev_dbg(dev, "%s: pm_runtime: resume, state:%d\n",
2859 __func__, swrm->state);
Aditya Bavanarif500a1d2019-09-16 18:27:51 -07002860 trace_printk("%s: pm_runtime: resume, state:%d\n",
2861 __func__, swrm->state);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302862 mutex_lock(&swrm->reslock);
Ramprasad Katkam2a799b42018-10-04 20:23:28 +05302863
Sudheer Papothi384addd2019-06-14 02:26:52 +05302864 if (swrm_request_hw_vote(swrm, LPASS_HW_CORE, true)) {
2865 dev_err(dev, "%s:lpass core hw enable failed\n",
2866 __func__);
2867 hw_core_err = true;
2868 }
2869 if (swrm_request_hw_vote(swrm, LPASS_AUDIO_CORE, true)) {
2870 dev_err(dev, "%s:lpass audio hw enable failed\n",
2871 __func__);
2872 aud_core_err = true;
Karthikeyan Manif6821902019-05-21 17:31:24 -07002873 }
Sudheer Papothi66d6fd12019-03-27 17:34:48 +05302874
Ramprasad Katkam2a799b42018-10-04 20:23:28 +05302875 if ((swrm->state == SWR_MSTR_DOWN) ||
2876 (swrm->state == SWR_MSTR_SSR && swrm->dev_up)) {
Aditya Bavanaric034fad2018-11-12 22:55:11 +05302877 if (swrm->clk_stop_mode0_supp) {
Vatsal Bucha8bcaeab2019-10-31 11:45:36 +05302878 if (swrm->wake_irq > 0) {
2879 if (unlikely(!irq_get_irq_data
2880 (swrm->wake_irq))) {
2881 pr_err("%s: irq data is NULL\n",
2882 __func__);
2883 mutex_unlock(&swrm->reslock);
2884 return IRQ_NONE;
2885 }
2886 mutex_lock(&swrm->irq_lock);
2887 if (!irqd_irq_disabled(
2888 irq_get_irq_data(swrm->wake_irq)))
2889 disable_irq_nosync(swrm->wake_irq);
2890 mutex_unlock(&swrm->irq_lock);
2891 }
Aditya Bavanaric034fad2018-11-12 22:55:11 +05302892 if (swrm->ipc_wakeup)
2893 msm_aud_evt_blocking_notifier_call_chain(
2894 SWR_WAKE_IRQ_DEREGISTER, (void *)swrm);
Laxminath Kasamf0128ef2018-08-31 15:15:09 +05302895 }
2896
Vatsal Bucha63b193f2019-08-12 11:56:55 +05302897 if (swrm_clk_request(swrm, true)) {
2898 /*
2899 * Set autosuspend timer to 1 for
2900 * master to enter into suspend.
2901 */
Vatsal Buchae50b5002019-09-19 14:32:20 +05302902 swrm_clk_req_err = true;
Ramprasad Katkam2a799b42018-10-04 20:23:28 +05302903 goto exit;
Vatsal Bucha63b193f2019-08-12 11:56:55 +05302904 }
Ramprasad Katkam2a799b42018-10-04 20:23:28 +05302905 if (!swrm->clk_stop_mode0_supp || swrm->state == SWR_MSTR_SSR) {
Ramprasad Katkam14f47cc2018-07-25 17:20:18 +05302906 list_for_each_entry(swr_dev, &mstr->devices, dev_list) {
2907 ret = swr_device_up(swr_dev);
Sudheer Papothi79c90752019-04-23 06:09:52 +05302908 if (ret == -ENODEV) {
2909 dev_dbg(dev,
2910 "%s slave device up not implemented\n",
2911 __func__);
Aditya Bavanarif500a1d2019-09-16 18:27:51 -07002912 trace_printk(
2913 "%s slave device up not implemented\n",
2914 __func__);
Sudheer Papothi79c90752019-04-23 06:09:52 +05302915 ret = 0;
2916 } else if (ret) {
Ramprasad Katkam14f47cc2018-07-25 17:20:18 +05302917 dev_err(dev,
2918 "%s: failed to wakeup swr dev %d\n",
2919 __func__, swr_dev->dev_num);
2920 swrm_clk_request(swrm, false);
2921 goto exit;
2922 }
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302923 }
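			/* Soft-reset the controller and re-run master initialization */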
Ramprasad Katkam48b49b22018-10-01 20:12:46 +05302924 swr_master_write(swrm, SWRM_COMP_SW_RESET, 0x01);
2925 swr_master_write(swrm, SWRM_COMP_SW_RESET, 0x01);
2926 swrm_master_init(swrm);
Ramprasad Katkam2e85a542019-04-26 18:28:31 +05302927 /* wait for hw enumeration to complete */
2928 usleep_range(100, 105);
Laxminath Kasame2291972019-11-08 14:51:59 +05302929 if (!swrm_check_link_status(swrm, 0x1))
Laxminath Kasam696b14b2019-12-03 22:07:34 +05302930 dev_dbg(dev, "%s:failed in connecting, ssr?\n",
2931 __func__);
Ramprasad Katkam2a799b42018-10-04 20:23:28 +05302932 swrm_cmd_fifo_wr_cmd(swrm, 0x4, 0xF, 0x0,
2933 SWRS_SCP_INT_STATUS_MASK_1);
Karthikeyan Manif6821902019-05-21 17:31:24 -07002934 if (swrm->state == SWR_MSTR_SSR) {
2935 mutex_unlock(&swrm->reslock);
2936 enable_bank_switch(swrm, 0, SWR_ROW_50, SWR_MIN_COL);
2937 mutex_lock(&swrm->reslock);
2938 }
Ramprasad Katkam14f47cc2018-07-25 17:20:18 +05302939 } else {
2940 			/* wake up from clock stop */
2941 swr_master_write(swrm, SWRM_MCP_BUS_CTRL_ADDR, 0x2);
Sudheer Papothi55fa5972019-09-28 02:50:27 +05302942 /* clear and enable bus clash interrupt */
2943 swr_master_write(swrm, SWRM_INTERRUPT_CLEAR, 0x08);
2944 swrm->intr_mask |= 0x08;
2945 swr_master_write(swrm, SWRM_INTERRUPT_MASK_ADDR,
2946 swrm->intr_mask);
2947 swr_master_write(swrm,
2948 SWR_MSTR_RX_SWRM_CPU_INTERRUPT_EN,
2949 swrm->intr_mask);
Ramprasad Katkam14f47cc2018-07-25 17:20:18 +05302950 usleep_range(100, 105);
Laxminath Kasame2291972019-11-08 14:51:59 +05302951 if (!swrm_check_link_status(swrm, 0x1))
Laxminath Kasam696b14b2019-12-03 22:07:34 +05302952 dev_dbg(dev, "%s:failed in connecting, ssr?\n",
2953 __func__);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302954 }
Ramprasad Katkam2a799b42018-10-04 20:23:28 +05302955 swrm->state = SWR_MSTR_UP;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302956 }
2957exit:
Sudheer Papothi384addd2019-06-14 02:26:52 +05302958 if (!aud_core_err)
2959 swrm_request_hw_vote(swrm, LPASS_AUDIO_CORE, false);
2960 if (!hw_core_err)
2961 swrm_request_hw_vote(swrm, LPASS_HW_CORE, false);
Vatsal Buchae50b5002019-09-19 14:32:20 +05302962 if (swrm_clk_req_err)
2963 pm_runtime_set_autosuspend_delay(&pdev->dev,
2964 ERR_AUTO_SUSPEND_TIMER_VAL);
2965 else
2966 pm_runtime_set_autosuspend_delay(&pdev->dev,
2967 auto_suspend_timer);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302968 mutex_unlock(&swrm->reslock);
Sudheer Papothi384addd2019-06-14 02:26:52 +05302969
Aditya Bavanarif500a1d2019-09-16 18:27:51 -07002970 trace_printk("%s: pm_runtime: resume done, state:%d\n",
2971 __func__, swrm->state);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302972 return ret;
2973}
2974
2975static int swrm_runtime_suspend(struct device *dev)
2976{
2977 struct platform_device *pdev = to_platform_device(dev);
2978 struct swr_mstr_ctrl *swrm = platform_get_drvdata(pdev);
2979 int ret = 0;
Sudheer Papothi384addd2019-06-14 02:26:52 +05302980 bool hw_core_err = false;
2981 bool aud_core_err = false;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302982 struct swr_master *mstr = &swrm->master;
2983 struct swr_device *swr_dev;
2984 int current_state = 0;
2985
Aditya Bavanarif500a1d2019-09-16 18:27:51 -07002986 trace_printk("%s: pm_runtime: suspend state: %d\n",
2987 __func__, swrm->state);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302988 dev_dbg(dev, "%s: pm_runtime: suspend state: %d\n",
2989 __func__, swrm->state);
2990 mutex_lock(&swrm->reslock);
2991 mutex_lock(&swrm->force_down_lock);
2992 current_state = swrm->state;
2993 mutex_unlock(&swrm->force_down_lock);
Sudheer Papothi384addd2019-06-14 02:26:52 +05302994
2995 if (swrm_request_hw_vote(swrm, LPASS_HW_CORE, true)) {
2996 dev_err(dev, "%s:lpass core hw enable failed\n",
2997 __func__);
2998 hw_core_err = true;
2999 }
3000 if (swrm_request_hw_vote(swrm, LPASS_AUDIO_CORE, true)) {
3001 dev_err(dev, "%s:lpass audio hw enable failed\n",
3002 __func__);
3003 aud_core_err = true;
Karthikeyan Manif6821902019-05-21 17:31:24 -07003004 }
Sudheer Papothi66d6fd12019-03-27 17:34:48 +05303005
Ramprasad Katkam2a799b42018-10-04 20:23:28 +05303006 if ((current_state == SWR_MSTR_UP) ||
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05303007 (current_state == SWR_MSTR_SSR)) {
3008
3009 if ((current_state != SWR_MSTR_SSR) &&
3010 swrm_is_port_en(&swrm->master)) {
3011 dev_dbg(dev, "%s ports are enabled\n", __func__);
Aditya Bavanarif500a1d2019-09-16 18:27:51 -07003012 trace_printk("%s ports are enabled\n", __func__);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05303013 ret = -EBUSY;
3014 goto exit;
3015 }
Ramprasad Katkam2a799b42018-10-04 20:23:28 +05303016 if (!swrm->clk_stop_mode0_supp || swrm->state == SWR_MSTR_SSR) {
Aditya Bavanarif500a1d2019-09-16 18:27:51 -07003017 dev_err(dev, "%s: clk stop mode not supported or SSR entry\n",
3018 __func__);
Sudheer Papothi06f43412019-07-09 03:32:54 +05303019 mutex_unlock(&swrm->reslock);
Ramprasad Katkamb4c7c682018-12-19 18:58:36 +05303020 enable_bank_switch(swrm, 0, SWR_ROW_50, SWR_MIN_COL);
Sudheer Papothi06f43412019-07-09 03:32:54 +05303021 mutex_lock(&swrm->reslock);
Sudheer Papothi1f2565b2020-01-30 05:22:47 +05303022 if (!swrm->clk_stop_mode0_supp) {
3023 swrm_clk_pause(swrm);
3024 } else {
3025 /* Mask bus clash interrupt */
3026 swrm->intr_mask &= ~((u32)0x08);
3027 swr_master_write(swrm,
3028 SWRM_INTERRUPT_MASK_ADDR,
3029 swrm->intr_mask);
3030 swr_master_write(swrm,
3031 SWR_MSTR_RX_SWRM_CPU_INTERRUPT_EN,
3032 swrm->intr_mask);
3033 mutex_unlock(&swrm->reslock);
3034 /* clock stop sequence */
3035 swrm_cmd_fifo_wr_cmd(swrm, 0x2, 0xF, 0xF,
3036 SWRS_SCP_CONTROL);
3037 mutex_lock(&swrm->reslock);
3038 }
Ramprasad Katkam14f47cc2018-07-25 17:20:18 +05303039 swr_master_write(swrm, SWRM_COMP_CFG_ADDR, 0x00);
3040 list_for_each_entry(swr_dev, &mstr->devices, dev_list) {
3041 ret = swr_device_down(swr_dev);
Sudheer Papothi79c90752019-04-23 06:09:52 +05303042 if (ret == -ENODEV) {
3043 dev_dbg_ratelimited(dev,
3044 "%s slave device down not implemented\n",
Aditya Bavanarif500a1d2019-09-16 18:27:51 -07003045 __func__);
3046 trace_printk(
3047 "%s slave device down not implemented\n",
3048 __func__);
Sudheer Papothi79c90752019-04-23 06:09:52 +05303049 ret = 0;
3050 } else if (ret) {
Ramprasad Katkam14f47cc2018-07-25 17:20:18 +05303051 dev_err(dev,
3052 "%s: failed to shutdown swr dev %d\n",
3053 __func__, swr_dev->dev_num);
Aditya Bavanarif500a1d2019-09-16 18:27:51 -07003054 trace_printk(
3055 "%s: failed to shutdown swr dev %d\n",
3056 __func__, swr_dev->dev_num);
Ramprasad Katkam14f47cc2018-07-25 17:20:18 +05303057 goto exit;
3058 }
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05303059 }
Aditya Bavanarif500a1d2019-09-16 18:27:51 -07003060 trace_printk("%s: clk stop mode not supported or SSR exit\n",
3061 __func__);
Ramprasad Katkam14f47cc2018-07-25 17:20:18 +05303062 } else {
Sudheer Papothi55fa5972019-09-28 02:50:27 +05303063 /* Mask bus clash interrupt */
3064 swrm->intr_mask &= ~((u32)0x08);
3065 swr_master_write(swrm, SWRM_INTERRUPT_MASK_ADDR,
3066 swrm->intr_mask);
3067 swr_master_write(swrm,
3068 SWR_MSTR_RX_SWRM_CPU_INTERRUPT_EN,
3069 swrm->intr_mask);
Sudheer Papothi384addd2019-06-14 02:26:52 +05303070 mutex_unlock(&swrm->reslock);
Ramprasad Katkam14f47cc2018-07-25 17:20:18 +05303071 /* clock stop sequence */
3072 swrm_cmd_fifo_wr_cmd(swrm, 0x2, 0xF, 0xF,
3073 SWRS_SCP_CONTROL);
Sudheer Papothi384addd2019-06-14 02:26:52 +05303074 mutex_lock(&swrm->reslock);
Ramprasad Katkam14f47cc2018-07-25 17:20:18 +05303075 usleep_range(100, 105);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05303076 }
Laxminath Kasamcafe0732019-11-20 17:31:58 +05303077 if (!swrm_check_link_status(swrm, 0x0))
Laxminath Kasam696b14b2019-12-03 22:07:34 +05303078 dev_dbg(dev, "%s:failed in disconnecting, ssr?\n",
3079 __func__);
Karthikeyan Mani1d750fe2019-09-06 14:36:09 -07003080 ret = swrm_clk_request(swrm, false);
3081 if (ret) {
3082			dev_err(dev, "%s: swrm clk failed\n", __func__);
3083 ret = 0;
3084 goto exit;
3085 }
Ramprasad Katkam6a3050d2018-10-10 02:08:00 +05303086
Aditya Bavanaric034fad2018-11-12 22:55:11 +05303087 if (swrm->clk_stop_mode0_supp) {
3088 if (swrm->wake_irq > 0) {
3089 enable_irq(swrm->wake_irq);
3090 } else if (swrm->ipc_wakeup) {
3091 msm_aud_evt_blocking_notifier_call_chain(
3092 SWR_WAKE_IRQ_REGISTER, (void *)swrm);
3093 swrm->ipc_wakeup_triggered = false;
3094 }
Ramprasad Katkam6a3050d2018-10-10 02:08:00 +05303095 }
3096
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05303097 }
Ramprasad Katkam2a799b42018-10-04 20:23:28 +05303098 /* Retain SSR state until resume */
3099 if (current_state != SWR_MSTR_SSR)
3100 swrm->state = SWR_MSTR_DOWN;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05303101exit:
Sudheer Papothi384addd2019-06-14 02:26:52 +05303102 if (!aud_core_err)
3103 swrm_request_hw_vote(swrm, LPASS_AUDIO_CORE, false);
3104 if (!hw_core_err)
3105 swrm_request_hw_vote(swrm, LPASS_HW_CORE, false);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05303106 mutex_unlock(&swrm->reslock);
Aditya Bavanarif500a1d2019-09-16 18:27:51 -07003107 trace_printk("%s: pm_runtime: suspend done state: %d\n",
3108 __func__, swrm->state);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05303109 return ret;
3110}
3111#endif /* CONFIG_PM */
3112
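/*
 * swrm_device_suspend - suspend the controller outside of the runtime PM
 * framework. If the device is not already runtime suspended, call
 * swrm_runtime_suspend() directly and resync the runtime PM status
 * (disable/set_suspended/enable) so the core does not invoke the suspend
 * callback again.
 */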
Sudheer Papothi06f43412019-07-09 03:32:54 +05303113static int swrm_device_suspend(struct device *dev)
3114{
3115 struct platform_device *pdev = to_platform_device(dev);
3116 struct swr_mstr_ctrl *swrm = platform_get_drvdata(pdev);
3117 int ret = 0;
3118
3119 dev_dbg(dev, "%s: swrm state: %d\n", __func__, swrm->state);
Aditya Bavanarif500a1d2019-09-16 18:27:51 -07003120 trace_printk("%s: swrm state: %d\n", __func__, swrm->state);
Sudheer Papothi06f43412019-07-09 03:32:54 +05303121 if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) {
3122 ret = swrm_runtime_suspend(dev);
3123 if (!ret) {
3124 pm_runtime_disable(dev);
3125 pm_runtime_set_suspended(dev);
3126 pm_runtime_enable(dev);
3127 }
3128 }
3129
3130 return 0;
3131}
3132
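/*
 * swrm_device_down - mark the master as being in SSR and suspend it.
 * Called when the parent device goes down or a subsystem restart occurs.
 */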
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05303133static int swrm_device_down(struct device *dev)
3134{
3135 struct platform_device *pdev = to_platform_device(dev);
3136 struct swr_mstr_ctrl *swrm = platform_get_drvdata(pdev);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05303137
3138 dev_dbg(dev, "%s: swrm state: %d\n", __func__, swrm->state);
Aditya Bavanarif500a1d2019-09-16 18:27:51 -07003139 trace_printk("%s: swrm state: %d\n", __func__, swrm->state);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05303140
3141 mutex_lock(&swrm->force_down_lock);
3142 swrm->state = SWR_MSTR_SSR;
3143 mutex_unlock(&swrm->force_down_lock);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05303144
Ramprasad Katkam2e85a542019-04-26 18:28:31 +05303145 swrm_device_suspend(dev);
Laxminath Kasam1df09a82018-09-20 18:57:49 +05303146 return 0;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05303147}
3148
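/*
 * swrm_register_wake_irq - set up the wakeup interrupt used to detect
 * slave activity while the link is in clock stop. Used only when IPC
 * wakeup is not in use: the irq is taken from the "qcom,swr-wakeup-irq"
 * gpio when present, otherwise from the "swr_wake_irq" platform resource,
 * and is marked as a wakeup source.
 */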
Aditya Bavanaric034fad2018-11-12 22:55:11 +05303149int swrm_register_wake_irq(struct swr_mstr_ctrl *swrm)
3150{
3151 int ret = 0;
Laxminath Kasama60239e2019-01-10 14:43:03 +05303152 int irq, dir_apps_irq;
Aditya Bavanaric034fad2018-11-12 22:55:11 +05303153
3154 if (!swrm->ipc_wakeup) {
Laxminath Kasama60239e2019-01-10 14:43:03 +05303155 irq = of_get_named_gpio(swrm->dev->of_node,
3156 "qcom,swr-wakeup-irq", 0);
3157 if (gpio_is_valid(irq)) {
3158 swrm->wake_irq = gpio_to_irq(irq);
3159 if (swrm->wake_irq < 0) {
3160 dev_err(swrm->dev,
3161 "Unable to configure irq\n");
3162 return swrm->wake_irq;
3163 }
3164 } else {
3165 dir_apps_irq = platform_get_irq_byname(swrm->pdev,
3166 "swr_wake_irq");
3167 if (dir_apps_irq < 0) {
3168 dev_err(swrm->dev,
3169 "TLMM connect gpio not found\n");
3170 return -EINVAL;
3171 }
3172 swrm->wake_irq = dir_apps_irq;
Aditya Bavanaric034fad2018-11-12 22:55:11 +05303173 }
Aditya Bavanaric034fad2018-11-12 22:55:11 +05303174 ret = request_threaded_irq(swrm->wake_irq, NULL,
3175 swrm_wakeup_interrupt,
3176 IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
3177 "swr_wake_irq", swrm);
3178 if (ret) {
3179 dev_err(swrm->dev, "%s: Failed to request irq %d\n",
3180 __func__, ret);
3181 return -EINVAL;
3182 }
Aditya Bavanari3517b112018-12-03 13:26:59 +05303183 irq_set_irq_wake(swrm->wake_irq, 1);
Aditya Bavanaric034fad2018-11-12 22:55:11 +05303184 }
3185 return ret;
3186}
3187
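/*
 * swrm_alloc_port_mem - allocate the per-usecase port parameter tables.
 * The top-level array (one slot per usecase, SWR_UC_MAX entries) is
 * allocated on first call; the table for usecase @uc holds @size entries.
 */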
Sudheer Papothi3d1596e2018-10-27 06:19:18 +05303188static int swrm_alloc_port_mem(struct device *dev, struct swr_mstr_ctrl *swrm,
3189 u32 uc, u32 size)
3190{
3191 if (!swrm->port_param) {
3192 swrm->port_param = devm_kzalloc(dev,
3193 sizeof(swrm->port_param) * SWR_UC_MAX,
3194 GFP_KERNEL);
3195 if (!swrm->port_param)
3196 return -ENOMEM;
3197 }
3198 if (!swrm->port_param[uc]) {
3199 swrm->port_param[uc] = devm_kcalloc(dev, size,
3200 sizeof(struct port_params),
3201 GFP_KERNEL);
3202 if (!swrm->port_param[uc])
3203 return -ENOMEM;
3204 } else {
3205 dev_err_ratelimited(swrm->dev, "%s: called more than once\n",
3206 __func__);
3207 }
3208
3209 return 0;
3210}
3211
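/*
 * swrm_copy_port_config - copy @size port parameter entries for the
 * usecase given in @port_cfg into the table allocated by
 * swrm_alloc_port_mem().
 */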
3212static int swrm_copy_port_config(struct swr_mstr_ctrl *swrm,
3213 struct swrm_port_config *port_cfg,
3214 u32 size)
3215{
3216 int idx;
3217 struct port_params *params;
3218 int uc = port_cfg->uc;
3219 int ret = 0;
3220
3221 for (idx = 0; idx < size; idx++) {
3222 params = &((struct port_params *)port_cfg->params)[idx];
3223 if (!params) {
3224 dev_err(swrm->dev, "%s: Invalid params\n", __func__);
3225 ret = -EINVAL;
3226 break;
3227 }
3228 memcpy(&swrm->port_param[uc][idx], params,
3229 sizeof(struct port_params));
3230 }
3231
3232 return ret;
3233}
3234
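/*
 * Typical call from the parent (codec/machine) driver, sketched with
 * hypothetical names; for SWR_CLK_FREQ, @data must point to an int
 * holding the new master clock rate (e.g. 9.6 MHz):
 *
 *	int mclk_freq = 9600000;
 *
 *	swrm_wcd_notify(swr_master_pdev, SWR_CLK_FREQ, &mclk_freq);
 */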
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05303235/**
3236 * swrm_wcd_notify - parent device can notify the soundwire master through
3237 * this function
3238 * @pdev: pointer to platform device structure
3239 * @id: command id from parent to the soundwire master
3240 * @data: data from parent device to soundwire master
3241 */
3242int swrm_wcd_notify(struct platform_device *pdev, u32 id, void *data)
3243{
3244 struct swr_mstr_ctrl *swrm;
3245 int ret = 0;
3246 struct swr_master *mstr;
3247 struct swr_device *swr_dev;
Sudheer Papothi3d1596e2018-10-27 06:19:18 +05303248 struct swrm_port_config *port_cfg;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05303249
3250 if (!pdev) {
3251 pr_err("%s: pdev is NULL\n", __func__);
3252 return -EINVAL;
3253 }
3254 swrm = platform_get_drvdata(pdev);
3255 if (!swrm) {
3256 dev_err(&pdev->dev, "%s: swrm is NULL\n", __func__);
3257 return -EINVAL;
3258 }
3259 mstr = &swrm->master;
3260
3261 switch (id) {
Sudheer Papothi06f43412019-07-09 03:32:54 +05303262 case SWR_REQ_CLK_SWITCH:
3263		/* This puts soundwire in clock stop mode and disables the
3264		 * clocks if there is no active usecase running, so that the
3265		 * next activity on soundwire requests the clock from the new
3266		 * clock source.
3267 */
Sudheer Papothi8c50f2f2019-12-05 01:14:47 +05303268 if (!data) {
3269 dev_err(swrm->dev, "%s: data is NULL for id:%d\n",
3270 __func__, id);
3271 ret = -EINVAL;
3272 break;
3273 }
Sudheer Papothi06f43412019-07-09 03:32:54 +05303274 mutex_lock(&swrm->mlock);
Sudheer Papothi8c50f2f2019-12-05 01:14:47 +05303275 if (swrm->clk_src != *(int *)data) {
3276 if (swrm->state == SWR_MSTR_UP)
3277 swrm_device_suspend(&pdev->dev);
3278 swrm->clk_src = *(int *)data;
3279 }
Sudheer Papothi06f43412019-07-09 03:32:54 +05303280 mutex_unlock(&swrm->mlock);
3281 break;
Laxminath Kasamb0f27cd2018-09-06 12:17:11 +05303282 case SWR_CLK_FREQ:
3283 if (!data) {
3284 dev_err(swrm->dev, "%s: data is NULL\n", __func__);
3285 ret = -EINVAL;
3286 } else {
3287 mutex_lock(&swrm->mlock);
Ramprasad Katkam2e85a542019-04-26 18:28:31 +05303288 if (swrm->mclk_freq != *(int *)data) {
3289 dev_dbg(swrm->dev, "%s: freq change: force mstr down\n", __func__);
3290 if (swrm->state == SWR_MSTR_DOWN)
3291 dev_dbg(swrm->dev, "%s:SWR master is already Down:%d\n",
3292 __func__, swrm->state);
Sudheer Papothi8a8b12b2019-11-15 23:06:41 +05303293 else {
3294 swrm->mclk_freq = *(int *)data;
3295 swrm->bus_clk = swrm->mclk_freq;
3296 swrm_switch_frame_shape(swrm,
3297 swrm->bus_clk);
Ramprasad Katkam2e85a542019-04-26 18:28:31 +05303298 swrm_device_suspend(&pdev->dev);
Sudheer Papothi8a8b12b2019-11-15 23:06:41 +05303299 }
Prasad Kumpatla386df4e2019-10-11 18:23:16 +05303300 /*
3301				 * Add a delay to ensure the clock release
3302				 * happens; if an interrupt triggered clock
3303				 * stop, wait for it to exit.
3304 */
3305 usleep_range(10000, 10500);
Ramprasad Katkam2e85a542019-04-26 18:28:31 +05303306 }
Laxminath Kasamb0f27cd2018-09-06 12:17:11 +05303307 swrm->mclk_freq = *(int *)data;
Sudheer Papothiac0ae1c2019-10-17 05:38:40 +05303308 swrm->bus_clk = swrm->mclk_freq;
Laxminath Kasamb0f27cd2018-09-06 12:17:11 +05303309 mutex_unlock(&swrm->mlock);
3310 }
3311 break;
Laxminath Kasam1df09a82018-09-20 18:57:49 +05303312 case SWR_DEVICE_SSR_DOWN:
Aditya Bavanarif500a1d2019-09-16 18:27:51 -07003313 trace_printk("%s: swr device down called\n", __func__);
Aditya Bavanarieb044612019-12-22 17:14:15 +05303314 mutex_lock(&swrm->ssr_lock);
Prasad Kumpatla71fef462020-01-31 21:38:58 +05303315 mutex_lock(&swrm->mlock);
3316 if (swrm->state == SWR_MSTR_DOWN)
3317 dev_dbg(swrm->dev, "%s:SWR master is already Down:%d\n",
3318 __func__, swrm->state);
3319 else
3320 swrm_device_down(&pdev->dev);
Laxminath Kasam1df09a82018-09-20 18:57:49 +05303321 mutex_lock(&swrm->devlock);
3322 swrm->dev_up = false;
3323 mutex_unlock(&swrm->devlock);
Ramprasad Katkam2a799b42018-10-04 20:23:28 +05303324 mutex_lock(&swrm->reslock);
3325 swrm->state = SWR_MSTR_SSR;
3326 mutex_unlock(&swrm->reslock);
Prasad Kumpatla71fef462020-01-31 21:38:58 +05303327 mutex_unlock(&swrm->mlock);
Aditya Bavanarieb044612019-12-22 17:14:15 +05303328 mutex_unlock(&swrm->ssr_lock);
Laxminath Kasam1df09a82018-09-20 18:57:49 +05303329 break;
3330 case SWR_DEVICE_SSR_UP:
Ramprasad Katkam6bce2e72018-10-10 19:20:13 +05303331 /* wait for clk voting to be zero */
Aditya Bavanarif500a1d2019-09-16 18:27:51 -07003332 trace_printk("%s: swr device up called\n", __func__);
Ramprasad Katkam7f6462e2018-11-06 11:51:22 +05303333 reinit_completion(&swrm->clk_off_complete);
Ramprasad Katkam6bce2e72018-10-10 19:20:13 +05303334 if (swrm->clk_ref_count &&
3335 !wait_for_completion_timeout(&swrm->clk_off_complete,
Ramprasad Katkamc87efeb2018-12-12 19:26:19 +05303336 msecs_to_jiffies(500)))
Ramprasad Katkam6bce2e72018-10-10 19:20:13 +05303337 dev_err(swrm->dev, "%s: clock voting not zero\n",
3338 __func__);
3339
Laxminath Kasam1df09a82018-09-20 18:57:49 +05303340 mutex_lock(&swrm->devlock);
3341 swrm->dev_up = true;
3342 mutex_unlock(&swrm->devlock);
3343 break;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05303344 case SWR_DEVICE_DOWN:
3345 dev_dbg(swrm->dev, "%s: swr master down called\n", __func__);
Aditya Bavanarif500a1d2019-09-16 18:27:51 -07003346 trace_printk("%s: swr master down called\n", __func__);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05303347 mutex_lock(&swrm->mlock);
Ramprasad Katkam2a799b42018-10-04 20:23:28 +05303348 if (swrm->state == SWR_MSTR_DOWN)
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05303349 dev_dbg(swrm->dev, "%s:SWR master is already Down:%d\n",
3350 __func__, swrm->state);
3351 else
3352 swrm_device_down(&pdev->dev);
3353 mutex_unlock(&swrm->mlock);
3354 break;
3355 case SWR_DEVICE_UP:
3356 dev_dbg(swrm->dev, "%s: swr master up called\n", __func__);
Aditya Bavanarif500a1d2019-09-16 18:27:51 -07003357 trace_printk("%s: swr master up called\n", __func__);
Ramprasad Katkam0fed92f2018-11-08 14:22:22 +05303358 mutex_lock(&swrm->devlock);
3359 if (!swrm->dev_up) {
3360 dev_dbg(swrm->dev, "SSR not complete yet\n");
3361 mutex_unlock(&swrm->devlock);
3362 return -EBUSY;
3363 }
3364 mutex_unlock(&swrm->devlock);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05303365 mutex_lock(&swrm->mlock);
Ramprasad Katkam86c45e02018-10-16 19:31:51 +05303366 pm_runtime_mark_last_busy(&pdev->dev);
3367 pm_runtime_get_sync(&pdev->dev);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05303368 mutex_lock(&swrm->reslock);
Ramprasad Katkam86c45e02018-10-16 19:31:51 +05303369 list_for_each_entry(swr_dev, &mstr->devices, dev_list) {
3370 ret = swr_reset_device(swr_dev);
3371 if (ret) {
3372 dev_err(swrm->dev,
3373 "%s: failed to reset swr device %d\n",
3374 __func__, swr_dev->dev_num);
3375 swrm_clk_request(swrm, false);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05303376 }
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05303377 }
Ramprasad Katkam86c45e02018-10-16 19:31:51 +05303378 pm_runtime_mark_last_busy(&pdev->dev);
3379 pm_runtime_put_autosuspend(&pdev->dev);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05303380 mutex_unlock(&swrm->reslock);
3381 mutex_unlock(&swrm->mlock);
3382 break;
3383 case SWR_SET_NUM_RX_CH:
3384 if (!data) {
3385 dev_err(swrm->dev, "%s: data is NULL\n", __func__);
3386 ret = -EINVAL;
3387 } else {
3388 mutex_lock(&swrm->mlock);
3389 swrm->num_rx_chs = *(int *)data;
3390 if ((swrm->num_rx_chs > 1) && !swrm->num_cfg_devs) {
3391 list_for_each_entry(swr_dev, &mstr->devices,
3392 dev_list) {
3393 ret = swr_set_device_group(swr_dev,
3394 SWR_BROADCAST);
3395 if (ret)
3396 dev_err(swrm->dev,
3397 "%s: set num ch failed\n",
3398 __func__);
3399 }
3400 } else {
3401 list_for_each_entry(swr_dev, &mstr->devices,
3402 dev_list) {
3403 ret = swr_set_device_group(swr_dev,
3404 SWR_GROUP_NONE);
3405 if (ret)
3406 dev_err(swrm->dev,
3407 "%s: set num ch failed\n",
3408 __func__);
3409 }
3410 }
3411 mutex_unlock(&swrm->mlock);
3412 }
3413 break;
Aditya Bavanaric034fad2018-11-12 22:55:11 +05303414 case SWR_REGISTER_WAKE_IRQ:
3415 if (!data) {
3416 dev_err(swrm->dev, "%s: reg wake irq data is NULL\n",
3417 __func__);
3418 ret = -EINVAL;
3419 } else {
3420 mutex_lock(&swrm->mlock);
3421 swrm->ipc_wakeup = *(u32 *)data;
3422 ret = swrm_register_wake_irq(swrm);
3423 if (ret)
3424 dev_err(swrm->dev, "%s: register wake_irq failed\n",
3425 __func__);
3426 mutex_unlock(&swrm->mlock);
3427 }
3428 break;
Sudheer Papothi72ee2642019-08-08 05:15:17 +05303429 case SWR_REGISTER_WAKEUP:
3430 msm_aud_evt_blocking_notifier_call_chain(
3431 SWR_WAKE_IRQ_REGISTER, (void *)swrm);
3432 break;
3433 case SWR_DEREGISTER_WAKEUP:
3434 msm_aud_evt_blocking_notifier_call_chain(
3435 SWR_WAKE_IRQ_DEREGISTER, (void *)swrm);
3436 break;
Sudheer Papothi3d1596e2018-10-27 06:19:18 +05303437 case SWR_SET_PORT_MAP:
3438 if (!data) {
3439 dev_err(swrm->dev, "%s: data is NULL for id=%d\n",
3440 __func__, id);
3441 ret = -EINVAL;
3442 } else {
3443 mutex_lock(&swrm->mlock);
3444 port_cfg = (struct swrm_port_config *)data;
3445 if (!port_cfg->size) {
3446 ret = -EINVAL;
3447 goto done;
3448 }
3449 ret = swrm_alloc_port_mem(&pdev->dev, swrm,
3450 port_cfg->uc, port_cfg->size);
3451 if (!ret)
3452 swrm_copy_port_config(swrm, port_cfg,
3453 port_cfg->size);
3454done:
3455 mutex_unlock(&swrm->mlock);
3456 }
3457 break;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05303458 default:
3459 dev_err(swrm->dev, "%s: swr master unknown id %d\n",
3460 __func__, id);
3461 break;
3462 }
3463 return ret;
3464}
3465EXPORT_SYMBOL(swrm_wcd_notify);
3466
Ramprasad Katkam57349872018-11-11 18:34:57 +05303467/*
3468 * swrm_pm_cmpxchg:
3469 * Compare the current pm state with the old state and, if they match,
3470 * exchange it with the new state
3471 *
3472 * @swrm: pointer to swr master controller structure
3473 * @o: pm old state
3474 * @n: pm new state
3475 *
3476 * Returns old state
3477 */
3478static enum swrm_pm_state swrm_pm_cmpxchg(
3479 struct swr_mstr_ctrl *swrm,
3480 enum swrm_pm_state o,
3481 enum swrm_pm_state n)
3482{
3483 enum swrm_pm_state old;
3484
3485 if (!swrm)
3486 return o;
3487
3488 mutex_lock(&swrm->pm_lock);
3489 old = swrm->pm_state;
3490 if (old == o)
3491 swrm->pm_state = n;
3492 mutex_unlock(&swrm->pm_lock);
3493
3494 return old;
3495}
3496
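/*
 * swrm_lock_sleep - take a wakeup reference so the system stays resumed
 * while soundwire activity (irq handling, slave wakeup) is in progress.
 * Returns false if the system did not resume within the timeout.
 */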
3497static bool swrm_lock_sleep(struct swr_mstr_ctrl *swrm)
3498{
3499 enum swrm_pm_state os;
3500
3501 /*
3502	 * swrm_{lock/unlock}_sleep will be called by the swr irq handler
3503	 * and by slave wake-up requests.
3504	 *
3505	 * If the system did not resume, we can simply return false so the
3506	 * IRQ handler can return without handling the IRQ.
3507 */
3508 mutex_lock(&swrm->pm_lock);
3509 if (swrm->wlock_holders++ == 0) {
3510 dev_dbg(swrm->dev, "%s: holding wake lock\n", __func__);
3511 pm_qos_update_request(&swrm->pm_qos_req,
3512 msm_cpuidle_get_deep_idle_latency());
3513 pm_stay_awake(swrm->dev);
3514 }
3515 mutex_unlock(&swrm->pm_lock);
3516
3517 if (!wait_event_timeout(swrm->pm_wq,
3518 ((os = swrm_pm_cmpxchg(swrm,
3519 SWRM_PM_SLEEPABLE,
3520 SWRM_PM_AWAKE)) ==
3521 SWRM_PM_SLEEPABLE ||
3522 (os == SWRM_PM_AWAKE)),
3523 msecs_to_jiffies(
3524 SWRM_SYSTEM_RESUME_TIMEOUT_MS))) {
3525 dev_err(swrm->dev, "%s: system didn't resume within %dms, s %d, w %d\n",
3526 __func__, SWRM_SYSTEM_RESUME_TIMEOUT_MS, swrm->pm_state,
3527 swrm->wlock_holders);
3528 swrm_unlock_sleep(swrm);
3529 return false;
3530 }
3531 wake_up_all(&swrm->pm_wq);
3532 return true;
3533}
3534
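/* swrm_unlock_sleep - drop the reference taken by swrm_lock_sleep() */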
3535static void swrm_unlock_sleep(struct swr_mstr_ctrl *swrm)
3536{
3537 mutex_lock(&swrm->pm_lock);
3538 if (--swrm->wlock_holders == 0) {
3539 dev_dbg(swrm->dev, "%s: releasing wake lock pm_state %d -> %d\n",
3540 __func__, swrm->pm_state, SWRM_PM_SLEEPABLE);
3541 /*
3542		 * if swrm_lock_sleep failed, pm_state would still be
3543		 * SWRM_PM_ASLEEP; don't overwrite it
3544 */
3545 if (likely(swrm->pm_state == SWRM_PM_AWAKE))
3546 swrm->pm_state = SWRM_PM_SLEEPABLE;
3547 pm_qos_update_request(&swrm->pm_qos_req,
3548 PM_QOS_DEFAULT_VALUE);
3549 pm_relax(swrm->dev);
3550 }
3551 mutex_unlock(&swrm->pm_lock);
3552 wake_up_all(&swrm->pm_wq);
3553}
3554
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05303555#ifdef CONFIG_PM_SLEEP
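/*
 * System suspend handler: wait for swrm_lock_sleep() holders to release
 * their references, mark the pm state as asleep and then force a runtime
 * suspend if the controller is still active.
 */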
3556static int swrm_suspend(struct device *dev)
3557{
3558 int ret = -EBUSY;
3559 struct platform_device *pdev = to_platform_device(dev);
3560 struct swr_mstr_ctrl *swrm = platform_get_drvdata(pdev);
3561
3562 dev_dbg(dev, "%s: system suspend, state: %d\n", __func__, swrm->state);
Ramprasad Katkam57349872018-11-11 18:34:57 +05303563
3564 mutex_lock(&swrm->pm_lock);
3565
3566 if (swrm->pm_state == SWRM_PM_SLEEPABLE) {
3567 dev_dbg(swrm->dev, "%s: suspending system, state %d, wlock %d\n",
3568 __func__, swrm->pm_state,
3569 swrm->wlock_holders);
3570 swrm->pm_state = SWRM_PM_ASLEEP;
3571 } else if (swrm->pm_state == SWRM_PM_AWAKE) {
3572 /*
3573 * unlock to wait for pm_state == SWRM_PM_SLEEPABLE
3574 * then set to SWRM_PM_ASLEEP
3575 */
3576 dev_dbg(swrm->dev, "%s: waiting to suspend system, state %d, wlock %d\n",
3577 __func__, swrm->pm_state,
3578 swrm->wlock_holders);
3579 mutex_unlock(&swrm->pm_lock);
3580 if (!(wait_event_timeout(swrm->pm_wq, swrm_pm_cmpxchg(
3581 swrm, SWRM_PM_SLEEPABLE,
3582 SWRM_PM_ASLEEP) ==
3583 SWRM_PM_SLEEPABLE,
3584 msecs_to_jiffies(
3585 SWRM_SYS_SUSPEND_WAIT)))) {
3586 dev_dbg(swrm->dev, "%s: suspend failed state %d, wlock %d\n",
3587 __func__, swrm->pm_state,
3588 swrm->wlock_holders);
3589 return -EBUSY;
3590 } else {
3591 dev_dbg(swrm->dev,
3592 "%s: done, state %d, wlock %d\n",
3593 __func__, swrm->pm_state,
3594 swrm->wlock_holders);
3595 }
3596 mutex_lock(&swrm->pm_lock);
3597 } else if (swrm->pm_state == SWRM_PM_ASLEEP) {
3598 dev_dbg(swrm->dev, "%s: system is already suspended, state %d, wlock %d\n",
3599 __func__, swrm->pm_state,
3600 swrm->wlock_holders);
3601 }
3602
3603 mutex_unlock(&swrm->pm_lock);
3604
3605 if ((!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev))) {
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05303606 ret = swrm_runtime_suspend(dev);
3607 if (!ret) {
3608 /*
3609 * Synchronize runtime-pm and system-pm states:
3610 * At this point, we are already suspended. If
3611			 * runtime-pm still thinks it's active, then
3612			 * make sure its status is in sync with HW
3613			 * status. The three calls below let the
3614			 * runtime-pm know that we are suspended
3615			 * already without re-invoking the suspend
3616			 * callback.
3617 */
3618 pm_runtime_disable(dev);
3619 pm_runtime_set_suspended(dev);
3620 pm_runtime_enable(dev);
3621 }
3622 }
3623 if (ret == -EBUSY) {
3624 /*
3625 * There is a possibility that some audio stream is active
3626		 * during suspend. We don't want to return suspend failure in
3627		 * that case so that display and relevant components can still
3628		 * go to suspend.
3629		 * If there is some other error, then it should be passed on
3630		 * to system level suspend.
3631 */
3632 ret = 0;
3633 }
3634 return ret;
3635}
3636
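/*
 * System resume handler: resume the controller through runtime PM if it
 * was suspended and move the pm state back to SWRM_PM_SLEEPABLE so
 * swrm_lock_sleep() callers can proceed.
 */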
3637static int swrm_resume(struct device *dev)
3638{
3639 int ret = 0;
3640 struct platform_device *pdev = to_platform_device(dev);
3641 struct swr_mstr_ctrl *swrm = platform_get_drvdata(pdev);
3642
3643 dev_dbg(dev, "%s: system resume, state: %d\n", __func__, swrm->state);
3644 if (!pm_runtime_enabled(dev) || !pm_runtime_suspend(dev)) {
3645 ret = swrm_runtime_resume(dev);
3646 if (!ret) {
3647 pm_runtime_mark_last_busy(dev);
3648 pm_request_autosuspend(dev);
3649 }
3650 }
Ramprasad Katkam57349872018-11-11 18:34:57 +05303651 mutex_lock(&swrm->pm_lock);
3652 if (swrm->pm_state == SWRM_PM_ASLEEP) {
3653 dev_dbg(swrm->dev,
3654 "%s: resuming system, state %d, wlock %d\n",
3655 __func__, swrm->pm_state,
3656 swrm->wlock_holders);
3657 swrm->pm_state = SWRM_PM_SLEEPABLE;
3658 } else {
3659 dev_dbg(swrm->dev, "%s: system is already awake, state %d wlock %d\n",
3660 __func__, swrm->pm_state,
3661 swrm->wlock_holders);
3662 }
3663 mutex_unlock(&swrm->pm_lock);
3664 wake_up_all(&swrm->pm_wq);
3665
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05303666 return ret;
3667}
3668#endif /* CONFIG_PM_SLEEP */
3669
3670static const struct dev_pm_ops swrm_dev_pm_ops = {
3671 SET_SYSTEM_SLEEP_PM_OPS(
3672 swrm_suspend,
3673 swrm_resume
3674 )
3675 SET_RUNTIME_PM_OPS(
3676 swrm_runtime_suspend,
3677 swrm_runtime_resume,
3678 NULL
3679 )
3680};
3681
3682static const struct of_device_id swrm_dt_match[] = {
3683 {
3684 .compatible = "qcom,swr-mstr",
3685 },
3686 {}
3687};
3688
3689static struct platform_driver swr_mstr_driver = {
3690 .probe = swrm_probe,
3691 .remove = swrm_remove,
3692 .driver = {
3693 .name = SWR_WCD_NAME,
3694 .owner = THIS_MODULE,
3695 .pm = &swrm_dev_pm_ops,
3696 .of_match_table = swrm_dt_match,
Xiaojun Sang53cd13a2018-06-29 15:14:37 +08003697 .suppress_bind_attrs = true,
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05303698 },
3699};
3700
3701static int __init swrm_init(void)
3702{
3703 return platform_driver_register(&swr_mstr_driver);
3704}
3705module_init(swrm_init);
3706
3707static void __exit swrm_exit(void)
3708{
3709 platform_driver_unregister(&swr_mstr_driver);
3710}
3711module_exit(swrm_exit);
3712
3713MODULE_LICENSE("GPL v2");
3714MODULE_DESCRIPTION("SoundWire Master Controller");
3715MODULE_ALIAS("platform:swr-mstr");