// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
 */

#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <soc/soundwire.h>
#include <soc/swr-common.h>
#include <linux/regmap.h>
#include <dsp/msm-audio-event-notify.h>
#include "swrm_registers.h"
#include "swr-mstr-ctrl.h"

#define SWRM_SYSTEM_RESUME_TIMEOUT_MS 700
#define SWRM_SYS_SUSPEND_WAIT 1

#define SWRM_DSD_PARAMS_PORT 4

#define SWR_BROADCAST_CMD_ID 0x0F
#define SWR_AUTO_SUSPEND_DELAY 1 /* delay in sec */
#define SWR_DEV_ID_MASK 0xFFFFFFFFFFFF
#define SWR_REG_VAL_PACK(data, dev, id, reg) \
	((reg) | ((id) << 16) | ((dev) << 20) | ((data) << 24))

#define SWR_INVALID_PARAM 0xFF
#define SWR_HSTOP_MAX_VAL 0xF
#define SWR_HSTART_MIN_VAL 0x0

#define SWRM_INTERRUPT_STATUS_MASK 0x1FDFD
/* pm runtime auto suspend timer in msecs */
static int auto_suspend_timer = SWR_AUTO_SUSPEND_DELAY * 1000;
module_param(auto_suspend_timer, int, 0664);
MODULE_PARM_DESC(auto_suspend_timer, "timer for auto suspend");

enum {
	SWR_NOT_PRESENT, /* Device is detached/not present on the bus */
	SWR_ATTACHED_OK, /* Device is attached */
	SWR_ALERT, /* Device alerts master for any interrupts */
	SWR_RESERVED, /* Reserved */
};

enum {
	MASTER_ID_WSA = 1,
	MASTER_ID_RX,
	MASTER_ID_TX
};

enum {
	ENABLE_PENDING,
	DISABLE_PENDING
};

enum {
	LPASS_HW_CORE,
	LPASS_AUDIO_CORE,
};

#define TRUE 1
#define FALSE 0

#define SWRM_MAX_PORT_REG 120
#define SWRM_MAX_INIT_REG 11

#define MAX_FIFO_RD_FAIL_RETRY 3

static bool swrm_lock_sleep(struct swr_mstr_ctrl *swrm);
static void swrm_unlock_sleep(struct swr_mstr_ctrl *swrm);
static u32 swr_master_read(struct swr_mstr_ctrl *swrm, unsigned int reg_addr);
static void swr_master_write(struct swr_mstr_ctrl *swrm, u16 reg_addr, u32 val);

static bool swrm_is_msm_variant(int val)
{
	return (val == SWRM_VERSION_1_3);
}

#ifdef CONFIG_DEBUG_FS
static int swrm_debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

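/*
 * Parse up to num_of_par space-separated numbers from a debugfs write
 * buffer into param1[]; values with a 0x/0X prefix are read as hex,
 * everything else as decimal.
 */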
static int get_parameters(char *buf, u32 *param1, int num_of_par)
{
	char *token;
	int base, cnt;

	token = strsep(&buf, " ");
	for (cnt = 0; cnt < num_of_par; cnt++) {
		if (token) {
			if ((token[1] == 'x') || (token[1] == 'X'))
				base = 16;
			else
				base = 10;

			if (kstrtou32(token, base, &param1[cnt]) != 0)
				return -EINVAL;

			token = strsep(&buf, " ");
		} else
			return -EINVAL;
	}
	return 0;
}

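/*
 * Dump the master register space to the debugfs read buffer, one
 * "addr: value" pair per line, resuming from *ppos on each call.
 */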
static ssize_t swrm_reg_show(struct swr_mstr_ctrl *swrm, char __user *ubuf,
				size_t count, loff_t *ppos)
{
	int i, reg_val, len;
	ssize_t total = 0;
	char tmp_buf[SWR_MSTR_MAX_BUF_LEN];
	int rem = 0;

	if (!ubuf || !ppos)
		return 0;

	i = ((int) *ppos + SWR_MSTR_START_REG_ADDR);
	rem = i % 4;

	if (rem)
		i = (i - rem);

	for (; i <= SWR_MSTR_MAX_REG_ADDR; i += 4) {
		usleep_range(100, 150);
		reg_val = swr_master_read(swrm, i);
		len = snprintf(tmp_buf, 25, "0x%.3x: 0x%.2x\n", i, reg_val);
		if ((total + len) >= count - 1)
			break;
		if (copy_to_user((ubuf + total), tmp_buf, len)) {
			pr_err("%s: fail to copy reg dump\n", __func__);
			total = -EFAULT;
			goto copy_err;
		}
		*ppos += len;
		total += len;
	}

copy_err:
	return total;
}

static ssize_t swrm_debug_reg_dump(struct file *file, char __user *ubuf,
				size_t count, loff_t *ppos)
{
	struct swr_mstr_ctrl *swrm;

	if (!count || !file || !ppos || !ubuf)
		return -EINVAL;

	swrm = file->private_data;
	if (!swrm)
		return -EINVAL;

	if (*ppos < 0)
		return -EINVAL;

	return swrm_reg_show(swrm, ubuf, count, ppos);
}

static ssize_t swrm_debug_read(struct file *file, char __user *ubuf,
				size_t count, loff_t *ppos)
{
	char lbuf[SWR_MSTR_RD_BUF_LEN];
	struct swr_mstr_ctrl *swrm = NULL;

	if (!count || !file || !ppos || !ubuf)
		return -EINVAL;

	swrm = file->private_data;
	if (!swrm)
		return -EINVAL;

	if (*ppos < 0)
		return -EINVAL;

	snprintf(lbuf, sizeof(lbuf), "0x%x\n", swrm->read_data);

	return simple_read_from_buffer(ubuf, count, ppos, lbuf,
				       strnlen(lbuf, 7));
}

static ssize_t swrm_debug_peek_write(struct file *file, const char __user *ubuf,
					size_t count, loff_t *ppos)
{
	char lbuf[SWR_MSTR_RD_BUF_LEN];
	int rc;
	u32 param[5];
	struct swr_mstr_ctrl *swrm = NULL;

	if (!count || !file || !ppos || !ubuf)
		return -EINVAL;

	swrm = file->private_data;
	if (!swrm)
		return -EINVAL;

	if (*ppos < 0)
		return -EINVAL;

	if (count > sizeof(lbuf) - 1)
		return -EINVAL;

	rc = copy_from_user(lbuf, ubuf, count);
	if (rc)
		return -EFAULT;

	lbuf[count] = '\0';
	rc = get_parameters(lbuf, param, 1);
	if ((param[0] <= SWR_MSTR_MAX_REG_ADDR) && (rc == 0))
		swrm->read_data = swr_master_read(swrm, param[0]);
	else
		rc = -EINVAL;

	if (rc == 0)
		rc = count;
	else
		dev_err(swrm->dev, "%s: rc = %d\n", __func__, rc);

	return rc;
}

static ssize_t swrm_debug_write(struct file *file,
	const char __user *ubuf, size_t count, loff_t *ppos)
{
	char lbuf[SWR_MSTR_WR_BUF_LEN];
	int rc;
	u32 param[5];
	struct swr_mstr_ctrl *swrm;

	if (!file || !ppos || !ubuf)
		return -EINVAL;

	swrm = file->private_data;
	if (!swrm)
		return -EINVAL;

	if (count > sizeof(lbuf) - 1)
		return -EINVAL;

	rc = copy_from_user(lbuf, ubuf, count);
	if (rc)
		return -EFAULT;

	lbuf[count] = '\0';
	rc = get_parameters(lbuf, param, 2);
	if ((param[0] <= SWR_MSTR_MAX_REG_ADDR) &&
		(param[1] <= 0xFFFFFFFF) &&
		(rc == 0))
		swr_master_write(swrm, param[0], param[1]);
	else
		rc = -EINVAL;

	if (rc == 0)
		rc = count;
	else
		pr_err("%s: rc = %d\n", __func__, rc);

	return rc;
}

static const struct file_operations swrm_debug_read_ops = {
	.open = swrm_debug_open,
	.write = swrm_debug_peek_write,
	.read = swrm_debug_read,
};

static const struct file_operations swrm_debug_write_ops = {
	.open = swrm_debug_open,
	.write = swrm_debug_write,
};

static const struct file_operations swrm_debug_dump_ops = {
	.open = swrm_debug_open,
	.read = swrm_debug_reg_dump,
};
#endif

static void swrm_reg_dump(struct swr_mstr_ctrl *swrm,
			  u32 *reg, u32 *val, int len, const char *func)
{
	int i = 0;

	for (i = 0; i < len; i++)
		dev_dbg(swrm->dev, "%s: reg = 0x%x val = 0x%x\n",
			func, reg[i], val[i]);
}

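/*
 * Vote or unvote the LPASS core clocks needed to access the soundwire
 * master hardware. Callers pick LPASS_HW_CORE or LPASS_AUDIO_CORE; a
 * missing clock handle is treated as "nothing to vote" and succeeds.
 */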
static int swrm_request_hw_vote(struct swr_mstr_ctrl *swrm,
				int core_type, bool enable)
{
	int ret = 0;

	if (core_type == LPASS_HW_CORE) {
		if (swrm->lpass_core_hw_vote) {
			if (enable) {
				ret =
				   clk_prepare_enable(swrm->lpass_core_hw_vote);
				if (ret < 0)
					dev_err(swrm->dev,
						"%s:lpass core hw enable failed\n",
						__func__);
			} else
				clk_disable_unprepare(swrm->lpass_core_hw_vote);
		}
	}
	if (core_type == LPASS_AUDIO_CORE) {
		if (swrm->lpass_core_audio) {
			if (enable) {
				ret =
				   clk_prepare_enable(swrm->lpass_core_audio);
				if (ret < 0)
					dev_err(swrm->dev,
						"%s:lpass audio hw enable failed\n",
						__func__);
			} else
				clk_disable_unprepare(swrm->lpass_core_audio);
		}
	}

	return ret;
}

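/*
 * Reference-counted enable/disable of the soundwire master clock through
 * the clk() callback supplied by the platform driver. The clock is turned
 * on only on the 0->1 transition and off on the 1->0 transition, and
 * clk_off_complete is signalled so waiters can synchronize with clock off.
 */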
static int swrm_clk_request(struct swr_mstr_ctrl *swrm, bool enable)
{
	int ret = 0;

	if (!swrm->clk || !swrm->handle)
		return -EINVAL;

	mutex_lock(&swrm->clklock);
	if (enable) {
		if (!swrm->dev_up) {
			ret = -ENODEV;
			goto exit;
		}
		swrm->clk_ref_count++;
		if (swrm->clk_ref_count == 1) {
			ret = swrm->clk(swrm->handle, true);
			if (ret) {
				dev_err_ratelimited(swrm->dev,
					"%s: clock enable req failed",
					__func__);
				--swrm->clk_ref_count;
			}
		}
	} else if (--swrm->clk_ref_count == 0) {
		swrm->clk(swrm->handle, false);
		complete(&swrm->clk_off_complete);
	}
	if (swrm->clk_ref_count < 0) {
		dev_err(swrm->dev, "%s: swrm clk count mismatch\n", __func__);
		swrm->clk_ref_count = 0;
	}

exit:
	mutex_unlock(&swrm->clklock);
	return ret;
}

static int swrm_ahb_write(struct swr_mstr_ctrl *swrm,
			  u16 reg, u32 *value)
{
	u32 temp = (u32)(*value);
	int ret = 0;

	mutex_lock(&swrm->devlock);
	if (!swrm->dev_up)
		goto err;

	ret = swrm_clk_request(swrm, TRUE);
	if (ret) {
		dev_err_ratelimited(swrm->dev, "%s: clock request failed\n",
				    __func__);
		goto err;
	}
	iowrite32(temp, swrm->swrm_dig_base + reg);
	swrm_clk_request(swrm, FALSE);
err:
	mutex_unlock(&swrm->devlock);
	return ret;
}

static int swrm_ahb_read(struct swr_mstr_ctrl *swrm,
			 u16 reg, u32 *value)
{
	u32 temp = 0;
	int ret = 0;

	mutex_lock(&swrm->devlock);
	if (!swrm->dev_up)
		goto err;

	ret = swrm_clk_request(swrm, TRUE);
	if (ret) {
		dev_err_ratelimited(swrm->dev, "%s: clock request failed\n",
				    __func__);
		goto err;
	}
	temp = ioread32(swrm->swrm_dig_base + reg);
	*value = temp;
	swrm_clk_request(swrm, FALSE);
err:
	mutex_unlock(&swrm->devlock);
	return ret;
}

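/*
 * Register accessors: prefer the platform-provided read/write/bulk_write
 * callbacks when present, otherwise fall back to direct AHB access via
 * swrm_ahb_read()/swrm_ahb_write().
 */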
static u32 swr_master_read(struct swr_mstr_ctrl *swrm, unsigned int reg_addr)
{
	u32 val = 0;

	if (swrm->read)
		val = swrm->read(swrm->handle, reg_addr);
	else
		swrm_ahb_read(swrm, reg_addr, &val);
	return val;
}

static void swr_master_write(struct swr_mstr_ctrl *swrm, u16 reg_addr, u32 val)
{
	if (swrm->write)
		swrm->write(swrm->handle, reg_addr, val);
	else
		swrm_ahb_write(swrm, reg_addr, &val);
}

static int swr_master_bulk_write(struct swr_mstr_ctrl *swrm, u32 *reg_addr,
				 u32 *val, unsigned int length)
{
	int i = 0;

	if (swrm->bulk_write)
		swrm->bulk_write(swrm->handle, reg_addr, val, length);
	else {
		mutex_lock(&swrm->iolock);
		for (i = 0; i < length; i++) {
			/* wait for FIFO WR command to complete to avoid overflow */
			usleep_range(100, 105);
			swr_master_write(swrm, reg_addr[i], val[i]);
		}
		mutex_unlock(&swrm->iolock);
	}
	return 0;
}

static bool swrm_is_port_en(struct swr_master *mstr)
{
	return !!(mstr->num_port);
}

static void copy_port_tables(struct swr_mstr_ctrl *swrm,
			     struct port_params *params)
{
	u8 i;
	struct port_params *config = params;

	for (i = 0; i < SWR_MSTR_PORT_LEN; i++) {
		/* wsa uses single frame structure for all configurations */
		if (!swrm->mport_cfg[i].port_en)
			continue;
		swrm->mport_cfg[i].sinterval = config[i].si;
		swrm->mport_cfg[i].offset1 = config[i].off1;
		swrm->mport_cfg[i].offset2 = config[i].off2;
		swrm->mport_cfg[i].hstart = config[i].hstart;
		swrm->mport_cfg[i].hstop = config[i].hstop;
		swrm->mport_cfg[i].blk_pack_mode = config[i].bp_mode;
		swrm->mport_cfg[i].blk_grp_count = config[i].bgp_ctrl;
		swrm->mport_cfg[i].word_length = config[i].wd_len;
		swrm->mport_cfg[i].lane_ctrl = config[i].lane_ctrl;
	}
}

static int swrm_get_port_config(struct swr_mstr_ctrl *swrm)
{
	struct port_params *params;
	u32 usecase = 0;

	/* TODO - Send usecase information to avoid checking for master_id */
	if (swrm->mport_cfg[SWRM_DSD_PARAMS_PORT].port_en &&
				(swrm->master_id == MASTER_ID_RX))
		usecase = 1;

	params = swrm->port_param[usecase];
	copy_port_tables(swrm, params);

	return 0;
}

static int swrm_get_master_port(struct swr_mstr_ctrl *swrm, u8 *mstr_port_id,
					u8 *mstr_ch_mask, u8 mstr_prt_type,
					u8 slv_port_id)
{
	int i, j;
	*mstr_port_id = 0;

	for (i = 1; i <= swrm->num_ports; i++) {
		for (j = 0; j < SWR_MAX_CH_PER_PORT; j++) {
			if (swrm->port_mapping[i][j].port_type == mstr_prt_type)
				goto found;
		}
	}
found:
	if (i > swrm->num_ports || j == SWR_MAX_CH_PER_PORT) {
		dev_err(swrm->dev, "%s: port type not supported by master\n",
					__func__);
		return -EINVAL;
	}
	/* id 0 corresponds to master port 1 */
	*mstr_port_id = i - 1;
	*mstr_ch_mask = swrm->port_mapping[i][j].ch_mask;

	return 0;

}

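/*
 * Pack a command into the FIFO register format (data/dev/id/reg) as laid
 * out by SWR_REG_VAL_PACK. Non-broadcast command IDs rotate through 0..14
 * so responses can be matched back to the issuing command.
 */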
static u32 swrm_get_packed_reg_val(u8 *cmd_id, u8 cmd_data,
				 u8 dev_addr, u16 reg_addr)
{
	u32 val;
	u8 id = *cmd_id;

	if (id != SWR_BROADCAST_CMD_ID) {
		if (id < 14)
			id += 1;
		else
			id = 0;
		*cmd_id = id;
	}
	val = SWR_REG_VAL_PACK(cmd_data, dev_addr, id, reg_addr);

	return val;
}

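/*
 * Issue a read command through the command FIFO and fetch the result from
 * the read FIFO. The response carries the command id in bits [11:8]; on a
 * mismatch with rcmd_id the read is retried up to MAX_FIFO_RD_FAIL_RETRY
 * times before giving up with a rate-limited error.
 */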
static int swrm_cmd_fifo_rd_cmd(struct swr_mstr_ctrl *swrm, int *cmd_data,
				 u8 dev_addr, u8 cmd_id, u16 reg_addr,
				 u32 len)
{
	u32 val;
	u32 retry_attempt = 0;

	mutex_lock(&swrm->iolock);
	val = swrm_get_packed_reg_val(&swrm->rcmd_id, len, dev_addr, reg_addr);
	if (swrm->read) {
		/* skip delay if read is handled in platform driver */
		swr_master_write(swrm, SWRM_CMD_FIFO_RD_CMD, val);
	} else {
		/* wait for FIFO RD to complete to avoid overflow */
		usleep_range(100, 105);
		swr_master_write(swrm, SWRM_CMD_FIFO_RD_CMD, val);
		/* wait for FIFO RD CMD complete to avoid overflow */
		usleep_range(250, 255);
	}
retry_read:
	*cmd_data = swr_master_read(swrm, SWRM_CMD_FIFO_RD_FIFO_ADDR);
	dev_dbg(swrm->dev, "%s: reg: 0x%x, cmd_id: 0x%x, rcmd_id: 0x%x, dev_num: 0x%x, cmd_data: 0x%x\n",
		__func__, reg_addr, cmd_id, swrm->rcmd_id, dev_addr, *cmd_data);
	if ((((*cmd_data) & 0xF00) >> 8) != swrm->rcmd_id) {
		if (retry_attempt < MAX_FIFO_RD_FAIL_RETRY) {
			/* wait 500 us before retry on fifo read failure */
			usleep_range(500, 505);
			retry_attempt++;
			goto retry_read;
		} else {
			dev_err_ratelimited(swrm->dev, "%s: reg: 0x%x, cmd_id: 0x%x, rcmd_id: 0x%x, dev_num: 0x%x, cmd_data: 0x%x\n",
				__func__, reg_addr, cmd_id, swrm->rcmd_id,
				dev_addr, *cmd_data);

			dev_err_ratelimited(swrm->dev,
				"%s: failed to read fifo\n", __func__);
		}
	}
	mutex_unlock(&swrm->iolock);

	return 0;
}

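/*
 * Queue a write command to the command FIFO. Broadcast writes (cmd_id 0xF)
 * additionally wait for completion: a fixed 10 ms sleep on MSM variants,
 * otherwise the SPECIAL_CMD_ID_FINISHED interrupt completing swrm->broadcast.
 */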
static int swrm_cmd_fifo_wr_cmd(struct swr_mstr_ctrl *swrm, u8 cmd_data,
				 u8 dev_addr, u8 cmd_id, u16 reg_addr)
{
	u32 val;
	int ret = 0;

	mutex_lock(&swrm->iolock);
	if (!cmd_id)
		val = swrm_get_packed_reg_val(&swrm->wcmd_id, cmd_data,
					      dev_addr, reg_addr);
	else
		val = swrm_get_packed_reg_val(&cmd_id, cmd_data,
					      dev_addr, reg_addr);
	dev_dbg(swrm->dev, "%s: reg: 0x%x, cmd_id: 0x%x, wcmd_id: 0x%x, dev_num: 0x%x, cmd_data: 0x%x\n",
		__func__, reg_addr, cmd_id, swrm->wcmd_id, dev_addr, cmd_data);
	swr_master_write(swrm, SWRM_CMD_FIFO_WR_CMD, val);
	/*
	 * wait for FIFO WR command to complete to avoid overflow
	 * skip delay if write is handled in platform driver.
	 */
	if (!swrm->write)
		usleep_range(250, 255);
	if (cmd_id == 0xF) {
		/*
		 * sleep for 10ms for MSM soundwire variant to allow broadcast
		 * command to complete.
		 */
		if (swrm_is_msm_variant(swrm->version))
			usleep_range(10000, 10100);
		else
			wait_for_completion_timeout(&swrm->broadcast,
						    (2 * HZ / 10));
	}
	mutex_unlock(&swrm->iolock);
	return ret;
}

static int swrm_read(struct swr_master *master, u8 dev_num, u16 reg_addr,
		     void *buf, u32 len)
{
	struct swr_mstr_ctrl *swrm = swr_get_ctrl_data(master);
	int ret = 0;
	int val;
	u8 *reg_val = (u8 *)buf;

	if (!swrm) {
		dev_err(&master->dev, "%s: swrm is NULL\n", __func__);
		return -EINVAL;
	}
	if (!dev_num) {
		dev_err(&master->dev, "%s: invalid slave dev num\n", __func__);
		return -EINVAL;
	}
	mutex_lock(&swrm->devlock);
	if (!swrm->dev_up) {
		mutex_unlock(&swrm->devlock);
		return 0;
	}
	mutex_unlock(&swrm->devlock);

	pm_runtime_get_sync(swrm->dev);
	ret = swrm_cmd_fifo_rd_cmd(swrm, &val, dev_num, 0, reg_addr, len);

	if (!ret)
		*reg_val = (u8)val;

	pm_runtime_put_autosuspend(swrm->dev);
	pm_runtime_mark_last_busy(swrm->dev);
	return ret;
}

static int swrm_write(struct swr_master *master, u8 dev_num, u16 reg_addr,
		      const void *buf)
{
	struct swr_mstr_ctrl *swrm = swr_get_ctrl_data(master);
	int ret = 0;
	u8 reg_val = *(u8 *)buf;

	if (!swrm) {
		dev_err(&master->dev, "%s: swrm is NULL\n", __func__);
		return -EINVAL;
	}
	if (!dev_num) {
		dev_err(&master->dev, "%s: invalid slave dev num\n", __func__);
		return -EINVAL;
	}
	mutex_lock(&swrm->devlock);
	if (!swrm->dev_up) {
		mutex_unlock(&swrm->devlock);
		return 0;
	}
	mutex_unlock(&swrm->devlock);

	pm_runtime_get_sync(swrm->dev);
	ret = swrm_cmd_fifo_wr_cmd(swrm, reg_val, dev_num, 0, reg_addr);

	pm_runtime_put_autosuspend(swrm->dev);
	pm_runtime_mark_last_busy(swrm->dev);
	return ret;
}

static int swrm_bulk_write(struct swr_master *master, u8 dev_num, void *reg,
			   const void *buf, size_t len)
{
	struct swr_mstr_ctrl *swrm = swr_get_ctrl_data(master);
	int ret = 0;
	int i;
	u32 *val;
	u32 *swr_fifo_reg;

	if (!swrm || !swrm->handle) {
		dev_err(&master->dev, "%s: swrm is NULL\n", __func__);
		return -EINVAL;
	}
	if (len <= 0)
		return -EINVAL;
	mutex_lock(&swrm->devlock);
	if (!swrm->dev_up) {
		mutex_unlock(&swrm->devlock);
		return 0;
	}
	mutex_unlock(&swrm->devlock);

	pm_runtime_get_sync(swrm->dev);
	if (dev_num) {
		swr_fifo_reg = kcalloc(len, sizeof(u32), GFP_KERNEL);
		if (!swr_fifo_reg) {
			ret = -ENOMEM;
			goto err;
		}
		val = kcalloc(len, sizeof(u32), GFP_KERNEL);
		if (!val) {
			ret = -ENOMEM;
			goto mem_fail;
		}

		for (i = 0; i < len; i++) {
			val[i] = swrm_get_packed_reg_val(&swrm->wcmd_id,
							 ((u8 *)buf)[i],
							 dev_num,
							 ((u16 *)reg)[i]);
			swr_fifo_reg[i] = SWRM_CMD_FIFO_WR_CMD;
		}
		ret = swr_master_bulk_write(swrm, swr_fifo_reg, val, len);
		if (ret) {
			dev_err(&master->dev, "%s: bulk write failed\n",
				__func__);
			ret = -EINVAL;
		}
	} else {
		dev_err(&master->dev,
			"%s: No support of Bulk write for master regs\n",
			__func__);
		ret = -EINVAL;
		goto err;
	}
	kfree(val);
mem_fail:
	kfree(swr_fifo_reg);
err:
	pm_runtime_put_autosuspend(swrm->dev);
	pm_runtime_mark_last_busy(swrm->dev);
	return ret;
}

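/*
 * Frame-shape and data-port registers are double banked: the driver
 * programs the currently inactive bank and then issues a broadcast bank
 * switch (enable_bank_switch()) so the new configuration takes effect
 * together.
 */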
static u8 get_inactive_bank_num(struct swr_mstr_ctrl *swrm)
{
	return (swr_master_read(swrm, SWRM_MCP_STATUS) &
		SWRM_MCP_STATUS_BANK_NUM_MASK) ? 0 : 1;
}

static void enable_bank_switch(struct swr_mstr_ctrl *swrm, u8 bank,
				u8 row, u8 col)
{
	swrm_cmd_fifo_wr_cmd(swrm, ((row << 3) | col), 0xF, 0xF,
			SWRS_SCP_FRAME_CTRL_BANK(bank));
}

static struct swr_port_info *swrm_get_port_req(struct swrm_mports *mport,
						   u8 slv_port, u8 dev_num)
{
	struct swr_port_info *port_req = NULL;

	list_for_each_entry(port_req, &mport->port_req_list, list) {
		/* Store dev_id instead of dev_num if enumeration is changed run_time */
		if ((port_req->slave_port_id == slv_port)
			&& (port_req->dev_num == dev_num))
			return port_req;
	}
	return NULL;
}

static bool swrm_remove_from_group(struct swr_master *master)
{
	struct swr_device *swr_dev;
	struct swr_mstr_ctrl *swrm = swr_get_ctrl_data(master);
	bool is_removed = false;

	if (!swrm)
		goto end;

	mutex_lock(&swrm->mlock);
	if ((swrm->num_rx_chs > 1) &&
	    (swrm->num_rx_chs == swrm->num_cfg_devs)) {
		list_for_each_entry(swr_dev, &master->devices,
				dev_list) {
			swr_dev->group_id = SWR_GROUP_NONE;
			master->gr_sid = 0;
		}
		is_removed = true;
	}
	mutex_unlock(&swrm->mlock);

end:
	return is_removed;
}

static void swrm_disable_ports(struct swr_master *master,
					     u8 bank)
{
	u32 value;
	struct swr_port_info *port_req;
	int i;
	struct swrm_mports *mport;
	struct swr_mstr_ctrl *swrm = swr_get_ctrl_data(master);

	if (!swrm) {
		pr_err("%s: swrm is null\n", __func__);
		return;
	}

	dev_dbg(swrm->dev, "%s: master num_port: %d\n", __func__,
		master->num_port);

	for (i = 0; i < SWR_MSTR_PORT_LEN; i++) {
		mport = &(swrm->mport_cfg[i]);
		if (!mport->port_en)
			continue;

		list_for_each_entry(port_req, &mport->port_req_list, list) {
			/* skip ports with no change req's */
			if (port_req->req_ch == port_req->ch_en)
				continue;

			swrm_cmd_fifo_wr_cmd(swrm, port_req->req_ch,
					port_req->dev_num, 0x00,
			SWRS_DP_CHANNEL_ENABLE_BANK(port_req->slave_port_id,
					bank));
			dev_dbg(swrm->dev, "%s: mport :%d, reg: 0x%x\n",
				__func__, i,
				(SWRM_DP_PORT_CTRL_BANK(i + 1, bank)));
		}
		value = ((mport->req_ch)
					<< SWRM_DP_PORT_CTRL_EN_CHAN_SHFT);
		value |= ((mport->offset2)
					<< SWRM_DP_PORT_CTRL_OFFSET2_SHFT);
		value |= ((mport->offset1)
				<< SWRM_DP_PORT_CTRL_OFFSET1_SHFT);
		value |= mport->sinterval;

		swr_master_write(swrm,
				SWRM_DP_PORT_CTRL_BANK(i + 1, bank),
				value);
		dev_dbg(swrm->dev, "%s: mport :%d, reg: 0x%x, val: 0x%x\n",
			__func__, i,
			(SWRM_DP_PORT_CTRL_BANK(i + 1, bank)), value);
	}
}

static void swrm_cleanup_disabled_port_reqs(struct swr_master *master)
{
	struct swr_port_info *port_req, *next;
	int i;
	struct swrm_mports *mport;
	struct swr_mstr_ctrl *swrm = swr_get_ctrl_data(master);

	if (!swrm) {
		pr_err("%s: swrm is null\n", __func__);
		return;
	}
	dev_dbg(swrm->dev, "%s: master num_port: %d\n", __func__,
		master->num_port);

	for (i = 0; i < SWR_MSTR_PORT_LEN; i++) {
		mport = &(swrm->mport_cfg[i]);
		list_for_each_entry_safe(port_req, next,
			&mport->port_req_list, list) {
			/* skip ports without new ch req */
			if (port_req->ch_en == port_req->req_ch)
				continue;

			/* remove new ch req's */
			port_req->ch_en = port_req->req_ch;

			/* If no streams enabled on port, remove the port req */
			if (port_req->ch_en == 0) {
				list_del(&port_req->list);
				kfree(port_req);
			}
		}
		/* remove new ch req's on mport */
		mport->ch_en = mport->req_ch;

		if (!(mport->ch_en)) {
			mport->port_en = false;
			master->port_en_mask &= ~i;
		}
	}
}

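/*
 * Build the register/value lists for every enabled master port and its
 * slave port requests (channel enable, sample interval, offsets,
 * hstart/hstop, word length, packing, lane control) for the given bank,
 * then push them out in one bulk write.
 */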
static void swrm_copy_data_port_config(struct swr_master *master, u8 bank)
{
	u32 value, slv_id;
	struct swr_port_info *port_req;
	int i;
	struct swrm_mports *mport;
	u32 reg[SWRM_MAX_PORT_REG];
	u32 val[SWRM_MAX_PORT_REG];
	int len = 0;
	u8 hparams;
	struct swr_mstr_ctrl *swrm = swr_get_ctrl_data(master);

	if (!swrm) {
		pr_err("%s: swrm is null\n", __func__);
		return;
	}

	dev_dbg(swrm->dev, "%s: master num_port: %d\n", __func__,
		master->num_port);

	for (i = 0; i < SWR_MSTR_PORT_LEN; i++) {
		mport = &(swrm->mport_cfg[i]);
		if (!mport->port_en)
			continue;

		list_for_each_entry(port_req, &mport->port_req_list, list) {
			slv_id = port_req->slave_port_id;
			reg[len] = SWRM_CMD_FIFO_WR_CMD;
			val[len++] = SWR_REG_VAL_PACK(port_req->req_ch,
					port_req->dev_num, 0x00,
					SWRS_DP_CHANNEL_ENABLE_BANK(slv_id,
								bank));

			reg[len] = SWRM_CMD_FIFO_WR_CMD;
			val[len++] = SWR_REG_VAL_PACK(mport->sinterval,
					port_req->dev_num, 0x00,
					SWRS_DP_SAMPLE_CONTROL_1_BANK(slv_id,
								bank));

			reg[len] = SWRM_CMD_FIFO_WR_CMD;
			val[len++] = SWR_REG_VAL_PACK(mport->offset1,
					port_req->dev_num, 0x00,
					SWRS_DP_OFFSET_CONTROL_1_BANK(slv_id,
								bank));

			if (mport->offset2 != SWR_INVALID_PARAM) {
				reg[len] = SWRM_CMD_FIFO_WR_CMD;
				val[len++] = SWR_REG_VAL_PACK(mport->offset2,
						port_req->dev_num, 0x00,
						SWRS_DP_OFFSET_CONTROL_2_BANK(
							slv_id, bank));
			}
			if (mport->hstart != SWR_INVALID_PARAM
				&& mport->hstop != SWR_INVALID_PARAM) {
				hparams = (mport->hstart << 4) | mport->hstop;

				reg[len] = SWRM_CMD_FIFO_WR_CMD;
				val[len++] = SWR_REG_VAL_PACK(hparams,
						port_req->dev_num, 0x00,
						SWRS_DP_HCONTROL_BANK(slv_id,
									bank));
			}
			if (mport->word_length != SWR_INVALID_PARAM) {
				reg[len] = SWRM_CMD_FIFO_WR_CMD;
				val[len++] =
					SWR_REG_VAL_PACK(mport->word_length,
						port_req->dev_num, 0x00,
						SWRS_DP_BLOCK_CONTROL_1(slv_id));
			}
			if (mport->blk_pack_mode != SWR_INVALID_PARAM
					&& swrm->master_id != MASTER_ID_WSA) {
				reg[len] = SWRM_CMD_FIFO_WR_CMD;
				val[len++] =
					SWR_REG_VAL_PACK(mport->blk_pack_mode,
						port_req->dev_num, 0x00,
						SWRS_DP_BLOCK_CONTROL_3_BANK(slv_id,
									bank));
			}
			if (mport->blk_grp_count != SWR_INVALID_PARAM) {
				reg[len] = SWRM_CMD_FIFO_WR_CMD;
				val[len++] =
					SWR_REG_VAL_PACK(mport->blk_grp_count,
						port_req->dev_num, 0x00,
						SWRS_DP_BLOCK_CONTROL_2_BANK(slv_id,
									bank));
			}
			if (mport->lane_ctrl != SWR_INVALID_PARAM) {
				reg[len] = SWRM_CMD_FIFO_WR_CMD;
				val[len++] =
					SWR_REG_VAL_PACK(mport->lane_ctrl,
						port_req->dev_num, 0x00,
						SWRS_DP_LANE_CONTROL_BANK(slv_id,
									bank));
			}
			port_req->ch_en = port_req->req_ch;
		}
		value = ((mport->req_ch)
				<< SWRM_DP_PORT_CTRL_EN_CHAN_SHFT);

		if (mport->offset2 != SWR_INVALID_PARAM)
			value |= ((mport->offset2)
					<< SWRM_DP_PORT_CTRL_OFFSET2_SHFT);
		value |= ((mport->offset1)
				<< SWRM_DP_PORT_CTRL_OFFSET1_SHFT);
		value |= mport->sinterval;

		reg[len] = SWRM_DP_PORT_CTRL_BANK(i + 1, bank);
		val[len++] = value;
		dev_dbg(swrm->dev, "%s: mport :%d, reg: 0x%x, val: 0x%x\n",
			__func__, i,
			(SWRM_DP_PORT_CTRL_BANK(i + 1, bank)), value);

		if (mport->lane_ctrl != SWR_INVALID_PARAM) {
			reg[len] = SWRM_DP_PORT_CTRL_2_BANK(i + 1, bank);
			val[len++] = mport->lane_ctrl;
		}
		if (mport->word_length != SWR_INVALID_PARAM) {
			reg[len] = SWRM_DP_BLOCK_CTRL_1(i + 1);
			val[len++] = mport->word_length;
		}

		if (mport->blk_grp_count != SWR_INVALID_PARAM) {
			reg[len] = SWRM_DP_BLOCK_CTRL2_BANK(i + 1, bank);
			val[len++] = mport->blk_grp_count;
		}
		if (mport->hstart != SWR_INVALID_PARAM
				&& mport->hstop != SWR_INVALID_PARAM) {
			reg[len] = SWRM_DP_PORT_HCTRL_BANK(i + 1, bank);
			hparams = (mport->hstop << 4) | mport->hstart;
			val[len++] = hparams;
		} else {
			reg[len] = SWRM_DP_PORT_HCTRL_BANK(i + 1, bank);
			hparams = (SWR_HSTOP_MAX_VAL << 4) | SWR_HSTART_MIN_VAL;
			val[len++] = hparams;
		}
		if (mport->blk_pack_mode != SWR_INVALID_PARAM) {
			reg[len] = SWRM_DP_BLOCK_CTRL3_BANK(i + 1, bank);
			val[len++] = mport->blk_pack_mode;
		}
		mport->ch_en = mport->req_ch;

	}
	swrm_reg_dump(swrm, reg, val, len, __func__);
	swr_master_bulk_write(swrm, reg, val, len);
}

static void swrm_apply_port_config(struct swr_master *master)
{
	u8 bank;
	struct swr_mstr_ctrl *swrm = swr_get_ctrl_data(master);

	if (!swrm) {
		pr_err("%s: Invalid handle to swr controller\n",
			__func__);
		return;
	}

	bank = get_inactive_bank_num(swrm);
	dev_dbg(swrm->dev, "%s: enter bank: %d master_ports: %d\n",
		__func__, bank, master->num_port);

	swrm_cmd_fifo_wr_cmd(swrm, 0x01, 0xF, 0x00,
			SWRS_SCP_HOST_CLK_DIV2_CTL_BANK(bank));

	swrm_copy_data_port_config(master, bank);
}

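/*
 * Enable or disable the slave data path: apply any pending port
 * connect/disconnect requests, pick the frame shape (row x col) based on
 * the master clock, program it into the inactive bank and switch banks.
 */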
static int swrm_slvdev_datapath_control(struct swr_master *master, bool enable)
{
	u8 bank;
	u32 value, n_row, n_col;
	int ret;
	struct swr_mstr_ctrl *swrm = swr_get_ctrl_data(master);
	int mask = (SWRM_MCP_FRAME_CTRL_BANK_ROW_CTRL_BMSK |
		    SWRM_MCP_FRAME_CTRL_BANK_COL_CTRL_BMSK |
		    SWRM_MCP_FRAME_CTRL_BANK_SSP_PERIOD_BMSK);
	u8 inactive_bank;

	if (!swrm) {
		pr_err("%s: swrm is null\n", __func__);
		return -EFAULT;
	}

	mutex_lock(&swrm->mlock);

	/*
	 * During disable if master is already down, which implies an ssr/pdr
	 * scenario, just mark ports as disabled and exit
	 */
	if (swrm->state == SWR_MSTR_SSR && !enable) {
		if (!test_bit(DISABLE_PENDING, &swrm->port_req_pending)) {
			dev_dbg(swrm->dev, "%s:No pending disconn port req\n",
				__func__);
			goto exit;
		}
		clear_bit(DISABLE_PENDING, &swrm->port_req_pending);
		swrm_cleanup_disabled_port_reqs(master);
		if (!swrm_is_port_en(master)) {
			dev_dbg(&master->dev, "%s: pm_runtime auto suspend triggered\n",
				__func__);
			pm_runtime_mark_last_busy(swrm->dev);
			pm_runtime_put_autosuspend(swrm->dev);
		}
		goto exit;
	}
	bank = get_inactive_bank_num(swrm);

	if (enable) {
		if (!test_bit(ENABLE_PENDING, &swrm->port_req_pending)) {
			dev_dbg(swrm->dev, "%s:No pending connect port req\n",
				__func__);
			goto exit;
		}
		clear_bit(ENABLE_PENDING, &swrm->port_req_pending);
		ret = swrm_get_port_config(swrm);
		if (ret) {
			/* cannot accommodate ports */
			swrm_cleanup_disabled_port_reqs(master);
			mutex_unlock(&swrm->mlock);
			return -EINVAL;
		}
		swr_master_write(swrm, SWR_MSTR_RX_SWRM_CPU_INTERRUPT_EN,
				SWRM_INTERRUPT_STATUS_MASK);
		/* apply the new port config */
		swrm_apply_port_config(master);
	} else {
		if (!test_bit(DISABLE_PENDING, &swrm->port_req_pending)) {
			dev_dbg(swrm->dev, "%s:No pending disconn port req\n",
				__func__);
			goto exit;
		}
		clear_bit(DISABLE_PENDING, &swrm->port_req_pending);
		swrm_disable_ports(master, bank);
	}
	dev_dbg(swrm->dev, "%s: enable: %d, cfg_devs: %d\n",
		__func__, enable, swrm->num_cfg_devs);

	if (enable) {
		/* set col = 16 */
		n_col = SWR_MAX_COL;
	} else {
		/*
		 * Do not change to col = 2 if there are still active ports
		 */
		if (!master->num_port)
			n_col = SWR_MIN_COL;
		else
			n_col = SWR_MAX_COL;
	}
	/* Use default 50 * x, frame shape. Change based on mclk */
	if (swrm->mclk_freq == MCLK_FREQ_NATIVE) {
		dev_dbg(swrm->dev, "setting 64 x %d frameshape\n",
			n_col ? 16 : 2);
		n_row = SWR_ROW_64;
	} else {
		dev_dbg(swrm->dev, "setting 50 x %d frameshape\n",
			n_col ? 16 : 2);
		n_row = SWR_ROW_50;
	}
	value = swr_master_read(swrm, SWRM_MCP_FRAME_CTRL_BANK_ADDR(bank));
	value &= (~mask);
	value |= ((n_row << SWRM_MCP_FRAME_CTRL_BANK_ROW_CTRL_SHFT) |
		  (n_col << SWRM_MCP_FRAME_CTRL_BANK_COL_CTRL_SHFT) |
		  (0 << SWRM_MCP_FRAME_CTRL_BANK_SSP_PERIOD_SHFT));
	swr_master_write(swrm, SWRM_MCP_FRAME_CTRL_BANK_ADDR(bank), value);

	dev_dbg(swrm->dev, "%s: regaddr: 0x%x, value: 0x%x\n", __func__,
		SWRM_MCP_FRAME_CTRL_BANK_ADDR(bank), value);

	enable_bank_switch(swrm, bank, n_row, n_col);
	inactive_bank = bank ? 0 : 1;

	if (enable)
		swrm_copy_data_port_config(master, inactive_bank);
	else {
		swrm_disable_ports(master, inactive_bank);
		swrm_cleanup_disabled_port_reqs(master);
	}
	if (!swrm_is_port_en(master)) {
		dev_dbg(&master->dev, "%s: pm_runtime auto suspend triggered\n",
			__func__);
		pm_runtime_mark_last_busy(swrm->dev);
		pm_runtime_put_autosuspend(swrm->dev);
	}
exit:
	mutex_unlock(&swrm->mlock);
	return 0;
}

static int swrm_connect_port(struct swr_master *master,
			struct swr_params *portinfo)
{
	int i;
	struct swr_port_info *port_req;
	int ret = 0;
	struct swr_mstr_ctrl *swrm = swr_get_ctrl_data(master);
	struct swrm_mports *mport;
	u8 mstr_port_id, mstr_ch_msk;

	dev_dbg(&master->dev, "%s: enter\n", __func__);
	if (!portinfo)
		return -EINVAL;

	if (!swrm) {
		dev_err(&master->dev,
			"%s: Invalid handle to swr controller\n",
			__func__);
		return -EINVAL;
	}

	mutex_lock(&swrm->mlock);
	mutex_lock(&swrm->devlock);
	if (!swrm->dev_up) {
		mutex_unlock(&swrm->devlock);
		mutex_unlock(&swrm->mlock);
		return -EINVAL;
	}
	mutex_unlock(&swrm->devlock);
	if (!swrm_is_port_en(master))
		pm_runtime_get_sync(swrm->dev);

	for (i = 0; i < portinfo->num_port; i++) {
		ret = swrm_get_master_port(swrm, &mstr_port_id, &mstr_ch_msk,
						portinfo->port_type[i],
						portinfo->port_id[i]);
		if (ret) {
			dev_err(&master->dev,
				"%s: mstr portid for slv port %d not found\n",
				__func__, portinfo->port_id[i]);
			goto port_fail;
		}

		mport = &(swrm->mport_cfg[mstr_port_id]);
		/* get port req */
		port_req = swrm_get_port_req(mport, portinfo->port_id[i],
					portinfo->dev_num);
		if (!port_req) {
			dev_dbg(&master->dev, "%s: new req:port id %d dev %d\n",
						__func__, portinfo->port_id[i],
						portinfo->dev_num);
			port_req = kzalloc(sizeof(struct swr_port_info),
					GFP_KERNEL);
			if (!port_req) {
				ret = -ENOMEM;
				goto mem_fail;
			}
			port_req->dev_num = portinfo->dev_num;
			port_req->slave_port_id = portinfo->port_id[i];
			port_req->num_ch = portinfo->num_ch[i];
			port_req->ch_rate = portinfo->ch_rate[i];
			port_req->ch_en = 0;
			port_req->master_port_id = mstr_port_id;
			list_add(&port_req->list, &mport->port_req_list);
		}
		port_req->req_ch |= portinfo->ch_en[i];

		dev_dbg(&master->dev,
			"%s: mstr port %d, slv port %d ch_rate %d num_ch %d\n",
			__func__, port_req->master_port_id,
			port_req->slave_port_id, port_req->ch_rate,
			port_req->num_ch);
		/* Put the port req on master port */
		mport = &(swrm->mport_cfg[mstr_port_id]);
		mport->port_en = true;
		mport->req_ch |= mstr_ch_msk;
		master->port_en_mask |= (1 << mstr_port_id);
	}
	master->num_port += portinfo->num_port;
	set_bit(ENABLE_PENDING, &swrm->port_req_pending);
	swr_port_response(master, portinfo->tid);

	mutex_unlock(&swrm->mlock);
	return 0;

port_fail:
mem_fail:
	/* cleanup port reqs in error condition */
	swrm_cleanup_disabled_port_reqs(master);
	mutex_unlock(&swrm->mlock);
	return ret;
}

static int swrm_disconnect_port(struct swr_master *master,
			struct swr_params *portinfo)
{
	int i, ret = 0;
	struct swr_port_info *port_req;
	struct swrm_mports *mport;
	struct swr_mstr_ctrl *swrm = swr_get_ctrl_data(master);
	u8 mstr_port_id, mstr_ch_mask;

	if (!swrm) {
		dev_err(&master->dev,
			"%s: Invalid handle to swr controller\n",
			__func__);
		return -EINVAL;
	}

	if (!portinfo) {
		dev_err(&master->dev, "%s: portinfo is NULL\n", __func__);
		return -EINVAL;
	}
	mutex_lock(&swrm->mlock);

	for (i = 0; i < portinfo->num_port; i++) {
		ret = swrm_get_master_port(swrm, &mstr_port_id, &mstr_ch_mask,
				portinfo->port_type[i], portinfo->port_id[i]);
		if (ret) {
			dev_err(&master->dev,
				"%s: mstr portid for slv port %d not found\n",
				__func__, portinfo->port_id[i]);
			mutex_unlock(&swrm->mlock);
			return -EINVAL;
		}
		mport = &(swrm->mport_cfg[mstr_port_id]);
		/* get port req */
		port_req = swrm_get_port_req(mport, portinfo->port_id[i],
					portinfo->dev_num);

		if (!port_req) {
			dev_err(&master->dev, "%s:port not enabled : port %d\n",
					__func__, portinfo->port_id[i]);
			mutex_unlock(&swrm->mlock);
			return -EINVAL;
		}
		port_req->req_ch &= ~portinfo->ch_en[i];
		mport->req_ch &= ~mstr_ch_mask;
	}
	master->num_port -= portinfo->num_port;
	set_bit(DISABLE_PENDING, &swrm->port_req_pending);
	swr_port_response(master, portinfo->tid);
	mutex_unlock(&swrm->mlock);

	return 0;
}

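/*
 * MCP slave status packs two status bits per device; walk it to find the
 * first device reporting SWR_ALERT and return its device number.
 */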
static int swrm_find_alert_slave(struct swr_mstr_ctrl *swrm,
					int status, u8 *devnum)
{
	int i;
	bool found = false;

	for (i = 0; i < (swrm->master.num_dev + 1); i++) {
		if ((status & SWRM_MCP_SLV_STATUS_MASK) == SWR_ALERT) {
			*devnum = i;
			found = true;
			break;
		}
		status >>= 2;
	}
	if (found)
		return 0;
	else
		return -EINVAL;
}

static void swrm_enable_slave_irq(struct swr_mstr_ctrl *swrm)
{
	int i;
	int status = 0;

	status = swr_master_read(swrm, SWRM_MCP_SLV_STATUS);
	if (!status) {
		dev_dbg_ratelimited(swrm->dev, "%s: slaves status is 0x%x\n",
					__func__, status);
		return;
	}
	dev_dbg(swrm->dev, "%s: slave status: 0x%x\n", __func__, status);
	for (i = 0; i < (swrm->master.num_dev + 1); i++) {
		if (status & SWRM_MCP_SLV_STATUS_MASK)
			swrm_cmd_fifo_wr_cmd(swrm, 0x4, i, 0x0,
					SWRS_SCP_INT_STATUS_MASK_1);
		status >>= 2;
	}
}

static int swrm_check_slave_change_status(struct swr_mstr_ctrl *swrm,
					int status, u8 *devnum)
{
	int i;
	int new_sts = status;
	int ret = SWR_NOT_PRESENT;

	if (status != swrm->slave_status) {
		for (i = 0; i < (swrm->master.num_dev + 1); i++) {
			if ((status & SWRM_MCP_SLV_STATUS_MASK) !=
			    (swrm->slave_status & SWRM_MCP_SLV_STATUS_MASK)) {
				ret = (status & SWRM_MCP_SLV_STATUS_MASK);
				*devnum = i;
				break;
			}
			status >>= 2;
			swrm->slave_status >>= 2;
		}
		swrm->slave_status = new_sts;
	}
	return ret;
}

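/*
 * Top-level interrupt handler: hold the wakeup and clock votes, then
 * service every bit set in the masked interrupt status, re-reading the
 * status after clearing it in case new interrupts arrived while handling.
 */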
1405static irqreturn_t swr_mstr_interrupt(int irq, void *dev)
1406{
1407 struct swr_mstr_ctrl *swrm = dev;
Ramprasad Katkam7e354782018-11-21 15:52:54 +05301408 u32 value, intr_sts, intr_sts_masked;
Ramprasad Katkam1f221262018-08-23 15:01:22 +05301409 u32 temp = 0;
1410 u32 status, chg_sts, i;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301411 u8 devnum = 0;
1412 int ret = IRQ_HANDLED;
1413 struct swr_device *swr_dev;
1414 struct swr_master *mstr = &swrm->master;
1415
Ramprasad Katkam57349872018-11-11 18:34:57 +05301416 if (unlikely(swrm_lock_sleep(swrm) == false)) {
1417 dev_err(swrm->dev, "%s Failed to hold suspend\n", __func__);
1418 return IRQ_NONE;
1419 }
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301420
1421 mutex_lock(&swrm->reslock);
Aditya Bavanarif4a471d2019-02-19 17:57:12 +05301422 if (swrm_clk_request(swrm, true)) {
Ramprasad Katkam14efed62019-03-07 13:16:50 +05301423 dev_err_ratelimited(swrm->dev, "%s:clk request failed\n",
1424 __func__);
Aditya Bavanarif4a471d2019-02-19 17:57:12 +05301425 mutex_unlock(&swrm->reslock);
1426 goto exit;
1427 }
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301428 mutex_unlock(&swrm->reslock);
1429
1430 intr_sts = swr_master_read(swrm, SWRM_INTERRUPT_STATUS);
Ramprasad Katkam7e354782018-11-21 15:52:54 +05301431 intr_sts_masked = intr_sts & swrm->intr_mask;
Ramprasad Katkam83303512018-10-11 17:34:22 +05301432handle_irq:
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301433 for (i = 0; i < SWRM_INTERRUPT_MAX; i++) {
Ramprasad Katkam7e354782018-11-21 15:52:54 +05301434 value = intr_sts_masked & (1 << i);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301435 if (!value)
1436 continue;
1437
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301438 switch (value) {
1439 case SWRM_INTERRUPT_STATUS_SLAVE_PEND_IRQ:
1440 dev_dbg(swrm->dev, "Trigger irq to slave device\n");
1441 status = swr_master_read(swrm, SWRM_MCP_SLV_STATUS);
Ramprasad Katkam1f221262018-08-23 15:01:22 +05301442 ret = swrm_find_alert_slave(swrm, status, &devnum);
1443 if (ret) {
Ramprasad Katkam18bc8e22018-10-25 15:04:24 +05301444 dev_err_ratelimited(swrm->dev,
1445 "no slave alert found.spurious interrupt\n");
Ramprasad Katkam48b49b22018-10-01 20:12:46 +05301446 break;
Ramprasad Katkam1f221262018-08-23 15:01:22 +05301447 }
Ramprasad Katkam1f221262018-08-23 15:01:22 +05301448 swrm_cmd_fifo_rd_cmd(swrm, &temp, devnum, 0x0,
1449 SWRS_SCP_INT_STATUS_CLEAR_1, 1);
1450 swrm_cmd_fifo_wr_cmd(swrm, 0x4, devnum, 0x0,
1451 SWRS_SCP_INT_STATUS_CLEAR_1);
1452 swrm_cmd_fifo_wr_cmd(swrm, 0x0, devnum, 0x0,
1453 SWRS_SCP_INT_STATUS_CLEAR_1);
Ramprasad Katkam62d6d762018-09-20 17:50:28 +05301454
1455
1456 list_for_each_entry(swr_dev, &mstr->devices, dev_list) {
1457 if (swr_dev->dev_num != devnum)
1458 continue;
1459 if (swr_dev->slave_irq) {
1460 do {
1461 handle_nested_irq(
1462 irq_find_mapping(
1463 swr_dev->slave_irq, 0));
1464 } while (swr_dev->slave_irq_pending);
1465 }
1466
1467 }
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301468 break;
1469 case SWRM_INTERRUPT_STATUS_NEW_SLAVE_ATTACHED:
1470 dev_dbg(swrm->dev, "SWR new slave attached\n");
1471 break;
1472 case SWRM_INTERRUPT_STATUS_CHANGE_ENUM_SLAVE_STATUS:
1473 status = swr_master_read(swrm, SWRM_MCP_SLV_STATUS);
1474 if (status == swrm->slave_status) {
1475 dev_dbg(swrm->dev,
1476 "%s: No change in slave status: %d\n",
1477 __func__, status);
1478 break;
1479 }
1480 chg_sts = swrm_check_slave_change_status(swrm, status,
1481 &devnum);
1482 switch (chg_sts) {
1483 case SWR_NOT_PRESENT:
1484 dev_dbg(swrm->dev, "device %d got detached\n",
1485 devnum);
1486 break;
1487 case SWR_ATTACHED_OK:
1488 dev_dbg(swrm->dev, "device %d got attached\n",
1489 devnum);
Ramprasad Katkamdebe8932018-09-25 18:08:18 +05301490 /* enable host irq from slave device*/
1491 swrm_cmd_fifo_wr_cmd(swrm, 0xFF, devnum, 0x0,
1492 SWRS_SCP_INT_STATUS_CLEAR_1);
1493 swrm_cmd_fifo_wr_cmd(swrm, 0x4, devnum, 0x0,
1494 SWRS_SCP_INT_STATUS_MASK_1);
1495
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301496 break;
1497 case SWR_ALERT:
1498 dev_dbg(swrm->dev,
1499 "device %d has pending interrupt\n",
1500 devnum);
1501 break;
1502 }
1503 break;
1504 case SWRM_INTERRUPT_STATUS_MASTER_CLASH_DET:
1505 dev_err_ratelimited(swrm->dev,
1506 "SWR bus clsh detected\n");
1507 break;
1508 case SWRM_INTERRUPT_STATUS_RD_FIFO_OVERFLOW:
1509 dev_dbg(swrm->dev, "SWR read FIFO overflow\n");
1510 break;
1511 case SWRM_INTERRUPT_STATUS_RD_FIFO_UNDERFLOW:
1512 dev_dbg(swrm->dev, "SWR read FIFO underflow\n");
1513 break;
1514 case SWRM_INTERRUPT_STATUS_WR_CMD_FIFO_OVERFLOW:
1515 dev_dbg(swrm->dev, "SWR write FIFO overflow\n");
1516 break;
1517 case SWRM_INTERRUPT_STATUS_CMD_ERROR:
1518 value = swr_master_read(swrm, SWRM_CMD_FIFO_STATUS);
1519 dev_err_ratelimited(swrm->dev,
1520 "SWR CMD error, fifo status 0x%x, flushing fifo\n",
1521 value);
1522 swr_master_write(swrm, SWRM_CMD_FIFO_CMD, 0x1);
1523 break;
1524 case SWRM_INTERRUPT_STATUS_DOUT_PORT_COLLISION:
Ramprasad Katkam18bc8e22018-10-25 15:04:24 +05301525 dev_err_ratelimited(swrm->dev, "SWR Port collision detected\n");
Ramprasad Katkam7e354782018-11-21 15:52:54 +05301526 swrm->intr_mask &= ~SWRM_INTERRUPT_STATUS_DOUT_PORT_COLLISION;
Ramprasad Katkam18bc8e22018-10-25 15:04:24 +05301527 swr_master_write(swrm,
Ramprasad Katkam7e354782018-11-21 15:52:54 +05301528 SWR_MSTR_RX_SWRM_CPU_INTERRUPT_EN, swrm->intr_mask);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301529 break;
1530 case SWRM_INTERRUPT_STATUS_READ_EN_RD_VALID_MISMATCH:
1531 dev_dbg(swrm->dev, "SWR read enable valid mismatch\n");
Ramprasad Katkam7e354782018-11-21 15:52:54 +05301532 swrm->intr_mask &=
Ramprasad Katkam18bc8e22018-10-25 15:04:24 +05301533 ~SWRM_INTERRUPT_STATUS_READ_EN_RD_VALID_MISMATCH;
1534 swr_master_write(swrm,
Ramprasad Katkam7e354782018-11-21 15:52:54 +05301535 SWR_MSTR_RX_SWRM_CPU_INTERRUPT_EN, swrm->intr_mask);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301536 break;
1537 case SWRM_INTERRUPT_STATUS_SPECIAL_CMD_ID_FINISHED:
1538 complete(&swrm->broadcast);
1539 dev_dbg(swrm->dev, "SWR cmd id finished\n");
1540 break;
1541 case SWRM_INTERRUPT_STATUS_NEW_SLAVE_AUTO_ENUM_FINISHED:
1542 break;
1543 case SWRM_INTERRUPT_STATUS_AUTO_ENUM_FAILED:
1544 break;
1545 case SWRM_INTERRUPT_STATUS_AUTO_ENUM_TABLE_IS_FULL:
1546 break;
1547 case SWRM_INTERRUPT_STATUS_BUS_RESET_FINISHED:
1548 complete(&swrm->reset);
1549 break;
1550 case SWRM_INTERRUPT_STATUS_CLK_STOP_FINISHED:
1551 break;
1552 default:
1553 dev_err_ratelimited(swrm->dev,
1554 "SWR unknown interrupt\n");
1555 ret = IRQ_NONE;
1556 break;
1557 }
1558 }
Ramprasad Katkam1f221262018-08-23 15:01:22 +05301559 swr_master_write(swrm, SWRM_INTERRUPT_CLEAR, intr_sts);
1560 swr_master_write(swrm, SWRM_INTERRUPT_CLEAR, 0x0);
Ramprasad Katkam83303512018-10-11 17:34:22 +05301561
1562 intr_sts = swr_master_read(swrm, SWRM_INTERRUPT_STATUS);
Ramprasad Katkam7e354782018-11-21 15:52:54 +05301563 intr_sts_masked = intr_sts & swrm->intr_mask;
Ramprasad Katkam83303512018-10-11 17:34:22 +05301564
Ramprasad Katkam7e354782018-11-21 15:52:54 +05301565 if (intr_sts_masked) {
Ramprasad Katkam83303512018-10-11 17:34:22 +05301566 dev_dbg(swrm->dev, "%s: new interrupt received\n", __func__);
1567 goto handle_irq;
1568 }
1569
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301570 mutex_lock(&swrm->reslock);
1571 swrm_clk_request(swrm, false);
1572 mutex_unlock(&swrm->reslock);
Aditya Bavanarif4a471d2019-02-19 17:57:12 +05301573exit:
Ramprasad Katkam57349872018-11-11 18:34:57 +05301574 swrm_unlock_sleep(swrm);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301575 return ret;
1576}
1577
Sudheer Papothid19d0c52019-02-23 05:41:39 +05301578static irqreturn_t swr_mstr_interrupt_v2(int irq, void *dev)
1579{
1580 struct swr_mstr_ctrl *swrm = dev;
1581 u32 value, intr_sts, intr_sts_masked;
1582 u32 temp = 0;
1583 u32 status, chg_sts, i;
1584 u8 devnum = 0;
1585 int ret = IRQ_HANDLED;
1586 struct swr_device *swr_dev;
1587 struct swr_master *mstr = &swrm->master;
1588
1589 if (unlikely(swrm_lock_sleep(swrm) == false)) {
1590 dev_err(swrm->dev, "%s Failed to hold suspend\n", __func__);
1591 return IRQ_NONE;
1592 }
1593
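/* Vote for LPASS core/audio HW and enable the bus clock before touching SWR registers */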
1594 mutex_lock(&swrm->reslock);
Sudheer Papothi384addd2019-06-14 02:26:52 +05301595 if (swrm_request_hw_vote(swrm, LPASS_HW_CORE, true)) {
1596 ret = IRQ_NONE;
1597 goto exit;
1598 }
1599 if (swrm_request_hw_vote(swrm, LPASS_AUDIO_CORE, true)) {
1600 ret = IRQ_NONE;
Sudheer Papothi06f43412019-07-09 03:32:54 +05301601 goto err_audio_hw_vote;
Karthikeyan Mani035c50b2019-05-02 13:35:01 -07001602 }
Sudheer Papothid19d0c52019-02-23 05:41:39 +05301603 swrm_clk_request(swrm, true);
1604 mutex_unlock(&swrm->reslock);
1605
1606 intr_sts = swr_master_read(swrm, SWRM_INTERRUPT_STATUS);
1607 intr_sts_masked = intr_sts & swrm->intr_mask;
Sudheer Papothi06f43412019-07-09 03:32:54 +05301608
1609 dev_dbg(swrm->dev, "%s: status: 0x%x\n", __func__, intr_sts_masked);
Sudheer Papothid19d0c52019-02-23 05:41:39 +05301610handle_irq:
1611 for (i = 0; i < SWRM_INTERRUPT_MAX; i++) {
1612 value = intr_sts_masked & (1 << i);
1613 if (!value)
1614 continue;
1615
1616 switch (value) {
1617 case SWRM_INTERRUPT_STATUS_SLAVE_PEND_IRQ:
1618 dev_dbg(swrm->dev, "%s: Trigger irq to slave device\n",
1619 __func__);
1620 status = swr_master_read(swrm, SWRM_MCP_SLV_STATUS);
1621 ret = swrm_find_alert_slave(swrm, status, &devnum);
1622 if (ret) {
1623 dev_err_ratelimited(swrm->dev,
1624 "%s: no slave alert found.spurious interrupt\n",
1625 __func__);
1626 break;
1627 }
1628 swrm_cmd_fifo_rd_cmd(swrm, &temp, devnum, 0x0,
1629 SWRS_SCP_INT_STATUS_CLEAR_1, 1);
1630 swrm_cmd_fifo_wr_cmd(swrm, 0x4, devnum, 0x0,
1631 SWRS_SCP_INT_STATUS_CLEAR_1);
1632 swrm_cmd_fifo_wr_cmd(swrm, 0x0, devnum, 0x0,
1633 SWRS_SCP_INT_STATUS_CLEAR_1);
1634
1635
1636 list_for_each_entry(swr_dev, &mstr->devices, dev_list) {
1637 if (swr_dev->dev_num != devnum)
1638 continue;
1639 if (swr_dev->slave_irq) {
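/* Keep dispatching the nested slave IRQ until the slave reports no more pending interrupts */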
1640 do {
1641 handle_nested_irq(
1642 irq_find_mapping(
1643 swr_dev->slave_irq, 0));
1644 } while (swr_dev->slave_irq_pending);
1645 }
1646
1647 }
1648 break;
1649 case SWRM_INTERRUPT_STATUS_NEW_SLAVE_ATTACHED:
1650 dev_dbg(swrm->dev, "%s: SWR new slave attached\n",
1651 __func__);
1652 break;
1653 case SWRM_INTERRUPT_STATUS_CHANGE_ENUM_SLAVE_STATUS:
1654 status = swr_master_read(swrm, SWRM_MCP_SLV_STATUS);
1655 if (status == swrm->slave_status) {
1656 dev_dbg(swrm->dev,
1657 "%s: No change in slave status: %d\n",
1658 __func__, status);
1659 break;
1660 }
1661 chg_sts = swrm_check_slave_change_status(swrm, status,
1662 &devnum);
1663 switch (chg_sts) {
1664 case SWR_NOT_PRESENT:
1665 dev_dbg(swrm->dev,
1666 "%s: device %d got detached\n",
1667 __func__, devnum);
1668 break;
1669 case SWR_ATTACHED_OK:
1670 dev_dbg(swrm->dev,
1671 "%s: device %d got attached\n",
1672 __func__, devnum);
1673 /* enable host irq from slave device */
1674 swrm_cmd_fifo_wr_cmd(swrm, 0xFF, devnum, 0x0,
1675 SWRS_SCP_INT_STATUS_CLEAR_1);
1676 swrm_cmd_fifo_wr_cmd(swrm, 0x4, devnum, 0x0,
1677 SWRS_SCP_INT_STATUS_MASK_1);
1678
1679 break;
1680 case SWR_ALERT:
1681 dev_dbg(swrm->dev,
1682 "%s: device %d has pending interrupt\n",
1683 __func__, devnum);
1684 break;
1685 }
1686 break;
1687 case SWRM_INTERRUPT_STATUS_MASTER_CLASH_DET:
1688 dev_err_ratelimited(swrm->dev,
1689 "%s: SWR bus clsh detected\n",
1690 __func__);
1691 break;
1692 case SWRM_INTERRUPT_STATUS_RD_FIFO_OVERFLOW:
1693 dev_dbg(swrm->dev, "%s: SWR read FIFO overflow\n",
1694 __func__);
1695 break;
1696 case SWRM_INTERRUPT_STATUS_RD_FIFO_UNDERFLOW:
1697 dev_dbg(swrm->dev, "%s: SWR read FIFO underflow\n",
1698 __func__);
1699 break;
1700 case SWRM_INTERRUPT_STATUS_WR_CMD_FIFO_OVERFLOW:
1701 dev_dbg(swrm->dev, "%s: SWR write FIFO overflow\n",
1702 __func__);
1703 break;
1704 case SWRM_INTERRUPT_STATUS_CMD_ERROR:
1705 value = swr_master_read(swrm, SWRM_CMD_FIFO_STATUS);
1706 dev_err_ratelimited(swrm->dev,
1707 "%s: SWR CMD error, fifo status 0x%x, flushing fifo\n",
1708 __func__, value);
1709 swr_master_write(swrm, SWRM_CMD_FIFO_CMD, 0x1);
1710 break;
1711 case SWRM_INTERRUPT_STATUS_DOUT_PORT_COLLISION:
1712 dev_err_ratelimited(swrm->dev,
1713 "%s: SWR Port collision detected\n",
1714 __func__);
1715 swrm->intr_mask &= ~SWRM_INTERRUPT_STATUS_DOUT_PORT_COLLISION;
1716 swr_master_write(swrm,
1717 SWR_MSTR_RX_SWRM_CPU_INTERRUPT_EN, swrm->intr_mask);
1718 break;
1719 case SWRM_INTERRUPT_STATUS_READ_EN_RD_VALID_MISMATCH:
1720 dev_dbg(swrm->dev,
1721 "%s: SWR read enable valid mismatch\n",
1722 __func__);
1723 swrm->intr_mask &=
1724 ~SWRM_INTERRUPT_STATUS_READ_EN_RD_VALID_MISMATCH;
1725 swr_master_write(swrm,
1726 SWR_MSTR_RX_SWRM_CPU_INTERRUPT_EN, swrm->intr_mask);
1727 break;
1728 case SWRM_INTERRUPT_STATUS_SPECIAL_CMD_ID_FINISHED:
1729 complete(&swrm->broadcast);
1730 dev_dbg(swrm->dev, "%s: SWR cmd id finished\n",
1731 __func__);
1732 break;
1733 case SWRM_INTERRUPT_STATUS_AUTO_ENUM_FAILED_V2:
1734 break;
1735 case SWRM_INTERRUPT_STATUS_AUTO_ENUM_TABLE_IS_FULL_V2:
1736 break;
1737 case SWRM_INTERRUPT_STATUS_BUS_RESET_FINISHED_V2:
1738 break;
1739 case SWRM_INTERRUPT_STATUS_CLK_STOP_FINISHED_V2:
1740 break;
1741 case SWRM_INTERRUPT_STATUS_EXT_CLK_STOP_WAKEUP:
1742 if (swrm->state == SWR_MSTR_UP)
1743 dev_dbg(swrm->dev,
1744 "%s:SWR Master is already up\n",
1745 __func__);
1746 else
1747 dev_err_ratelimited(swrm->dev,
1748 "%s: SWR wokeup during clock stop\n",
1749 __func__);
Sudheer Papothi07d5afc2019-07-17 06:25:45 +05301750 /* It might be possible the slave device gets reset
1751 * and slave interrupt gets missed. So re-enable
1752 * Host IRQ and process slave pending
1753 * interrupts, if any.
1754 */
1755 swrm_enable_slave_irq(swrm);
Sudheer Papothid19d0c52019-02-23 05:41:39 +05301756 break;
1757 default:
1758 dev_err_ratelimited(swrm->dev,
1759 "%s: SWR unknown interrupt value: %d\n",
1760 __func__, value);
1761 ret = IRQ_NONE;
1762 break;
1763 }
1764 }
1765 swr_master_write(swrm, SWRM_INTERRUPT_CLEAR, intr_sts);
1766 swr_master_write(swrm, SWRM_INTERRUPT_CLEAR, 0x0);
1767
1768 intr_sts = swr_master_read(swrm, SWRM_INTERRUPT_STATUS);
1769 intr_sts_masked = intr_sts & swrm->intr_mask;
1770
1771 if (intr_sts_masked) {
Sudheer Papothi07d5afc2019-07-17 06:25:45 +05301772 dev_dbg(swrm->dev, "%s: new interrupt received 0x%x\n",
1773 __func__, intr_sts_masked);
Sudheer Papothid19d0c52019-02-23 05:41:39 +05301774 goto handle_irq;
1775 }
1776
1777 mutex_lock(&swrm->reslock);
1778 swrm_clk_request(swrm, false);
Sudheer Papothi384addd2019-06-14 02:26:52 +05301779 swrm_request_hw_vote(swrm, LPASS_AUDIO_CORE, false);
Sudheer Papothi06f43412019-07-09 03:32:54 +05301780
1781err_audio_hw_vote:
Sudheer Papothi384addd2019-06-14 02:26:52 +05301782 swrm_request_hw_vote(swrm, LPASS_HW_CORE, false);
Karthikeyan Mani035c50b2019-05-02 13:35:01 -07001783exit:
Sudheer Papothid19d0c52019-02-23 05:41:39 +05301784 mutex_unlock(&swrm->reslock);
1785 swrm_unlock_sleep(swrm);
1786 return ret;
1787}
1788
Aditya Bavanaric034fad2018-11-12 22:55:11 +05301789static irqreturn_t swrm_wakeup_interrupt(int irq, void *dev)
1790{
1791 struct swr_mstr_ctrl *swrm = dev;
1792 int ret = IRQ_HANDLED;
1793
1794 if (!swrm || !(swrm->dev)) {
1795 pr_err("%s: swrm or dev is null\n", __func__);
1796 return IRQ_NONE;
1797 }
1798 mutex_lock(&swrm->devlock);
1799 if (!swrm->dev_up) {
1800 if (swrm->wake_irq > 0)
1801 disable_irq_nosync(swrm->wake_irq);
1802 mutex_unlock(&swrm->devlock);
1803 return ret;
1804 }
1805 mutex_unlock(&swrm->devlock);
Ramprasad Katkam44b7a962018-12-20 15:08:44 +05301806 if (unlikely(swrm_lock_sleep(swrm) == false)) {
1807 dev_err(swrm->dev, "%s Failed to hold suspend\n", __func__);
1808 goto exit;
1809 }
Aditya Bavanaric034fad2018-11-12 22:55:11 +05301810 if (swrm->wake_irq > 0)
1811 disable_irq_nosync(swrm->wake_irq);
1812 pm_runtime_get_sync(swrm->dev);
1813 pm_runtime_mark_last_busy(swrm->dev);
1814 pm_runtime_put_autosuspend(swrm->dev);
Ramprasad Katkam44b7a962018-12-20 15:08:44 +05301815 swrm_unlock_sleep(swrm);
1816exit:
Aditya Bavanaric034fad2018-11-12 22:55:11 +05301817 return ret;
1818}
1819
Laxminath Kasamf0128ef2018-08-31 15:15:09 +05301820static void swrm_wakeup_work(struct work_struct *work)
1821{
1822 struct swr_mstr_ctrl *swrm;
1823
1824 swrm = container_of(work, struct swr_mstr_ctrl,
1825 wakeup_work);
1826 if (!swrm || !(swrm->dev)) {
1827 pr_err("%s: swrm or dev is null\n", __func__);
1828 return;
1829 }
Ramprasad Katkam2a799b42018-10-04 20:23:28 +05301830
1831 mutex_lock(&swrm->devlock);
1832 if (!swrm->dev_up) {
1833 mutex_unlock(&swrm->devlock);
Ramprasad Katkam57349872018-11-11 18:34:57 +05301834 goto exit;
Ramprasad Katkam2a799b42018-10-04 20:23:28 +05301835 }
1836 mutex_unlock(&swrm->devlock);
Ramprasad Katkam57349872018-11-11 18:34:57 +05301837 if (unlikely(swrm_lock_sleep(swrm) == false)) {
1838 dev_err(swrm->dev, "%s Failed to hold suspend\n", __func__);
1839 goto exit;
1840 }
Laxminath Kasamf0128ef2018-08-31 15:15:09 +05301841 pm_runtime_get_sync(swrm->dev);
Laxminath Kasamf0128ef2018-08-31 15:15:09 +05301842 pm_runtime_mark_last_busy(swrm->dev);
1843 pm_runtime_put_autosuspend(swrm->dev);
Ramprasad Katkam57349872018-11-11 18:34:57 +05301844 swrm_unlock_sleep(swrm);
1845exit:
1846 pm_relax(swrm->dev);
Laxminath Kasamf0128ef2018-08-31 15:15:09 +05301847}
1848
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301849static int swrm_get_device_status(struct swr_mstr_ctrl *swrm, u8 devnum)
1850{
1851 u32 val;
1852
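/* Each enumerated device has a 2-bit status field in SWRM_MCP_SLV_STATUS */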
1853 swrm->slave_status = swr_master_read(swrm, SWRM_MCP_SLV_STATUS);
1854 val = (swrm->slave_status >> (devnum * 2));
1855 val &= SWRM_MCP_SLV_STATUS_MASK;
1856 return val;
1857}
1858
1859static int swrm_get_logical_dev_num(struct swr_master *mstr, u64 dev_id,
1860 u8 *dev_num)
1861{
1862 int i;
1863 u64 id = 0;
1864 int ret = -EINVAL;
1865 struct swr_mstr_ctrl *swrm = swr_get_ctrl_data(mstr);
1866 struct swr_device *swr_dev;
1867 u32 num_dev = 0;
1868
1869 if (!swrm) {
1870 pr_err("%s: Invalid handle to swr controller\n",
1871 __func__);
1872 return ret;
1873 }
1874 if (swrm->num_dev)
1875 num_dev = swrm->num_dev;
1876 else
1877 num_dev = mstr->num_dev;
Ramprasad Katkam2a799b42018-10-04 20:23:28 +05301878
1879 mutex_lock(&swrm->devlock);
1880 if (!swrm->dev_up) {
1881 mutex_unlock(&swrm->devlock);
1882 return ret;
1883 }
1884 mutex_unlock(&swrm->devlock);
1885
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301886 pm_runtime_get_sync(swrm->dev);
1887 for (i = 1; i < (num_dev + 1); i++) {
1888 id = ((u64)(swr_master_read(swrm,
1889 SWRM_ENUMERATOR_SLAVE_DEV_ID_2(i))) << 32);
1890 id |= swr_master_read(swrm,
1891 SWRM_ENUMERATOR_SLAVE_DEV_ID_1(i));
Ramprasad Katkam1f221262018-08-23 15:01:22 +05301892
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301893 /*
1894 * As pm_runtime_get_sync() brings all slaves out of reset
1895 * update logical device number for all slaves.
1896 */
1897 list_for_each_entry(swr_dev, &mstr->devices, dev_list) {
1898 if (swr_dev->addr == (id & SWR_DEV_ID_MASK)) {
1899 u32 status = swrm_get_device_status(swrm, i);
1900
1901 if ((status == 0x01) || (status == 0x02)) {
1902 swr_dev->dev_num = i;
1903 if ((id & SWR_DEV_ID_MASK) == dev_id) {
1904 *dev_num = i;
1905 ret = 0;
1906 }
1907 dev_dbg(swrm->dev,
1908 "%s: devnum %d is assigned for dev addr %lx\n",
1909 __func__, i, swr_dev->addr);
1910 }
1911 }
1912 }
1913 }
1914 if (ret)
1915 dev_err(swrm->dev, "%s: device 0x%llx is not ready\n",
1916 __func__, dev_id);
1917
1918 pm_runtime_mark_last_busy(swrm->dev);
1919 pm_runtime_put_autosuspend(swrm->dev);
1920 return ret;
1921}
Sudheer Papothi6abd2de2018-09-05 05:57:04 +05301922
1923static void swrm_device_wakeup_vote(struct swr_master *mstr)
1924{
1925 struct swr_mstr_ctrl *swrm = swr_get_ctrl_data(mstr);
1926
1927 if (!swrm) {
1928 pr_err("%s: Invalid handle to swr controller\n",
1929 __func__);
1930 return;
1931 }
Ramprasad Katkam57349872018-11-11 18:34:57 +05301932 if (unlikely(swrm_lock_sleep(swrm) == false)) {
1933 dev_err(swrm->dev, "%s Failed to hold suspend\n", __func__);
1934 return;
1935 }
Sudheer Papothi384addd2019-06-14 02:26:52 +05301936 if (++swrm->hw_core_clk_en == 1)
1937 if (swrm_request_hw_vote(swrm, LPASS_HW_CORE, true)) {
1938 dev_err(swrm->dev, "%s:lpass core hw enable failed\n",
1939 __func__);
1940 --swrm->hw_core_clk_en;
1941 }
1942 if (++swrm->aud_core_clk_en == 1)
1943 if (swrm_request_hw_vote(swrm, LPASS_AUDIO_CORE, true)) {
1944 dev_err(swrm->dev, "%s:lpass audio hw enable failed\n",
1945 __func__);
1946 --swrm->aud_core_clk_en;
1947 }
1948 dev_dbg(swrm->dev, "%s: hw_clk_en: %d audio_core_clk_en: %d\n",
1949 __func__, swrm->hw_core_clk_en, swrm->aud_core_clk_en);
Sudheer Papothi6abd2de2018-09-05 05:57:04 +05301950 pm_runtime_get_sync(swrm->dev);
1951}
1952
1953static void swrm_device_wakeup_unvote(struct swr_master *mstr)
1954{
1955 struct swr_mstr_ctrl *swrm = swr_get_ctrl_data(mstr);
1956
1957 if (!swrm) {
1958 pr_err("%s: Invalid handle to swr controller\n",
1959 __func__);
1960 return;
1961 }
1962 pm_runtime_mark_last_busy(swrm->dev);
1963 pm_runtime_put_autosuspend(swrm->dev);
Sudheer Papothi384addd2019-06-14 02:26:52 +05301964 dev_dbg(swrm->dev, "%s: hw_clk_en: %d audio_core_clk_en: %d\n",
1965 __func__, swrm->hw_core_clk_en, swrm->aud_core_clk_en);
1966
1967 --swrm->aud_core_clk_en;
1968 if (swrm->aud_core_clk_en < 0)
1969 swrm->aud_core_clk_en = 0;
1970 else if (swrm->aud_core_clk_en == 0)
1971 swrm_request_hw_vote(swrm, LPASS_AUDIO_CORE, false);
1972
1973 --swrm->hw_core_clk_en;
1974 if (swrm->hw_core_clk_en < 0)
1975 swrm->hw_core_clk_en = 0;
1976 else if (swrm->hw_core_clk_en == 0)
1977 swrm_request_hw_vote(swrm, LPASS_HW_CORE, false);
1978
Ramprasad Katkam57349872018-11-11 18:34:57 +05301979 swrm_unlock_sleep(swrm);
Sudheer Papothi6abd2de2018-09-05 05:57:04 +05301980}
1981
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301982static int swrm_master_init(struct swr_mstr_ctrl *swrm)
1983{
1984 int ret = 0;
1985 u32 val;
Ramprasad Katkam14f47cc2018-07-25 17:20:18 +05301986 u8 row_ctrl = SWR_ROW_50;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05301987 u8 col_ctrl = SWR_MIN_COL;
1988 u8 ssp_period = 1;
1989 u8 retry_cmd_num = 3;
1990 u32 reg[SWRM_MAX_INIT_REG];
1991 u32 value[SWRM_MAX_INIT_REG];
1992 int len = 0;
1993
1994 /* Clear Rows and Cols */
1995 val = ((row_ctrl << SWRM_MCP_FRAME_CTRL_BANK_ROW_CTRL_SHFT) |
1996 (col_ctrl << SWRM_MCP_FRAME_CTRL_BANK_COL_CTRL_SHFT) |
1997 (ssp_period << SWRM_MCP_FRAME_CTRL_BANK_SSP_PERIOD_SHFT));
1998
1999 reg[len] = SWRM_MCP_FRAME_CTRL_BANK_ADDR(0);
2000 value[len++] = val;
2001
2002 /* Set Auto enumeration flag */
2003 reg[len] = SWRM_ENUMERATOR_CFG_ADDR;
2004 value[len++] = 1;
2005
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302006 /* Configure No pings */
2007 val = swr_master_read(swrm, SWRM_MCP_CFG_ADDR);
2008 val &= ~SWRM_MCP_CFG_MAX_NUM_OF_CMD_NO_PINGS_BMSK;
2009 val |= (0x1f << SWRM_MCP_CFG_MAX_NUM_OF_CMD_NO_PINGS_SHFT);
2010 reg[len] = SWRM_MCP_CFG_ADDR;
2011 value[len++] = val;
2012
2013 /* Configure number of retries of a read/write cmd */
2014 val = (retry_cmd_num << SWRM_CMD_FIFO_CFG_NUM_OF_CMD_RETRY_SHFT);
2015 reg[len] = SWRM_CMD_FIFO_CFG_ADDR;
2016 value[len++] = val;
2017
Ramprasad Katkam1f221262018-08-23 15:01:22 +05302018 reg[len] = SWRM_MCP_BUS_CTRL_ADDR;
2019 value[len++] = 0x2;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302020
Ramprasad Katkam83303512018-10-11 17:34:22 +05302021 /* Set IRQ to PULSE */
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302022 reg[len] = SWRM_COMP_CFG_ADDR;
Ramprasad Katkam83303512018-10-11 17:34:22 +05302023 value[len++] = 0x02;
2024
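/* Enable the master block while keeping the IRQ pulse setting (bit 0 assumed to be the enable bit) */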
2025 reg[len] = SWRM_COMP_CFG_ADDR;
2026 value[len++] = 0x03;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302027
2028 reg[len] = SWRM_INTERRUPT_CLEAR;
Ramprasad Katkam1f221262018-08-23 15:01:22 +05302029 value[len++] = 0xFFFFFFFF;
2030
Ramprasad Katkam7e354782018-11-21 15:52:54 +05302031 swrm->intr_mask = SWRM_INTERRUPT_STATUS_MASK;
Ramprasad Katkam1f221262018-08-23 15:01:22 +05302032 /* Mask soundwire interrupts */
2033 reg[len] = SWRM_INTERRUPT_MASK_ADDR;
Ramprasad Katkam7e354782018-11-21 15:52:54 +05302034 value[len++] = swrm->intr_mask;
Ramprasad Katkam1f221262018-08-23 15:01:22 +05302035
2036 reg[len] = SWR_MSTR_RX_SWRM_CPU_INTERRUPT_EN;
Ramprasad Katkam7e354782018-11-21 15:52:54 +05302037 value[len++] = swrm->intr_mask;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302038
2039 swr_master_bulk_write(swrm, reg, value, len);
2040
Sudheer Papothi63f48152018-11-15 01:08:03 +05302041 /*
2042 * For SWR master version 1.5.1, continue
2043 * execute on command ignore.
2044 */
2045 if (swrm->version == SWRM_VERSION_1_5_1)
2046 swr_master_write(swrm, SWRM_CMD_FIFO_CFG_ADDR,
2047 (swr_master_read(swrm,
2048 SWRM_CMD_FIFO_CFG_ADDR) | 0x80000000));
2049
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302050 return ret;
2051}
2052
Ramprasad Katkam68765ab2018-08-30 11:46:32 +05302053static int swrm_event_notify(struct notifier_block *self,
Laxminath Kasamf0128ef2018-08-31 15:15:09 +05302054 unsigned long action, void *data)
Ramprasad Katkam68765ab2018-08-30 11:46:32 +05302055{
2056 struct swr_mstr_ctrl *swrm = container_of(self, struct swr_mstr_ctrl,
Laxminath Kasamf0128ef2018-08-31 15:15:09 +05302057 event_notifier);
2058
2059 if (!swrm || !(swrm->dev)) {
2060 pr_err("%s: swrm or dev is NULL\n", __func__);
Ramprasad Katkam68765ab2018-08-30 11:46:32 +05302061 return -EINVAL;
2062 }
Laxminath Kasamf0128ef2018-08-31 15:15:09 +05302063 switch (action) {
2064 case MSM_AUD_DC_EVENT:
2065 schedule_work(&(swrm->dc_presence_work));
2066 break;
2067 case SWR_WAKE_IRQ_EVENT:
Aditya Bavanaric034fad2018-11-12 22:55:11 +05302068 if (swrm->ipc_wakeup && !swrm->ipc_wakeup_triggered) {
2069 swrm->ipc_wakeup_triggered = true;
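/* Hold a wakeup source until swrm_wakeup_work() completes and calls pm_relax() */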
Ramprasad Katkam57349872018-11-11 18:34:57 +05302070 pm_stay_awake(swrm->dev);
Laxminath Kasamf0128ef2018-08-31 15:15:09 +05302071 schedule_work(&swrm->wakeup_work);
Ramprasad Katkamcd61c6e2018-09-18 13:22:58 +05302072 }
Laxminath Kasamf0128ef2018-08-31 15:15:09 +05302073 break;
2074 default:
2075 dev_err(swrm->dev, "%s: invalid event type: %lu\n",
Ramprasad Katkam68765ab2018-08-30 11:46:32 +05302076 __func__, action);
2077 return -EINVAL;
2078 }
2079
Ramprasad Katkam68765ab2018-08-30 11:46:32 +05302080 return 0;
2081}
2082
2083static void swrm_notify_work_fn(struct work_struct *work)
2084{
2085 struct swr_mstr_ctrl *swrm = container_of(work, struct swr_mstr_ctrl,
Laxminath Kasamf0128ef2018-08-31 15:15:09 +05302086 dc_presence_work);
2087
2088 if (!swrm || !swrm->pdev) {
2089 pr_err("%s: swrm or pdev is NULL\n", __func__);
2090 return;
2091 }
Ramprasad Katkam68765ab2018-08-30 11:46:32 +05302092 swrm_wcd_notify(swrm->pdev, SWR_DEVICE_DOWN, NULL);
2093}
2094
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302095static int swrm_probe(struct platform_device *pdev)
2096{
2097 struct swr_mstr_ctrl *swrm;
2098 struct swr_ctrl_platform_data *pdata;
2099 u32 i, num_ports, port_num, port_type, ch_mask;
2100 u32 *temp, map_size, map_length, ch_iter = 0, old_port_num = 0;
2101 int ret = 0;
Sudheer Papothi66d6fd12019-03-27 17:34:48 +05302102 struct clk *lpass_core_hw_vote = NULL;
Sudheer Papothi384addd2019-06-14 02:26:52 +05302103 struct clk *lpass_core_audio = NULL;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302104
2105 /* Allocate soundwire master driver structure */
2106 swrm = devm_kzalloc(&pdev->dev, sizeof(struct swr_mstr_ctrl),
2107 GFP_KERNEL);
2108 if (!swrm) {
2109 ret = -ENOMEM;
2110 goto err_memory_fail;
2111 }
Laxminath Kasamf0128ef2018-08-31 15:15:09 +05302112 swrm->pdev = pdev;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302113 swrm->dev = &pdev->dev;
2114 platform_set_drvdata(pdev, swrm);
2115 swr_set_ctrl_data(&swrm->master, swrm);
2116 pdata = dev_get_platdata(&pdev->dev);
2117 if (!pdata) {
2118 dev_err(&pdev->dev, "%s: pdata from parent is NULL\n",
2119 __func__);
2120 ret = -EINVAL;
2121 goto err_pdata_fail;
2122 }
2123 swrm->handle = (void *)pdata->handle;
2124 if (!swrm->handle) {
2125 dev_err(&pdev->dev, "%s: swrm->handle is NULL\n",
2126 __func__);
2127 ret = -EINVAL;
2128 goto err_pdata_fail;
2129 }
Ramprasad Katkam14f47cc2018-07-25 17:20:18 +05302130 ret = of_property_read_u32(pdev->dev.of_node, "qcom,swr_master_id",
2131 &swrm->master_id);
2132 if (ret) {
2133 dev_err(&pdev->dev, "%s: failed to get master id\n", __func__);
2134 goto err_pdata_fail;
2135 }
Laxminath Kasamfbcaf322018-07-18 00:38:14 +05302136 if (!(of_property_read_u32(pdev->dev.of_node,
2137 "swrm-io-base", &swrm->swrm_base_reg)))
2138 ret = of_property_read_u32(pdev->dev.of_node,
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302139 "swrm-io-base", &swrm->swrm_base_reg);
2140 if (!swrm->swrm_base_reg) {
2141 swrm->read = pdata->read;
2142 if (!swrm->read) {
2143 dev_err(&pdev->dev, "%s: swrm->read is NULL\n",
2144 __func__);
2145 ret = -EINVAL;
2146 goto err_pdata_fail;
2147 }
2148 swrm->write = pdata->write;
2149 if (!swrm->write) {
2150 dev_err(&pdev->dev, "%s: swrm->write is NULL\n",
2151 __func__);
2152 ret = -EINVAL;
2153 goto err_pdata_fail;
2154 }
2155 swrm->bulk_write = pdata->bulk_write;
2156 if (!swrm->bulk_write) {
2157 dev_err(&pdev->dev, "%s: swrm->bulk_write is NULL\n",
2158 __func__);
2159 ret = -EINVAL;
2160 goto err_pdata_fail;
2161 }
2162 } else {
2163 swrm->swrm_dig_base = devm_ioremap(&pdev->dev,
2164 swrm->swrm_base_reg, SWRM_MAX_REGISTER);
2165 }
2166
2167 swrm->clk = pdata->clk;
2168 if (!swrm->clk) {
2169 dev_err(&pdev->dev, "%s: swrm->clk is NULL\n",
2170 __func__);
2171 ret = -EINVAL;
2172 goto err_pdata_fail;
2173 }
Ramprasad Katkam14f47cc2018-07-25 17:20:18 +05302174 if (of_property_read_u32(pdev->dev.of_node,
2175 "qcom,swr-clock-stop-mode0",
2176 &swrm->clk_stop_mode0_supp)) {
2177 swrm->clk_stop_mode0_supp = FALSE;
2178 }
Ramprasad Katkam57349872018-11-11 18:34:57 +05302179
2180 ret = of_property_read_u32(swrm->dev->of_node, "qcom,swr-num-dev",
2181 &swrm->num_dev);
2182 if (ret) {
2183 dev_dbg(&pdev->dev, "%s: Looking up %s property failed\n",
2184 __func__, "qcom,swr-num-dev");
2185 } else {
2186 if (swrm->num_dev > SWR_MAX_SLAVE_DEVICES) {
2187 dev_err(&pdev->dev, "%s: num_dev %d > max limit %d\n",
2188 __func__, swrm->num_dev, SWR_MAX_SLAVE_DEVICES);
2189 ret = -EINVAL;
2190 goto err_pdata_fail;
2191 }
2192 }
2193
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302194 /* Parse soundwire port mapping */
2195 ret = of_property_read_u32(pdev->dev.of_node, "qcom,swr-num-ports",
2196 &num_ports);
2197 if (ret) {
2198 dev_err(swrm->dev, "%s: Failed to get num_ports\n", __func__);
2199 goto err_pdata_fail;
2200 }
2201 swrm->num_ports = num_ports;
2202
2203 if (!of_find_property(pdev->dev.of_node, "qcom,swr-port-mapping",
2204 &map_size)) {
2205 dev_err(swrm->dev, "missing port mapping\n");
2206 goto err_pdata_fail;
2207 }
2208
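/* Each port-mapping entry is a <port_num, port_type, ch_mask> triplet of u32 values */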
2209 map_length = map_size / (3 * sizeof(u32));
2210 if (num_ports > SWR_MSTR_PORT_LEN) {
2211 dev_err(&pdev->dev, "%s:invalid number of swr ports\n",
2212 __func__);
2213 ret = -EINVAL;
2214 goto err_pdata_fail;
2215 }
2216 temp = devm_kzalloc(&pdev->dev, map_size, GFP_KERNEL);
2217
2218 if (!temp) {
2219 ret = -ENOMEM;
2220 goto err_pdata_fail;
2221 }
2222 ret = of_property_read_u32_array(pdev->dev.of_node,
2223 "qcom,swr-port-mapping", temp, 3 * map_length);
2224 if (ret) {
2225 dev_err(swrm->dev, "%s: Failed to read port mapping\n",
2226 __func__);
2227 goto err_pdata_fail;
2228 }
2229
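/* A port may carry multiple <type, ch_mask> entries; ch_iter indexes them and resets whenever port_num changes */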
2230 for (i = 0; i < map_length; i++) {
2231 port_num = temp[3 * i];
2232 port_type = temp[3 * i + 1];
2233 ch_mask = temp[3 * i + 2];
2234
2235 if (port_num != old_port_num)
2236 ch_iter = 0;
2237 swrm->port_mapping[port_num][ch_iter].port_type = port_type;
2238 swrm->port_mapping[port_num][ch_iter++].ch_mask = ch_mask;
2239 old_port_num = port_num;
2240 }
2241 devm_kfree(&pdev->dev, temp);
2242
2243 swrm->reg_irq = pdata->reg_irq;
2244 swrm->master.read = swrm_read;
2245 swrm->master.write = swrm_write;
2246 swrm->master.bulk_write = swrm_bulk_write;
2247 swrm->master.get_logical_dev_num = swrm_get_logical_dev_num;
2248 swrm->master.connect_port = swrm_connect_port;
2249 swrm->master.disconnect_port = swrm_disconnect_port;
2250 swrm->master.slvdev_datapath_control = swrm_slvdev_datapath_control;
2251 swrm->master.remove_from_group = swrm_remove_from_group;
Sudheer Papothi6abd2de2018-09-05 05:57:04 +05302252 swrm->master.device_wakeup_vote = swrm_device_wakeup_vote;
2253 swrm->master.device_wakeup_unvote = swrm_device_wakeup_unvote;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302254 swrm->master.dev.parent = &pdev->dev;
2255 swrm->master.dev.of_node = pdev->dev.of_node;
2256 swrm->master.num_port = 0;
2257 swrm->rcmd_id = 0;
2258 swrm->wcmd_id = 0;
2259 swrm->slave_status = 0;
2260 swrm->num_rx_chs = 0;
2261 swrm->clk_ref_count = 0;
Vatsal Buchadf38c3e2019-03-11 17:10:23 +05302262 swrm->swr_irq_wakeup_capable = 0;
Laxminath Kasamb0f27cd2018-09-06 12:17:11 +05302263 swrm->mclk_freq = MCLK_FREQ;
Laxminath Kasam1df09a82018-09-20 18:57:49 +05302264 swrm->dev_up = true;
Ramprasad Katkam2a799b42018-10-04 20:23:28 +05302265 swrm->state = SWR_MSTR_UP;
Aditya Bavanaric034fad2018-11-12 22:55:11 +05302266 swrm->ipc_wakeup = false;
2267 swrm->ipc_wakeup_triggered = false;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302268 init_completion(&swrm->reset);
2269 init_completion(&swrm->broadcast);
Ramprasad Katkam6bce2e72018-10-10 19:20:13 +05302270 init_completion(&swrm->clk_off_complete);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302271 mutex_init(&swrm->mlock);
2272 mutex_init(&swrm->reslock);
2273 mutex_init(&swrm->force_down_lock);
Ramprasad Katkam1f221262018-08-23 15:01:22 +05302274 mutex_init(&swrm->iolock);
Ramprasad Katkam6bce2e72018-10-10 19:20:13 +05302275 mutex_init(&swrm->clklock);
Laxminath Kasam1df09a82018-09-20 18:57:49 +05302276 mutex_init(&swrm->devlock);
Ramprasad Katkam57349872018-11-11 18:34:57 +05302277 mutex_init(&swrm->pm_lock);
2278 swrm->wlock_holders = 0;
2279 swrm->pm_state = SWRM_PM_SLEEPABLE;
2280 init_waitqueue_head(&swrm->pm_wq);
2281 pm_qos_add_request(&swrm->pm_qos_req,
2282 PM_QOS_CPU_DMA_LATENCY,
2283 PM_QOS_DEFAULT_VALUE);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302284
2285 for (i = 0 ; i < SWR_MSTR_PORT_LEN; i++)
2286 INIT_LIST_HEAD(&swrm->mport_cfg[i].port_req_list);
2287
Sudheer Papothi06f43412019-07-09 03:32:54 +05302288 /* Register LPASS core hw vote */
2289 lpass_core_hw_vote = devm_clk_get(&pdev->dev, "lpass_core_hw_vote");
2290 if (IS_ERR(lpass_core_hw_vote)) {
2291 ret = PTR_ERR(lpass_core_hw_vote);
2292 dev_dbg(&pdev->dev, "%s: clk get %s failed %d\n",
2293 __func__, "lpass_core_hw_vote", ret);
2294 lpass_core_hw_vote = NULL;
2295 ret = 0;
2296 }
2297 swrm->lpass_core_hw_vote = lpass_core_hw_vote;
2298
2299 /* Register LPASS audio core vote */
2300 lpass_core_audio = devm_clk_get(&pdev->dev, "lpass_audio_hw_vote");
2301 if (IS_ERR(lpass_core_audio)) {
2302 ret = PTR_ERR(lpass_core_audio);
2303 dev_dbg(&pdev->dev, "%s: clk get %s failed %d\n",
2304 __func__, "lpass_core_audio", ret);
2305 lpass_core_audio = NULL;
2306 ret = 0;
2307 }
2308 swrm->lpass_core_audio = lpass_core_audio;
2309
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302310 if (swrm->reg_irq) {
2311 ret = swrm->reg_irq(swrm->handle, swr_mstr_interrupt, swrm,
2312 SWR_IRQ_REGISTER);
2313 if (ret) {
2314 dev_err(&pdev->dev, "%s: IRQ register failed ret %d\n",
2315 __func__, ret);
2316 goto err_irq_fail;
2317 }
2318 } else {
2319 swrm->irq = platform_get_irq_byname(pdev, "swr_master_irq");
2320 if (swrm->irq < 0) {
2321 dev_err(swrm->dev, "%s() error getting irq hdle: %d\n",
2322 __func__, swrm->irq);
Laxminath Kasamfbcaf322018-07-18 00:38:14 +05302323 goto err_irq_fail;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302324 }
2325
2326 ret = request_threaded_irq(swrm->irq, NULL,
Sudheer Papothid19d0c52019-02-23 05:41:39 +05302327 swr_mstr_interrupt_v2,
Ramprasad Katkam83303512018-10-11 17:34:22 +05302328 IRQF_TRIGGER_RISING | IRQF_ONESHOT,
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302329 "swr_master_irq", swrm);
2330 if (ret) {
2331 dev_err(swrm->dev, "%s: Failed to request irq %d\n",
2332 __func__, ret);
2333 goto err_irq_fail;
2334 }
2335
2336 }
Vatsal Buchadf38c3e2019-03-11 17:10:23 +05302337 /* Make inband tx interrupts as wakeup capable for slave irq */
2338 ret = of_property_read_u32(pdev->dev.of_node,
2339 "qcom,swr-mstr-irq-wakeup-capable",
2340 &swrm->swr_irq_wakeup_capable);
2341 if (ret)
2342 dev_dbg(swrm->dev, "%s: swrm irq wakeup capable not defined\n",
2343 __func__);
2344 if (swrm->swr_irq_wakeup_capable)
2345 irq_set_irq_wake(swrm->irq, 1);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302346 ret = swr_register_master(&swrm->master);
2347 if (ret) {
2348 dev_err(&pdev->dev, "%s: error adding swr master\n", __func__);
2349 goto err_mstr_fail;
2350 }
2351
2352 /* Add devices registered with board-info as the
2353 * controller will be up now
2354 */
2355 swr_master_add_boarddevices(&swrm->master);
2356 mutex_lock(&swrm->mlock);
2357 swrm_clk_request(swrm, true);
2358 ret = swrm_master_init(swrm);
2359 if (ret < 0) {
2360 dev_err(&pdev->dev,
2361 "%s: Error in master Initialization , err %d\n",
2362 __func__, ret);
2363 mutex_unlock(&swrm->mlock);
2364 goto err_mstr_fail;
2365 }
2366 swrm->version = swr_master_read(swrm, SWRM_COMP_HW_VERSION);
2367
2368 mutex_unlock(&swrm->mlock);
Laxminath Kasamf0128ef2018-08-31 15:15:09 +05302369 INIT_WORK(&swrm->wakeup_work, swrm_wakeup_work);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302370
2371 if (pdev->dev.of_node)
2372 of_register_swr_devices(&swrm->master);
2373
Sudheer Papothi96c842a2019-08-29 12:11:21 +05302374#ifdef CONFIG_DEBUG_FS
2375 swrm->debugfs_swrm_dent = debugfs_create_dir(dev_name(&pdev->dev), 0);
2376 if (!IS_ERR(swrm->debugfs_swrm_dent)) {
2377 swrm->debugfs_peek = debugfs_create_file("swrm_peek",
2378 S_IFREG | 0444, swrm->debugfs_swrm_dent,
2379 (void *) swrm, &swrm_debug_read_ops);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302380
Sudheer Papothi96c842a2019-08-29 12:11:21 +05302381 swrm->debugfs_poke = debugfs_create_file("swrm_poke",
2382 S_IFREG | 0444, swrm->debugfs_swrm_dent,
2383 (void *) swrm, &swrm_debug_write_ops);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302384
Sudheer Papothi96c842a2019-08-29 12:11:21 +05302385 swrm->debugfs_reg_dump = debugfs_create_file("swrm_reg_dump",
2386 S_IFREG | 0444, swrm->debugfs_swrm_dent,
2387 (void *) swrm,
2388 &swrm_debug_dump_ops);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302389 }
Sudheer Papothi96c842a2019-08-29 12:11:21 +05302390#endif
Vatsal Buchadf38c3e2019-03-11 17:10:23 +05302391 ret = device_init_wakeup(swrm->dev, true);
2392 if (ret) {
2393 dev_err(swrm->dev, "Device wakeup init failed: %d\n", ret);
2394 goto err_irq_wakeup_fail;
2395 }
2396
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302397 pm_runtime_set_autosuspend_delay(&pdev->dev, auto_suspend_timer);
2398 pm_runtime_use_autosuspend(&pdev->dev);
2399 pm_runtime_set_active(&pdev->dev);
2400 pm_runtime_enable(&pdev->dev);
2401 pm_runtime_mark_last_busy(&pdev->dev);
2402
Ramprasad Katkam68765ab2018-08-30 11:46:32 +05302403 INIT_WORK(&swrm->dc_presence_work, swrm_notify_work_fn);
2404 swrm->event_notifier.notifier_call = swrm_event_notify;
2405 msm_aud_evt_register_client(&swrm->event_notifier);
2406
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302407 return 0;
Vatsal Buchadf38c3e2019-03-11 17:10:23 +05302408err_irq_wakeup_fail:
2409 device_init_wakeup(swrm->dev, false);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302410err_mstr_fail:
2411 if (swrm->reg_irq)
2412 swrm->reg_irq(swrm->handle, swr_mstr_interrupt,
2413 swrm, SWR_IRQ_FREE);
2414 else if (swrm->irq)
2415 free_irq(swrm->irq, swrm);
2416err_irq_fail:
2417 mutex_destroy(&swrm->mlock);
2418 mutex_destroy(&swrm->reslock);
2419 mutex_destroy(&swrm->force_down_lock);
Ramprasad Katkam1f221262018-08-23 15:01:22 +05302420 mutex_destroy(&swrm->iolock);
Ramprasad Katkam6bce2e72018-10-10 19:20:13 +05302421 mutex_destroy(&swrm->clklock);
Ramprasad Katkam57349872018-11-11 18:34:57 +05302422 mutex_destroy(&swrm->pm_lock);
2423 pm_qos_remove_request(&swrm->pm_qos_req);
2424
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302425err_pdata_fail:
2426err_memory_fail:
2427 return ret;
2428}
2429
2430static int swrm_remove(struct platform_device *pdev)
2431{
2432 struct swr_mstr_ctrl *swrm = platform_get_drvdata(pdev);
2433
2434 if (swrm->reg_irq)
2435 swrm->reg_irq(swrm->handle, swr_mstr_interrupt,
2436 swrm, SWR_IRQ_FREE);
2437 else if (swrm->irq)
2438 free_irq(swrm->irq, swrm);
Aditya Bavanaric034fad2018-11-12 22:55:11 +05302439 else if (swrm->wake_irq > 0)
2440 free_irq(swrm->wake_irq, swrm);
Vatsal Buchadf38c3e2019-03-11 17:10:23 +05302441 if (swrm->swr_irq_wakeup_capable)
2442 irq_set_irq_wake(swrm->irq, 0);
Ramprasad Katkam57349872018-11-11 18:34:57 +05302443 cancel_work_sync(&swrm->wakeup_work);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302444 pm_runtime_disable(&pdev->dev);
2445 pm_runtime_set_suspended(&pdev->dev);
2446 swr_unregister_master(&swrm->master);
Ramprasad Katkam68765ab2018-08-30 11:46:32 +05302447 msm_aud_evt_unregister_client(&swrm->event_notifier);
Vatsal Buchadf38c3e2019-03-11 17:10:23 +05302448 device_init_wakeup(swrm->dev, false);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302449 mutex_destroy(&swrm->mlock);
2450 mutex_destroy(&swrm->reslock);
Ramprasad Katkam6bce2e72018-10-10 19:20:13 +05302451 mutex_destroy(&swrm->iolock);
2452 mutex_destroy(&swrm->clklock);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302453 mutex_destroy(&swrm->force_down_lock);
Ramprasad Katkam57349872018-11-11 18:34:57 +05302454 mutex_destroy(&swrm->pm_lock);
2455 pm_qos_remove_request(&swrm->pm_qos_req);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302456 devm_kfree(&pdev->dev, swrm);
2457 return 0;
2458}
2459
2460static int swrm_clk_pause(struct swr_mstr_ctrl *swrm)
2461{
2462 u32 val;
2463
2464 dev_dbg(swrm->dev, "%s: state: %d\n", __func__, swrm->state);
2465 swr_master_write(swrm, SWRM_INTERRUPT_MASK_ADDR, SWRM_INTERRUPT_STATUS_MASK);
2466 val = swr_master_read(swrm, SWRM_MCP_CFG_ADDR);
2467 val |= SWRM_MCP_CFG_BUS_CLK_PAUSE_BMSK;
2468 swr_master_write(swrm, SWRM_MCP_CFG_ADDR, val);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302469
2470 return 0;
2471}
2472
2473#ifdef CONFIG_PM
2474static int swrm_runtime_resume(struct device *dev)
2475{
2476 struct platform_device *pdev = to_platform_device(dev);
2477 struct swr_mstr_ctrl *swrm = platform_get_drvdata(pdev);
2478 int ret = 0;
Sudheer Papothi384addd2019-06-14 02:26:52 +05302479 bool hw_core_err = false;
2480 bool aud_core_err = false;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302481 struct swr_master *mstr = &swrm->master;
2482 struct swr_device *swr_dev;
2483
2484 dev_dbg(dev, "%s: pm_runtime: resume, state:%d\n",
2485 __func__, swrm->state);
2486 mutex_lock(&swrm->reslock);
Ramprasad Katkam2a799b42018-10-04 20:23:28 +05302487
Sudheer Papothi384addd2019-06-14 02:26:52 +05302488 if (swrm_request_hw_vote(swrm, LPASS_HW_CORE, true)) {
2489 dev_err(dev, "%s:lpass core hw enable failed\n",
2490 __func__);
2491 hw_core_err = true;
2492 }
2493 if (swrm_request_hw_vote(swrm, LPASS_AUDIO_CORE, true)) {
2494 dev_err(dev, "%s:lpass audio hw enable failed\n",
2495 __func__);
2496 aud_core_err = true;
Karthikeyan Manif6821902019-05-21 17:31:24 -07002497 }
Sudheer Papothi66d6fd12019-03-27 17:34:48 +05302498
Ramprasad Katkam2a799b42018-10-04 20:23:28 +05302499 if ((swrm->state == SWR_MSTR_DOWN) ||
2500 (swrm->state == SWR_MSTR_SSR && swrm->dev_up)) {
Aditya Bavanaric034fad2018-11-12 22:55:11 +05302501 if (swrm->clk_stop_mode0_supp) {
2502 if (swrm->ipc_wakeup)
2503 msm_aud_evt_blocking_notifier_call_chain(
2504 SWR_WAKE_IRQ_DEREGISTER, (void *)swrm);
Laxminath Kasamf0128ef2018-08-31 15:15:09 +05302505 }
2506
Vatsal Bucha63b193f2019-08-12 11:56:55 +05302507 if (swrm_clk_request(swrm, true)) {
2508 /*
2509 * Set autosuspend timer to 1 for
2510 * master to enter into suspend.
2511 */
2512 auto_suspend_timer = 1;
Ramprasad Katkam2a799b42018-10-04 20:23:28 +05302513 goto exit;
Vatsal Bucha63b193f2019-08-12 11:56:55 +05302514 }
Ramprasad Katkam2a799b42018-10-04 20:23:28 +05302515 if (!swrm->clk_stop_mode0_supp || swrm->state == SWR_MSTR_SSR) {
Ramprasad Katkam14f47cc2018-07-25 17:20:18 +05302516 list_for_each_entry(swr_dev, &mstr->devices, dev_list) {
2517 ret = swr_device_up(swr_dev);
Sudheer Papothi79c90752019-04-23 06:09:52 +05302518 if (ret == -ENODEV) {
2519 dev_dbg(dev,
2520 "%s slave device up not implemented\n",
2521 __func__);
2522 ret = 0;
2523 } else if (ret) {
Ramprasad Katkam14f47cc2018-07-25 17:20:18 +05302524 dev_err(dev,
2525 "%s: failed to wakeup swr dev %d\n",
2526 __func__, swr_dev->dev_num);
2527 swrm_clk_request(swrm, false);
2528 goto exit;
2529 }
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302530 }
Ramprasad Katkam48b49b22018-10-01 20:12:46 +05302531 swr_master_write(swrm, SWRM_COMP_SW_RESET, 0x01);
2532 swr_master_write(swrm, SWRM_COMP_SW_RESET, 0x01);
2533 swrm_master_init(swrm);
Ramprasad Katkam2e85a542019-04-26 18:28:31 +05302534 /* wait for hw enumeration to complete */
2535 usleep_range(100, 105);
Ramprasad Katkam2a799b42018-10-04 20:23:28 +05302536 swrm_cmd_fifo_wr_cmd(swrm, 0x4, 0xF, 0x0,
2537 SWRS_SCP_INT_STATUS_MASK_1);
Karthikeyan Manif6821902019-05-21 17:31:24 -07002538 if (swrm->state == SWR_MSTR_SSR) {
2539 mutex_unlock(&swrm->reslock);
2540 enable_bank_switch(swrm, 0, SWR_ROW_50, SWR_MIN_COL);
2541 mutex_lock(&swrm->reslock);
2542 }
Ramprasad Katkam14f47cc2018-07-25 17:20:18 +05302543 } else {
2544 /*wake up from clock stop*/
2545 swr_master_write(swrm, SWRM_MCP_BUS_CTRL_ADDR, 0x2);
2546 usleep_range(100, 105);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302547 }
Ramprasad Katkam2a799b42018-10-04 20:23:28 +05302548 swrm->state = SWR_MSTR_UP;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302549 }
2550exit:
Sudheer Papothi384addd2019-06-14 02:26:52 +05302551 if (!aud_core_err)
2552 swrm_request_hw_vote(swrm, LPASS_AUDIO_CORE, false);
2553 if (!hw_core_err)
2554 swrm_request_hw_vote(swrm, LPASS_HW_CORE, false);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302555 pm_runtime_set_autosuspend_delay(&pdev->dev, auto_suspend_timer);
Vatsal Bucha63b193f2019-08-12 11:56:55 +05302556 auto_suspend_timer = SWR_AUTO_SUSPEND_DELAY * 1000;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302557 mutex_unlock(&swrm->reslock);
Sudheer Papothi384addd2019-06-14 02:26:52 +05302558
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302559 return ret;
2560}
2561
2562static int swrm_runtime_suspend(struct device *dev)
2563{
2564 struct platform_device *pdev = to_platform_device(dev);
2565 struct swr_mstr_ctrl *swrm = platform_get_drvdata(pdev);
2566 int ret = 0;
Sudheer Papothi384addd2019-06-14 02:26:52 +05302567 bool hw_core_err = false;
2568 bool aud_core_err = false;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302569 struct swr_master *mstr = &swrm->master;
2570 struct swr_device *swr_dev;
2571 int current_state = 0;
2572
2573 dev_dbg(dev, "%s: pm_runtime: suspend state: %d\n",
2574 __func__, swrm->state);
2575 mutex_lock(&swrm->reslock);
2576 mutex_lock(&swrm->force_down_lock);
2577 current_state = swrm->state;
2578 mutex_unlock(&swrm->force_down_lock);
Sudheer Papothi384addd2019-06-14 02:26:52 +05302579
2580 if (swrm_request_hw_vote(swrm, LPASS_HW_CORE, true)) {
2581 dev_err(dev, "%s:lpass core hw enable failed\n",
2582 __func__);
2583 hw_core_err = true;
2584 }
2585 if (swrm_request_hw_vote(swrm, LPASS_AUDIO_CORE, true)) {
2586 dev_err(dev, "%s:lpass audio hw enable failed\n",
2587 __func__);
2588 aud_core_err = true;
Karthikeyan Manif6821902019-05-21 17:31:24 -07002589 }
Sudheer Papothi66d6fd12019-03-27 17:34:48 +05302590
Ramprasad Katkam2a799b42018-10-04 20:23:28 +05302591 if ((current_state == SWR_MSTR_UP) ||
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302592 (current_state == SWR_MSTR_SSR)) {
2593
2594 if ((current_state != SWR_MSTR_SSR) &&
2595 swrm_is_port_en(&swrm->master)) {
2596 dev_dbg(dev, "%s ports are enabled\n", __func__);
2597 ret = -EBUSY;
2598 goto exit;
2599 }
Ramprasad Katkam2a799b42018-10-04 20:23:28 +05302600 if (!swrm->clk_stop_mode0_supp || swrm->state == SWR_MSTR_SSR) {
Sudheer Papothi06f43412019-07-09 03:32:54 +05302601 mutex_unlock(&swrm->reslock);
Ramprasad Katkamb4c7c682018-12-19 18:58:36 +05302602 enable_bank_switch(swrm, 0, SWR_ROW_50, SWR_MIN_COL);
Sudheer Papothi06f43412019-07-09 03:32:54 +05302603 mutex_lock(&swrm->reslock);
Ramprasad Katkam14f47cc2018-07-25 17:20:18 +05302604 swrm_clk_pause(swrm);
2605 swr_master_write(swrm, SWRM_COMP_CFG_ADDR, 0x00);
2606 list_for_each_entry(swr_dev, &mstr->devices, dev_list) {
2607 ret = swr_device_down(swr_dev);
Sudheer Papothi79c90752019-04-23 06:09:52 +05302608 if (ret == -ENODEV) {
2609 dev_dbg_ratelimited(dev,
2610 "%s slave device down not implemented\n",
2611 __func__);
2612 ret = 0;
2613 } else if (ret) {
Ramprasad Katkam14f47cc2018-07-25 17:20:18 +05302614 dev_err(dev,
2615 "%s: failed to shutdown swr dev %d\n",
2616 __func__, swr_dev->dev_num);
2617 goto exit;
2618 }
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302619 }
Ramprasad Katkam14f47cc2018-07-25 17:20:18 +05302620 } else {
Sudheer Papothi384addd2019-06-14 02:26:52 +05302621 mutex_unlock(&swrm->reslock);
Ramprasad Katkam14f47cc2018-07-25 17:20:18 +05302622 /* clock stop sequence */
2623 swrm_cmd_fifo_wr_cmd(swrm, 0x2, 0xF, 0xF,
2624 SWRS_SCP_CONTROL);
Sudheer Papothi384addd2019-06-14 02:26:52 +05302625 mutex_lock(&swrm->reslock);
Ramprasad Katkam14f47cc2018-07-25 17:20:18 +05302626 usleep_range(100, 105);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302627 }
2628 swrm_clk_request(swrm, false);
Ramprasad Katkam6a3050d2018-10-10 02:08:00 +05302629
Aditya Bavanaric034fad2018-11-12 22:55:11 +05302630 if (swrm->clk_stop_mode0_supp) {
2631 if (swrm->wake_irq > 0) {
2632 enable_irq(swrm->wake_irq);
2633 } else if (swrm->ipc_wakeup) {
2634 msm_aud_evt_blocking_notifier_call_chain(
2635 SWR_WAKE_IRQ_REGISTER, (void *)swrm);
2636 swrm->ipc_wakeup_triggered = false;
2637 }
Ramprasad Katkam6a3050d2018-10-10 02:08:00 +05302638 }
2639
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302640 }
Ramprasad Katkam2a799b42018-10-04 20:23:28 +05302641 /* Retain SSR state until resume */
2642 if (current_state != SWR_MSTR_SSR)
2643 swrm->state = SWR_MSTR_DOWN;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302644exit:
Sudheer Papothi384addd2019-06-14 02:26:52 +05302645 if (!aud_core_err)
2646 swrm_request_hw_vote(swrm, LPASS_AUDIO_CORE, false);
2647 if (!hw_core_err)
2648 swrm_request_hw_vote(swrm, LPASS_HW_CORE, false);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302649 mutex_unlock(&swrm->reslock);
2650 return ret;
2651}
2652#endif /* CONFIG_PM */
2653
Sudheer Papothi06f43412019-07-09 03:32:54 +05302654static int swrm_device_suspend(struct device *dev)
2655{
2656 struct platform_device *pdev = to_platform_device(dev);
2657 struct swr_mstr_ctrl *swrm = platform_get_drvdata(pdev);
2658 int ret = 0;
2659
2660 dev_dbg(dev, "%s: swrm state: %d\n", __func__, swrm->state);
2661 if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) {
2662 ret = swrm_runtime_suspend(dev);
2663 if (!ret) {
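/* Keep the runtime-PM status in sync with the forced suspend done above */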
2664 pm_runtime_disable(dev);
2665 pm_runtime_set_suspended(dev);
2666 pm_runtime_enable(dev);
2667 }
2668 }
2669
2670 return 0;
2671}
2672
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302673static int swrm_device_down(struct device *dev)
2674{
2675 struct platform_device *pdev = to_platform_device(dev);
2676 struct swr_mstr_ctrl *swrm = platform_get_drvdata(pdev);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302677
2678 dev_dbg(dev, "%s: swrm state: %d\n", __func__, swrm->state);
2679
2680 mutex_lock(&swrm->force_down_lock);
2681 swrm->state = SWR_MSTR_SSR;
2682 mutex_unlock(&swrm->force_down_lock);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302683
Ramprasad Katkam2e85a542019-04-26 18:28:31 +05302684 swrm_device_suspend(dev);
Laxminath Kasam1df09a82018-09-20 18:57:49 +05302685 return 0;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302686}
2687
Aditya Bavanaric034fad2018-11-12 22:55:11 +05302688int swrm_register_wake_irq(struct swr_mstr_ctrl *swrm)
2689{
2690 int ret = 0;
Laxminath Kasama60239e2019-01-10 14:43:03 +05302691 int irq, dir_apps_irq;
Aditya Bavanaric034fad2018-11-12 22:55:11 +05302692
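/* The wake interrupt can come either from a wakeup GPIO or from a dedicated "swr_wake_irq" platform resource */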
2693 if (!swrm->ipc_wakeup) {
Laxminath Kasama60239e2019-01-10 14:43:03 +05302694 irq = of_get_named_gpio(swrm->dev->of_node,
2695 "qcom,swr-wakeup-irq", 0);
2696 if (gpio_is_valid(irq)) {
2697 swrm->wake_irq = gpio_to_irq(irq);
2698 if (swrm->wake_irq < 0) {
2699 dev_err(swrm->dev,
2700 "Unable to configure irq\n");
2701 return swrm->wake_irq;
2702 }
2703 } else {
2704 dir_apps_irq = platform_get_irq_byname(swrm->pdev,
2705 "swr_wake_irq");
2706 if (dir_apps_irq < 0) {
2707 dev_err(swrm->dev,
2708 "TLMM connect gpio not found\n");
2709 return -EINVAL;
2710 }
2711 swrm->wake_irq = dir_apps_irq;
Aditya Bavanaric034fad2018-11-12 22:55:11 +05302712 }
Aditya Bavanaric034fad2018-11-12 22:55:11 +05302713 ret = request_threaded_irq(swrm->wake_irq, NULL,
2714 swrm_wakeup_interrupt,
2715 IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
2716 "swr_wake_irq", swrm);
2717 if (ret) {
2718 dev_err(swrm->dev, "%s: Failed to request irq %d\n",
2719 __func__, ret);
2720 return -EINVAL;
2721 }
Aditya Bavanari3517b112018-12-03 13:26:59 +05302722 irq_set_irq_wake(swrm->wake_irq, 1);
Aditya Bavanaric034fad2018-11-12 22:55:11 +05302723 }
2724 return ret;
2725}
2726
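/*
 * Allocate the per-usecase port parameter table: an array of SWR_UC_MAX
 * pointers, each pointing to 'size' port_params entries for one usecase.
 */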
Sudheer Papothi3d1596e2018-10-27 06:19:18 +05302727static int swrm_alloc_port_mem(struct device *dev, struct swr_mstr_ctrl *swrm,
2728 u32 uc, u32 size)
2729{
2730 if (!swrm->port_param) {
2731 swrm->port_param = devm_kzalloc(dev,
2732 sizeof(*swrm->port_param) * SWR_UC_MAX,
2733 GFP_KERNEL);
2734 if (!swrm->port_param)
2735 return -ENOMEM;
2736 }
2737 if (!swrm->port_param[uc]) {
2738 swrm->port_param[uc] = devm_kcalloc(dev, size,
2739 sizeof(struct port_params),
2740 GFP_KERNEL);
2741 if (!swrm->port_param[uc])
2742 return -ENOMEM;
2743 } else {
2744 dev_err_ratelimited(swrm->dev, "%s: called more than once\n",
2745 __func__);
2746 }
2747
2748 return 0;
2749}
2750
2751static int swrm_copy_port_config(struct swr_mstr_ctrl *swrm,
2752 struct swrm_port_config *port_cfg,
2753 u32 size)
2754{
2755 int idx;
2756 struct port_params *params;
2757 int uc = port_cfg->uc;
2758 int ret = 0;
2759
2760 for (idx = 0; idx < size; idx++) {
2761 params = &((struct port_params *)port_cfg->params)[idx];
2762 if (!params) {
2763 dev_err(swrm->dev, "%s: Invalid params\n", __func__);
2764 ret = -EINVAL;
2765 break;
2766 }
2767 memcpy(&swrm->port_param[uc][idx], params,
2768 sizeof(struct port_params));
2769 }
2770
2771 return ret;
2772}
2773
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302774/**
2775 * swrm_wcd_notify - parent device can notify to soundwire master through
2776 * this function
2777 * @pdev: pointer to platform device structure
2778 * @id: command id from parent to the soundwire master
2779 * @data: data from parent device to soundwire master
2780 */
2781int swrm_wcd_notify(struct platform_device *pdev, u32 id, void *data)
2782{
2783 struct swr_mstr_ctrl *swrm;
2784 int ret = 0;
2785 struct swr_master *mstr;
2786 struct swr_device *swr_dev;
Sudheer Papothi3d1596e2018-10-27 06:19:18 +05302787 struct swrm_port_config *port_cfg;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302788
2789 if (!pdev) {
2790 pr_err("%s: pdev is NULL\n", __func__);
2791 return -EINVAL;
2792 }
2793 swrm = platform_get_drvdata(pdev);
2794 if (!swrm) {
2795 dev_err(&pdev->dev, "%s: swrm is NULL\n", __func__);
2796 return -EINVAL;
2797 }
2798 mstr = &swrm->master;
2799
2800 switch (id) {
Sudheer Papothi06f43412019-07-09 03:32:54 +05302801 case SWR_REQ_CLK_SWITCH:
2802 /* This will put soundwire in clock stop mode and disable the
2803 * clocks, if there is no active usecase running, so that the
2804 * next activity on soundwire will request clock from new clock
2805 * source.
2806 */
2807 mutex_lock(&swrm->mlock);
2808 if (swrm->state == SWR_MSTR_UP)
2809 swrm_device_suspend(&pdev->dev);
2810 mutex_unlock(&swrm->mlock);
2811 break;
Laxminath Kasamb0f27cd2018-09-06 12:17:11 +05302812 case SWR_CLK_FREQ:
2813 if (!data) {
2814 dev_err(swrm->dev, "%s: data is NULL\n", __func__);
2815 ret = -EINVAL;
2816 } else {
2817 mutex_lock(&swrm->mlock);
Ramprasad Katkam2e85a542019-04-26 18:28:31 +05302818 if (swrm->mclk_freq != *(int *)data) {
2819 dev_dbg(swrm->dev, "%s: freq change: force mstr down\n", __func__);
2820 if (swrm->state == SWR_MSTR_DOWN)
2821 dev_dbg(swrm->dev, "%s:SWR master is already Down:%d\n",
2822 __func__, swrm->state);
2823 else
2824 swrm_device_suspend(&pdev->dev);
2825 }
Laxminath Kasamb0f27cd2018-09-06 12:17:11 +05302826 swrm->mclk_freq = *(int *)data;
2827 mutex_unlock(&swrm->mlock);
2828 }
2829 break;
Laxminath Kasam1df09a82018-09-20 18:57:49 +05302830 case SWR_DEVICE_SSR_DOWN:
2831 mutex_lock(&swrm->devlock);
2832 swrm->dev_up = false;
2833 mutex_unlock(&swrm->devlock);
Ramprasad Katkam2a799b42018-10-04 20:23:28 +05302834 mutex_lock(&swrm->reslock);
2835 swrm->state = SWR_MSTR_SSR;
2836 mutex_unlock(&swrm->reslock);
Laxminath Kasam1df09a82018-09-20 18:57:49 +05302837 break;
2838 case SWR_DEVICE_SSR_UP:
Ramprasad Katkam6bce2e72018-10-10 19:20:13 +05302839 /* wait for clk voting to be zero */
Ramprasad Katkam7f6462e2018-11-06 11:51:22 +05302840 reinit_completion(&swrm->clk_off_complete);
Ramprasad Katkam6bce2e72018-10-10 19:20:13 +05302841 if (swrm->clk_ref_count &&
2842 !wait_for_completion_timeout(&swrm->clk_off_complete,
Ramprasad Katkamc87efeb2018-12-12 19:26:19 +05302843 msecs_to_jiffies(500)))
Ramprasad Katkam6bce2e72018-10-10 19:20:13 +05302844 dev_err(swrm->dev, "%s: clock voting not zero\n",
2845 __func__);
2846
Laxminath Kasam1df09a82018-09-20 18:57:49 +05302847 mutex_lock(&swrm->devlock);
2848 swrm->dev_up = true;
2849 mutex_unlock(&swrm->devlock);
2850 break;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302851 case SWR_DEVICE_DOWN:
2852 dev_dbg(swrm->dev, "%s: swr master down called\n", __func__);
2853 mutex_lock(&swrm->mlock);
Ramprasad Katkam2a799b42018-10-04 20:23:28 +05302854 if (swrm->state == SWR_MSTR_DOWN)
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302855 dev_dbg(swrm->dev, "%s:SWR master is already Down:%d\n",
2856 __func__, swrm->state);
2857 else
2858 swrm_device_down(&pdev->dev);
2859 mutex_unlock(&swrm->mlock);
2860 break;
2861 case SWR_DEVICE_UP:
2862 dev_dbg(swrm->dev, "%s: swr master up called\n", __func__);
Ramprasad Katkam0fed92f2018-11-08 14:22:22 +05302863 mutex_lock(&swrm->devlock);
2864 if (!swrm->dev_up) {
2865 dev_dbg(swrm->dev, "SSR not complete yet\n");
2866 mutex_unlock(&swrm->devlock);
2867 return -EBUSY;
2868 }
2869 mutex_unlock(&swrm->devlock);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302870 mutex_lock(&swrm->mlock);
Ramprasad Katkam86c45e02018-10-16 19:31:51 +05302871 pm_runtime_mark_last_busy(&pdev->dev);
2872 pm_runtime_get_sync(&pdev->dev);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302873 mutex_lock(&swrm->reslock);
Ramprasad Katkam86c45e02018-10-16 19:31:51 +05302874 list_for_each_entry(swr_dev, &mstr->devices, dev_list) {
2875 ret = swr_reset_device(swr_dev);
2876 if (ret) {
2877 dev_err(swrm->dev,
2878 "%s: failed to reset swr device %d\n",
2879 __func__, swr_dev->dev_num);
2880 swrm_clk_request(swrm, false);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302881 }
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302882 }
Ramprasad Katkam86c45e02018-10-16 19:31:51 +05302883 pm_runtime_mark_last_busy(&pdev->dev);
2884 pm_runtime_put_autosuspend(&pdev->dev);
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302885 mutex_unlock(&swrm->reslock);
2886 mutex_unlock(&swrm->mlock);
2887 break;
2888 case SWR_SET_NUM_RX_CH:
2889 if (!data) {
2890 dev_err(swrm->dev, "%s: data is NULL\n", __func__);
2891 ret = -EINVAL;
2892 } else {
2893 mutex_lock(&swrm->mlock);
2894 swrm->num_rx_chs = *(int *)data;
2895 if ((swrm->num_rx_chs > 1) && !swrm->num_cfg_devs) {
2896 list_for_each_entry(swr_dev, &mstr->devices,
2897 dev_list) {
2898 ret = swr_set_device_group(swr_dev,
2899 SWR_BROADCAST);
2900 if (ret)
2901 dev_err(swrm->dev,
2902 "%s: set num ch failed\n",
2903 __func__);
2904 }
2905 } else {
2906 list_for_each_entry(swr_dev, &mstr->devices,
2907 dev_list) {
2908 ret = swr_set_device_group(swr_dev,
2909 SWR_GROUP_NONE);
2910 if (ret)
2911 dev_err(swrm->dev,
2912 "%s: set num ch failed\n",
2913 __func__);
2914 }
2915 }
2916 mutex_unlock(&swrm->mlock);
2917 }
2918 break;
Aditya Bavanaric034fad2018-11-12 22:55:11 +05302919 case SWR_REGISTER_WAKE_IRQ:
2920 if (!data) {
2921 dev_err(swrm->dev, "%s: reg wake irq data is NULL\n",
2922 __func__);
2923 ret = -EINVAL;
2924 } else {
2925 mutex_lock(&swrm->mlock);
2926 swrm->ipc_wakeup = *(u32 *)data;
2927 ret = swrm_register_wake_irq(swrm);
2928 if (ret)
2929 dev_err(swrm->dev, "%s: register wake_irq failed\n",
2930 __func__);
2931 mutex_unlock(&swrm->mlock);
2932 }
2933 break;
Sudheer Papothi3d1596e2018-10-27 06:19:18 +05302934 case SWR_SET_PORT_MAP:
2935 if (!data) {
2936 dev_err(swrm->dev, "%s: data is NULL for id=%d\n",
2937 __func__, id);
2938 ret = -EINVAL;
2939 } else {
2940 mutex_lock(&swrm->mlock);
2941 port_cfg = (struct swrm_port_config *)data;
2942 if (!port_cfg->size) {
2943 ret = -EINVAL;
2944 goto done;
2945 }
2946 ret = swrm_alloc_port_mem(&pdev->dev, swrm,
2947 port_cfg->uc, port_cfg->size);
2948 if (!ret)
2949 swrm_copy_port_config(swrm, port_cfg,
2950 port_cfg->size);
2951done:
2952 mutex_unlock(&swrm->mlock);
2953 }
2954 break;
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05302955 default:
2956 dev_err(swrm->dev, "%s: swr master unknown id %d\n",
2957 __func__, id);
2958 break;
2959 }
2960 return ret;
2961}
2962EXPORT_SYMBOL(swrm_wcd_notify);
2963
Ramprasad Katkam57349872018-11-11 18:34:57 +05302964/*
2965 * swrm_pm_cmpxchg:
2966 * Check old state and exchange with pm new state
2967 * if old state matches with current state
2968 *
2969 * @swrm: pointer to wcd core resource
2970 * @o: pm old state
2971 * @n: pm new state
2972 *
2973 * Returns old state
2974 */
2975static enum swrm_pm_state swrm_pm_cmpxchg(
2976 struct swr_mstr_ctrl *swrm,
2977 enum swrm_pm_state o,
2978 enum swrm_pm_state n)
2979{
2980 enum swrm_pm_state old;
2981
2982 if (!swrm)
2983 return o;
2984
2985 mutex_lock(&swrm->pm_lock);
2986 old = swrm->pm_state;
2987 if (old == o)
2988 swrm->pm_state = n;
2989 mutex_unlock(&swrm->pm_lock);
2990
2991 return old;
2992}
2993
2994static bool swrm_lock_sleep(struct swr_mstr_ctrl *swrm)
2995{
2996 enum swrm_pm_state os;
2997
2998 /*
2999 * swrm_{lock/unlock}_sleep will be called by swr irq handler
3000 * and slave wake up requests..
3001 *
3002 * If system didn't resume, we can simply return false so
3003 * IRQ handler can return without handling IRQ.
3004 */
3005 mutex_lock(&swrm->pm_lock);
3006 if (swrm->wlock_holders++ == 0) {
3007 dev_dbg(swrm->dev, "%s: holding wake lock\n", __func__);
3008 pm_qos_update_request(&swrm->pm_qos_req,
3009 msm_cpuidle_get_deep_idle_latency());
3010 pm_stay_awake(swrm->dev);
3011 }
3012 mutex_unlock(&swrm->pm_lock);
3013
3014 if (!wait_event_timeout(swrm->pm_wq,
3015 ((os = swrm_pm_cmpxchg(swrm,
3016 SWRM_PM_SLEEPABLE,
3017 SWRM_PM_AWAKE)) ==
3018 SWRM_PM_SLEEPABLE ||
3019 (os == SWRM_PM_AWAKE)),
3020 msecs_to_jiffies(
3021 SWRM_SYSTEM_RESUME_TIMEOUT_MS))) {
3022 dev_err(swrm->dev, "%s: system didn't resume within %dms, s %d, w %d\n",
3023 __func__, SWRM_SYSTEM_RESUME_TIMEOUT_MS, swrm->pm_state,
3024 swrm->wlock_holders);
3025 swrm_unlock_sleep(swrm);
3026 return false;
3027 }
3028 wake_up_all(&swrm->pm_wq);
3029 return true;
3030}
3031
3032static void swrm_unlock_sleep(struct swr_mstr_ctrl *swrm)
3033{
3034 mutex_lock(&swrm->pm_lock);
3035 if (--swrm->wlock_holders == 0) {
3036 dev_dbg(swrm->dev, "%s: releasing wake lock pm_state %d -> %d\n",
3037 __func__, swrm->pm_state, SWRM_PM_SLEEPABLE);
3038 /*
3039 * if swrm_lock_sleep failed, pm_state would be still
3040 * SWRM_PM_ASLEEP, don't overwrite
3041 */
3042 if (likely(swrm->pm_state == SWRM_PM_AWAKE))
3043 swrm->pm_state = SWRM_PM_SLEEPABLE;
3044 pm_qos_update_request(&swrm->pm_qos_req,
3045 PM_QOS_DEFAULT_VALUE);
3046 pm_relax(swrm->dev);
3047 }
3048 mutex_unlock(&swrm->pm_lock);
3049 wake_up_all(&swrm->pm_wq);
3050}
3051
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05303052#ifdef CONFIG_PM_SLEEP
3053static int swrm_suspend(struct device *dev)
3054{
3055 int ret = -EBUSY;
3056 struct platform_device *pdev = to_platform_device(dev);
3057 struct swr_mstr_ctrl *swrm = platform_get_drvdata(pdev);
3058
3059 dev_dbg(dev, "%s: system suspend, state: %d\n", __func__, swrm->state);
Ramprasad Katkam57349872018-11-11 18:34:57 +05303060
3061 mutex_lock(&swrm->pm_lock);
3062
3063 if (swrm->pm_state == SWRM_PM_SLEEPABLE) {
3064 dev_dbg(swrm->dev, "%s: suspending system, state %d, wlock %d\n",
3065 __func__, swrm->pm_state,
3066 swrm->wlock_holders);
3067 swrm->pm_state = SWRM_PM_ASLEEP;
3068 } else if (swrm->pm_state == SWRM_PM_AWAKE) {
3069 /*
3070 * unlock to wait for pm_state == SWRM_PM_SLEEPABLE
3071 * then set to SWRM_PM_ASLEEP
3072 */
3073 dev_dbg(swrm->dev, "%s: waiting to suspend system, state %d, wlock %d\n",
3074 __func__, swrm->pm_state,
3075 swrm->wlock_holders);
3076 mutex_unlock(&swrm->pm_lock);
3077 if (!(wait_event_timeout(swrm->pm_wq, swrm_pm_cmpxchg(
3078 swrm, SWRM_PM_SLEEPABLE,
3079 SWRM_PM_ASLEEP) ==
3080 SWRM_PM_SLEEPABLE,
3081 msecs_to_jiffies(
3082 SWRM_SYS_SUSPEND_WAIT)))) {
3083 dev_dbg(swrm->dev, "%s: suspend failed state %d, wlock %d\n",
3084 __func__, swrm->pm_state,
3085 swrm->wlock_holders);
3086 return -EBUSY;
3087 } else {
3088 dev_dbg(swrm->dev,
3089 "%s: done, state %d, wlock %d\n",
3090 __func__, swrm->pm_state,
3091 swrm->wlock_holders);
3092 }
3093 mutex_lock(&swrm->pm_lock);
3094 } else if (swrm->pm_state == SWRM_PM_ASLEEP) {
3095 dev_dbg(swrm->dev, "%s: system is already suspended, state %d, wlock %d\n",
3096 __func__, swrm->pm_state,
3097 swrm->wlock_holders);
3098 }
3099
3100 mutex_unlock(&swrm->pm_lock);
3101
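	/*
	 * Force the bus into suspend now if runtime PM is disabled or
	 * the device has not been runtime suspended yet.
	 */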
3102	if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) {
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05303103 ret = swrm_runtime_suspend(dev);
3104 if (!ret) {
3105 /*
3106 * Synchronize runtime-pm and system-pm states:
3107 * At this point, we are already suspended. If
3108 * runtime-pm still thinks it is active, then
3109 * make sure its status is in sync with the HW
3110 * status. The three calls below let the
3111 * runtime-pm know that we are suspended
3112 * already without re-invoking the suspend
3113 * callback.
3114 */
3115 pm_runtime_disable(dev);
3116 pm_runtime_set_suspended(dev);
3117 pm_runtime_enable(dev);
3118 }
3119 }
3120 if (ret == -EBUSY) {
3121 /*
3122 * There is a possibility that some audio stream is active
3123 * during suspend. We don't want to return suspend failure in
3124 * that case so that display and relevant components can still
3125 * go to suspend.
3126 * If there is some other error, then it should be passed on
3127 * to system-level suspend.
3128 */
3129 ret = 0;
3130 }
3131 return ret;
3132}
3133
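/*
 * swrm_resume:
 * System resume handler. Brings the hardware back up when runtime PM is
 * not managing it, then marks the controller sleepable and wakes any
 * waiters blocked in swrm_lock_sleep.
 */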
3134static int swrm_resume(struct device *dev)
3135{
3136 int ret = 0;
3137 struct platform_device *pdev = to_platform_device(dev);
3138 struct swr_mstr_ctrl *swrm = platform_get_drvdata(pdev);
3139
3140 dev_dbg(dev, "%s: system resume, state: %d\n", __func__, swrm->state);
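	/*
	 * Resume the hardware directly when runtime PM is disabled or
	 * runtime PM still considers the device active, then re-arm
	 * autosuspend.
	 */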
3141	if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) {
3142 ret = swrm_runtime_resume(dev);
3143 if (!ret) {
3144 pm_runtime_mark_last_busy(dev);
3145 pm_request_autosuspend(dev);
3146 }
3147 }
Ramprasad Katkam57349872018-11-11 18:34:57 +05303148 mutex_lock(&swrm->pm_lock);
3149 if (swrm->pm_state == SWRM_PM_ASLEEP) {
3150 dev_dbg(swrm->dev,
3151 "%s: resuming system, state %d, wlock %d\n",
3152 __func__, swrm->pm_state,
3153 swrm->wlock_holders);
3154 swrm->pm_state = SWRM_PM_SLEEPABLE;
3155 } else {
3156 dev_dbg(swrm->dev, "%s: system is already awake, state %d wlock %d\n",
3157 __func__, swrm->pm_state,
3158 swrm->wlock_holders);
3159 }
3160 mutex_unlock(&swrm->pm_lock);
3161 wake_up_all(&swrm->pm_wq);
3162
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05303163 return ret;
3164}
3165#endif /* CONFIG_PM_SLEEP */
3166
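/*
 * System sleep is routed through swrm_suspend/swrm_resume, which reuse
 * the runtime PM callbacks so that hardware state and runtime PM state
 * stay consistent across both paths.
 */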
3167static const struct dev_pm_ops swrm_dev_pm_ops = {
3168 SET_SYSTEM_SLEEP_PM_OPS(
3169 swrm_suspend,
3170 swrm_resume
3171 )
3172 SET_RUNTIME_PM_OPS(
3173 swrm_runtime_suspend,
3174 swrm_runtime_resume,
3175 NULL
3176 )
3177};
3178
3179static const struct of_device_id swrm_dt_match[] = {
3180 {
3181 .compatible = "qcom,swr-mstr",
3182 },
3183 {}
3184};
3185
3186static struct platform_driver swr_mstr_driver = {
3187 .probe = swrm_probe,
3188 .remove = swrm_remove,
3189 .driver = {
3190 .name = SWR_WCD_NAME,
3191 .owner = THIS_MODULE,
3192 .pm = &swrm_dev_pm_ops,
3193 .of_match_table = swrm_dt_match,
Xiaojun Sang53cd13a2018-06-29 15:14:37 +08003194 .suppress_bind_attrs = true,
Ramprasad Katkam9f040f32018-05-16 10:19:25 +05303195 },
3196};
3197
3198static int __init swrm_init(void)
3199{
3200 return platform_driver_register(&swr_mstr_driver);
3201}
3202module_init(swrm_init);
3203
3204static void __exit swrm_exit(void)
3205{
3206 platform_driver_unregister(&swr_mstr_driver);
3207}
3208module_exit(swrm_exit);
3209
3210MODULE_LICENSE("GPL v2");
3211MODULE_DESCRIPTION("SoundWire Master Controller");
3212MODULE_ALIAS("platform:swr-mstr");