      1/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12#include <linux/irq.h>
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <linux/slab.h>
16#include <linux/io.h>
17#include <linux/interrupt.h>
18#include <linux/platform_device.h>
19#include <linux/dma-mapping.h>
20#include <linux/slimbus/slimbus.h>
21#include <linux/delay.h>
22#include <linux/kthread.h>
23#include <linux/clk.h>
24#include <linux/pm_runtime.h>
25#include <linux/of.h>
26#include <linux/of_slimbus.h>
27#include <linux/timer.h>
28#include <linux/msm-sps.h>
29#include <soc/qcom/service-locator.h>
30#include <soc/qcom/service-notifier.h>
31#include <soc/qcom/subsystem_notif.h>
32#include "slim-msm.h"
33
34#define NGD_SLIM_NAME "ngd_msm_ctrl"
35#define SLIM_LA_MGR 0xFF
36#define SLIM_ROOT_FREQ 24576000
37#define LADDR_RETRY 5
38
39#define NGD_BASE_V1(r) (((r) % 2) ? 0x800 : 0xA00)
40#define NGD_BASE_V2(r) (((r) % 2) ? 0x1000 : 0x2000)
41#define NGD_BASE(r, v) ((v) ? NGD_BASE_V2(r) : NGD_BASE_V1(r))
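/*
 * NGD_BASE() returns the register offset of the NGD instance used by this
 * controller: the controller instance number picks one of two NGD register
 * blocks, and a non-zero hardware version (read from the device base at
 * power-up) selects the V2 address map.
 */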
42/* NGD (Non-ported Generic Device) registers */
43enum ngd_reg {
44 NGD_CFG = 0x0,
45 NGD_STATUS = 0x4,
46 NGD_RX_MSGQ_CFG = 0x8,
47 NGD_INT_EN = 0x10,
48 NGD_INT_STAT = 0x14,
49 NGD_INT_CLR = 0x18,
50 NGD_TX_MSG = 0x30,
51 NGD_RX_MSG = 0x70,
52 NGD_IE_STAT = 0xF0,
53 NGD_VE_STAT = 0x100,
54};
55
56enum ngd_msg_cfg {
57 NGD_CFG_ENABLE = 1,
58 NGD_CFG_RX_MSGQ_EN = 1 << 1,
59 NGD_CFG_TX_MSGQ_EN = 1 << 2,
60};
61
62enum ngd_intr {
63 NGD_INT_RECFG_DONE = 1 << 24,
64 NGD_INT_TX_NACKED_2 = 1 << 25,
65 NGD_INT_MSG_BUF_CONTE = 1 << 26,
66 NGD_INT_MSG_TX_INVAL = 1 << 27,
67 NGD_INT_IE_VE_CHG = 1 << 28,
68 NGD_INT_DEV_ERR = 1 << 29,
69 NGD_INT_RX_MSG_RCVD = 1 << 30,
70 NGD_INT_TX_MSG_SENT = 1 << 31,
71};
72
73enum ngd_offsets {
74 NGD_NACKED_MC = 0x7F00000,
75 NGD_ACKED_MC = 0xFE000,
76 NGD_ERROR = 0x1800,
77 NGD_MSGQ_SUPPORT = 0x400,
78 NGD_RX_MSGQ_TIME_OUT = 0x16,
79 NGD_ENUMERATED = 0x1,
80 NGD_TX_BUSY = 0x0,
81};
82
83enum ngd_status {
84 NGD_LADDR = 1 << 1,
85};
86
87static void ngd_slim_rx(struct msm_slim_ctrl *dev, u8 *buf);
88static int ngd_slim_runtime_resume(struct device *device);
89static int ngd_slim_power_up(struct msm_slim_ctrl *dev, bool mdm_restart);
90static void ngd_dom_down(struct msm_slim_ctrl *dev);
91static int dsp_domr_notify_cb(struct notifier_block *n, unsigned long code,
92 void *_cmd);
93
94static irqreturn_t ngd_slim_interrupt(int irq, void *d)
95{
96 struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)d;
97 void __iomem *ngd = dev->base + NGD_BASE(dev->ctrl.nr, dev->ver);
98 u32 stat = readl_relaxed(ngd + NGD_INT_STAT);
99 u32 pstat;
100
101 if ((stat & NGD_INT_MSG_BUF_CONTE) ||
102 (stat & NGD_INT_MSG_TX_INVAL) || (stat & NGD_INT_DEV_ERR) ||
103 (stat & NGD_INT_TX_NACKED_2)) {
104 writel_relaxed(stat, ngd + NGD_INT_CLR);
105 if (stat & NGD_INT_MSG_TX_INVAL)
106 dev->err = -EINVAL;
107 else
108 dev->err = -EIO;
109
110 SLIM_WARN(dev, "NGD interrupt error:0x%x, err:%d\n", stat,
111 dev->err);
112 /* Guarantee that error interrupts are cleared */
113 mb();
114 msm_slim_manage_tx_msgq(dev, false, NULL, dev->err);
115
116 } else if (stat & NGD_INT_TX_MSG_SENT) {
117 writel_relaxed(NGD_INT_TX_MSG_SENT, ngd + NGD_INT_CLR);
118 /* Make sure interrupt is cleared */
119 mb();
120 msm_slim_manage_tx_msgq(dev, false, NULL, 0);
121 }
122 if (stat & NGD_INT_RX_MSG_RCVD) {
123 u32 rx_buf[10];
124 u8 len, i;
125
126 rx_buf[0] = readl_relaxed(ngd + NGD_RX_MSG);
127 len = rx_buf[0] & 0x1F;
128 for (i = 1; i < ((len + 3) >> 2); i++) {
129 rx_buf[i] = readl_relaxed(ngd + NGD_RX_MSG +
130 (4 * i));
131 SLIM_DBG(dev, "REG-RX data: %x\n", rx_buf[i]);
132 }
133 writel_relaxed(NGD_INT_RX_MSG_RCVD,
134 ngd + NGD_INT_CLR);
135 /*
136 * Guarantee that CLR bit write goes through before
137 * queuing work
138 */
139 mb();
140 ngd_slim_rx(dev, (u8 *)rx_buf);
141 }
142 if (stat & NGD_INT_RECFG_DONE) {
143 writel_relaxed(NGD_INT_RECFG_DONE, ngd + NGD_INT_CLR);
144 /* Guarantee RECONFIG DONE interrupt is cleared */
145 mb();
146 /* In satellite mode, just log the reconfig done IRQ */
147 SLIM_DBG(dev, "reconfig done IRQ for NGD\n");
148 }
149 if (stat & NGD_INT_IE_VE_CHG) {
150 writel_relaxed(NGD_INT_IE_VE_CHG, ngd + NGD_INT_CLR);
151 /* Guarantee IE VE change interrupt is cleared */
152 mb();
153 SLIM_DBG(dev, "NGD IE VE change\n");
154 }
155
156 pstat = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_ST_EEn, dev->ver));
157 if (pstat != 0)
158 return msm_slim_port_irq_handler(dev, pstat);
159 return IRQ_HANDLED;
160}
161
162static int ngd_qmi_available(struct notifier_block *n, unsigned long code,
163 void *_cmd)
164{
165 struct msm_slim_qmi *qmi = container_of(n, struct msm_slim_qmi, nb);
166 struct msm_slim_ctrl *dev =
167 container_of(qmi, struct msm_slim_ctrl, qmi);
168 SLIM_INFO(dev, "Slimbus QMI NGD CB received event:%ld\n", code);
169 switch (code) {
170 case QMI_SERVER_ARRIVE:
171 atomic_set(&dev->ssr_in_progress, 0);
172 schedule_work(&dev->dsp.dom_up);
173 break;
174 default:
175 break;
176 }
177 return 0;
178}
179
180static void ngd_reg_ssr(struct msm_slim_ctrl *dev)
181{
182 int ret;
183 const char *subsys_name = NULL;
184
185 dev->dsp.dom_t = MSM_SLIM_DOM_NONE;
186 ret = of_property_read_string(dev->dev->of_node,
187 "qcom,subsys-name", &subsys_name);
188 if (ret)
189 subsys_name = "adsp";
190
191 dev->dsp.nb.notifier_call = dsp_domr_notify_cb;
192 dev->dsp.domr = subsys_notif_register_notifier(subsys_name,
193 &dev->dsp.nb);
194 if (IS_ERR_OR_NULL(dev->dsp.domr)) {
195 dev_err(dev->dev,
196 "subsys_notif_register_notifier failed %ld",
197 PTR_ERR(dev->dsp.domr));
198 return;
199 }
200 dev->dsp.dom_t = MSM_SLIM_DOM_SS;
201 SLIM_INFO(dev, "reg-SSR with:%s, PDR not available\n",
202 subsys_name);
203}
204
205static int dsp_domr_notify_cb(struct notifier_block *n, unsigned long code,
206 void *_cmd)
207{
208 int cur = -1;
209 struct msm_slim_ss *dsp = container_of(n, struct msm_slim_ss, nb);
210 struct msm_slim_ctrl *dev = container_of(dsp, struct msm_slim_ctrl,
211 dsp);
212 struct pd_qmi_client_data *reg;
213
214 SLIM_INFO(dev, "SLIM DSP SSR/PDR notify cb:0x%lx, type:%d\n",
215 code, dsp->dom_t);
216 switch (code) {
217 case SUBSYS_BEFORE_SHUTDOWN:
218 case SERVREG_NOTIF_SERVICE_STATE_DOWN_V01:
219 SLIM_INFO(dev, "SLIM DSP SSR notify cb:%lu\n", code);
220 atomic_set(&dev->ssr_in_progress, 1);
221 /* wait for current transaction */
222 mutex_lock(&dev->tx_lock);
 223		/* make sure autosuspend is not called until the ADSP comes up */
224 pm_runtime_get_noresume(dev->dev);
225 dev->state = MSM_CTRL_DOWN;
 226		dev->qmi.deferred_resp = false;
 227		msm_slim_sps_exit(dev, false);
228 ngd_dom_down(dev);
229 mutex_unlock(&dev->tx_lock);
230 break;
231 case LOCATOR_UP:
232 reg = _cmd;
233 if (!reg || reg->total_domains != 1) {
234 SLIM_WARN(dev, "error locating audio-PD\n");
235 if (reg)
236 SLIM_WARN(dev, "audio-PDs matched:%d\n",
237 reg->total_domains);
238
239 /* Fall back to SSR */
240 ngd_reg_ssr(dev);
241 return NOTIFY_DONE;
242 }
243 dev->dsp.domr = service_notif_register_notifier(
244 reg->domain_list->name,
245 reg->domain_list->instance_id,
246 &dev->dsp.nb,
247 &cur);
248 SLIM_INFO(dev, "reg-PD client:%s with service:%s\n",
249 reg->client_name, reg->service_name);
250 SLIM_INFO(dev, "reg-PD dom:%s instance:%d, cur:%d\n",
251 reg->domain_list->name,
252 reg->domain_list->instance_id, cur);
253 if (IS_ERR_OR_NULL(dev->dsp.domr))
254 ngd_reg_ssr(dev);
255 else
256 dev->dsp.dom_t = MSM_SLIM_DOM_PD;
257 break;
258 case LOCATOR_DOWN:
259 ngd_reg_ssr(dev);
260 default:
261 break;
262 }
263 return NOTIFY_DONE;
264}
265
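/*
 * Register for notifications about the ADSP audio protection domain
 * ("avs/audio") via the PD locator; if the locator request fails, fall
 * back to plain subsystem-restart (SSR) notifications in ngd_reg_ssr().
 */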
266static void ngd_dom_init(struct msm_slim_ctrl *dev)
267{
268 struct pd_qmi_client_data reg;
269 int ret;
270
271 memset(&reg, 0, sizeof(struct pd_qmi_client_data));
272 dev->dsp.nb.priority = 4;
273 dev->dsp.nb.notifier_call = dsp_domr_notify_cb;
274 scnprintf(reg.client_name, QMI_SERVREG_LOC_NAME_LENGTH_V01, "appsngd%d",
275 dev->ctrl.nr);
276 scnprintf(reg.service_name, QMI_SERVREG_LOC_NAME_LENGTH_V01,
277 "avs/audio");
278 ret = get_service_location(reg.client_name, reg.service_name,
279 &dev->dsp.nb);
280 if (ret)
281 ngd_reg_ssr(dev);
282}
283
284static int mdm_ssr_notify_cb(struct notifier_block *n, unsigned long code,
285 void *_cmd)
286{
287 void __iomem *ngd;
288 struct msm_slim_ss *ext_mdm = container_of(n, struct msm_slim_ss, nb);
289 struct msm_slim_ctrl *dev = container_of(ext_mdm, struct msm_slim_ctrl,
290 ext_mdm);
291 struct slim_controller *ctrl = &dev->ctrl;
292 u32 laddr;
293 struct slim_device *sbdev;
294
295 switch (code) {
296 case SUBSYS_BEFORE_SHUTDOWN:
297 SLIM_INFO(dev, "SLIM %lu external_modem SSR notify cb\n", code);
298 /* vote for runtime-pm so that ADSP doesn't go down */
299 msm_slim_get_ctrl(dev);
300 /*
301 * checking framer here will wake-up ADSP and may avoid framer
302 * handover later
303 */
304 msm_slim_qmi_check_framer_request(dev);
305 dev->ext_mdm.state = MSM_CTRL_DOWN;
306 msm_slim_put_ctrl(dev);
307 break;
308 case SUBSYS_AFTER_POWERUP:
309 if (dev->ext_mdm.state != MSM_CTRL_DOWN)
310 return NOTIFY_DONE;
311 SLIM_INFO(dev,
312 "SLIM %lu external_modem SSR notify cb\n", code);
313 /* vote for runtime-pm so that ADSP doesn't go down */
314 msm_slim_get_ctrl(dev);
315 msm_slim_qmi_check_framer_request(dev);
 316		/* If NGD enumeration is lost, we will need to power it up again */
317 ngd = dev->base + NGD_BASE(dev->ctrl.nr, dev->ver);
318 laddr = readl_relaxed(ngd + NGD_STATUS);
319 if (!(laddr & NGD_LADDR)) {
320 mutex_lock(&dev->tx_lock);
321 /* runtime-pm state should be consistent with HW */
322 pm_runtime_disable(dev->dev);
323 pm_runtime_set_suspended(dev->dev);
324 dev->state = MSM_CTRL_DOWN;
325 mutex_unlock(&dev->tx_lock);
326 SLIM_INFO(dev,
327 "SLIM MDM SSR (active framer on MDM) dev-down\n");
328 list_for_each_entry(sbdev, &ctrl->devs, dev_list)
329 slim_report_absent(sbdev);
330 ngd_slim_runtime_resume(dev->dev);
331 pm_runtime_set_active(dev->dev);
332 pm_runtime_enable(dev->dev);
333 }
334 dev->ext_mdm.state = MSM_CTRL_AWAKE;
335 msm_slim_put_ctrl(dev);
336 break;
337 default:
338 break;
339 }
340 return NOTIFY_DONE;
341}
342
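/*
 * Allocate a transaction ID (TID) for a message that expects a reply:
 * TIDs are handed out sequentially until 255 and then recycled by
 * scanning for a free slot. The transaction and its completion are
 * stored in ctrl->txnt[] so the RX path can match the response.
 */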
343static int ngd_get_tid(struct slim_controller *ctrl, struct slim_msg_txn *txn,
344 u8 *tid, struct completion *done)
345{
346 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
347 unsigned long flags;
348
349 spin_lock_irqsave(&ctrl->txn_lock, flags);
350 if (ctrl->last_tid <= 255) {
351 dev->msg_cnt = ctrl->last_tid;
352 ctrl->last_tid++;
353 } else {
354 int i;
355
356 for (i = 0; i < 256; i++) {
357 dev->msg_cnt = ((dev->msg_cnt + 1) & 0xFF);
358 if (ctrl->txnt[dev->msg_cnt] == NULL)
359 break;
360 }
361 if (i >= 256) {
362 dev_err(&ctrl->dev, "out of TID");
363 spin_unlock_irqrestore(&ctrl->txn_lock, flags);
364 return -ENOMEM;
365 }
366 }
367 ctrl->txnt[dev->msg_cnt] = txn;
368 txn->tid = dev->msg_cnt;
369 txn->comp = done;
370 *tid = dev->msg_cnt;
371 spin_unlock_irqrestore(&ctrl->txn_lock, flags);
372 return 0;
373}
374
375static void slim_reinit_tx_msgq(struct msm_slim_ctrl *dev)
376{
377 /*
 378	 * disconnect/reconnect the pipe so that subsequent
 379	 * transactions don't time out due to unavailable
 380	 * descriptors
381 */
382 if (dev->state != MSM_CTRL_DOWN) {
383 msm_slim_disconnect_endp(dev, &dev->tx_msgq,
384 &dev->use_tx_msgqs);
385 msm_slim_connect_endp(dev, &dev->tx_msgq);
386 }
387}
388
389static int ngd_check_hw_status(struct msm_slim_ctrl *dev)
390{
391 void __iomem *ngd = dev->base + NGD_BASE(dev->ctrl.nr, dev->ver);
392 u32 laddr = readl_relaxed(ngd + NGD_STATUS);
393 int ret = 0;
394
395 /* Lost logical addr due to noise */
396 if (!(laddr & NGD_LADDR)) {
397 SLIM_WARN(dev, "NGD lost LADDR: status:0x%x\n", laddr);
398 ret = ngd_slim_power_up(dev, false);
399
400 if (ret) {
401 SLIM_WARN(dev, "slim resume ret:%d, state:%d\n",
402 ret, dev->state);
403 ret = -EREMOTEIO;
404 }
405 }
406 return ret;
407}
408
409static int ngd_xfer_msg(struct slim_controller *ctrl, struct slim_msg_txn *txn)
410{
411 DECLARE_COMPLETION_ONSTACK(done);
412 DECLARE_COMPLETION_ONSTACK(tx_sent);
413
414 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
415 u32 *pbuf;
416 u8 *puc;
417 int ret = 0;
418 u8 la = txn->la;
419 u8 txn_mt;
420 u16 txn_mc = txn->mc;
421 u8 wbuf[SLIM_MSGQ_BUF_LEN];
422 bool report_sat = false;
423 bool sync_wr = true;
424
425 if (txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG)
426 return -EPROTONOSUPPORT;
427
428 if (txn->mt == SLIM_MSG_MT_CORE &&
429 (txn->mc >= SLIM_MSG_MC_BEGIN_RECONFIGURATION &&
430 txn->mc <= SLIM_MSG_MC_RECONFIGURE_NOW))
431 return 0;
432
433 if (txn->mc == SLIM_USR_MC_REPORT_SATELLITE &&
434 txn->mt == SLIM_MSG_MT_SRC_REFERRED_USER)
435 report_sat = true;
436 else
437 mutex_lock(&dev->tx_lock);
438
439 if (!report_sat && !pm_runtime_enabled(dev->dev) &&
440 dev->state == MSM_CTRL_ASLEEP) {
441 /*
 442		 * Counterpart of system suspend when runtime-pm is not enabled.
 443		 * This way, resume can be left empty and the device will be put
 444		 * into active mode only if a client requests anything on the bus.
 445		 * If the state was DOWN, the SSR UP notification will take
 446		 * care of putting the device in the active state.
447 */
448 mutex_unlock(&dev->tx_lock);
449 ret = ngd_slim_runtime_resume(dev->dev);
450
451 if (ret) {
452 SLIM_ERR(dev, "slim resume failed ret:%d, state:%d",
453 ret, dev->state);
454 return -EREMOTEIO;
455 }
456 mutex_lock(&dev->tx_lock);
457 }
458
459 /* If txn is tried when controller is down, wait for ADSP to boot */
460 if (!report_sat) {
461 if (dev->state == MSM_CTRL_DOWN) {
462 u8 mc = (u8)txn->mc;
463 int timeout;
464
465 mutex_unlock(&dev->tx_lock);
466 SLIM_INFO(dev, "ADSP slimbus not up yet\n");
467 /*
468 * Messages related to data channel management can't
469 * wait since they are holding reconfiguration lock.
 470			 * clk_pause in resume (which can change state back to
 471			 * MSM_CTRL_AWAKE) will need that lock.
 472			 * Port disconnection and channel removal calls should pass
 473			 * through since there is no activity on the bus, and
 474			 * those calls are triggered by clients from the
 475			 * device_down callback in that situation.
 476			 * Returning 0 on the disconnections and
 477			 * removals will ensure a consistent state of channels
 478			 * and ports with the HW.
479 * Remote requests to remove channel/port will be
480 * returned from the path where they wait on
481 * acknowledgment from ADSP
482 */
483 if ((txn->mt == SLIM_MSG_MT_DEST_REFERRED_USER) &&
484 ((mc == SLIM_USR_MC_CHAN_CTRL ||
485 mc == SLIM_USR_MC_DISCONNECT_PORT ||
486 mc == SLIM_USR_MC_RECONFIG_NOW)))
487 return -EREMOTEIO;
488 if ((txn->mt == SLIM_MSG_MT_CORE) &&
489 ((mc == SLIM_MSG_MC_DISCONNECT_PORT ||
490 mc == SLIM_MSG_MC_NEXT_REMOVE_CHANNEL ||
491 mc == SLIM_USR_MC_RECONFIG_NOW)))
492 return 0;
493 if ((txn->mt == SLIM_MSG_MT_CORE) &&
494 ((mc >= SLIM_MSG_MC_CONNECT_SOURCE &&
495 mc <= SLIM_MSG_MC_CHANGE_CONTENT) ||
496 (mc >= SLIM_MSG_MC_BEGIN_RECONFIGURATION &&
497 mc <= SLIM_MSG_MC_RECONFIGURE_NOW)))
498 return -EREMOTEIO;
499 if ((txn->mt == SLIM_MSG_MT_DEST_REFERRED_USER) &&
500 ((mc >= SLIM_USR_MC_DEFINE_CHAN &&
501 mc < SLIM_USR_MC_DISCONNECT_PORT)))
502 return -EREMOTEIO;
503 timeout = wait_for_completion_timeout(&dev->ctrl_up,
504 HZ);
505 if (!timeout)
506 return -ETIMEDOUT;
507 mutex_lock(&dev->tx_lock);
508 }
509
510 mutex_unlock(&dev->tx_lock);
511 ret = msm_slim_get_ctrl(dev);
512 mutex_lock(&dev->tx_lock);
513 /*
 514		 * Runtime-pm's callbacks are not called until runtime-pm's
 515		 * error status is cleared.
 516		 * Setting the runtime status to suspended clears the error.
 517		 * It also makes the HW status consistent with what SW has here.
518 */
519 if ((pm_runtime_enabled(dev->dev) && ret < 0) ||
520 dev->state >= MSM_CTRL_ASLEEP) {
521 SLIM_ERR(dev, "slim ctrl vote failed ret:%d, state:%d",
522 ret, dev->state);
523 pm_runtime_set_suspended(dev->dev);
524 mutex_unlock(&dev->tx_lock);
525 msm_slim_put_ctrl(dev);
526 return -EREMOTEIO;
527 }
528 ret = ngd_check_hw_status(dev);
529 if (ret) {
530 mutex_unlock(&dev->tx_lock);
531 msm_slim_put_ctrl(dev);
532 return ret;
533 }
534 }
535
536 if (txn->mt == SLIM_MSG_MT_CORE &&
537 (txn->mc == SLIM_MSG_MC_CONNECT_SOURCE ||
538 txn->mc == SLIM_MSG_MC_CONNECT_SINK ||
539 txn->mc == SLIM_MSG_MC_DISCONNECT_PORT)) {
540 int i = 0;
541
542 if (txn->mc != SLIM_MSG_MC_DISCONNECT_PORT)
543 SLIM_INFO(dev,
544 "Connect port: laddr 0x%x port_num %d chan_num %d\n",
545 txn->la, txn->wbuf[0], txn->wbuf[1]);
546 else
547 SLIM_INFO(dev,
548 "Disconnect port: laddr 0x%x port_num %d\n",
549 txn->la, txn->wbuf[0]);
550 txn->mt = SLIM_MSG_MT_DEST_REFERRED_USER;
551 if (txn->mc == SLIM_MSG_MC_CONNECT_SOURCE)
552 txn->mc = SLIM_USR_MC_CONNECT_SRC;
553 else if (txn->mc == SLIM_MSG_MC_CONNECT_SINK)
554 txn->mc = SLIM_USR_MC_CONNECT_SINK;
555 else if (txn->mc == SLIM_MSG_MC_DISCONNECT_PORT)
556 txn->mc = SLIM_USR_MC_DISCONNECT_PORT;
557 if (txn->la == SLIM_LA_MGR) {
558 if (dev->pgdla == SLIM_LA_MGR) {
559 u8 ea[] = {0, QC_DEVID_PGD, 0, 0, QC_MFGID_MSB,
560 QC_MFGID_LSB};
561 ea[2] = (u8)(dev->pdata.eapc & 0xFF);
562 ea[3] = (u8)((dev->pdata.eapc & 0xFF00) >> 8);
563 mutex_unlock(&dev->tx_lock);
564 ret = dev->ctrl.get_laddr(&dev->ctrl, ea, 6,
565 &dev->pgdla);
566 SLIM_DBG(dev, "SLIM PGD LA:0x%x, ret:%d\n",
567 dev->pgdla, ret);
568 if (ret) {
569 SLIM_ERR(dev,
570 "Incorrect SLIM-PGD EAPC:0x%x\n",
571 dev->pdata.eapc);
572 return ret;
573 }
574 mutex_lock(&dev->tx_lock);
575 }
576 txn->la = dev->pgdla;
577 }
578 wbuf[i++] = txn->la;
579 la = SLIM_LA_MGR;
580 wbuf[i++] = txn->wbuf[0];
581 if (txn->mc != SLIM_USR_MC_DISCONNECT_PORT)
582 wbuf[i++] = txn->wbuf[1];
583 ret = ngd_get_tid(ctrl, txn, &wbuf[i++], &done);
584 if (ret) {
585 SLIM_ERR(dev, "TID for connect/disconnect fail:%d\n",
586 ret);
587 goto ngd_xfer_err;
588 }
589 txn->len = i;
590 txn->wbuf = wbuf;
591 txn->rl = txn->len + 4;
592 }
593 txn->rl--;
594
595 if (txn->len > SLIM_MSGQ_BUF_LEN || txn->rl > SLIM_MSGQ_BUF_LEN) {
 596		SLIM_WARN(dev, "msg exceeds HW lim:%d, rl:%d, mc:0x%x, mt:0x%x",
597 txn->len, txn->rl, txn->mc, txn->mt);
598 ret = -EDQUOT;
599 goto ngd_xfer_err;
600 }
601
602 if (txn->mt == SLIM_MSG_MT_CORE && txn->comp &&
603 dev->use_tx_msgqs == MSM_MSGQ_ENABLED &&
604 (txn_mc != SLIM_MSG_MC_REQUEST_INFORMATION &&
605 txn_mc != SLIM_MSG_MC_REQUEST_VALUE &&
606 txn_mc != SLIM_MSG_MC_REQUEST_CHANGE_VALUE &&
607 txn_mc != SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION)) {
608 sync_wr = false;
609 pbuf = msm_get_msg_buf(dev, txn->rl, txn->comp);
610 } else if (txn->mt == SLIM_MSG_MT_DEST_REFERRED_USER &&
611 dev->use_tx_msgqs == MSM_MSGQ_ENABLED &&
612 txn->mc == SLIM_USR_MC_REPEAT_CHANGE_VALUE &&
613 txn->comp) {
614 sync_wr = false;
615 pbuf = msm_get_msg_buf(dev, txn->rl, txn->comp);
616 } else {
617 pbuf = msm_get_msg_buf(dev, txn->rl, &tx_sent);
618 }
619
620 if (!pbuf) {
621 SLIM_ERR(dev, "Message buffer unavailable\n");
622 ret = -ENOMEM;
623 goto ngd_xfer_err;
624 }
625 dev->err = 0;
626
627 if (txn->dt == SLIM_MSG_DEST_ENUMADDR) {
628 ret = -EPROTONOSUPPORT;
629 goto ngd_xfer_err;
630 }
631 if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
632 *pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, txn->mc, 0,
633 la);
634 else
635 *pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, txn->mc, 1,
636 la);
637 if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
638 puc = ((u8 *)pbuf) + 3;
639 else
640 puc = ((u8 *)pbuf) + 2;
641 if (txn->rbuf)
642 *(puc++) = txn->tid;
643 if (((txn->mt == SLIM_MSG_MT_CORE) &&
644 ((txn->mc >= SLIM_MSG_MC_REQUEST_INFORMATION &&
645 txn->mc <= SLIM_MSG_MC_REPORT_INFORMATION) ||
646 (txn->mc >= SLIM_MSG_MC_REQUEST_VALUE &&
647 txn->mc <= SLIM_MSG_MC_CHANGE_VALUE))) ||
648 (txn->mc == SLIM_USR_MC_REPEAT_CHANGE_VALUE &&
649 txn->mt == SLIM_MSG_MT_DEST_REFERRED_USER)) {
650 *(puc++) = (txn->ec & 0xFF);
651 *(puc++) = (txn->ec >> 8)&0xFF;
652 }
653 if (txn->wbuf)
654 memcpy(puc, txn->wbuf, txn->len);
655 if (txn->mt == SLIM_MSG_MT_DEST_REFERRED_USER &&
656 (txn->mc == SLIM_USR_MC_CONNECT_SRC ||
657 txn->mc == SLIM_USR_MC_CONNECT_SINK ||
658 txn->mc == SLIM_USR_MC_DISCONNECT_PORT) && txn->wbuf &&
659 wbuf[0] == dev->pgdla) {
660 if (txn->mc != SLIM_USR_MC_DISCONNECT_PORT)
661 dev->err = msm_slim_connect_pipe_port(dev, wbuf[1]);
662 else
663 writel_relaxed(0, PGD_PORT(PGD_PORT_CFGn,
664 (dev->pipes[wbuf[1]].port_b),
665 dev->ver));
666 if (dev->err) {
667 SLIM_ERR(dev, "pipe-port connect err:%d\n", dev->err);
668 goto ngd_xfer_err;
669 }
670 /* Add port-base to port number if this is manager side port */
671 puc[1] = (u8)dev->pipes[wbuf[1]].port_b;
672 }
673 dev->err = 0;
674 /*
675 * If it's a read txn, it may be freed if a response is received by
 676	 * the receive thread before reaching the end of this function.
 677	 * mc, mt may have changed to convert the standard slimbus code/type to
 678	 * a satellite user-defined message, so capture them again here.
679 */
680 txn_mc = txn->mc;
681 txn_mt = txn->mt;
682 ret = msm_send_msg_buf(dev, pbuf, txn->rl,
683 NGD_BASE(dev->ctrl.nr, dev->ver) + NGD_TX_MSG);
684 if (!ret && sync_wr) {
685 int i;
686 int timeout = wait_for_completion_timeout(&tx_sent, HZ);
687
688 if (!timeout && dev->use_tx_msgqs == MSM_MSGQ_ENABLED) {
689 struct msm_slim_endp *endpoint = &dev->tx_msgq;
690 struct sps_mem_buffer *mem = &endpoint->buf;
691 u32 idx = (u32) (((u8 *)pbuf - (u8 *)mem->base) /
692 SLIM_MSGQ_BUF_LEN);
693 phys_addr_t addr = mem->phys_base +
694 (idx * SLIM_MSGQ_BUF_LEN);
695 ret = -ETIMEDOUT;
696 SLIM_WARN(dev, "timeout, BAM desc_idx:%d, phys:%llx",
697 idx, (u64)addr);
698 for (i = 0; i < (SLIM_MSGQ_BUF_LEN >> 2) ; i++)
699 SLIM_WARN(dev, "timeout:bam-desc[%d]:0x%x",
700 i, *(pbuf + i));
701 if (idx < MSM_TX_BUFS)
702 dev->wr_comp[idx] = NULL;
703 slim_reinit_tx_msgq(dev);
704 } else if (!timeout) {
705 ret = -ETIMEDOUT;
706 SLIM_WARN(dev, "timeout non-BAM TX,len:%d", txn->rl);
707 for (i = 0; i < (SLIM_MSGQ_BUF_LEN >> 2) ; i++)
708 SLIM_WARN(dev, "timeout:txbuf[%d]:0x%x", i,
709 dev->tx_buf[i]);
710 } else {
711 ret = dev->err;
712 }
713 }
714 if (ret) {
715 u32 conf, stat, rx_msgq, int_stat, int_en, int_clr;
716 void __iomem *ngd = dev->base + NGD_BASE(dev->ctrl.nr,
717 dev->ver);
718 SLIM_WARN(dev, "TX failed :MC:0x%x,mt:0x%x, ret:%d, ver:%d\n",
719 txn_mc, txn_mt, ret, dev->ver);
720 conf = readl_relaxed(ngd);
721 stat = readl_relaxed(ngd + NGD_STATUS);
722 rx_msgq = readl_relaxed(ngd + NGD_RX_MSGQ_CFG);
723 int_stat = readl_relaxed(ngd + NGD_INT_STAT);
724 int_en = readl_relaxed(ngd + NGD_INT_EN);
725 int_clr = readl_relaxed(ngd + NGD_INT_CLR);
726
727 SLIM_WARN(dev, "conf:0x%x,stat:0x%x,rxmsgq:0x%x\n",
728 conf, stat, rx_msgq);
 729		SLIM_ERR(dev, "int_stat:0x%x,int_en:0x%x,int_clr:0x%x\n",
730 int_stat, int_en, int_clr);
731 }
732
733 if (txn_mt == SLIM_MSG_MT_DEST_REFERRED_USER &&
734 (txn_mc == SLIM_USR_MC_CONNECT_SRC ||
735 txn_mc == SLIM_USR_MC_CONNECT_SINK ||
736 txn_mc == SLIM_USR_MC_DISCONNECT_PORT)) {
737 int timeout;
738 unsigned long flags;
739
740 mutex_unlock(&dev->tx_lock);
741 msm_slim_put_ctrl(dev);
742 if (!ret) {
743 timeout = wait_for_completion_timeout(txn->comp, HZ);
744 /* remote side did not acknowledge */
745 if (!timeout)
746 ret = -EREMOTEIO;
747 else
748 ret = txn->ec;
749 }
750 if (ret) {
751 SLIM_ERR(dev,
752 "connect/disconnect:0x%x,tid:%d err:%d\n",
753 txn->mc, txn->tid, ret);
754 spin_lock_irqsave(&ctrl->txn_lock, flags);
755 ctrl->txnt[txn->tid] = NULL;
756 spin_unlock_irqrestore(&ctrl->txn_lock, flags);
757 }
758 return ret ? ret : dev->err;
759 }
760ngd_xfer_err:
761 if (!report_sat) {
762 mutex_unlock(&dev->tx_lock);
763 msm_slim_put_ctrl(dev);
764 }
765 return ret ? ret : dev->err;
766}
767
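/*
 * Build the 16-bit element code (EC) for a value-element access: the low
 * nibble carries the slice-size code with bit 3 always set here (byte-based
 * access), and the upper bits carry the 12-bit element start offset shifted
 * left by 4. For example, len = 4 and start_offset = 0x120 give
 * EC = 0x3 | 0x8 | (0x120 << 4) = 0x120B.
 */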
768static int ngd_get_ec(u16 start_offset, u8 len, u16 *ec)
769{
770 if (len > SLIM_MAX_VE_SLC_BYTES ||
771 start_offset > MSM_SLIM_VE_MAX_MAP_ADDR)
772 return -EINVAL;
773 if (len <= 4) {
774 *ec = len - 1;
775 } else if (len <= 8) {
776 if (len & 0x1)
777 return -EINVAL;
778 *ec = ((len >> 1) + 1);
779 } else {
780 if (len & 0x3)
781 return -EINVAL;
782 *ec = ((len >> 2) + 3);
783 }
784 *ec |= (0x8 | ((start_offset & 0xF) << 4));
785 *ec |= ((start_offset & 0xFF0) << 4);
786 return 0;
787}
788
789static int ngd_user_msg(struct slim_controller *ctrl, u8 la, u8 mt, u8 mc,
790 struct slim_ele_access *msg, u8 *buf, u8 len)
791{
792 int ret;
793 struct slim_msg_txn txn;
794
795 if (mt != SLIM_MSG_MT_DEST_REFERRED_USER ||
796 mc != SLIM_USR_MC_REPEAT_CHANGE_VALUE) {
797 return -EPROTONOSUPPORT;
798 }
799
800 ret = ngd_get_ec(msg->start_offset, len, &txn.ec);
801 if (ret)
802 return ret;
803 txn.la = la;
804 txn.mt = mt;
805 txn.mc = mc;
806 txn.dt = SLIM_MSG_DEST_LOGICALADDR;
807 txn.len = len;
808 txn.rl = len + 6;
809 txn.wbuf = buf;
810 txn.rbuf = NULL;
811 txn.comp = msg->comp;
812 return ngd_xfer_msg(ctrl, &txn);
813}
814
815static int ngd_bulk_cb(void *ctx, int err)
816{
817 if (ctx)
818 complete(ctx);
819 return err;
820}
821
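/*
 * Bulk write path: pack all of the value-element writes, each with its own
 * message header and element code, into one contiguous DMA buffer (entries
 * padded to word boundaries) and submit it to the TX BAM pipe as a single
 * EOT-flagged transfer. Completion is reported through the caller's
 * callback, or waited for locally when no callback is supplied.
 */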
822static int ngd_bulk_wr(struct slim_controller *ctrl, u8 la, u8 mt, u8 mc,
823 struct slim_val_inf msgs[], int n,
824 int (*comp_cb)(void *ctx, int err), void *ctx)
825{
826 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
827 int i, ret;
828 struct msm_slim_endp *endpoint = &dev->tx_msgq;
829 u32 *header;
830 DECLARE_COMPLETION_ONSTACK(done);
831
832 ret = msm_slim_get_ctrl(dev);
833 mutex_lock(&dev->tx_lock);
834
835 if ((pm_runtime_enabled(dev->dev) && ret < 0) ||
836 dev->state >= MSM_CTRL_ASLEEP) {
837 SLIM_WARN(dev, "vote failed/SSR in-progress ret:%d, state:%d",
838 ret, dev->state);
839 pm_runtime_set_suspended(dev->dev);
840 mutex_unlock(&dev->tx_lock);
841 msm_slim_put_ctrl(dev);
842 return -EREMOTEIO;
843 }
844 if (!pm_runtime_enabled(dev->dev) && dev->state == MSM_CTRL_ASLEEP) {
845 mutex_unlock(&dev->tx_lock);
846 ret = ngd_slim_runtime_resume(dev->dev);
847
848 if (ret) {
849 SLIM_ERR(dev, "slim resume failed ret:%d, state:%d",
850 ret, dev->state);
851 return -EREMOTEIO;
852 }
853 mutex_lock(&dev->tx_lock);
854 }
855
856 ret = ngd_check_hw_status(dev);
857 if (ret) {
858 mutex_unlock(&dev->tx_lock);
859 msm_slim_put_ctrl(dev);
860 return ret;
861 }
862
863 if (dev->use_tx_msgqs != MSM_MSGQ_ENABLED) {
864 SLIM_WARN(dev, "bulk wr not supported");
865 ret = -EPROTONOSUPPORT;
866 goto retpath;
867 }
868 if (dev->bulk.in_progress) {
869 SLIM_WARN(dev, "bulk wr in progress:");
870 ret = -EAGAIN;
871 goto retpath;
872 }
873 dev->bulk.in_progress = true;
874 /* every txn has 5 bytes of overhead: la, mc, mt, ec, len */
875 dev->bulk.size = n * 5;
876 for (i = 0; i < n; i++) {
877 dev->bulk.size += msgs[i].num_bytes;
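		/*
		 * Round each entry up to a 32-bit boundary: num_bytes + 1 is
		 * congruent to num_bytes + 5 (payload plus header) modulo 4,
		 * so this adds the padding needed after the 5 overhead bytes
		 * (and a full spare word when already aligned).
		 */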
878 dev->bulk.size += (4 - ((msgs[i].num_bytes + 1) & 0x3));
879 }
880
881 if (dev->bulk.size > 0xffff) {
882 SLIM_WARN(dev, "len exceeds limit, split bulk and retry");
883 ret = -EDQUOT;
884 goto retpath;
885 }
886 if (dev->bulk.size > dev->bulk.buf_sz) {
887 void *temp = krealloc(dev->bulk.base, dev->bulk.size,
888 GFP_KERNEL | GFP_DMA);
889 if (!temp) {
890 ret = -ENOMEM;
891 goto retpath;
892 }
893 dev->bulk.base = temp;
894 dev->bulk.buf_sz = dev->bulk.size;
895 }
896
897 header = dev->bulk.base;
898 for (i = 0; i < n; i++) {
899 u8 *buf = (u8 *)header;
900 int rl = msgs[i].num_bytes + 5;
901 u16 ec;
902
903 *header = SLIM_MSG_ASM_FIRST_WORD(rl, mt, mc, 0, la);
904 buf += 3;
905 ret = ngd_get_ec(msgs[i].start_offset, msgs[i].num_bytes, &ec);
906 if (ret)
907 goto retpath;
908 *(buf++) = (ec & 0xFF);
909 *(buf++) = (ec >> 8) & 0xFF;
910 memcpy(buf, msgs[i].wbuf, msgs[i].num_bytes);
911 buf += msgs[i].num_bytes;
912 header += (rl >> 2);
913 if (rl & 3) {
914 header++;
915 memset(buf, 0, ((u8 *)header - buf));
916 }
917 }
918 header = dev->bulk.base;
919 if (comp_cb) {
920 dev->bulk.cb = comp_cb;
921 dev->bulk.ctx = ctx;
922 } else {
923 dev->bulk.cb = ngd_bulk_cb;
924 dev->bulk.ctx = &done;
925 }
926 dev->bulk.wr_dma = dma_map_single(dev->dev, dev->bulk.base,
927 dev->bulk.size, DMA_TO_DEVICE);
928 if (dma_mapping_error(dev->dev, dev->bulk.wr_dma)) {
929 ret = -ENOMEM;
930 goto retpath;
931 }
932
933 ret = sps_transfer_one(endpoint->sps, dev->bulk.wr_dma, dev->bulk.size,
934 NULL, SPS_IOVEC_FLAG_EOT);
935 if (ret) {
936 SLIM_WARN(dev, "sps transfer one returned error:%d", ret);
937 goto retpath;
938 }
939 if (dev->bulk.cb == ngd_bulk_cb) {
940 int timeout = wait_for_completion_timeout(&done, HZ);
941
942 if (!timeout) {
943 SLIM_WARN(dev, "timeout for bulk wr");
944 dma_unmap_single(dev->dev, dev->bulk.wr_dma,
945 dev->bulk.size, DMA_TO_DEVICE);
946 ret = -ETIMEDOUT;
947 }
948 }
949retpath:
950 if (ret) {
951 dev->bulk.in_progress = false;
952 dev->bulk.ctx = NULL;
953 dev->bulk.wr_dma = 0;
954 slim_reinit_tx_msgq(dev);
955 }
956 mutex_unlock(&dev->tx_lock);
957 msm_slim_put_ctrl(dev);
958 return ret;
959}
960
961static int ngd_xferandwait_ack(struct slim_controller *ctrl,
962 struct slim_msg_txn *txn)
963{
964 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
965 unsigned long flags;
966 int ret;
967
968 if (dev->state == MSM_CTRL_DOWN) {
969 /*
970 * no need to send anything to the bus due to SSR
971 * transactions related to channel removal marked as success
972 * since HW is down
973 */
974 if ((txn->mt == SLIM_MSG_MT_DEST_REFERRED_USER) &&
975 ((txn->mc >= SLIM_USR_MC_CHAN_CTRL &&
976 txn->mc <= SLIM_USR_MC_REQ_BW) ||
977 txn->mc == SLIM_USR_MC_DISCONNECT_PORT)) {
978 spin_lock_irqsave(&ctrl->txn_lock, flags);
979 ctrl->txnt[txn->tid] = NULL;
980 spin_unlock_irqrestore(&ctrl->txn_lock, flags);
981 return 0;
982 }
983 }
984
985 ret = ngd_xfer_msg(ctrl, txn);
986 if (!ret) {
987 int timeout;
988
989 timeout = wait_for_completion_timeout(txn->comp, HZ);
990 if (!timeout)
991 ret = -ETIMEDOUT;
992 else
993 ret = txn->ec;
994 }
995
996 if (ret) {
997 if (ret != -EREMOTEIO || txn->mc != SLIM_USR_MC_CHAN_CTRL)
998 SLIM_ERR(dev, "master msg:0x%x,tid:%d ret:%d\n",
999 txn->mc, txn->tid, ret);
1000 spin_lock_irqsave(&ctrl->txn_lock, flags);
1001 ctrl->txnt[txn->tid] = NULL;
1002 spin_unlock_irqrestore(&ctrl->txn_lock, flags);
1003 }
1004
1005 return ret;
1006}
1007
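/*
 * Bandwidth and channel scheduling on the NGD is delegated to the ADSP
 * master: this sends user messages to reserve message bandwidth, to
 * define/activate the channels queued on mark_define and to remove the ones
 * on mark_removal, each batch followed by a RECONFIGURE_NOW request that is
 * acknowledged by the master.
 */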
1008static int ngd_allocbw(struct slim_device *sb, int *subfrmc, int *clkgear)
1009{
1010 int ret = 0, num_chan = 0;
1011 struct slim_pending_ch *pch;
1012 struct slim_msg_txn txn;
1013 struct slim_controller *ctrl = sb->ctrl;
1014 DECLARE_COMPLETION_ONSTACK(done);
1015 u8 wbuf[SLIM_MSGQ_BUF_LEN];
1016 struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
1017
1018 *clkgear = ctrl->clkgear;
1019 *subfrmc = 0;
1020 txn.mt = SLIM_MSG_MT_DEST_REFERRED_USER;
1021 txn.dt = SLIM_MSG_DEST_LOGICALADDR;
1022 txn.la = SLIM_LA_MGR;
1023 txn.len = 0;
1024 txn.ec = 0;
1025 txn.wbuf = wbuf;
1026 txn.rbuf = NULL;
1027
1028 if (ctrl->sched.msgsl != ctrl->sched.pending_msgsl) {
1029 SLIM_DBG(dev, "slim reserve BW for messaging: req: %d\n",
1030 ctrl->sched.pending_msgsl);
1031 txn.mc = SLIM_USR_MC_REQ_BW;
1032 wbuf[txn.len++] = ((sb->laddr & 0x1f) |
1033 ((u8)(ctrl->sched.pending_msgsl & 0x7) << 5));
1034 wbuf[txn.len++] = (u8)(ctrl->sched.pending_msgsl >> 3);
1035 ret = ngd_get_tid(ctrl, &txn, &wbuf[txn.len++], &done);
1036 if (ret)
1037 return ret;
1038 txn.rl = txn.len + 4;
1039 ret = ngd_xferandwait_ack(ctrl, &txn);
1040 if (ret)
1041 return ret;
1042
1043 txn.mc = SLIM_USR_MC_RECONFIG_NOW;
1044 txn.len = 2;
1045 wbuf[1] = sb->laddr;
1046 txn.rl = txn.len + 4;
1047 ret = ngd_get_tid(ctrl, &txn, &wbuf[0], &done);
1048 if (ret)
1049 return ret;
1050 ret = ngd_xferandwait_ack(ctrl, &txn);
1051 if (ret)
1052 return ret;
1053
1054 txn.len = 0;
1055 }
1056 list_for_each_entry(pch, &sb->mark_define, pending) {
1057 struct slim_ich *slc;
1058
1059 slc = &ctrl->chans[pch->chan];
1060 if (!slc) {
1061 SLIM_WARN(dev, "no channel in define?\n");
1062 return -ENXIO;
1063 }
1064 if (txn.len == 0) {
1065 /* Per protocol, only last 5 bits for client no. */
1066 wbuf[txn.len++] = (u8) (slc->prop.dataf << 5) |
1067 (sb->laddr & 0x1f);
1068 wbuf[txn.len] = slc->prop.sampleszbits >> 2;
1069 if (slc->srch && slc->prop.prot == SLIM_PUSH)
1070 slc->prop.prot = SLIM_PULL;
1071 if (slc->coeff == SLIM_COEFF_3)
1072 wbuf[txn.len] |= 1 << 5;
1073 wbuf[txn.len++] |= slc->prop.auxf << 6;
1074 wbuf[txn.len++] = slc->rootexp << 4 | slc->prop.prot;
1075 wbuf[txn.len++] = slc->prrate;
1076 ret = ngd_get_tid(ctrl, &txn, &wbuf[txn.len++], &done);
1077 if (ret) {
1078 SLIM_WARN(dev, "no tid for channel define?\n");
1079 return -ENXIO;
1080 }
1081 }
1082 num_chan++;
1083 wbuf[txn.len++] = slc->chan;
1084 SLIM_INFO(dev, "slim activate chan:%d, laddr: 0x%x\n",
1085 slc->chan, sb->laddr);
1086 }
1087 if (txn.len) {
1088 txn.mc = SLIM_USR_MC_DEF_ACT_CHAN;
1089 txn.rl = txn.len + 4;
1090 ret = ngd_xferandwait_ack(ctrl, &txn);
1091 if (ret)
1092 return ret;
1093
1094 txn.mc = SLIM_USR_MC_RECONFIG_NOW;
1095 txn.len = 2;
1096 wbuf[1] = sb->laddr;
1097 txn.rl = txn.len + 4;
1098 ret = ngd_get_tid(ctrl, &txn, &wbuf[0], &done);
1099 if (ret)
1100 return ret;
1101 ret = ngd_xferandwait_ack(ctrl, &txn);
1102 if (ret)
1103 return ret;
1104 }
1105 txn.len = 0;
1106 list_for_each_entry(pch, &sb->mark_removal, pending) {
1107 struct slim_ich *slc;
1108
1109 slc = &ctrl->chans[pch->chan];
1110 if (!slc) {
1111 SLIM_WARN(dev, "no channel in removal?\n");
1112 return -ENXIO;
1113 }
1114 if (txn.len == 0) {
1115 /* Per protocol, only last 5 bits for client no. */
1116 wbuf[txn.len++] = (u8) (SLIM_CH_REMOVE << 6) |
1117 (sb->laddr & 0x1f);
1118 ret = ngd_get_tid(ctrl, &txn, &wbuf[txn.len++], &done);
1119 if (ret) {
1120				SLIM_WARN(dev, "no tid for channel removal?\n");
1121 return -ENXIO;
1122 }
1123 }
1124 wbuf[txn.len++] = slc->chan;
1125 SLIM_INFO(dev, "slim remove chan:%d, laddr: 0x%x\n",
1126 slc->chan, sb->laddr);
1127 }
1128 if (txn.len) {
1129 txn.mc = SLIM_USR_MC_CHAN_CTRL;
1130 txn.rl = txn.len + 4;
1131 ret = ngd_xferandwait_ack(ctrl, &txn);
1132 /* HW restarting, channel removal should succeed */
1133 if (ret == -EREMOTEIO)
1134 return 0;
1135 else if (ret)
1136 return ret;
1137
1138 txn.mc = SLIM_USR_MC_RECONFIG_NOW;
1139 txn.len = 2;
1140 wbuf[1] = sb->laddr;
1141 txn.rl = txn.len + 4;
1142 ret = ngd_get_tid(ctrl, &txn, &wbuf[0], &done);
1143 if (ret)
1144 return ret;
1145 ret = ngd_xferandwait_ack(ctrl, &txn);
1146 if (ret)
1147 return ret;
1148 txn.len = 0;
1149 }
1150 return 0;
1151}
1152
1153static int ngd_set_laddr(struct slim_controller *ctrl, const u8 *ea,
1154 u8 elen, u8 laddr)
1155{
1156 return 0;
1157}
1158
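/*
 * Logical-address query: send the 6-byte enumeration address to the ADSP
 * master as a SLIM_USR_MC_ADDR_QUERY user message; the reply is decoded in
 * ngd_slim_rx() (SLIM_USR_MC_ADDR_REPLY), which fills in txn.la before
 * completing the transaction.
 */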
1159static int ngd_get_laddr(struct slim_controller *ctrl, const u8 *ea,
1160 u8 elen, u8 *laddr)
1161{
1162 int ret;
1163 u8 wbuf[10];
1164 struct slim_msg_txn txn;
1165 DECLARE_COMPLETION_ONSTACK(done);
1166
1167 txn.mt = SLIM_MSG_MT_DEST_REFERRED_USER;
1168 txn.dt = SLIM_MSG_DEST_LOGICALADDR;
1169 txn.la = SLIM_LA_MGR;
1170 txn.ec = 0;
1171 ret = ngd_get_tid(ctrl, &txn, &wbuf[0], &done);
1172 if (ret)
1173 return ret;
1174 memcpy(&wbuf[1], ea, elen);
1175 txn.mc = SLIM_USR_MC_ADDR_QUERY;
1176 txn.rl = 11;
1177 txn.len = 7;
1178 txn.wbuf = wbuf;
1179 txn.rbuf = NULL;
1180 ret = ngd_xferandwait_ack(ctrl, &txn);
1181 if (!ret && txn.la == 0xFF)
1182 ret = -ENXIO;
1183 else if (!ret)
1184 *laddr = txn.la;
1185 return ret;
1186}
1187
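/*
 * (Re)program the NGD: after SSR (state DOWN) the message-queue endpoints
 * are torn down and the SPS/BAM driver re-initialised; otherwise the RX/TX
 * message-queue endpoints are reconnected if the hardware lost their enable
 * bits. Finally NGD_CFG is written with the enable and MSGQ bits if the
 * value changed.
 */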
1188static void ngd_slim_setup(struct msm_slim_ctrl *dev)
1189{
1190 u32 new_cfg = NGD_CFG_ENABLE;
1191 u32 cfg = readl_relaxed(dev->base +
1192 NGD_BASE(dev->ctrl.nr, dev->ver));
1193 if (dev->state == MSM_CTRL_DOWN) {
1194 /* if called after SSR, cleanup and re-assign */
1195 if (dev->use_tx_msgqs != MSM_MSGQ_RESET)
1196 msm_slim_deinit_ep(dev, &dev->tx_msgq,
1197 &dev->use_tx_msgqs);
1198
1199 if (dev->use_rx_msgqs != MSM_MSGQ_RESET)
1200 msm_slim_deinit_ep(dev, &dev->rx_msgq,
1201 &dev->use_rx_msgqs);
1202
1203 msm_slim_sps_init(dev, dev->bam_mem,
1204 NGD_BASE(dev->ctrl.nr,
1205 dev->ver) + NGD_STATUS, true);
1206 } else {
1207 if (dev->use_rx_msgqs == MSM_MSGQ_DISABLED)
1208 goto setup_tx_msg_path;
1209
1210 if ((dev->use_rx_msgqs == MSM_MSGQ_ENABLED) &&
1211 (cfg & NGD_CFG_RX_MSGQ_EN))
1212 goto setup_tx_msg_path;
1213
1214 if (dev->use_rx_msgqs == MSM_MSGQ_ENABLED)
1215 msm_slim_disconnect_endp(dev, &dev->rx_msgq,
1216 &dev->use_rx_msgqs);
1217 msm_slim_connect_endp(dev, &dev->rx_msgq);
1218
1219setup_tx_msg_path:
1220 if (dev->use_tx_msgqs == MSM_MSGQ_DISABLED)
1221 goto ngd_enable;
1222 if (dev->use_tx_msgqs == MSM_MSGQ_ENABLED &&
1223 cfg & NGD_CFG_TX_MSGQ_EN)
1224 goto ngd_enable;
1225
1226 if (dev->use_tx_msgqs == MSM_MSGQ_ENABLED)
1227 msm_slim_disconnect_endp(dev, &dev->tx_msgq,
1228 &dev->use_tx_msgqs);
1229 msm_slim_connect_endp(dev, &dev->tx_msgq);
1230 }
1231ngd_enable:
1232
1233 if (dev->use_rx_msgqs == MSM_MSGQ_ENABLED)
1234 new_cfg |= NGD_CFG_RX_MSGQ_EN;
1235 if (dev->use_tx_msgqs == MSM_MSGQ_ENABLED)
1236 new_cfg |= NGD_CFG_TX_MSGQ_EN;
1237
1238 /* Enable NGD, and program MSGQs if not already */
1239 if (cfg == new_cfg)
1240 return;
1241
1242 writel_relaxed(new_cfg, dev->base + NGD_BASE(dev->ctrl.nr, dev->ver));
1243 /* make sure NGD MSG-Q config goes through */
1244 mb();
1245}
1246
1247static void ngd_slim_rx(struct msm_slim_ctrl *dev, u8 *buf)
1248{
1249 unsigned long flags;
1250 u8 mc, mt, len;
1251
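	/*
	 * Received message layout: byte 0 carries the length (5 LSBs) and
	 * the message type (3 MSBs), byte 1 the message code; for replies
	 * and ACKs byte 3 holds the TID and the payload starts at byte 4.
	 */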
1252 len = buf[0] & 0x1F;
1253 mt = (buf[0] >> 5) & 0x7;
1254 mc = buf[1];
1255 if (mc == SLIM_USR_MC_MASTER_CAPABILITY &&
1256 mt == SLIM_MSG_MT_SRC_REFERRED_USER)
1257 complete(&dev->rx_msgq_notify);
1258
1259 if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
1260 mc == SLIM_MSG_MC_REPLY_VALUE) {
1261 u8 tid = buf[3];
1262
1263 dev_dbg(dev->dev, "tid:%d, len:%d\n", tid, len);
1264 slim_msg_response(&dev->ctrl, &buf[4], tid,
1265 len - 4);
1266 pm_runtime_mark_last_busy(dev->dev);
1267 }
1268 if (mc == SLIM_USR_MC_ADDR_REPLY &&
1269 mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
1270 struct slim_msg_txn *txn;
1271 u8 failed_ea[6] = {0, 0, 0, 0, 0, 0};
1272
1273 spin_lock_irqsave(&dev->ctrl.txn_lock, flags);
1274 txn = dev->ctrl.txnt[buf[3]];
1275 if (!txn) {
1276 spin_unlock_irqrestore(&dev->ctrl.txn_lock, flags);
1277 SLIM_WARN(dev,
1278 "LADDR response after timeout, tid:0x%x\n",
1279 buf[3]);
1280 return;
1281 }
1282 if (memcmp(&buf[4], failed_ea, 6))
1283 txn->la = buf[10];
1284 dev->ctrl.txnt[buf[3]] = NULL;
1285 complete(txn->comp);
1286 spin_unlock_irqrestore(&dev->ctrl.txn_lock, flags);
1287 }
1288 if (mc == SLIM_USR_MC_GENERIC_ACK &&
1289 mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
1290 struct slim_msg_txn *txn;
1291
1292 spin_lock_irqsave(&dev->ctrl.txn_lock, flags);
1293 txn = dev->ctrl.txnt[buf[3]];
1294 if (!txn) {
1295 spin_unlock_irqrestore(&dev->ctrl.txn_lock, flags);
1296 SLIM_WARN(dev, "ACK received after timeout, tid:0x%x\n",
1297 buf[3]);
1298 return;
1299 }
1300 dev_dbg(dev->dev, "got response:tid:%d, response:0x%x",
1301 (int)buf[3], buf[4]);
1302 if (!(buf[4] & MSM_SAT_SUCCSS)) {
1303 SLIM_WARN(dev, "TID:%d, NACK code:0x%x\n", (int)buf[3],
1304 buf[4]);
1305 txn->ec = -EIO;
1306 }
1307 dev->ctrl.txnt[buf[3]] = NULL;
1308 complete(txn->comp);
1309 spin_unlock_irqrestore(&dev->ctrl.txn_lock, flags);
1310 }
1311}
1312
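/*
 * Bring the NGD out of low power: wait for the QMI handshake (when coming up
 * from DOWN), vote for the bus over QMI, then look at NGD_STATUS. If the
 * logical address is still present, only the retention path (reconnect
 * endpoints, re-arm interrupts) is taken; otherwise the whole block is
 * reprogrammed and we wait for the master-capability exchange (dev->reconf).
 */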
1313static int ngd_slim_power_up(struct msm_slim_ctrl *dev, bool mdm_restart)
1314{
1315 void __iomem *ngd;
1316 int timeout, retries = 0, ret = 0;
1317 enum msm_ctrl_state cur_state = dev->state;
1318 u32 laddr;
1319 u32 rx_msgq;
1320 u32 ngd_int = (NGD_INT_TX_NACKED_2 |
1321 NGD_INT_MSG_BUF_CONTE | NGD_INT_MSG_TX_INVAL |
1322 NGD_INT_IE_VE_CHG | NGD_INT_DEV_ERR |
1323 NGD_INT_TX_MSG_SENT);
1324
1325 if (!mdm_restart && cur_state == MSM_CTRL_DOWN) {
1326 int timeout = wait_for_completion_timeout(&dev->qmi.qmi_comp,
1327 HZ);
1328 if (!timeout) {
1329 SLIM_ERR(dev, "slimbus QMI init timed out\n");
1330 return -EREMOTEIO;
1331 }
1332 }
1333
1334hw_init_retry:
1335	/* No need to vote if the controller is not in low power mode */
1336 if (!mdm_restart &&
1337 (cur_state == MSM_CTRL_DOWN || cur_state == MSM_CTRL_ASLEEP)) {
1338 ret = msm_slim_qmi_power_request(dev, true);
1339 if (ret) {
1340 SLIM_WARN(dev, "SLIM power req failed:%d, retry:%d\n",
1341 ret, retries);
1342 if (!atomic_read(&dev->ssr_in_progress))
1343 msm_slim_qmi_power_request(dev, false);
1344 if (retries < INIT_MX_RETRIES &&
1345 !atomic_read(&dev->ssr_in_progress)) {
1346 retries++;
1347 goto hw_init_retry;
1348 }
1349 return ret;
1350 }
1351 }
1352 retries = 0;
1353
1354 if (!dev->ver) {
1355 dev->ver = readl_relaxed(dev->base);
1356 /* Version info in 16 MSbits */
1357 dev->ver >>= 16;
1358 }
1359 ngd = dev->base + NGD_BASE(dev->ctrl.nr, dev->ver);
1360 laddr = readl_relaxed(ngd + NGD_STATUS);
1361 if (laddr & NGD_LADDR) {
1362 u32 int_en = readl_relaxed(ngd + NGD_INT_EN);
1363
1364 /*
1365 * external MDM restart case where ADSP itself was active framer
1366 * For example, modem restarted when playback was active
1367 */
1368 if (cur_state == MSM_CTRL_AWAKE) {
1369 SLIM_INFO(dev, "Subsys restart: ADSP active framer\n");
1370 return 0;
1371 }
1372 /*
1373 * ADSP power collapse case, where HW wasn't reset.
1374 */
1375 if (int_en != 0)
1376 return 0;
1377
1378 /* Retention */
1379 if (dev->use_rx_msgqs == MSM_MSGQ_ENABLED)
1380 msm_slim_disconnect_endp(dev, &dev->rx_msgq,
1381 &dev->use_rx_msgqs);
1382 if (dev->use_tx_msgqs == MSM_MSGQ_ENABLED)
1383 msm_slim_disconnect_endp(dev, &dev->tx_msgq,
1384 &dev->use_tx_msgqs);
1385
1386 writel_relaxed(ngd_int, (dev->base + NGD_INT_EN +
1387 NGD_BASE(dev->ctrl.nr, dev->ver)));
1388
1389 rx_msgq = readl_relaxed(ngd + NGD_RX_MSGQ_CFG);
1390		/*
1391		 * Program with the minimum value so that the signal gets
1392		 * triggered immediately after receiving the message
1393		 */
1394 writel_relaxed((rx_msgq | SLIM_RX_MSGQ_TIMEOUT_VAL),
1395 (ngd + NGD_RX_MSGQ_CFG));
1396 /* reconnect BAM pipes if needed and enable NGD */
1397 ngd_slim_setup(dev);
1398 return 0;
1399 }
1400
1401 if (mdm_restart) {
1402 /*
1403 * external MDM SSR when MDM is active framer
1404 * ADSP will reset slimbus HW. disconnect BAM pipes so that
1405 * they can be connected after capability message is received.
1406 * Set device state to ASLEEP to be synchronous with the HW
1407 */
1408 /* make current state as DOWN */
1409 cur_state = MSM_CTRL_DOWN;
1410 SLIM_INFO(dev,
1411 "SLIM MDM restart: MDM active framer: reinit HW\n");
1412 /* disconnect BAM pipes */
1413 msm_slim_sps_exit(dev, false);
1414 dev->state = MSM_CTRL_DOWN;
1415 }
1416
1417capability_retry:
1418 /*
1419 * ADSP power collapse case (OR SSR), where HW was reset
1420 * BAM programming will happen when capability message is received
1421 */
1422 writel_relaxed(ngd_int, dev->base + NGD_INT_EN +
1423 NGD_BASE(dev->ctrl.nr, dev->ver));
1424
1425 rx_msgq = readl_relaxed(ngd + NGD_RX_MSGQ_CFG);
1426 /*
1427	 * Program with the minimum value so that the signal gets
1428	 * triggered immediately after receiving the message
1429 */
1430 writel_relaxed(rx_msgq|SLIM_RX_MSGQ_TIMEOUT_VAL,
1431 ngd + NGD_RX_MSGQ_CFG);
1432 /* make sure register got updated */
1433 mb();
1434
1435 /* reconnect BAM pipes if needed and enable NGD */
1436 ngd_slim_setup(dev);
1437
1438 timeout = wait_for_completion_timeout(&dev->reconf, HZ);
1439 if (!timeout) {
1440 u32 cfg = readl_relaxed(dev->base +
1441 NGD_BASE(dev->ctrl.nr, dev->ver));
1442 laddr = readl_relaxed(ngd + NGD_STATUS);
1443 SLIM_WARN(dev,
1444 "slim capability time-out:%d, stat:0x%x,cfg:0x%x\n",
1445 retries, laddr, cfg);
1446 if ((retries < INIT_MX_RETRIES) &&
1447 !atomic_read(&dev->ssr_in_progress)) {
1448 retries++;
1449 goto capability_retry;
1450 }
1451 return -ETIMEDOUT;
1452 }
1453	/* multiple transactions waiting on slimbus to power up? */
1454 if (cur_state == MSM_CTRL_DOWN)
1455 complete_all(&dev->ctrl_up);
1456 /* Resetting the log level */
1457 SLIM_RST_LOGLVL(dev);
1458 return 0;
1459}
1460
1461static int ngd_slim_enable(struct msm_slim_ctrl *dev, bool enable)
1462{
1463 int ret = 0;
1464
1465 if (enable) {
1466 ret = msm_slim_qmi_init(dev, false);
1467 /* controller state should be in sync with framework state */
1468 if (!ret) {
1469 complete(&dev->qmi.qmi_comp);
1470 if (!pm_runtime_enabled(dev->dev) ||
1471 !pm_runtime_suspended(dev->dev))
1472 ngd_slim_runtime_resume(dev->dev);
1473 else
1474 pm_runtime_resume(dev->dev);
1475 pm_runtime_mark_last_busy(dev->dev);
1476 pm_runtime_put(dev->dev);
1477 } else
1478 SLIM_ERR(dev, "qmi init fail, ret:%d, state:%d\n",
1479 ret, dev->state);
1480 } else {
1481 msm_slim_qmi_exit(dev);
1482 }
1483
1484 return ret;
1485}
1486
1487#ifdef CONFIG_PM
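/*
 * Power down is refused (-EBUSY) while any transaction is still waiting for
 * a response; otherwise the QMI power vote is dropped.
 */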
1488static int ngd_slim_power_down(struct msm_slim_ctrl *dev)
1489{
1490 unsigned long flags;
1491 int i;
1492 struct slim_controller *ctrl = &dev->ctrl;
1493
1494 spin_lock_irqsave(&ctrl->txn_lock, flags);
1495 /* Pending response for a message */
1496 for (i = 0; i < ctrl->last_tid; i++) {
1497 if (ctrl->txnt[i]) {
1498 spin_unlock_irqrestore(&ctrl->txn_lock, flags);
1499 SLIM_INFO(dev, "NGD down:txn-rsp for %d pending", i);
1500 return -EBUSY;
1501 }
1502 }
1503 spin_unlock_irqrestore(&ctrl->txn_lock, flags);
1504 return msm_slim_qmi_power_request(dev, false);
1505}
1506#endif
1507
1508static int ngd_slim_rx_msgq_thread(void *data)
1509{
1510 struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)data;
1511 struct completion *notify = &dev->rx_msgq_notify;
1512 int ret = 0;
1513
1514 while (!kthread_should_stop()) {
1515 struct slim_msg_txn txn;
1516 int retries = 0;
1517 u8 wbuf[8];
1518
1519 wait_for_completion_interruptible(notify);
1520
1521 txn.dt = SLIM_MSG_DEST_LOGICALADDR;
1522 txn.ec = 0;
1523 txn.rbuf = NULL;
1524 txn.mc = SLIM_USR_MC_REPORT_SATELLITE;
1525 txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
1526 txn.la = SLIM_LA_MGR;
1527 wbuf[0] = SAT_MAGIC_LSB;
1528 wbuf[1] = SAT_MAGIC_MSB;
1529 wbuf[2] = SAT_MSG_VER;
1530 wbuf[3] = SAT_MSG_PROT;
1531 txn.wbuf = wbuf;
1532 txn.len = 4;
1533 SLIM_INFO(dev, "SLIM SAT: Rcvd master capability\n");
1534capability_retry:
1535 txn.rl = 8;
1536 ret = ngd_xfer_msg(&dev->ctrl, &txn);
1537 if (!ret) {
1538 enum msm_ctrl_state prev_state = dev->state;
1539
1540 SLIM_INFO(dev,
1541 "SLIM SAT: capability exchange successful\n");
1542 if (prev_state < MSM_CTRL_ASLEEP)
1543 SLIM_WARN(dev,
1544 "capability due to noise, state:%d\n",
1545 prev_state);
1546 complete(&dev->reconf);
1547 /* ADSP SSR, send device_up notifications */
1548 if (prev_state == MSM_CTRL_DOWN)
1549 complete(&dev->qmi.slave_notify);
1550 } else if (ret == -EIO) {
1551 SLIM_WARN(dev, "capability message NACKed, retrying\n");
1552 if (retries < INIT_MX_RETRIES) {
1553 msleep(DEF_RETRY_MS);
1554 retries++;
1555 goto capability_retry;
1556 }
1557 } else {
1558 SLIM_WARN(dev, "SLIM: capability TX failed:%d\n", ret);
1559 }
1560 }
1561 return 0;
1562}
1563
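/*
 * Helper thread: registers the QMI service notifier and, on every
 * slave_notify completion, registers DT/board SLIMbus devices (first
 * iteration only) or reports the framer as booted, then retries
 * logical-address assignment for every enumerated device.
 */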
1564static int ngd_notify_slaves(void *data)
1565{
1566 struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)data;
1567 struct slim_controller *ctrl = &dev->ctrl;
1568 struct slim_device *sbdev;
1569 struct list_head *pos, *next;
1570 int ret, i = 0;
1571
1572 ret = qmi_svc_event_notifier_register(SLIMBUS_QMI_SVC_ID,
1573 SLIMBUS_QMI_SVC_V1,
1574 SLIMBUS_QMI_INS_ID, &dev->qmi.nb);
1575 if (ret) {
1576 pr_err("Slimbus QMI service registration failed:%d", ret);
1577 return ret;
1578 }
1579
1580 while (!kthread_should_stop()) {
1581 wait_for_completion_interruptible(&dev->qmi.slave_notify);
1582 /* Probe devices for first notification */
1583 if (!i) {
1584 i++;
1585 dev->err = 0;
1586 if (dev->dev->of_node)
1587 of_register_slim_devices(&dev->ctrl);
1588
1589 /*
1590 * Add devices registered with board-info now that
1591 * controller is up
1592 */
1593 slim_ctrl_add_boarddevs(&dev->ctrl);
1594 ngd_dom_init(dev);
1595 } else {
1596 slim_framer_booted(ctrl);
1597 }
1598 mutex_lock(&ctrl->m_ctrl);
1599 list_for_each_safe(pos, next, &ctrl->devs) {
1600 int j;
1601
1602 sbdev = list_entry(pos, struct slim_device, dev_list);
1603 mutex_unlock(&ctrl->m_ctrl);
1604 for (j = 0; j < LADDR_RETRY; j++) {
1605 ret = slim_get_logical_addr(sbdev,
1606 sbdev->e_addr,
1607 6, &sbdev->laddr);
1608 if (!ret)
1609 break;
1610 /* time for ADSP to assign LA */
1611 msleep(20);
1612 }
1613 mutex_lock(&ctrl->m_ctrl);
1614 }
1615 mutex_unlock(&ctrl->m_ctrl);
1616 }
1617 return 0;
1618}
1619
1620static void ngd_dom_down(struct msm_slim_ctrl *dev)
1621{
1622 struct slim_controller *ctrl = &dev->ctrl;
1623 struct slim_device *sbdev;
1624
1625 mutex_lock(&dev->ssr_lock);
1626 ngd_slim_enable(dev, false);
1627 /* device up should be called again after SSR */
1628 list_for_each_entry(sbdev, &ctrl->devs, dev_list)
1629 slim_report_absent(sbdev);
1630 SLIM_INFO(dev, "SLIM ADSP SSR (DOWN) done\n");
1631 mutex_unlock(&dev->ssr_lock);
1632}
1633
1634static void ngd_dom_up(struct work_struct *work)
1635{
1636 struct msm_slim_ss *dsp =
1637 container_of(work, struct msm_slim_ss, dom_up);
1638 struct msm_slim_ctrl *dev =
1639 container_of(dsp, struct msm_slim_ctrl, dsp);
1640 mutex_lock(&dev->ssr_lock);
1641 ngd_slim_enable(dev, true);
1642 mutex_unlock(&dev->ssr_lock);
1643}
1644
1645static ssize_t show_mask(struct device *device, struct device_attribute *attr,
1646 char *buf)
1647{
1648 struct platform_device *pdev = to_platform_device(device);
1649 struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
1650
1651 return snprintf(buf, sizeof(int), "%u\n", dev->ipc_log_mask);
1652}
1653
1654static ssize_t set_mask(struct device *device, struct device_attribute *attr,
1655 const char *buf, size_t count)
1656{
1657 struct platform_device *pdev = to_platform_device(device);
1658 struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
1659
1660 dev->ipc_log_mask = buf[0] - '0';
1661 if (dev->ipc_log_mask > DBG_LEV)
1662 dev->ipc_log_mask = DBG_LEV;
1663 return count;
1664}
1665
1666static DEVICE_ATTR(debug_mask, 0644, show_mask, set_mask);
1667
1668static int ngd_slim_probe(struct platform_device *pdev)
1669{
1670 struct msm_slim_ctrl *dev;
1671 int ret;
1672 struct resource *bam_mem;
1673 struct resource *slim_mem;
1674 struct resource *irq, *bam_irq;
1675 bool rxreg_access = false;
1676 bool slim_mdm = false;
1677 const char *ext_modem_id = NULL;
1678
1679 slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1680 "slimbus_physical");
1681 if (!slim_mem) {
1682 dev_err(&pdev->dev, "no slimbus physical memory resource\n");
1683 return -ENODEV;
1684 }
1685 bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1686 "slimbus_bam_physical");
1687 if (!bam_mem) {
1688 dev_err(&pdev->dev, "no slimbus BAM memory resource\n");
1689 return -ENODEV;
1690 }
1691 irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
1692 "slimbus_irq");
1693 if (!irq) {
1694 dev_err(&pdev->dev, "no slimbus IRQ resource\n");
1695 return -ENODEV;
1696 }
1697 bam_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
1698 "slimbus_bam_irq");
1699 if (!bam_irq) {
1700 dev_err(&pdev->dev, "no slimbus BAM IRQ resource\n");
1701 return -ENODEV;
1702 }
1703
1704 dev = kzalloc(sizeof(struct msm_slim_ctrl), GFP_KERNEL);
1705 if (IS_ERR_OR_NULL(dev)) {
1706 dev_err(&pdev->dev, "no memory for MSM slimbus controller\n");
1707 return PTR_ERR(dev);
1708 }
1709 dev->wr_comp = kzalloc(sizeof(struct completion *) * MSM_TX_BUFS,
1710 GFP_KERNEL);
Karthikeyan Ramasubramanian26182ce2017-01-17 10:15:00 -07001711 if (!dev->wr_comp) {
1712 ret = -ENOMEM;
1713 goto err_nobulk;
1714 }
Sagar Dhariabe37c9c2016-11-28 23:06:58 -07001715
1716 /* typical txn numbers and size used in bulk operation */
1717 dev->bulk.buf_sz = SLIM_MAX_TXNS * 8;
1718 dev->bulk.base = kzalloc(dev->bulk.buf_sz, GFP_KERNEL | GFP_DMA);
1719 if (!dev->bulk.base) {
1720 ret = -ENOMEM;
1721 goto err_nobulk;
1722 }
1723
1724 dev->dev = &pdev->dev;
1725 platform_set_drvdata(pdev, dev);
1726 slim_set_ctrldata(&dev->ctrl, dev);
1727
1728 /* Create IPC log context */
1729 dev->ipc_slimbus_log = ipc_log_context_create(IPC_SLIMBUS_LOG_PAGES,
1730 dev_name(dev->dev), 0);
1731 if (!dev->ipc_slimbus_log)
1732 dev_err(&pdev->dev, "error creating ipc_logging context\n");
1733 else {
1734 /* Initialize the log mask */
1735 dev->ipc_log_mask = INFO_LEV;
1736 dev->default_ipc_log_mask = INFO_LEV;
1737 SLIM_INFO(dev, "start logging for slim dev %s\n",
1738 dev_name(dev->dev));
1739 }
1740 ret = sysfs_create_file(&dev->dev->kobj, &dev_attr_debug_mask.attr);
1741 if (ret) {
1742 dev_err(&pdev->dev, "Failed to create dev. attr\n");
1743 dev->sysfs_created = false;
1744 } else
1745 dev->sysfs_created = true;
1746
1747 dev->base = ioremap(slim_mem->start, resource_size(slim_mem));
1748 if (!dev->base) {
1749 dev_err(&pdev->dev, "IOremap failed\n");
1750 ret = -ENOMEM;
1751 goto err_ioremap_failed;
1752 }
1753 dev->bam.base = ioremap(bam_mem->start, resource_size(bam_mem));
1754 if (!dev->bam.base) {
1755 dev_err(&pdev->dev, "BAM IOremap failed\n");
1756 ret = -ENOMEM;
1757 goto err_ioremap_bam_failed;
1758 }
1759 if (pdev->dev.of_node) {
1760
1761 ret = of_property_read_u32(pdev->dev.of_node, "cell-index",
1762 &dev->ctrl.nr);
1763 if (ret) {
1764 dev_err(&pdev->dev, "Cell index not specified:%d", ret);
1765 goto err_ctrl_failed;
1766 }
1767 rxreg_access = of_property_read_bool(pdev->dev.of_node,
1768 "qcom,rxreg-access");
1769 of_property_read_u32(pdev->dev.of_node, "qcom,apps-ch-pipes",
1770 &dev->pdata.apps_pipes);
1771 of_property_read_u32(pdev->dev.of_node, "qcom,ea-pc",
1772 &dev->pdata.eapc);
1773 ret = of_property_read_string(pdev->dev.of_node,
1774 "qcom,slim-mdm", &ext_modem_id);
1775 if (!ret)
1776 slim_mdm = true;
1777 } else {
1778 dev->ctrl.nr = pdev->id;
1779 }
1780 /*
1781 * Keep PGD's logical address as manager's. Query it when first data
1782 * channel request comes in
1783 */
1784 dev->pgdla = SLIM_LA_MGR;
1785 dev->ctrl.nchans = MSM_SLIM_NCHANS;
1786 dev->ctrl.nports = MSM_SLIM_NPORTS;
1787 dev->framer.rootfreq = SLIM_ROOT_FREQ >> 3;
1788 dev->framer.superfreq =
1789 dev->framer.rootfreq / SLIM_CL_PER_SUPERFRAME_DIV8;
1790 dev->ctrl.a_framer = &dev->framer;
1791 dev->ctrl.clkgear = SLIM_MAX_CLK_GEAR;
1792 dev->ctrl.set_laddr = ngd_set_laddr;
1793 dev->ctrl.get_laddr = ngd_get_laddr;
1794 dev->ctrl.allocbw = ngd_allocbw;
1795 dev->ctrl.xfer_msg = ngd_xfer_msg;
1796 dev->ctrl.xfer_user_msg = ngd_user_msg;
1797 dev->ctrl.xfer_bulk_wr = ngd_bulk_wr;
1798 dev->ctrl.wakeup = NULL;
1799 dev->ctrl.alloc_port = msm_alloc_port;
1800 dev->ctrl.dealloc_port = msm_dealloc_port;
1801 dev->ctrl.port_xfer = msm_slim_port_xfer;
1802 dev->ctrl.port_xfer_status = msm_slim_port_xfer_status;
1803 dev->bam_mem = bam_mem;
1804 dev->rx_slim = ngd_slim_rx;
1805
1806 init_completion(&dev->reconf);
1807 init_completion(&dev->ctrl_up);
1808 mutex_init(&dev->tx_lock);
1809 mutex_init(&dev->ssr_lock);
1810 spin_lock_init(&dev->tx_buf_lock);
1811 spin_lock_init(&dev->rx_lock);
1812 dev->ee = 1;
1813 dev->irq = irq->start;
1814 dev->bam.irq = bam_irq->start;
1815 atomic_set(&dev->ssr_in_progress, 0);
1816
1817 if (rxreg_access)
1818 dev->use_rx_msgqs = MSM_MSGQ_DISABLED;
1819 else
1820 dev->use_rx_msgqs = MSM_MSGQ_RESET;
1821
1822 /* Enable TX message queues by default as recommended by HW */
1823 dev->use_tx_msgqs = MSM_MSGQ_RESET;
1824
1825 init_completion(&dev->rx_msgq_notify);
1826 init_completion(&dev->qmi.slave_notify);
1827
1828 /* Register with framework */
1829 ret = slim_add_numbered_controller(&dev->ctrl);
1830 if (ret) {
1831 dev_err(dev->dev, "error adding controller\n");
1832 goto err_ctrl_failed;
1833 }
1834
1835 dev->ctrl.dev.parent = &pdev->dev;
1836 dev->ctrl.dev.of_node = pdev->dev.of_node;
1837 dev->state = MSM_CTRL_DOWN;
1838
1839 /*
1840	 * As this handler does not perform expensive operations, it can
1841	 * execute in an interrupt context. This avoids context switches
1842	 * and the associated overhead, which benefits performance.
1846 */
1847 ret = request_irq(dev->irq,
1848 ngd_slim_interrupt,
1849 IRQF_TRIGGER_HIGH,
1850 "ngd_slim_irq", dev);
1851
1852 if (ret) {
1853 dev_err(&pdev->dev, "request IRQ failed\n");
1854 goto err_request_irq_failed;
1855 }
1856
1857 init_completion(&dev->qmi.qmi_comp);
1858 dev->err = -EPROBE_DEFER;
1859 pm_runtime_use_autosuspend(dev->dev);
1860 pm_runtime_set_autosuspend_delay(dev->dev, MSM_SLIM_AUTOSUSPEND);
1861 pm_runtime_set_suspended(dev->dev);
1862 pm_runtime_enable(dev->dev);
1863
1864 if (slim_mdm) {
1865 dev->ext_mdm.nb.notifier_call = mdm_ssr_notify_cb;
1866 dev->ext_mdm.domr = subsys_notif_register_notifier(ext_modem_id,
1867 &dev->ext_mdm.nb);
1868 if (IS_ERR_OR_NULL(dev->ext_mdm.domr))
1869 dev_err(dev->dev,
1870 "subsys_notif_register_notifier failed %p",
1871 dev->ext_mdm.domr);
1872 }
1873
1874 INIT_WORK(&dev->dsp.dom_up, ngd_dom_up);
1875 dev->qmi.nb.notifier_call = ngd_qmi_available;
1876 pm_runtime_get_noresume(dev->dev);
1877
1878 /* Fire up the Rx message queue thread */
1879 dev->rx_msgq_thread = kthread_run(ngd_slim_rx_msgq_thread, dev,
1880 "ngd_rx_thread%d", dev->ctrl.nr);
1881 if (IS_ERR(dev->rx_msgq_thread)) {
1882 ret = PTR_ERR(dev->rx_msgq_thread);
1883 dev_err(dev->dev, "Failed to start Rx thread:%d\n", ret);
1884 goto err_rx_thread_create_failed;
1885 }
1886
1887	/* Start a thread to probe and notify slaves */
1888 dev->qmi.slave_thread = kthread_run(ngd_notify_slaves, dev,
1889 "ngd_notify_sl%d", dev->ctrl.nr);
1890 if (IS_ERR(dev->qmi.slave_thread)) {
1891 ret = PTR_ERR(dev->qmi.slave_thread);
1892 dev_err(dev->dev, "Failed to start notifier thread:%d\n", ret);
1893 goto err_notify_thread_create_failed;
1894 }
1895 SLIM_INFO(dev, "NGD SB controller is up!\n");
1896 return 0;
1897
1898err_notify_thread_create_failed:
1899 kthread_stop(dev->rx_msgq_thread);
1900err_rx_thread_create_failed:
1901 free_irq(dev->irq, dev);
1902err_request_irq_failed:
1903err_ctrl_failed:
1904 iounmap(dev->bam.base);
1905err_ioremap_bam_failed:
1906 iounmap(dev->base);
1907err_ioremap_failed:
1908 if (dev->sysfs_created)
1909 sysfs_remove_file(&dev->dev->kobj,
1910 &dev_attr_debug_mask.attr);
1911 kfree(dev->bulk.base);
1912err_nobulk:
1913 kfree(dev->wr_comp);
1914 kfree(dev);
1915 return ret;
1916}
1917
1918static int ngd_slim_remove(struct platform_device *pdev)
1919{
1920 struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
1921
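	/*
	 * Undo probe in roughly reverse order: disable the NGD, unregister
	 * the QMI and subsystem notifiers, remove the controller, stop the
	 * RX thread, and release the IRQ, mappings and allocations.
	 */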
1922 ngd_slim_enable(dev, false);
1923 if (dev->sysfs_created)
1924 sysfs_remove_file(&dev->dev->kobj,
1925 &dev_attr_debug_mask.attr);
1926 qmi_svc_event_notifier_unregister(SLIMBUS_QMI_SVC_ID,
1927 SLIMBUS_QMI_SVC_V1,
1928 SLIMBUS_QMI_INS_ID, &dev->qmi.nb);
1929 pm_runtime_disable(&pdev->dev);
1930 if (dev->dsp.dom_t == MSM_SLIM_DOM_SS)
1931 subsys_notif_unregister_notifier(dev->dsp.domr,
1932 &dev->dsp.nb);
1933 if (dev->dsp.dom_t == MSM_SLIM_DOM_PD)
1934 service_notif_unregister_notifier(dev->dsp.domr,
1935 &dev->dsp.nb);
1936 if (!IS_ERR_OR_NULL(dev->ext_mdm.domr))
1937 subsys_notif_unregister_notifier(dev->ext_mdm.domr,
1938 &dev->ext_mdm.nb);
1939 kfree(dev->bulk.base);
1940 free_irq(dev->irq, dev);
1941 slim_del_controller(&dev->ctrl);
1942 kthread_stop(dev->rx_msgq_thread);
1943 iounmap(dev->bam.base);
1944 iounmap(dev->base);
1945 kfree(dev->wr_comp);
1946 kfree(dev);
1947 return 0;
1948}
1949
1950#ifdef CONFIG_PM
1951static int ngd_slim_runtime_idle(struct device *device)
1952{
1953 struct platform_device *pdev = to_platform_device(device);
1954 struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
1955
1956 mutex_lock(&dev->tx_lock);
1957 if (dev->state == MSM_CTRL_AWAKE)
1958 dev->state = MSM_CTRL_IDLE;
1959 mutex_unlock(&dev->tx_lock);
1960 dev_dbg(device, "pm_runtime: idle...\n");
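	/*
	 * Request a delayed autosuspend and return -EAGAIN so the PM core
	 * does not carry out a synchronous suspend from the idle callback.
	 */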
1961 pm_request_autosuspend(device);
1962 return -EAGAIN;
1963}
1964#endif
1965
1966/*
1967 * If CONFIG_PM_RUNTIME is not defined, these two functions become
1968 * helpers called from system suspend/resume, which is why they are
1969 * not guarded by #ifdef CONFIG_PM_RUNTIME.
1970 */
1971static int ngd_slim_runtime_resume(struct device *device)
1972{
1973 struct platform_device *pdev = to_platform_device(device);
1974 struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
1975 int ret = 0;
1976
1977 mutex_lock(&dev->tx_lock);
1978 if (dev->state >= MSM_CTRL_ASLEEP)
1979 ret = ngd_slim_power_up(dev, false);
1980 if (ret) {
1981		/* Did SSR cause this power-up failure? */
1982 if (dev->state != MSM_CTRL_DOWN)
1983 dev->state = MSM_CTRL_ASLEEP;
1984 else
1985 SLIM_WARN(dev, "HW wakeup attempt during SSR\n");
1986 } else {
1987 dev->state = MSM_CTRL_AWAKE;
1988 }
1989 mutex_unlock(&dev->tx_lock);
1990 SLIM_INFO(dev, "Slim runtime resume: ret %d\n", ret);
1991 return ret;
1992}
1993
1994#ifdef CONFIG_PM
1995static int ngd_slim_runtime_suspend(struct device *device)
1996{
1997 struct platform_device *pdev = to_platform_device(device);
1998 struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
1999 int ret = 0;
2000
2001 mutex_lock(&dev->tx_lock);
2002 ret = ngd_slim_power_down(dev);
2003 if (ret && ret != -EBUSY)
2004 SLIM_INFO(dev, "slim resource not idle:%d\n", ret);
2005 if (!ret || ret == -ETIMEDOUT)
2006 dev->state = MSM_CTRL_ASLEEP;
2007 mutex_unlock(&dev->tx_lock);
2008 SLIM_INFO(dev, "Slim runtime suspend: ret %d\n", ret);
2009 return ret;
2010}
2011#endif
2012
2013#ifdef CONFIG_PM_SLEEP
2014static int ngd_slim_suspend(struct device *dev)
2015{
2016 int ret = -EBUSY;
2017 struct platform_device *pdev = to_platform_device(dev);
2018 struct msm_slim_ctrl *cdev = platform_get_drvdata(pdev);
2019
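	/*
	 * Power the controller down from system suspend when runtime PM is
	 * disabled, or when it is still runtime-active but idle. The QMI
	 * power-off request is marked for a deferred response, which is
	 * then completed from ngd_slim_resume().
	 */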
2020 if (!pm_runtime_enabled(dev) ||
2021 (!pm_runtime_suspended(dev) &&
2022 cdev->state == MSM_CTRL_IDLE)) {
2023		cdev->qmi.deferred_resp = true;
2024		ret = ngd_slim_runtime_suspend(dev);
2025		/*
2026		 * If runtime-PM still thinks it's active, then make sure its
2027		 * status is in sync with HW status.
2028		 */
2029 if (!ret) {
2030 pm_runtime_disable(dev);
2031 pm_runtime_set_suspended(dev);
2032 pm_runtime_enable(dev);
2033		} else {
2034			cdev->qmi.deferred_resp = false;
2035		}
2036 }
2037 if (ret == -EBUSY) {
2038		/*
2039		 * An audio stream may still be active during suspend. We
2040		 * don't want to return a suspend failure in that case, so
2041		 * that the display and other relevant components can still
2042		 * go to suspend.
2043		 * If there is some other error, it should be passed on to
2044		 * the system-level suspend path.
2045		 */
2046 ret = 0;
2047 }
2048 SLIM_INFO(cdev, "system suspend\n");
2049 return ret;
2050}
2051
2052static int ngd_slim_resume(struct device *dev)
2053{
2054 struct platform_device *pdev = to_platform_device(dev);
2055 struct msm_slim_ctrl *cdev = platform_get_drvdata(pdev);
2056	int ret = 0;
2057
2058	/*
2059	 * If a deferred response was requested for the power-off and it
2060	 * failed, mark the runtime-PM status as active to stay consistent
2061	 * with the HW state.
2062	 */
2063 if (cdev->qmi.deferred_resp) {
2064 ret = msm_slim_qmi_deferred_status_req(cdev);
2065 if (ret) {
2066 pm_runtime_disable(dev);
2067 pm_runtime_set_active(dev);
2068 pm_runtime_enable(dev);
2069 }
2070 cdev->qmi.deferred_resp = false;
2071 }
2072	/*
2073	 * Rely on runtime PM to call resume if it is enabled; even if it
2074	 * is not, the first client transaction will turn the clocks and
2075	 * power back on.
2076	 */
2077 SLIM_INFO(cdev, "system resume\n");
2078	return ret;
2079}
2080#endif /* CONFIG_PM_SLEEP */
2081
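/*
 * System-sleep transitions use ngd_slim_suspend()/ngd_slim_resume();
 * runtime PM uses the runtime suspend/resume/idle callbacks, with
 * autosuspend configured at probe time.
 */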
2082static const struct dev_pm_ops ngd_slim_dev_pm_ops = {
2083 SET_SYSTEM_SLEEP_PM_OPS(
2084 ngd_slim_suspend,
2085 ngd_slim_resume
2086 )
2087 SET_RUNTIME_PM_OPS(
2088 ngd_slim_runtime_suspend,
2089 ngd_slim_runtime_resume,
2090 ngd_slim_runtime_idle
2091 )
2092};
2093
2094static const struct of_device_id ngd_slim_dt_match[] = {
2095 {
2096 .compatible = "qcom,slim-ngd",
2097 },
2098 {}
2099};
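/*
 * Illustrative (hypothetical) device-tree fragment for this controller.
 * The compatible string and the "qcom,ea-pc"/"qcom,slim-mdm" property
 * names come from this driver; the values shown are placeholders only:
 *
 *	slimbus {
 *		compatible = "qcom,slim-ngd";
 *		qcom,ea-pc = <0x40>;
 *		qcom,slim-mdm = "esoc0";
 *	};
 */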
2100
2101static struct platform_driver ngd_slim_driver = {
2102 .probe = ngd_slim_probe,
2103 .remove = ngd_slim_remove,
2104 .driver = {
2105 .name = NGD_SLIM_NAME,
2106 .owner = THIS_MODULE,
2107 .pm = &ngd_slim_dev_pm_ops,
2108 .of_match_table = ngd_slim_dt_match,
2109 },
2110};
2111
2112static int ngd_slim_init(void)
2113{
2114 return platform_driver_register(&ngd_slim_driver);
2115}
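/*
 * Registered as a late initcall, presumably so that the subsystems this
 * driver depends on (SPS/BAM, QMI, subsystem restart) are available
 * before the controller probes.
 */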
2116late_initcall(ngd_slim_init);
2117
2118static void ngd_slim_exit(void)
2119{
2120 platform_driver_unregister(&ngd_slim_driver);
2121}
2122module_exit(ngd_slim_exit);
2123
2124MODULE_LICENSE("GPL v2");
2125MODULE_DESCRIPTION("MSM Slimbus controller");
2126MODULE_ALIAS("platform:msm-slim-ngd");