slimbus: Add snapshot of slimbus driver.
This snapshot is taken as of msm-4.4 commit 3c7400dc73db
("Merge 'diag: dci: Fix possible out of bound access'").
CRs-Fixed: 2002163
Change-Id: I7e91941a324206b964a55558a1a9ab2fed7fb3e9
Signed-off-by: Sagar Dharia <sdharia@codeaurora.org>
Signed-off-by: Karthikeyan Ramasubramanian <kramasub@codeaurora.org>
diff --git a/drivers/slimbus/Kconfig b/drivers/slimbus/Kconfig
new file mode 100644
index 0000000..b7d040e
--- /dev/null
+++ b/drivers/slimbus/Kconfig
@@ -0,0 +1,32 @@
+#
+# SLIMBUS driver configuration
+#
+menuconfig SLIMBUS
+ bool "Slimbus support"
+ depends on HAS_IOMEM
+ help
+	  SLIMbus is a standard interface between baseband and
+	  application processors and peripheral components in mobile
+	  terminals.
+
+if SLIMBUS
+config SLIMBUS_MSM_CTRL
+ tristate "QTI Slimbus Master Component"
+ default n
+ help
+	  Select driver for Qualcomm Technologies Inc. (QTI) Slimbus
+	  Master Component. This driver is responsible for configuring
+	  the SLIMbus, performing bus administration, administering
+	  components on the bus, and dynamic channel allocation.
+
+config SLIMBUS_MSM_NGD
+ tristate "QTI Slimbus Satellite Component"
+ help
+	  Select driver for Qualcomm Technologies Inc. (QTI) Slimbus
+	  Satellite Component. This is a lightweight SLIMbus controller
+	  driver responsible for communicating with slave HW directly over
+	  the bus using the messaging interface, and for communicating with
+	  the master component residing on the ADSP for bandwidth and
+	  data-channel management.
+
+endif
diff --git a/drivers/slimbus/Makefile b/drivers/slimbus/Makefile
new file mode 100644
index 0000000..45d6e6e
--- /dev/null
+++ b/drivers/slimbus/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for kernel slimbus framework.
+#
+obj-$(CONFIG_SLIMBUS) += slimbus.o
+obj-$(CONFIG_SLIMBUS_MSM_CTRL) += slim-msm.o slim-msm-ctrl.o
+obj-$(CONFIG_SLIMBUS_MSM_NGD) += slim-msm.o slim-msm-ngd.o
diff --git a/drivers/slimbus/slim-msm-ctrl.c b/drivers/slimbus/slim-msm-ctrl.c
new file mode 100644
index 0000000..3f99b2b
--- /dev/null
+++ b/drivers/slimbus/slim-msm-ctrl.c
@@ -0,0 +1,1641 @@
+/* Copyright (c) 2011-2015, 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slimbus/slimbus.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_slimbus.h>
+#include <linux/msm-sps.h>
+#include <linux/qdsp6v2/apr.h>
+#include "slim-msm.h"
+
+#define MSM_SLIM_NAME "msm_slim_ctrl"
+#define SLIM_ROOT_FREQ 24576000
+
+#define QC_MSM_DEVS 5
+
+/* Manager registers */
+enum mgr_reg {
+ MGR_CFG = 0x200,
+ MGR_STATUS = 0x204,
+ MGR_RX_MSGQ_CFG = 0x208,
+ MGR_INT_EN = 0x210,
+ MGR_INT_STAT = 0x214,
+ MGR_INT_CLR = 0x218,
+ MGR_TX_MSG = 0x230,
+ MGR_RX_MSG = 0x270,
+ MGR_IE_STAT = 0x2F0,
+ MGR_VE_STAT = 0x300,
+};
+
+enum msg_cfg {
+ MGR_CFG_ENABLE = 1,
+ MGR_CFG_RX_MSGQ_EN = 1 << 1,
+ MGR_CFG_TX_MSGQ_EN_HIGH = 1 << 2,
+ MGR_CFG_TX_MSGQ_EN_LOW = 1 << 3,
+};
+/* Message queue types */
+enum msm_slim_msgq_type {
+ MSGQ_RX = 0,
+ MSGQ_TX_LOW = 1,
+ MSGQ_TX_HIGH = 2,
+};
+/* Framer registers */
+enum frm_reg {
+ FRM_CFG = 0x400,
+ FRM_STAT = 0x404,
+ FRM_INT_EN = 0x410,
+ FRM_INT_STAT = 0x414,
+ FRM_INT_CLR = 0x418,
+ FRM_WAKEUP = 0x41C,
+ FRM_CLKCTL_DONE = 0x420,
+ FRM_IE_STAT = 0x430,
+ FRM_VE_STAT = 0x440,
+};
+
+/* Interface registers */
+enum intf_reg {
+ INTF_CFG = 0x600,
+ INTF_STAT = 0x604,
+ INTF_INT_EN = 0x610,
+ INTF_INT_STAT = 0x614,
+ INTF_INT_CLR = 0x618,
+ INTF_IE_STAT = 0x630,
+ INTF_VE_STAT = 0x640,
+};
+
+enum mgr_intr {
+ MGR_INT_RECFG_DONE = 1 << 24,
+ MGR_INT_TX_NACKED_2 = 1 << 25,
+ MGR_INT_MSG_BUF_CONTE = 1 << 26,
+ MGR_INT_RX_MSG_RCVD = 1 << 30,
+ MGR_INT_TX_MSG_SENT = 1 << 31,
+};
+
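+/* Bit positions of the fields within the FRM_CFG register */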
+enum frm_cfg {
+ FRM_ACTIVE = 1,
+ CLK_GEAR = 7,
+ ROOT_FREQ = 11,
+ REF_CLK_GEAR = 15,
+ INTR_WAKE = 19,
+};
+
+static struct msm_slim_sat *msm_slim_alloc_sat(struct msm_slim_ctrl *dev);
+
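+/*
+ * Queue a message for the satellite's RX worker. sat_msgs is a circular
+ * buffer: the queue is full when advancing the tail would collide with
+ * the head.
+ */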
+static int msm_sat_enqueue(struct msm_slim_sat *sat, u32 *buf, u8 len)
+{
+ struct msm_slim_ctrl *dev = sat->dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sat->lock, flags);
+ if ((sat->stail + 1) % SAT_CONCUR_MSG == sat->shead) {
+ spin_unlock_irqrestore(&sat->lock, flags);
+ dev_err(dev->dev, "SAT QUEUE full!");
+ return -EXFULL;
+ }
+ memcpy(sat->sat_msgs[sat->stail], (u8 *)buf, len);
+ sat->stail = (sat->stail + 1) % SAT_CONCUR_MSG;
+ spin_unlock_irqrestore(&sat->lock, flags);
+ return 0;
+}
+
+static int msm_sat_dequeue(struct msm_slim_sat *sat, u8 *buf)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sat->lock, flags);
+ if (sat->stail == sat->shead) {
+ spin_unlock_irqrestore(&sat->lock, flags);
+ return -ENODATA;
+ }
+ memcpy(buf, sat->sat_msgs[sat->shead], 40);
+ sat->shead = (sat->shead + 1) % SAT_CONCUR_MSG;
+ spin_unlock_irqrestore(&sat->lock, flags);
+ return 0;
+}
+
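+/* Extract the 6-byte enumeration address from the 2 received message words */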
+static void msm_get_eaddr(u8 *e_addr, u32 *buffer)
+{
+ e_addr[0] = (buffer[1] >> 24) & 0xff;
+ e_addr[1] = (buffer[1] >> 16) & 0xff;
+ e_addr[2] = (buffer[1] >> 8) & 0xff;
+ e_addr[3] = buffer[1] & 0xff;
+ e_addr[4] = (buffer[0] >> 24) & 0xff;
+ e_addr[5] = (buffer[0] >> 16) & 0xff;
+}
+
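+/*
+ * A satellite is identified by the QTI manufacturer ID and one of the
+ * known satellite device IDs in its enumeration address.
+ */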
+static bool msm_is_sat_dev(u8 *e_addr)
+{
+ if (e_addr[5] == QC_MFGID_LSB && e_addr[4] == QC_MFGID_MSB &&
+ e_addr[2] != QC_CHIPID_SL &&
+ (e_addr[1] == QC_DEVID_SAT1 || e_addr[1] == QC_DEVID_SAT2))
+ return true;
+ return false;
+}
+
+static struct msm_slim_sat *addr_to_sat(struct msm_slim_ctrl *dev, u8 laddr)
+{
+ struct msm_slim_sat *sat = NULL;
+ int i = 0;
+
+ while (!sat && i < dev->nsats) {
+ if (laddr == dev->satd[i]->satcl.laddr)
+ sat = dev->satd[i];
+ i++;
+ }
+ return sat;
+}
+
+static irqreturn_t msm_slim_interrupt(int irq, void *d)
+{
+ struct msm_slim_ctrl *dev = d;
+ u32 pstat;
+ u32 stat = readl_relaxed(dev->base + MGR_INT_STAT);
+
+ if (stat & MGR_INT_TX_MSG_SENT || stat & MGR_INT_TX_NACKED_2) {
+ if (stat & MGR_INT_TX_MSG_SENT)
+ writel_relaxed(MGR_INT_TX_MSG_SENT,
+ dev->base + MGR_INT_CLR);
+ else {
+ u32 mgr_stat = readl_relaxed(dev->base + MGR_STATUS);
+ u32 mgr_ie_stat = readl_relaxed(dev->base +
+ MGR_IE_STAT);
+ u32 frm_stat = readl_relaxed(dev->base + FRM_STAT);
+ u32 frm_cfg = readl_relaxed(dev->base + FRM_CFG);
+ u32 frm_intr_stat = readl_relaxed(dev->base +
+ FRM_INT_STAT);
+ u32 frm_ie_stat = readl_relaxed(dev->base +
+ FRM_IE_STAT);
+ u32 intf_stat = readl_relaxed(dev->base + INTF_STAT);
+ u32 intf_intr_stat = readl_relaxed(dev->base +
+ INTF_INT_STAT);
+ u32 intf_ie_stat = readl_relaxed(dev->base +
+ INTF_IE_STAT);
+
+ writel_relaxed(MGR_INT_TX_NACKED_2,
+ dev->base + MGR_INT_CLR);
+ pr_err("TX Nack MGR dump:int_stat:0x%x, mgr_stat:0x%x",
+ stat, mgr_stat);
+ pr_err("TX Nack MGR dump:ie_stat:0x%x", mgr_ie_stat);
+ pr_err("TX Nack FRM dump:int_stat:0x%x, frm_stat:0x%x",
+ frm_intr_stat, frm_stat);
+ pr_err("TX Nack FRM dump:frm_cfg:0x%x, ie_stat:0x%x",
+ frm_cfg, frm_ie_stat);
+ pr_err("TX Nack INTF dump:intr_st:0x%x, intf_stat:0x%x",
+ intf_intr_stat, intf_stat);
+ pr_err("TX Nack INTF dump:ie_stat:0x%x", intf_ie_stat);
+
+ dev->err = -EIO;
+ }
+ /*
+ * Guarantee that interrupt clear bit write goes through before
+ * signalling completion/exiting ISR
+ */
+ mb();
+ msm_slim_manage_tx_msgq(dev, false, NULL);
+ }
+ if (stat & MGR_INT_RX_MSG_RCVD) {
+ u32 rx_buf[10];
+ u32 mc, mt;
+ u8 len, i;
+
+ rx_buf[0] = readl_relaxed(dev->base + MGR_RX_MSG);
+ len = rx_buf[0] & 0x1F;
+ for (i = 1; i < ((len + 3) >> 2); i++) {
+ rx_buf[i] = readl_relaxed(dev->base + MGR_RX_MSG +
+ (4 * i));
+ dev_dbg(dev->dev, "reading data: %x\n", rx_buf[i]);
+ }
+ mt = (rx_buf[0] >> 5) & 0x7;
+ mc = (rx_buf[0] >> 8) & 0xff;
+ dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt);
+ if (mt == SLIM_MSG_MT_DEST_REFERRED_USER ||
+ mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
+ u8 laddr = (u8)((rx_buf[0] >> 16) & 0xFF);
+ struct msm_slim_sat *sat = addr_to_sat(dev, laddr);
+
+ if (sat)
+ msm_sat_enqueue(sat, rx_buf, len);
+ else
+ dev_err(dev->dev, "unknown sat:%d message",
+ laddr);
+ writel_relaxed(MGR_INT_RX_MSG_RCVD,
+ dev->base + MGR_INT_CLR);
+ /*
+ * Guarantee that CLR bit write goes through before
+ * queuing work
+ */
+ mb();
+ if (sat)
+ queue_work(sat->wq, &sat->wd);
+ } else if (mt == SLIM_MSG_MT_CORE &&
+ mc == SLIM_MSG_MC_REPORT_PRESENT) {
+ u8 e_addr[6];
+
+ msm_get_eaddr(e_addr, rx_buf);
+ msm_slim_rx_enqueue(dev, rx_buf, len);
+ writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
+ MGR_INT_CLR);
+ /*
+ * Guarantee that CLR bit write goes through
+ * before signalling completion
+ */
+ mb();
+ complete(&dev->rx_msgq_notify);
+ } else if (mt == SLIM_MSG_MT_CORE &&
+ mc == SLIM_MSG_MC_REPORT_ABSENT) {
+ writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
+ MGR_INT_CLR);
+ /*
+ * Guarantee that CLR bit write goes through
+ * before signalling completion
+ */
+ mb();
+ complete(&dev->rx_msgq_notify);
+
+ } else if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
+ mc == SLIM_MSG_MC_REPLY_VALUE) {
+ msm_slim_rx_enqueue(dev, rx_buf, len);
+ writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
+ MGR_INT_CLR);
+ /*
+ * Guarantee that CLR bit write goes through
+ * before signalling completion
+ */
+ mb();
+ complete(&dev->rx_msgq_notify);
+ } else if (mc == SLIM_MSG_MC_REPORT_INFORMATION) {
+ u8 *buf = (u8 *)rx_buf;
+ u8 l_addr = buf[2];
+ u16 ele = (u16)buf[4] << 4;
+
+ ele |= ((buf[3] & 0xf0) >> 4);
+ dev_err(dev->dev, "Slim-dev:%d report inf element:0x%x",
+ l_addr, ele);
+ for (i = 0; i < len - 5; i++)
+ dev_err(dev->dev, "offset:0x%x:bit mask:%x",
+ i, buf[i+5]);
+ writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
+ MGR_INT_CLR);
+ /*
+ * Guarantee that CLR bit write goes through
+ * before exiting
+ */
+ mb();
+ } else {
+ dev_err(dev->dev, "Unexpected MC,%x MT:%x, len:%d",
+ mc, mt, len);
+ for (i = 0; i < ((len + 3) >> 2); i++)
+ dev_err(dev->dev, "error msg: %x", rx_buf[i]);
+ writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
+ MGR_INT_CLR);
+ /*
+ * Guarantee that CLR bit write goes through
+ * before exiting
+ */
+ mb();
+ }
+ }
+ if (stat & MGR_INT_RECFG_DONE) {
+ writel_relaxed(MGR_INT_RECFG_DONE, dev->base + MGR_INT_CLR);
+ /*
+ * Guarantee that CLR bit write goes through
+ * before exiting ISR
+ */
+ mb();
+ complete(&dev->reconf);
+ }
+ pstat = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_ST_EEn, dev->ver));
+ if (pstat != 0)
+ return msm_slim_port_irq_handler(dev, pstat);
+
+ return IRQ_HANDLED;
+}
+
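+/*
+ * Send a message on the bus: vote for runtime PM, assemble the message
+ * header and payload in a TX buffer, and wait for the TX-done (and, for
+ * reconfiguration sequences, the reconfig-done) completion.
+ */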
+static int msm_xfer_msg(struct slim_controller *ctrl, struct slim_msg_txn *txn)
+{
+ DECLARE_COMPLETION_ONSTACK(done);
+ struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
+ u32 *pbuf;
+ u8 *puc;
+ int timeout;
+ int msgv = -1;
+ u8 la = txn->la;
+ u8 mc = (u8)(txn->mc & 0xFF);
+	/*
+	 * Voting for runtime PM: Slimbus has 2 possible use cases:
+	 * 1. messaging
+	 * 2. data channels
+	 * The messaging case goes through messaging slots while data
+	 * channels use their own slots.
+	 * This "get" votes for messaging bandwidth.
+	 */
+ if (!(txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG))
+ msgv = msm_slim_get_ctrl(dev);
+ if (msgv >= 0)
+ dev->state = MSM_CTRL_AWAKE;
+ mutex_lock(&dev->tx_lock);
+ if (dev->state == MSM_CTRL_ASLEEP ||
+ ((!(txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
+ dev->state == MSM_CTRL_IDLE)) {
+ dev_err(dev->dev, "runtime or system PM suspended state");
+ mutex_unlock(&dev->tx_lock);
+ if (msgv >= 0)
+ msm_slim_put_ctrl(dev);
+ return -EBUSY;
+ }
+ if (txn->mt == SLIM_MSG_MT_CORE &&
+ mc == SLIM_MSG_MC_BEGIN_RECONFIGURATION) {
+ if (dev->reconf_busy) {
+ wait_for_completion(&dev->reconf);
+ dev->reconf_busy = false;
+ }
+ /* This "get" votes for data channels */
+ if (dev->ctrl.sched.usedslots != 0 &&
+ !dev->chan_active) {
+ int chv = msm_slim_get_ctrl(dev);
+
+ if (chv >= 0)
+ dev->chan_active = true;
+ }
+ }
+ txn->rl--;
+ pbuf = msm_get_msg_buf(dev, txn->rl, &done);
+ dev->err = 0;
+
+ if (txn->dt == SLIM_MSG_DEST_ENUMADDR) {
+ mutex_unlock(&dev->tx_lock);
+ if (msgv >= 0)
+ msm_slim_put_ctrl(dev);
+ return -EPROTONOSUPPORT;
+ }
+ if (txn->mt == SLIM_MSG_MT_CORE && txn->la == 0xFF &&
+ (mc == SLIM_MSG_MC_CONNECT_SOURCE ||
+ mc == SLIM_MSG_MC_CONNECT_SINK ||
+ mc == SLIM_MSG_MC_DISCONNECT_PORT))
+ la = dev->pgdla;
+ if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
+ *pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, mc, 0, la);
+ else
+ *pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, mc, 1, la);
+ if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
+ puc = ((u8 *)pbuf) + 3;
+ else
+ puc = ((u8 *)pbuf) + 2;
+ if (txn->rbuf)
+ *(puc++) = txn->tid;
+ if ((txn->mt == SLIM_MSG_MT_CORE) &&
+ ((mc >= SLIM_MSG_MC_REQUEST_INFORMATION &&
+ mc <= SLIM_MSG_MC_REPORT_INFORMATION) ||
+ (mc >= SLIM_MSG_MC_REQUEST_VALUE &&
+ mc <= SLIM_MSG_MC_CHANGE_VALUE))) {
+ *(puc++) = (txn->ec & 0xFF);
+ *(puc++) = (txn->ec >> 8)&0xFF;
+ }
+ if (txn->wbuf)
+ memcpy(puc, txn->wbuf, txn->len);
+ if (txn->mt == SLIM_MSG_MT_CORE && txn->la == 0xFF &&
+ (mc == SLIM_MSG_MC_CONNECT_SOURCE ||
+ mc == SLIM_MSG_MC_CONNECT_SINK ||
+ mc == SLIM_MSG_MC_DISCONNECT_PORT)) {
+ if (mc != SLIM_MSG_MC_DISCONNECT_PORT)
+ dev->err = msm_slim_connect_pipe_port(dev, *puc);
+ else {
+			/*
+			 * Remove-channel already disconnects master-side
+			 * ports from the channel, so there is no need to send
+			 * that again on the bus; only disable the port.
+			 */
+ writel_relaxed(0, PGD_PORT(PGD_PORT_CFGn,
+ dev->pipes[*puc].port_b, dev->ver));
+ mutex_unlock(&dev->tx_lock);
+ if (msgv >= 0)
+ msm_slim_put_ctrl(dev);
+ return 0;
+ }
+ if (dev->err) {
+ dev_err(dev->dev, "pipe-port connect err:%d", dev->err);
+ mutex_unlock(&dev->tx_lock);
+ if (msgv >= 0)
+ msm_slim_put_ctrl(dev);
+ return dev->err;
+ }
+ *(puc) = (u8)dev->pipes[*puc].port_b;
+ }
+ if (txn->mt == SLIM_MSG_MT_CORE &&
+ mc == SLIM_MSG_MC_BEGIN_RECONFIGURATION)
+ dev->reconf_busy = true;
+ msm_send_msg_buf(dev, pbuf, txn->rl, MGR_TX_MSG);
+ timeout = wait_for_completion_timeout(&done, HZ);
+ if (mc == SLIM_MSG_MC_RECONFIGURE_NOW) {
+ if ((txn->mc == (SLIM_MSG_MC_RECONFIGURE_NOW |
+ SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
+ timeout) {
+ timeout = wait_for_completion_timeout(&dev->reconf, HZ);
+ dev->reconf_busy = false;
+ if (timeout) {
+ clk_disable_unprepare(dev->rclk);
+ disable_irq(dev->irq);
+ }
+ }
+ if ((txn->mc == (SLIM_MSG_MC_RECONFIGURE_NOW |
+ SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
+ !timeout) {
+ dev->reconf_busy = false;
+ dev_err(dev->dev, "clock pause failed");
+ mutex_unlock(&dev->tx_lock);
+ return -ETIMEDOUT;
+ }
+ if (txn->mt == SLIM_MSG_MT_CORE &&
+ txn->mc == SLIM_MSG_MC_RECONFIGURE_NOW) {
+ if (dev->ctrl.sched.usedslots == 0 &&
+ dev->chan_active) {
+ dev->chan_active = false;
+ msm_slim_put_ctrl(dev);
+ }
+ }
+ }
+ mutex_unlock(&dev->tx_lock);
+ if (msgv >= 0)
+ msm_slim_put_ctrl(dev);
+
+ if (!timeout)
+ dev_err(dev->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc,
+ txn->mt);
+
+ return timeout ? dev->err : -ETIMEDOUT;
+}
+
+static void msm_slim_wait_retry(struct msm_slim_ctrl *dev)
+{
+ int msec_per_frm = 0;
+ int sfr_per_sec;
+ /* Wait for 1 superframe, or default time and then retry */
+ sfr_per_sec = dev->framer.superfreq /
+ (1 << (SLIM_MAX_CLK_GEAR - dev->ctrl.clkgear));
+ if (sfr_per_sec)
+ msec_per_frm = MSEC_PER_SEC / sfr_per_sec;
+ if (msec_per_frm < DEF_RETRY_MS)
+ msec_per_frm = DEF_RETRY_MS;
+ msleep(msec_per_frm);
+}
+static int msm_set_laddr(struct slim_controller *ctrl, const u8 *ea,
+ u8 elen, u8 laddr)
+{
+ struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
+ struct completion done;
+ int timeout, ret, retries = 0;
+ u32 *buf;
+retry_laddr:
+ init_completion(&done);
+ mutex_lock(&dev->tx_lock);
+ buf = msm_get_msg_buf(dev, 9, &done);
+	if (buf == NULL) {
+		mutex_unlock(&dev->tx_lock);
+		return -ENOMEM;
+	}
+ buf[0] = SLIM_MSG_ASM_FIRST_WORD(9, SLIM_MSG_MT_CORE,
+ SLIM_MSG_MC_ASSIGN_LOGICAL_ADDRESS,
+ SLIM_MSG_DEST_LOGICALADDR,
+ ea[5] | ea[4] << 8);
+ buf[1] = ea[3] | (ea[2] << 8) | (ea[1] << 16) | (ea[0] << 24);
+ buf[2] = laddr;
+
+ ret = msm_send_msg_buf(dev, buf, 9, MGR_TX_MSG);
+ timeout = wait_for_completion_timeout(&done, HZ);
+ if (!timeout)
+ dev->err = -ETIMEDOUT;
+ if (dev->err) {
+ ret = dev->err;
+ dev->err = 0;
+ }
+ mutex_unlock(&dev->tx_lock);
+ if (ret) {
+ pr_err("set LADDR:0x%x failed:ret:%d, retrying", laddr, ret);
+ if (retries < INIT_MX_RETRIES) {
+ msm_slim_wait_retry(dev);
+ retries++;
+ goto retry_laddr;
+ } else {
+ pr_err("set LADDR failed after retrying:ret:%d", ret);
+ }
+ }
+ return ret;
+}
+
+static int msm_clk_pause_wakeup(struct slim_controller *ctrl)
+{
+ struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
+
+ enable_irq(dev->irq);
+ clk_prepare_enable(dev->rclk);
+ writel_relaxed(1, dev->base + FRM_WAKEUP);
+ /* Make sure framer wakeup write goes through before exiting function */
+ mb();
+	/*
+	 * Workaround: Currently, the slave reports lost-sync messages
+	 * after slimbus comes out of clock pause.
+	 * Transactions with the slave fail before the slave reports that
+	 * message, so give some time for the report to arrive.
+	 * Slimbus wakes up in clock gear 10 at 24.576MHz. With each superframe
+	 * being 250 usecs, we wait for 20 superframes here to ensure
+	 * we get the message.
+	 */
+ usleep_range(4950, 5000);
+ return 0;
+}
+
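+/*
+ * Handle a channel request from a satellite: map the satellite's channel
+ * numbers to framework channel handles, parse the requested channel
+ * properties, and define (and optionally activate) the channels as a group.
+ */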
+static int msm_sat_define_ch(struct msm_slim_sat *sat, u8 *buf, u8 len, u8 mc)
+{
+ struct msm_slim_ctrl *dev = sat->dev;
+ enum slim_ch_control oper;
+ int i;
+ int ret = 0;
+
+ if (mc == SLIM_USR_MC_CHAN_CTRL) {
+ for (i = 0; i < sat->nsatch; i++) {
+ if (buf[5] == sat->satch[i].chan)
+ break;
+ }
+ if (i >= sat->nsatch)
+ return -ENOTCONN;
+ oper = ((buf[3] & 0xC0) >> 6);
+		/* Part of a group: activating/removing one takes care of the rest */
+ ret = slim_control_ch(&sat->satcl, sat->satch[i].chanh, oper,
+ false);
+ if (!ret) {
+ for (i = 5; i < len; i++) {
+ int j;
+
+ for (j = 0; j < sat->nsatch; j++) {
+ if (buf[i] != sat->satch[j].chan)
+ continue;
+
+ if (oper == SLIM_CH_REMOVE)
+ sat->satch[j].req_rem++;
+ else
+ sat->satch[j].req_def++;
+ break;
+ }
+ }
+ }
+ } else {
+ u16 chh[40];
+ struct slim_ch prop;
+ u32 exp;
+ u16 *grph = NULL;
+ u8 coeff, cc;
+ u8 prrate = buf[6];
+
+ if (len <= 8)
+ return -EINVAL;
+ for (i = 8; i < len; i++) {
+ int j = 0;
+
+ for (j = 0; j < sat->nsatch; j++) {
+ if (sat->satch[j].chan == buf[i]) {
+ chh[i - 8] = sat->satch[j].chanh;
+ break;
+ }
+ }
+ if (j < sat->nsatch) {
+ u16 dummy;
+
+ ret = slim_query_ch(&sat->satcl, buf[i],
+ &dummy);
+ if (ret)
+ return ret;
+ if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
+ sat->satch[j].req_def++;
+ /* First channel in group from satellite */
+ if (i == 8)
+ grph = &sat->satch[j].chanh;
+ continue;
+ }
+ if (sat->nsatch >= MSM_MAX_SATCH)
+ return -EXFULL;
+ ret = slim_query_ch(&sat->satcl, buf[i], &chh[i - 8]);
+ if (ret)
+ return ret;
+ sat->satch[j].chan = buf[i];
+ sat->satch[j].chanh = chh[i - 8];
+ if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
+ sat->satch[j].req_def++;
+ if (i == 8)
+ grph = &sat->satch[j].chanh;
+ sat->nsatch++;
+ }
+ prop.dataf = (enum slim_ch_dataf)((buf[3] & 0xE0) >> 5);
+ prop.auxf = (enum slim_ch_auxf)((buf[4] & 0xC0) >> 5);
+		if (prrate & 0x8)
+			prop.baser = SLIM_RATE_11025HZ;
+		else
+			prop.baser = SLIM_RATE_4000HZ;
+ prop.prot = (enum slim_ch_proto)(buf[5] & 0x0F);
+ prop.sampleszbits = (buf[4] & 0x1F)*SLIM_CL_PER_SL;
+ exp = (u32)((buf[5] & 0xF0) >> 4);
+ coeff = (buf[4] & 0x20) >> 5;
+ cc = (coeff ? 3 : 1);
+ prop.ratem = cc * (1 << exp);
+ if (i > 9)
+ ret = slim_define_ch(&sat->satcl, &prop, chh, len - 8,
+ true, &chh[0]);
+ else
+ ret = slim_define_ch(&sat->satcl, &prop,
+ chh, 1, true, &chh[0]);
+ dev_dbg(dev->dev, "define sat grp returned:%d", ret);
+ if (ret)
+ return ret;
+ else if (grph)
+ *grph = chh[0];
+
+		/* Part of a group, so activating one takes care of the rest */
+ if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
+ ret = slim_control_ch(&sat->satcl,
+ chh[0],
+ SLIM_CH_ACTIVATE, false);
+ }
+ return ret;
+}
+
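+/* Process received messages (device reports, replies) outside IRQ context */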
+static void msm_slim_rxwq(struct msm_slim_ctrl *dev)
+{
+ u8 buf[40];
+ u8 mc, mt, len;
+ int i, ret;
+
+ if ((msm_slim_rx_dequeue(dev, (u8 *)buf)) != -ENODATA) {
+ len = buf[0] & 0x1F;
+ mt = (buf[0] >> 5) & 0x7;
+ mc = buf[1];
+ if (mt == SLIM_MSG_MT_CORE &&
+ mc == SLIM_MSG_MC_REPORT_PRESENT) {
+ u8 laddr;
+ u8 e_addr[6];
+
+ for (i = 0; i < 6; i++)
+ e_addr[i] = buf[7-i];
+
+ ret = slim_assign_laddr(&dev->ctrl, e_addr, 6, &laddr,
+ false);
+			/* Is this a QTI ported generic device? */
+ if (!ret && e_addr[5] == QC_MFGID_LSB &&
+ e_addr[4] == QC_MFGID_MSB &&
+ e_addr[1] == QC_DEVID_PGD &&
+ e_addr[2] != QC_CHIPID_SL)
+ dev->pgdla = laddr;
+ if (!ret && !pm_runtime_enabled(dev->dev) &&
+ laddr == (QC_MSM_DEVS - 1))
+ pm_runtime_enable(dev->dev);
+
+ if (!ret && msm_is_sat_dev(e_addr)) {
+ struct msm_slim_sat *sat = addr_to_sat(dev,
+ laddr);
+ if (!sat)
+ sat = msm_slim_alloc_sat(dev);
+ if (!sat)
+ return;
+
+ sat->satcl.laddr = laddr;
+ msm_sat_enqueue(sat, (u32 *)buf, len);
+ queue_work(sat->wq, &sat->wd);
+ }
+ if (ret)
+ pr_err("assign laddr failed, error:%d", ret);
+ } else if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
+ mc == SLIM_MSG_MC_REPLY_VALUE) {
+ u8 tid = buf[3];
+
+ dev_dbg(dev->dev, "tid:%d, len:%d\n", tid, len - 4);
+ slim_msg_response(&dev->ctrl, &buf[4], tid,
+ len - 4);
+ pm_runtime_mark_last_busy(dev->dev);
+ } else if (mc == SLIM_MSG_MC_REPORT_INFORMATION) {
+ u8 l_addr = buf[2];
+ u16 ele = (u16)buf[4] << 4;
+
+ ele |= ((buf[3] & 0xf0) >> 4);
+ dev_err(dev->dev, "Slim-dev:%d report inf element:0x%x",
+ l_addr, ele);
+ for (i = 0; i < len - 5; i++)
+ dev_err(dev->dev, "offset:0x%x:bit mask:%x",
+ i, buf[i+5]);
+ } else {
+ dev_err(dev->dev, "unexpected message:mc:%x, mt:%x",
+ mc, mt);
+ for (i = 0; i < len; i++)
+ dev_err(dev->dev, "error msg: %x", buf[i]);
+
+ }
+ } else
+ dev_err(dev->dev, "rxwq called and no dequeue");
+}
+
+static void slim_sat_rxprocess(struct work_struct *work)
+{
+ struct msm_slim_sat *sat = container_of(work, struct msm_slim_sat, wd);
+ struct msm_slim_ctrl *dev = sat->dev;
+ u8 buf[40];
+
+ while ((msm_sat_dequeue(sat, buf)) != -ENODATA) {
+ struct slim_msg_txn txn;
+ u8 len, mc, mt;
+ u32 bw_sl;
+ int ret = 0;
+ int satv = -1;
+ bool gen_ack = false;
+ u8 tid;
+ u8 wbuf[8];
+ int i, retries = 0;
+
+ txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
+ txn.dt = SLIM_MSG_DEST_LOGICALADDR;
+ txn.ec = 0;
+ txn.rbuf = NULL;
+ txn.la = sat->satcl.laddr;
+ /* satellite handling */
+ len = buf[0] & 0x1F;
+ mc = buf[1];
+ mt = (buf[0] >> 5) & 0x7;
+
+ if (mt == SLIM_MSG_MT_CORE &&
+ mc == SLIM_MSG_MC_REPORT_PRESENT) {
+ u8 e_addr[6];
+
+ for (i = 0; i < 6; i++)
+ e_addr[i] = buf[7-i];
+
+ if (pm_runtime_enabled(dev->dev)) {
+ satv = msm_slim_get_ctrl(dev);
+ if (satv >= 0)
+ sat->pending_capability = true;
+ }
+			/*
+			 * Since the capability message was already sent, a new
+			 * present message indicates that the subsystem hosting
+			 * this satellite has restarted.
+			 * Remove all active channels of this satellite when
+			 * this is detected.
+			 */
+ if (sat->sent_capability) {
+ for (i = 0; i < sat->nsatch; i++) {
+ if (sat->satch[i].reconf) {
+ pr_err("SSR, sat:%d, rm ch:%d",
+ sat->satcl.laddr,
+ sat->satch[i].chan);
+ slim_control_ch(&sat->satcl,
+ sat->satch[i].chanh,
+ SLIM_CH_REMOVE, true);
+ slim_dealloc_ch(&sat->satcl,
+ sat->satch[i].chanh);
+ sat->satch[i].reconf = false;
+ }
+ }
+ }
+ } else if (mt != SLIM_MSG_MT_CORE &&
+ mc != SLIM_MSG_MC_REPORT_PRESENT) {
+ satv = msm_slim_get_ctrl(dev);
+ }
+ switch (mc) {
+ case SLIM_MSG_MC_REPORT_PRESENT:
+ /* Remove runtime_pm vote once satellite acks */
+ if (mt != SLIM_MSG_MT_CORE) {
+ if (pm_runtime_enabled(dev->dev) &&
+ sat->pending_capability) {
+ msm_slim_put_ctrl(dev);
+ sat->pending_capability = false;
+ }
+ continue;
+ }
+ /* send a Manager capability msg */
+ if (sat->sent_capability) {
+ if (mt == SLIM_MSG_MT_CORE)
+ goto send_capability;
+ else
+ continue;
+ }
+ ret = slim_add_device(&dev->ctrl, &sat->satcl);
+ if (ret) {
+ dev_err(dev->dev,
+ "Satellite-init failed");
+ continue;
+ }
+ /* Satellite-channels */
+ sat->satch = kzalloc(MSM_MAX_SATCH *
+ sizeof(struct msm_sat_chan),
+ GFP_KERNEL);
+send_capability:
+ txn.mc = SLIM_USR_MC_MASTER_CAPABILITY;
+ txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
+ txn.la = sat->satcl.laddr;
+ txn.rl = 8;
+ wbuf[0] = SAT_MAGIC_LSB;
+ wbuf[1] = SAT_MAGIC_MSB;
+ wbuf[2] = SAT_MSG_VER;
+ wbuf[3] = SAT_MSG_PROT;
+ txn.wbuf = wbuf;
+ txn.len = 4;
+ ret = msm_xfer_msg(&dev->ctrl, &txn);
+ if (ret) {
+ pr_err("capability for:0x%x fail:%d, retry:%d",
+ sat->satcl.laddr, ret, retries);
+ if (retries < INIT_MX_RETRIES) {
+ msm_slim_wait_retry(dev);
+ retries++;
+ goto send_capability;
+ } else {
+ pr_err("failed after all retries:%d",
+ ret);
+ }
+ } else {
+ sat->sent_capability = true;
+ }
+ break;
+ case SLIM_USR_MC_ADDR_QUERY:
+ memcpy(&wbuf[1], &buf[4], 6);
+ ret = slim_get_logical_addr(&sat->satcl,
+ &wbuf[1], 6, &wbuf[7]);
+ if (ret)
+ memset(&wbuf[1], 0, 6);
+ wbuf[0] = buf[3];
+ txn.mc = SLIM_USR_MC_ADDR_REPLY;
+ txn.rl = 12;
+ txn.len = 8;
+ txn.wbuf = wbuf;
+ msm_xfer_msg(&dev->ctrl, &txn);
+ break;
+ case SLIM_USR_MC_DEFINE_CHAN:
+ case SLIM_USR_MC_DEF_ACT_CHAN:
+ case SLIM_USR_MC_CHAN_CTRL:
+ if (mc != SLIM_USR_MC_CHAN_CTRL)
+ tid = buf[7];
+ else
+ tid = buf[4];
+ gen_ack = true;
+ ret = msm_sat_define_ch(sat, buf, len, mc);
+ if (ret) {
+ dev_err(dev->dev,
+ "SAT define_ch returned:%d",
+ ret);
+ }
+ if (!sat->pending_reconf) {
+ int chv = msm_slim_get_ctrl(dev);
+
+ if (chv >= 0)
+ sat->pending_reconf = true;
+ }
+ break;
+ case SLIM_USR_MC_RECONFIG_NOW:
+ tid = buf[3];
+ gen_ack = true;
+ ret = slim_reconfigure_now(&sat->satcl);
+ for (i = 0; i < sat->nsatch; i++) {
+ struct msm_sat_chan *sch = &sat->satch[i];
+
+ if (sch->req_rem && sch->reconf) {
+ if (!ret) {
+ slim_dealloc_ch(&sat->satcl,
+ sch->chanh);
+ sch->reconf = false;
+ }
+ sch->req_rem--;
+ } else if (sch->req_def) {
+ if (ret)
+ slim_dealloc_ch(&sat->satcl,
+ sch->chanh);
+ else
+ sch->reconf = true;
+ sch->req_def--;
+ }
+ }
+ if (sat->pending_reconf) {
+ msm_slim_put_ctrl(dev);
+ sat->pending_reconf = false;
+ }
+ break;
+ case SLIM_USR_MC_REQ_BW:
+ /* what we get is in SLOTS */
+ bw_sl = (u32)buf[4] << 3 |
+ ((buf[3] & 0xE0) >> 5);
+ sat->satcl.pending_msgsl = bw_sl;
+ tid = buf[5];
+ gen_ack = true;
+ break;
+ case SLIM_USR_MC_CONNECT_SRC:
+ case SLIM_USR_MC_CONNECT_SINK:
+ if (mc == SLIM_USR_MC_CONNECT_SRC)
+ txn.mc = SLIM_MSG_MC_CONNECT_SOURCE;
+ else
+ txn.mc = SLIM_MSG_MC_CONNECT_SINK;
+ wbuf[0] = buf[4] & 0x1F;
+ wbuf[1] = buf[5];
+ tid = buf[6];
+ txn.la = buf[3];
+ txn.mt = SLIM_MSG_MT_CORE;
+ txn.rl = 6;
+ txn.len = 2;
+ txn.wbuf = wbuf;
+ gen_ack = true;
+ ret = msm_xfer_msg(&dev->ctrl, &txn);
+ break;
+ case SLIM_USR_MC_DISCONNECT_PORT:
+ txn.mc = SLIM_MSG_MC_DISCONNECT_PORT;
+ wbuf[0] = buf[4] & 0x1F;
+ tid = buf[5];
+ txn.la = buf[3];
+ txn.rl = 5;
+ txn.len = 1;
+ txn.mt = SLIM_MSG_MT_CORE;
+ txn.wbuf = wbuf;
+ gen_ack = true;
+ ret = msm_xfer_msg(&dev->ctrl, &txn);
+ break;
+ case SLIM_MSG_MC_REPORT_ABSENT:
+ dev_info(dev->dev, "Received Report Absent Message\n");
+ break;
+ default:
+ break;
+ }
+ if (!gen_ack) {
+ if (mc != SLIM_MSG_MC_REPORT_PRESENT && satv >= 0)
+ msm_slim_put_ctrl(dev);
+ continue;
+ }
+
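+		/* Ack the satellite request with the transaction ID and status */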
+ wbuf[0] = tid;
+ if (!ret)
+ wbuf[1] = MSM_SAT_SUCCSS;
+ else
+ wbuf[1] = 0;
+ txn.mc = SLIM_USR_MC_GENERIC_ACK;
+ txn.la = sat->satcl.laddr;
+ txn.rl = 6;
+ txn.len = 2;
+ txn.wbuf = wbuf;
+ txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
+ msm_xfer_msg(&dev->ctrl, &txn);
+ if (satv >= 0)
+ msm_slim_put_ctrl(dev);
+ }
+}
+
+static struct msm_slim_sat *msm_slim_alloc_sat(struct msm_slim_ctrl *dev)
+{
+ struct msm_slim_sat *sat;
+ char *name;
+
+ if (dev->nsats >= MSM_MAX_NSATS)
+ return NULL;
+
+ sat = kzalloc(sizeof(struct msm_slim_sat), GFP_KERNEL);
+ if (!sat)
+ return NULL;
+ name = kzalloc(SLIMBUS_NAME_SIZE, GFP_KERNEL);
+ if (!name) {
+ kfree(sat);
+ return NULL;
+ }
+ dev->satd[dev->nsats] = sat;
+ sat->dev = dev;
+ snprintf(name, SLIMBUS_NAME_SIZE, "msm_sat%d", dev->nsats);
+ sat->satcl.name = name;
+ spin_lock_init(&sat->lock);
+ INIT_WORK(&sat->wd, slim_sat_rxprocess);
+ sat->wq = create_singlethread_workqueue(sat->satcl.name);
+ if (!sat->wq) {
+ kfree(name);
+ kfree(sat);
+ return NULL;
+ }
+	/*
+	 * Both sats are allocated from the RX thread, and the RX thread
+	 * processes messages sequentially, so no synchronization is necessary.
+	 */
+ dev->nsats++;
+ return sat;
+}
+
+static int msm_slim_rx_msgq_thread(void *data)
+{
+ struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)data;
+ struct completion *notify = &dev->rx_msgq_notify;
+ struct msm_slim_sat *sat = NULL;
+ u32 mc = 0;
+ u32 mt = 0;
+ u32 buffer[10];
+ int index = 0;
+ u8 msg_len = 0;
+ int ret;
+
+ dev_dbg(dev->dev, "rx thread started");
+
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ ret = wait_for_completion_interruptible(notify);
+
+ if (ret)
+ dev_err(dev->dev, "rx thread wait error:%d", ret);
+
+ /* 1 irq notification per message */
+ if (dev->use_rx_msgqs != MSM_MSGQ_ENABLED) {
+ msm_slim_rxwq(dev);
+ continue;
+ }
+
+ ret = msm_slim_rx_msgq_get(dev, buffer, index);
+ if (ret) {
+ dev_err(dev->dev, "rx_msgq_get() failed 0x%x\n", ret);
+ continue;
+ }
+
+ pr_debug("message[%d] = 0x%x\n", index, *buffer);
+
+ /* Decide if we use generic RX or satellite RX */
+ if (index++ == 0) {
+ msg_len = *buffer & 0x1F;
+ pr_debug("Start of new message, len = %d\n", msg_len);
+ mt = (buffer[0] >> 5) & 0x7;
+ mc = (buffer[0] >> 8) & 0xff;
+ dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt);
+ if (mt == SLIM_MSG_MT_DEST_REFERRED_USER ||
+ mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
+ u8 laddr;
+
+ laddr = (u8)((buffer[0] >> 16) & 0xff);
+ sat = addr_to_sat(dev, laddr);
+ }
+ }
+ if ((index * 4) >= msg_len) {
+ index = 0;
+ if (sat) {
+ msm_sat_enqueue(sat, buffer, msg_len);
+ queue_work(sat->wq, &sat->wd);
+ sat = NULL;
+ } else {
+ msm_slim_rx_enqueue(dev, buffer, msg_len);
+ msm_slim_rxwq(dev);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void msm_slim_prg_slew(struct platform_device *pdev,
+ struct msm_slim_ctrl *dev)
+{
+ struct resource *slew_io;
+ void __iomem *slew_reg;
+ /* SLEW RATE register for this slimbus */
+ dev->slew_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "slimbus_slew_reg");
+ if (!dev->slew_mem) {
+ dev_dbg(&pdev->dev, "no slimbus slew resource\n");
+ return;
+ }
+ slew_io = request_mem_region(dev->slew_mem->start,
+ resource_size(dev->slew_mem), pdev->name);
+ if (!slew_io) {
+ dev_dbg(&pdev->dev, "slimbus-slew mem claimed\n");
+ dev->slew_mem = NULL;
+ return;
+ }
+
+ slew_reg = ioremap(dev->slew_mem->start, resource_size(dev->slew_mem));
+ if (!slew_reg) {
+ dev_dbg(dev->dev, "slew register mapping failed");
+ release_mem_region(dev->slew_mem->start,
+ resource_size(dev->slew_mem));
+ dev->slew_mem = NULL;
+ return;
+ }
+ writel_relaxed(1, slew_reg);
+ /* Make sure slimbus-slew rate enabling goes through */
+ wmb();
+ iounmap(slew_reg);
+}
+
+static int msm_slim_probe(struct platform_device *pdev)
+{
+ struct msm_slim_ctrl *dev;
+ int ret;
+ enum apr_subsys_state q6_state;
+ struct resource *bam_mem, *bam_io;
+ struct resource *slim_mem, *slim_io;
+ struct resource *irq, *bam_irq;
+ bool rxreg_access = false;
+
+ q6_state = apr_get_q6_state();
+ if (q6_state == APR_SUBSYS_DOWN) {
+		dev_dbg(&pdev->dev, "deferring %s, adsp_state %d\n", __func__,
+			q6_state);
+ return -EPROBE_DEFER;
+ }
+ dev_dbg(&pdev->dev, "adsp is ready\n");
+
+ slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "slimbus_physical");
+ if (!slim_mem) {
+ dev_err(&pdev->dev, "no slimbus physical memory resource\n");
+ return -ENODEV;
+ }
+ slim_io = request_mem_region(slim_mem->start, resource_size(slim_mem),
+ pdev->name);
+ if (!slim_io) {
+ dev_err(&pdev->dev, "slimbus memory already claimed\n");
+ return -EBUSY;
+ }
+
+ bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "slimbus_bam_physical");
+ if (!bam_mem) {
+ dev_err(&pdev->dev, "no slimbus BAM memory resource\n");
+ ret = -ENODEV;
+ goto err_get_res_bam_failed;
+ }
+ bam_io = request_mem_region(bam_mem->start, resource_size(bam_mem),
+ pdev->name);
+ if (!bam_io) {
+ release_mem_region(slim_mem->start, resource_size(slim_mem));
+ dev_err(&pdev->dev, "slimbus BAM memory already claimed\n");
+ ret = -EBUSY;
+ goto err_get_res_bam_failed;
+ }
+ irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+ "slimbus_irq");
+ if (!irq) {
+ dev_err(&pdev->dev, "no slimbus IRQ resource\n");
+ ret = -ENODEV;
+ goto err_get_res_failed;
+ }
+ bam_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+ "slimbus_bam_irq");
+ if (!bam_irq) {
+ dev_err(&pdev->dev, "no slimbus BAM IRQ resource\n");
+ ret = -ENODEV;
+ goto err_get_res_failed;
+ }
+
+ dev = kzalloc(sizeof(struct msm_slim_ctrl), GFP_KERNEL);
+ if (!dev) {
+ ret = -ENOMEM;
+ goto err_get_res_failed;
+ }
+ dev->wr_comp = kzalloc(sizeof(struct completion *) * MSM_TX_BUFS,
+ GFP_KERNEL);
+	if (!dev->wr_comp) {
+		ret = -ENOMEM;
+		kfree(dev);
+		goto err_get_res_failed;
+	}
+ dev->dev = &pdev->dev;
+ platform_set_drvdata(pdev, dev);
+ slim_set_ctrldata(&dev->ctrl, dev);
+ dev->base = ioremap(slim_mem->start, resource_size(slim_mem));
+ if (!dev->base) {
+ dev_err(&pdev->dev, "IOremap failed\n");
+ ret = -ENOMEM;
+ goto err_ioremap_failed;
+ }
+ dev->bam.base = ioremap(bam_mem->start, resource_size(bam_mem));
+ if (!dev->bam.base) {
+ dev_err(&pdev->dev, "BAM IOremap failed\n");
+ ret = -ENOMEM;
+ goto err_ioremap_bam_failed;
+ }
+ if (pdev->dev.of_node) {
+
+ ret = of_property_read_u32(pdev->dev.of_node, "cell-index",
+ &dev->ctrl.nr);
+ if (ret) {
+ dev_err(&pdev->dev, "Cell index not specified:%d", ret);
+ goto err_of_init_failed;
+ }
+ rxreg_access = of_property_read_bool(pdev->dev.of_node,
+ "qcom,rxreg-access");
+ /* Optional properties */
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "qcom,min-clk-gear", &dev->ctrl.min_cg);
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "qcom,max-clk-gear", &dev->ctrl.max_cg);
+ pr_debug("min_cg:%d, max_cg:%d, rxreg: %d", dev->ctrl.min_cg,
+ dev->ctrl.max_cg, rxreg_access);
+ } else {
+ dev->ctrl.nr = pdev->id;
+ }
+ dev->ctrl.nchans = MSM_SLIM_NCHANS;
+ dev->ctrl.nports = MSM_SLIM_NPORTS;
+ dev->ctrl.set_laddr = msm_set_laddr;
+ dev->ctrl.xfer_msg = msm_xfer_msg;
+ dev->ctrl.wakeup = msm_clk_pause_wakeup;
+ dev->ctrl.alloc_port = msm_alloc_port;
+ dev->ctrl.dealloc_port = msm_dealloc_port;
+ dev->ctrl.port_xfer = msm_slim_port_xfer;
+ dev->ctrl.port_xfer_status = msm_slim_port_xfer_status;
+ /* Reserve some messaging BW for satellite-apps driver communication */
+ dev->ctrl.sched.pending_msgsl = 30;
+
+ init_completion(&dev->reconf);
+ mutex_init(&dev->tx_lock);
+ spin_lock_init(&dev->rx_lock);
+ dev->ee = 1;
+ if (rxreg_access)
+ dev->use_rx_msgqs = MSM_MSGQ_DISABLED;
+ else
+ dev->use_rx_msgqs = MSM_MSGQ_RESET;
+
+ dev->irq = irq->start;
+ dev->bam.irq = bam_irq->start;
+
+ dev->hclk = clk_get(dev->dev, "iface_clk");
+ if (IS_ERR(dev->hclk))
+ dev->hclk = NULL;
+ else
+ clk_prepare_enable(dev->hclk);
+
+ ret = msm_slim_sps_init(dev, bam_mem, MGR_STATUS, false);
+ if (ret != 0) {
+ dev_err(dev->dev, "error SPS init\n");
+ goto err_sps_init_failed;
+ }
+
+ /* Fire up the Rx message queue thread */
+ dev->rx_msgq_thread = kthread_run(msm_slim_rx_msgq_thread, dev,
+ MSM_SLIM_NAME "_rx_msgq_thread");
+ if (IS_ERR(dev->rx_msgq_thread)) {
+ ret = PTR_ERR(dev->rx_msgq_thread);
+ dev_err(dev->dev, "Failed to start Rx message queue thread\n");
+ goto err_thread_create_failed;
+ }
+
+ dev->framer.rootfreq = SLIM_ROOT_FREQ >> 3;
+ dev->framer.superfreq =
+ dev->framer.rootfreq / SLIM_CL_PER_SUPERFRAME_DIV8;
+ dev->ctrl.a_framer = &dev->framer;
+ dev->ctrl.clkgear = SLIM_MAX_CLK_GEAR;
+ dev->ctrl.dev.parent = &pdev->dev;
+ dev->ctrl.dev.of_node = pdev->dev.of_node;
+
+ ret = request_threaded_irq(dev->irq, NULL, msm_slim_interrupt,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "msm_slim_irq", dev);
+ if (ret) {
+ dev_err(&pdev->dev, "request IRQ failed\n");
+ goto err_request_irq_failed;
+ }
+
+ msm_slim_prg_slew(pdev, dev);
+
+ /* Register with framework before enabling frame, clock */
+ ret = slim_add_numbered_controller(&dev->ctrl);
+ if (ret) {
+ dev_err(dev->dev, "error adding controller\n");
+ goto err_ctrl_failed;
+ }
+
+
+	dev->rclk = clk_get(dev->dev, "core_clk");
+	if (IS_ERR(dev->rclk)) {
+		dev_err(dev->dev, "slimbus clock not found");
+		ret = PTR_ERR(dev->rclk);
+		goto err_clk_get_failed;
+	}
+ clk_set_rate(dev->rclk, SLIM_ROOT_FREQ);
+ clk_prepare_enable(dev->rclk);
+
+ dev->ver = readl_relaxed(dev->base);
+ /* Version info in 16 MSbits */
+ dev->ver >>= 16;
+ /* Component register initialization */
+ writel_relaxed(1, dev->base + CFG_PORT(COMP_CFG, dev->ver));
+ writel_relaxed((EE_MGR_RSC_GRP | EE_NGD_2 | EE_NGD_1),
+ dev->base + CFG_PORT(COMP_TRUST_CFG, dev->ver));
+
+ /*
+ * Manager register initialization
+ * If RX msg Q is used, disable RX_MSG_RCVD interrupt
+ */
+ if (dev->use_rx_msgqs == MSM_MSGQ_ENABLED)
+ writel_relaxed((MGR_INT_RECFG_DONE | MGR_INT_TX_NACKED_2 |
+ MGR_INT_MSG_BUF_CONTE | /* MGR_INT_RX_MSG_RCVD | */
+ MGR_INT_TX_MSG_SENT), dev->base + MGR_INT_EN);
+ else
+ writel_relaxed((MGR_INT_RECFG_DONE | MGR_INT_TX_NACKED_2 |
+ MGR_INT_MSG_BUF_CONTE | MGR_INT_RX_MSG_RCVD |
+ MGR_INT_TX_MSG_SENT), dev->base + MGR_INT_EN);
+ writel_relaxed(1, dev->base + MGR_CFG);
+ /*
+ * Framer registers are beyond 1K memory region after Manager and/or
+ * component registers. Make sure those writes are ordered
+ * before framer register writes
+ */
+ wmb();
+
+ /* Framer register initialization */
+ writel_relaxed((1 << INTR_WAKE) | (0xA << REF_CLK_GEAR) |
+ (0xA << CLK_GEAR) | (1 << ROOT_FREQ) | (1 << FRM_ACTIVE) | 1,
+ dev->base + FRM_CFG);
+ /*
+ * Make sure that framer wake-up and enabling writes go through
+ * before any other component is enabled. Framer is responsible for
+ * clocking the bus and enabling framer first will ensure that other
+ * devices can report presence when they are enabled
+ */
+ mb();
+
+ /* Enable RX msg Q */
+ if (dev->use_rx_msgqs == MSM_MSGQ_ENABLED)
+ writel_relaxed(MGR_CFG_ENABLE | MGR_CFG_RX_MSGQ_EN,
+ dev->base + MGR_CFG);
+ else
+ writel_relaxed(MGR_CFG_ENABLE, dev->base + MGR_CFG);
+ /*
+ * Make sure that manager-enable is written through before interface
+ * device is enabled
+ */
+ mb();
+ writel_relaxed(1, dev->base + INTF_CFG);
+ /*
+ * Make sure that interface-enable is written through before enabling
+ * ported generic device inside MSM manager
+ */
+ mb();
+ writel_relaxed(1, dev->base + CFG_PORT(PGD_CFG, dev->ver));
+ writel_relaxed(0x3F<<17, dev->base + CFG_PORT(PGD_OWN_EEn, dev->ver) +
+ (4 * dev->ee));
+ /*
+ * Make sure that ported generic device is enabled and port-EE settings
+ * are written through before finally enabling the component
+ */
+ mb();
+
+ writel_relaxed(1, dev->base + CFG_PORT(COMP_CFG, dev->ver));
+ /*
+ * Make sure that all writes have gone through before exiting this
+ * function
+ */
+ mb();
+
+ /* Add devices registered with board-info now that controller is up */
+ slim_ctrl_add_boarddevs(&dev->ctrl);
+
+ if (pdev->dev.of_node)
+ of_register_slim_devices(&dev->ctrl);
+
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_SLIM_AUTOSUSPEND);
+ pm_runtime_set_active(&pdev->dev);
+
+ dev_dbg(dev->dev, "MSM SB controller is up!\n");
+ return 0;
+
+err_ctrl_failed:
+ writel_relaxed(0, dev->base + CFG_PORT(COMP_CFG, dev->ver));
+err_clk_get_failed:
+ kfree(dev->satd);
+err_request_irq_failed:
+ kthread_stop(dev->rx_msgq_thread);
+err_thread_create_failed:
+ msm_slim_sps_exit(dev, true);
+ msm_slim_deinit_ep(dev, &dev->rx_msgq,
+ &dev->use_rx_msgqs);
+ msm_slim_deinit_ep(dev, &dev->tx_msgq,
+ &dev->use_tx_msgqs);
+err_sps_init_failed:
+ if (dev->hclk) {
+ clk_disable_unprepare(dev->hclk);
+ clk_put(dev->hclk);
+ }
+err_of_init_failed:
+ iounmap(dev->bam.base);
+err_ioremap_bam_failed:
+ iounmap(dev->base);
+err_ioremap_failed:
+ kfree(dev->wr_comp);
+ kfree(dev);
+err_get_res_failed:
+ release_mem_region(bam_mem->start, resource_size(bam_mem));
+err_get_res_bam_failed:
+ release_mem_region(slim_mem->start, resource_size(slim_mem));
+ return ret;
+}
+
+static int msm_slim_remove(struct platform_device *pdev)
+{
+ struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
+ struct resource *bam_mem;
+ struct resource *slim_mem;
+ struct resource *slew_mem = dev->slew_mem;
+ int i;
+
+ for (i = 0; i < dev->nsats; i++) {
+ struct msm_slim_sat *sat = dev->satd[i];
+ int j;
+
+ for (j = 0; j < sat->nsatch; j++)
+ slim_dealloc_ch(&sat->satcl, sat->satch[j].chanh);
+ slim_remove_device(&sat->satcl);
+ kfree(sat->satch);
+ destroy_workqueue(sat->wq);
+ kfree(sat->satcl.name);
+ kfree(sat);
+ }
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ free_irq(dev->irq, dev);
+ slim_del_controller(&dev->ctrl);
+ clk_put(dev->rclk);
+ if (dev->hclk)
+ clk_put(dev->hclk);
+ msm_slim_sps_exit(dev, true);
+ msm_slim_deinit_ep(dev, &dev->rx_msgq,
+ &dev->use_rx_msgqs);
+ msm_slim_deinit_ep(dev, &dev->tx_msgq,
+ &dev->use_tx_msgqs);
+
+ kthread_stop(dev->rx_msgq_thread);
+ iounmap(dev->bam.base);
+ iounmap(dev->base);
+ kfree(dev->wr_comp);
+ kfree(dev);
+ bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "slimbus_bam_physical");
+ if (bam_mem)
+ release_mem_region(bam_mem->start, resource_size(bam_mem));
+ if (slew_mem)
+ release_mem_region(slew_mem->start, resource_size(slew_mem));
+ slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "slimbus_physical");
+ if (slim_mem)
+ release_mem_region(slim_mem->start, resource_size(slim_mem));
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int msm_slim_runtime_idle(struct device *device)
+{
+ struct platform_device *pdev = to_platform_device(device);
+ struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
+
+ if (dev->state == MSM_CTRL_AWAKE)
+ dev->state = MSM_CTRL_IDLE;
+ dev_dbg(device, "pm_runtime: idle...\n");
+ pm_request_autosuspend(device);
+ return -EAGAIN;
+}
+#endif
+
+/*
+ * If PM_RUNTIME is not defined, these 2 functions become helpers
+ * called from system suspend/resume, so they are guarded by
+ * CONFIG_PM rather than CONFIG_PM_RUNTIME.
+ */
+#ifdef CONFIG_PM
+static int msm_slim_runtime_suspend(struct device *device)
+{
+ struct platform_device *pdev = to_platform_device(device);
+ struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
+ int ret;
+
+ dev_dbg(device, "pm_runtime: suspending...\n");
+ ret = slim_ctrl_clk_pause(&dev->ctrl, false, SLIM_CLK_UNSPECIFIED);
+ if (ret) {
+ dev_err(device, "clk pause not entered:%d", ret);
+ dev->state = MSM_CTRL_AWAKE;
+ } else {
+ dev->state = MSM_CTRL_ASLEEP;
+ }
+ return ret;
+}
+
+static int msm_slim_runtime_resume(struct device *device)
+{
+ struct platform_device *pdev = to_platform_device(device);
+ struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
+ int ret = 0;
+
+ dev_dbg(device, "pm_runtime: resuming...\n");
+ if (dev->state == MSM_CTRL_ASLEEP)
+ ret = slim_ctrl_clk_pause(&dev->ctrl, true, 0);
+ if (ret) {
+ dev_err(device, "clk pause not exited:%d", ret);
+ dev->state = MSM_CTRL_ASLEEP;
+ } else {
+ dev->state = MSM_CTRL_AWAKE;
+ }
+ return ret;
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int msm_slim_suspend(struct device *dev)
+{
+ int ret = -EBUSY;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_slim_ctrl *cdev = platform_get_drvdata(pdev);
+
+ if (!pm_runtime_enabled(dev) ||
+ (!pm_runtime_suspended(dev) &&
+ cdev->state == MSM_CTRL_IDLE)) {
+ dev_dbg(dev, "system suspend");
+ ret = msm_slim_runtime_suspend(dev);
+ if (!ret) {
+ if (cdev->hclk)
+ clk_disable_unprepare(cdev->hclk);
+ }
+ }
+ if (ret == -EBUSY) {
+		/*
+		 * If the clock pause failed due to active channels, there is
+		 * a possibility that some audio stream is active during
+		 * suspend. We don't want to return suspend failure in that
+		 * case so that display and relevant components can still go
+		 * to suspend.
+		 * If there is some other error, then it should be passed on
+		 * to system-level suspend.
+		 */
+ ret = 0;
+ }
+ return ret;
+}
+
+static int msm_slim_resume(struct device *dev)
+{
+ /* If runtime_pm is enabled, this resume shouldn't do anything */
+ if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) {
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_slim_ctrl *cdev = platform_get_drvdata(pdev);
+ int ret;
+
+ dev_dbg(dev, "system resume");
+ if (cdev->hclk)
+ clk_prepare_enable(cdev->hclk);
+ ret = msm_slim_runtime_resume(dev);
+ if (!ret) {
+ pm_runtime_mark_last_busy(dev);
+ pm_request_autosuspend(dev);
+ }
+ return ret;
+
+ }
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops msm_slim_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(
+ msm_slim_suspend,
+ msm_slim_resume
+ )
+ SET_RUNTIME_PM_OPS(
+ msm_slim_runtime_suspend,
+ msm_slim_runtime_resume,
+ msm_slim_runtime_idle
+ )
+};
+
+static const struct of_device_id msm_slim_dt_match[] = {
+ {
+ .compatible = "qcom,slim-msm",
+ },
+ {}
+};
+
+static struct platform_driver msm_slim_driver = {
+ .probe = msm_slim_probe,
+ .remove = msm_slim_remove,
+ .driver = {
+ .name = MSM_SLIM_NAME,
+ .owner = THIS_MODULE,
+ .pm = &msm_slim_dev_pm_ops,
+ .of_match_table = msm_slim_dt_match,
+ },
+};
+
+static int msm_slim_init(void)
+{
+ return platform_driver_register(&msm_slim_driver);
+}
+subsys_initcall(msm_slim_init);
+
+static void msm_slim_exit(void)
+{
+ platform_driver_unregister(&msm_slim_driver);
+}
+module_exit(msm_slim_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM Slimbus controller");
+MODULE_ALIAS("platform:msm-slim");
diff --git a/drivers/slimbus/slim-msm-ngd.c b/drivers/slimbus/slim-msm-ngd.c
new file mode 100644
index 0000000..969d162
--- /dev/null
+++ b/drivers/slimbus/slim-msm-ngd.c
@@ -0,0 +1,2108 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/slimbus/slimbus.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_slimbus.h>
+#include <linux/timer.h>
+#include <linux/msm-sps.h>
+#include <soc/qcom/service-locator.h>
+#include <soc/qcom/service-notifier.h>
+#include <soc/qcom/subsystem_notif.h>
+#include "slim-msm.h"
+
+#define NGD_SLIM_NAME "ngd_msm_ctrl"
+#define SLIM_LA_MGR 0xFF
+#define SLIM_ROOT_FREQ 24576000
+#define LADDR_RETRY 5
+
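+/*
+ * Base offset of an NGD's register block: odd and even controller
+ * instances map to different offsets, and the layout changed between
+ * controller v1 and v2.
+ */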
+#define NGD_BASE_V1(r) (((r) % 2) ? 0x800 : 0xA00)
+#define NGD_BASE_V2(r) (((r) % 2) ? 0x1000 : 0x2000)
+#define NGD_BASE(r, v) ((v) ? NGD_BASE_V2(r) : NGD_BASE_V1(r))
+/* NGD (Non-ported Generic Device) registers */
+enum ngd_reg {
+ NGD_CFG = 0x0,
+ NGD_STATUS = 0x4,
+ NGD_RX_MSGQ_CFG = 0x8,
+ NGD_INT_EN = 0x10,
+ NGD_INT_STAT = 0x14,
+ NGD_INT_CLR = 0x18,
+ NGD_TX_MSG = 0x30,
+ NGD_RX_MSG = 0x70,
+ NGD_IE_STAT = 0xF0,
+ NGD_VE_STAT = 0x100,
+};
+
+enum ngd_msg_cfg {
+ NGD_CFG_ENABLE = 1,
+ NGD_CFG_RX_MSGQ_EN = 1 << 1,
+ NGD_CFG_TX_MSGQ_EN = 1 << 2,
+};
+
+enum ngd_intr {
+ NGD_INT_RECFG_DONE = 1 << 24,
+ NGD_INT_TX_NACKED_2 = 1 << 25,
+ NGD_INT_MSG_BUF_CONTE = 1 << 26,
+ NGD_INT_MSG_TX_INVAL = 1 << 27,
+ NGD_INT_IE_VE_CHG = 1 << 28,
+ NGD_INT_DEV_ERR = 1 << 29,
+ NGD_INT_RX_MSG_RCVD = 1 << 30,
+ NGD_INT_TX_MSG_SENT = 1 << 31,
+};
+
+enum ngd_offsets {
+ NGD_NACKED_MC = 0x7F00000,
+ NGD_ACKED_MC = 0xFE000,
+ NGD_ERROR = 0x1800,
+ NGD_MSGQ_SUPPORT = 0x400,
+ NGD_RX_MSGQ_TIME_OUT = 0x16,
+ NGD_ENUMERATED = 0x1,
+ NGD_TX_BUSY = 0x0,
+};
+
+enum ngd_status {
+ NGD_LADDR = 1 << 1,
+};
+
+static void ngd_slim_rx(struct msm_slim_ctrl *dev, u8 *buf);
+static int ngd_slim_runtime_resume(struct device *device);
+static int ngd_slim_power_up(struct msm_slim_ctrl *dev, bool mdm_restart);
+static void ngd_dom_down(struct msm_slim_ctrl *dev);
+static int dsp_domr_notify_cb(struct notifier_block *n, unsigned long code,
+ void *_cmd);
+
+static irqreturn_t ngd_slim_interrupt(int irq, void *d)
+{
+ struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)d;
+ void __iomem *ngd = dev->base + NGD_BASE(dev->ctrl.nr, dev->ver);
+ u32 stat = readl_relaxed(ngd + NGD_INT_STAT);
+ u32 pstat;
+
+ if ((stat & NGD_INT_MSG_BUF_CONTE) ||
+ (stat & NGD_INT_MSG_TX_INVAL) || (stat & NGD_INT_DEV_ERR) ||
+ (stat & NGD_INT_TX_NACKED_2)) {
+ writel_relaxed(stat, ngd + NGD_INT_CLR);
+ if (stat & NGD_INT_MSG_TX_INVAL)
+ dev->err = -EINVAL;
+ else
+ dev->err = -EIO;
+
+ SLIM_WARN(dev, "NGD interrupt error:0x%x, err:%d\n", stat,
+ dev->err);
+ /* Guarantee that error interrupts are cleared */
+ mb();
+ msm_slim_manage_tx_msgq(dev, false, NULL, dev->err);
+
+ } else if (stat & NGD_INT_TX_MSG_SENT) {
+ writel_relaxed(NGD_INT_TX_MSG_SENT, ngd + NGD_INT_CLR);
+ /* Make sure interrupt is cleared */
+ mb();
+ msm_slim_manage_tx_msgq(dev, false, NULL, 0);
+ }
+ if (stat & NGD_INT_RX_MSG_RCVD) {
+ u32 rx_buf[10];
+ u8 len, i;
+
+ rx_buf[0] = readl_relaxed(ngd + NGD_RX_MSG);
+ len = rx_buf[0] & 0x1F;
+ for (i = 1; i < ((len + 3) >> 2); i++) {
+ rx_buf[i] = readl_relaxed(ngd + NGD_RX_MSG +
+ (4 * i));
+ SLIM_DBG(dev, "REG-RX data: %x\n", rx_buf[i]);
+ }
+ writel_relaxed(NGD_INT_RX_MSG_RCVD,
+ ngd + NGD_INT_CLR);
+ /*
+ * Guarantee that CLR bit write goes through before
+ * queuing work
+ */
+ mb();
+ ngd_slim_rx(dev, (u8 *)rx_buf);
+ }
+ if (stat & NGD_INT_RECFG_DONE) {
+ writel_relaxed(NGD_INT_RECFG_DONE, ngd + NGD_INT_CLR);
+ /* Guarantee RECONFIG DONE interrupt is cleared */
+ mb();
+ /* In satellite mode, just log the reconfig done IRQ */
+ SLIM_DBG(dev, "reconfig done IRQ for NGD\n");
+ }
+ if (stat & NGD_INT_IE_VE_CHG) {
+ writel_relaxed(NGD_INT_IE_VE_CHG, ngd + NGD_INT_CLR);
+ /* Guarantee IE VE change interrupt is cleared */
+ mb();
+ SLIM_DBG(dev, "NGD IE VE change\n");
+ }
+
+ pstat = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_ST_EEn, dev->ver));
+ if (pstat != 0)
+ return msm_slim_port_irq_handler(dev, pstat);
+ return IRQ_HANDLED;
+}
+
+static int ngd_qmi_available(struct notifier_block *n, unsigned long code,
+ void *_cmd)
+{
+ struct msm_slim_qmi *qmi = container_of(n, struct msm_slim_qmi, nb);
+ struct msm_slim_ctrl *dev =
+ container_of(qmi, struct msm_slim_ctrl, qmi);
+ SLIM_INFO(dev, "Slimbus QMI NGD CB received event:%ld\n", code);
+ switch (code) {
+ case QMI_SERVER_ARRIVE:
+ atomic_set(&dev->ssr_in_progress, 0);
+ schedule_work(&dev->dsp.dom_up);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static void ngd_reg_ssr(struct msm_slim_ctrl *dev)
+{
+ int ret;
+ const char *subsys_name = NULL;
+
+ dev->dsp.dom_t = MSM_SLIM_DOM_NONE;
+ ret = of_property_read_string(dev->dev->of_node,
+ "qcom,subsys-name", &subsys_name);
+ if (ret)
+ subsys_name = "adsp";
+
+ dev->dsp.nb.notifier_call = dsp_domr_notify_cb;
+ dev->dsp.domr = subsys_notif_register_notifier(subsys_name,
+ &dev->dsp.nb);
+ if (IS_ERR_OR_NULL(dev->dsp.domr)) {
+ dev_err(dev->dev,
+ "subsys_notif_register_notifier failed %ld",
+ PTR_ERR(dev->dsp.domr));
+ return;
+ }
+ dev->dsp.dom_t = MSM_SLIM_DOM_SS;
+ SLIM_INFO(dev, "reg-SSR with:%s, PDR not available\n",
+ subsys_name);
+}
+
+static int dsp_domr_notify_cb(struct notifier_block *n, unsigned long code,
+ void *_cmd)
+{
+ int cur = -1;
+ struct msm_slim_ss *dsp = container_of(n, struct msm_slim_ss, nb);
+ struct msm_slim_ctrl *dev = container_of(dsp, struct msm_slim_ctrl,
+ dsp);
+ struct pd_qmi_client_data *reg;
+
+ SLIM_INFO(dev, "SLIM DSP SSR/PDR notify cb:0x%lx, type:%d\n",
+ code, dsp->dom_t);
+ switch (code) {
+ case SUBSYS_BEFORE_SHUTDOWN:
+ case SERVREG_NOTIF_SERVICE_STATE_DOWN_V01:
+ SLIM_INFO(dev, "SLIM DSP SSR notify cb:%lu\n", code);
+ atomic_set(&dev->ssr_in_progress, 1);
+ /* wait for current transaction */
+ mutex_lock(&dev->tx_lock);
+		/* make sure autosuspend is not called until ADSP comes up */
+ pm_runtime_get_noresume(dev->dev);
+ dev->state = MSM_CTRL_DOWN;
+ msm_slim_sps_exit(dev, false);
+ ngd_dom_down(dev);
+ mutex_unlock(&dev->tx_lock);
+ break;
+ case LOCATOR_UP:
+ reg = _cmd;
+ if (!reg || reg->total_domains != 1) {
+ SLIM_WARN(dev, "error locating audio-PD\n");
+ if (reg)
+ SLIM_WARN(dev, "audio-PDs matched:%d\n",
+ reg->total_domains);
+
+ /* Fall back to SSR */
+ ngd_reg_ssr(dev);
+ return NOTIFY_DONE;
+ }
+ dev->dsp.domr = service_notif_register_notifier(
+ reg->domain_list->name,
+ reg->domain_list->instance_id,
+ &dev->dsp.nb,
+ &cur);
+ SLIM_INFO(dev, "reg-PD client:%s with service:%s\n",
+ reg->client_name, reg->service_name);
+ SLIM_INFO(dev, "reg-PD dom:%s instance:%d, cur:%d\n",
+ reg->domain_list->name,
+ reg->domain_list->instance_id, cur);
+ if (IS_ERR_OR_NULL(dev->dsp.domr))
+ ngd_reg_ssr(dev);
+ else
+ dev->dsp.dom_t = MSM_SLIM_DOM_PD;
+ break;
+ case LOCATOR_DOWN:
+ ngd_reg_ssr(dev);
+ default:
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static void ngd_dom_init(struct msm_slim_ctrl *dev)
+{
+ struct pd_qmi_client_data reg;
+ int ret;
+
+ memset(®, 0, sizeof(struct pd_qmi_client_data));
+ dev->dsp.nb.priority = 4;
+ dev->dsp.nb.notifier_call = dsp_domr_notify_cb;
+ scnprintf(reg.client_name, QMI_SERVREG_LOC_NAME_LENGTH_V01, "appsngd%d",
+ dev->ctrl.nr);
+ scnprintf(reg.service_name, QMI_SERVREG_LOC_NAME_LENGTH_V01,
+ "avs/audio");
+ ret = get_service_location(reg.client_name, reg.service_name,
+ &dev->dsp.nb);
+ if (ret)
+ ngd_reg_ssr(dev);
+}
+
+static int mdm_ssr_notify_cb(struct notifier_block *n, unsigned long code,
+ void *_cmd)
+{
+ void __iomem *ngd;
+ struct msm_slim_ss *ext_mdm = container_of(n, struct msm_slim_ss, nb);
+ struct msm_slim_ctrl *dev = container_of(ext_mdm, struct msm_slim_ctrl,
+ ext_mdm);
+ struct slim_controller *ctrl = &dev->ctrl;
+ u32 laddr;
+ struct slim_device *sbdev;
+
+ switch (code) {
+ case SUBSYS_BEFORE_SHUTDOWN:
+ SLIM_INFO(dev, "SLIM %lu external_modem SSR notify cb\n", code);
+ /* vote for runtime-pm so that ADSP doesn't go down */
+ msm_slim_get_ctrl(dev);
+ /*
+ * checking framer here will wake-up ADSP and may avoid framer
+ * handover later
+ */
+ msm_slim_qmi_check_framer_request(dev);
+ dev->ext_mdm.state = MSM_CTRL_DOWN;
+ msm_slim_put_ctrl(dev);
+ break;
+ case SUBSYS_AFTER_POWERUP:
+ if (dev->ext_mdm.state != MSM_CTRL_DOWN)
+ return NOTIFY_DONE;
+ SLIM_INFO(dev,
+ "SLIM %lu external_modem SSR notify cb\n", code);
+ /* vote for runtime-pm so that ADSP doesn't go down */
+ msm_slim_get_ctrl(dev);
+ msm_slim_qmi_check_framer_request(dev);
+		/* If NGD enumeration is lost, we will need to power it up again */
+ ngd = dev->base + NGD_BASE(dev->ctrl.nr, dev->ver);
+ laddr = readl_relaxed(ngd + NGD_STATUS);
+ if (!(laddr & NGD_LADDR)) {
+ mutex_lock(&dev->tx_lock);
+ /* runtime-pm state should be consistent with HW */
+ pm_runtime_disable(dev->dev);
+ pm_runtime_set_suspended(dev->dev);
+ dev->state = MSM_CTRL_DOWN;
+ mutex_unlock(&dev->tx_lock);
+ SLIM_INFO(dev,
+ "SLIM MDM SSR (active framer on MDM) dev-down\n");
+ list_for_each_entry(sbdev, &ctrl->devs, dev_list)
+ slim_report_absent(sbdev);
+ ngd_slim_runtime_resume(dev->dev);
+ pm_runtime_set_active(dev->dev);
+ pm_runtime_enable(dev->dev);
+ }
+ dev->ext_mdm.state = MSM_CTRL_AWAKE;
+ msm_slim_put_ctrl(dev);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
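+/*
+ * Allocate a transaction ID: TIDs are handed out sequentially until the
+ * 8-bit space wraps, after which the table is scanned for a free slot.
+ */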
+static int ngd_get_tid(struct slim_controller *ctrl, struct slim_msg_txn *txn,
+ u8 *tid, struct completion *done)
+{
+ struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctrl->txn_lock, flags);
+ if (ctrl->last_tid <= 255) {
+ dev->msg_cnt = ctrl->last_tid;
+ ctrl->last_tid++;
+ } else {
+ int i;
+
+ for (i = 0; i < 256; i++) {
+ dev->msg_cnt = ((dev->msg_cnt + 1) & 0xFF);
+ if (ctrl->txnt[dev->msg_cnt] == NULL)
+ break;
+ }
+ if (i >= 256) {
+ dev_err(&ctrl->dev, "out of TID");
+ spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+ return -ENOMEM;
+ }
+ }
+ ctrl->txnt[dev->msg_cnt] = txn;
+ txn->tid = dev->msg_cnt;
+ txn->comp = done;
+ *tid = dev->msg_cnt;
+ spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+ return 0;
+}
+
+static void slim_reinit_tx_msgq(struct msm_slim_ctrl *dev)
+{
+	/*
+	 * Disconnect/reconnect the pipe so that subsequent
+	 * transactions don't time out due to unavailable
+	 * descriptors
+	 */
+ if (dev->state != MSM_CTRL_DOWN) {
+ msm_slim_disconnect_endp(dev, &dev->tx_msgq,
+ &dev->use_tx_msgqs);
+ msm_slim_connect_endp(dev, &dev->tx_msgq);
+ }
+}
+
+static int ngd_check_hw_status(struct msm_slim_ctrl *dev)
+{
+ void __iomem *ngd = dev->base + NGD_BASE(dev->ctrl.nr, dev->ver);
+ u32 laddr = readl_relaxed(ngd + NGD_STATUS);
+ int ret = 0;
+
+ /* Lost logical addr due to noise */
+ if (!(laddr & NGD_LADDR)) {
+ SLIM_WARN(dev, "NGD lost LADDR: status:0x%x\n", laddr);
+ ret = ngd_slim_power_up(dev, false);
+
+ if (ret) {
+ SLIM_WARN(dev, "slim resume ret:%d, state:%d\n",
+ ret, dev->state);
+ ret = -EREMOTEIO;
+ }
+ }
+ return ret;
+}
+
+static int ngd_xfer_msg(struct slim_controller *ctrl, struct slim_msg_txn *txn)
+{
+ DECLARE_COMPLETION_ONSTACK(done);
+ DECLARE_COMPLETION_ONSTACK(tx_sent);
+
+ struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
+ u32 *pbuf;
+ u8 *puc;
+ int ret = 0;
+ u8 la = txn->la;
+ u8 txn_mt;
+ u16 txn_mc = txn->mc;
+ u8 wbuf[SLIM_MSGQ_BUF_LEN];
+ bool report_sat = false;
+ bool sync_wr = true;
+
+ if (txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG)
+ return -EPROTONOSUPPORT;
+
+ if (txn->mt == SLIM_MSG_MT_CORE &&
+ (txn->mc >= SLIM_MSG_MC_BEGIN_RECONFIGURATION &&
+ txn->mc <= SLIM_MSG_MC_RECONFIGURE_NOW))
+ return 0;
+
+ if (txn->mc == SLIM_USR_MC_REPORT_SATELLITE &&
+ txn->mt == SLIM_MSG_MT_SRC_REFERRED_USER)
+ report_sat = true;
+ else
+ mutex_lock(&dev->tx_lock);
+
+ if (!report_sat && !pm_runtime_enabled(dev->dev) &&
+ dev->state == MSM_CTRL_ASLEEP) {
+		/*
+		 * Counterpart of system-suspend when runtime-pm is not
+		 * enabled. This way, resume can be left empty and the device
+		 * will be put in active mode only when a client requests
+		 * anything on the bus.
+		 * If the state was DOWN, the SSR UP notification will take
+		 * care of putting the device in the active state.
+		 */
+ mutex_unlock(&dev->tx_lock);
+ ret = ngd_slim_runtime_resume(dev->dev);
+
+ if (ret) {
+ SLIM_ERR(dev, "slim resume failed ret:%d, state:%d",
+ ret, dev->state);
+ return -EREMOTEIO;
+ }
+ mutex_lock(&dev->tx_lock);
+ }
+
+ /* If txn is tried when controller is down, wait for ADSP to boot */
+ if (!report_sat) {
+ if (dev->state == MSM_CTRL_DOWN) {
+ u8 mc = (u8)txn->mc;
+ int timeout;
+
+ mutex_unlock(&dev->tx_lock);
+ SLIM_INFO(dev, "ADSP slimbus not up yet\n");
+			/*
+			 * Messages related to data-channel management can't
+			 * wait since they are holding the reconfiguration
+			 * lock; clk_pause in resume (which can change state
+			 * back to MSM_CTRL_AWAKE) will need that lock.
+			 * Port-disconnection and channel-removal calls should
+			 * pass through since there is no activity on the bus
+			 * and those calls are triggered by clients due to the
+			 * device_down callback in that situation.
+			 * Returning 0 on those disconnections and removals
+			 * keeps the state of channels and ports consistent
+			 * with the HW.
+			 * Remote requests to remove a channel/port will be
+			 * returned from the path where they wait on
+			 * acknowledgment from the ADSP
+			 */
+ if ((txn->mt == SLIM_MSG_MT_DEST_REFERRED_USER) &&
+ ((mc == SLIM_USR_MC_CHAN_CTRL ||
+ mc == SLIM_USR_MC_DISCONNECT_PORT ||
+ mc == SLIM_USR_MC_RECONFIG_NOW)))
+ return -EREMOTEIO;
+ if ((txn->mt == SLIM_MSG_MT_CORE) &&
+ ((mc == SLIM_MSG_MC_DISCONNECT_PORT ||
+ mc == SLIM_MSG_MC_NEXT_REMOVE_CHANNEL ||
+ mc == SLIM_USR_MC_RECONFIG_NOW)))
+ return 0;
+ if ((txn->mt == SLIM_MSG_MT_CORE) &&
+ ((mc >= SLIM_MSG_MC_CONNECT_SOURCE &&
+ mc <= SLIM_MSG_MC_CHANGE_CONTENT) ||
+ (mc >= SLIM_MSG_MC_BEGIN_RECONFIGURATION &&
+ mc <= SLIM_MSG_MC_RECONFIGURE_NOW)))
+ return -EREMOTEIO;
+ if ((txn->mt == SLIM_MSG_MT_DEST_REFERRED_USER) &&
+ ((mc >= SLIM_USR_MC_DEFINE_CHAN &&
+ mc < SLIM_USR_MC_DISCONNECT_PORT)))
+ return -EREMOTEIO;
+ timeout = wait_for_completion_timeout(&dev->ctrl_up,
+ HZ);
+ if (!timeout)
+ return -ETIMEDOUT;
+ mutex_lock(&dev->tx_lock);
+ }
+
+ mutex_unlock(&dev->tx_lock);
+ ret = msm_slim_get_ctrl(dev);
+ mutex_lock(&dev->tx_lock);
+		/*
+		 * Runtime-pm's callbacks are not called until runtime-pm's
+		 * error status is cleared. Setting the runtime status to
+		 * suspended clears the error; it also makes the HW status
+		 * consistent with what SW assumes here
+		 */
+ if ((pm_runtime_enabled(dev->dev) && ret < 0) ||
+ dev->state >= MSM_CTRL_ASLEEP) {
+ SLIM_ERR(dev, "slim ctrl vote failed ret:%d, state:%d",
+ ret, dev->state);
+ pm_runtime_set_suspended(dev->dev);
+ mutex_unlock(&dev->tx_lock);
+ msm_slim_put_ctrl(dev);
+ return -EREMOTEIO;
+ }
+ ret = ngd_check_hw_status(dev);
+ if (ret) {
+ mutex_unlock(&dev->tx_lock);
+ msm_slim_put_ctrl(dev);
+ return ret;
+ }
+ }
+
+ if (txn->mt == SLIM_MSG_MT_CORE &&
+ (txn->mc == SLIM_MSG_MC_CONNECT_SOURCE ||
+ txn->mc == SLIM_MSG_MC_CONNECT_SINK ||
+ txn->mc == SLIM_MSG_MC_DISCONNECT_PORT)) {
+ int i = 0;
+
+ if (txn->mc != SLIM_MSG_MC_DISCONNECT_PORT)
+ SLIM_INFO(dev,
+ "Connect port: laddr 0x%x port_num %d chan_num %d\n",
+ txn->la, txn->wbuf[0], txn->wbuf[1]);
+ else
+ SLIM_INFO(dev,
+ "Disconnect port: laddr 0x%x port_num %d\n",
+ txn->la, txn->wbuf[0]);
+ txn->mt = SLIM_MSG_MT_DEST_REFERRED_USER;
+ if (txn->mc == SLIM_MSG_MC_CONNECT_SOURCE)
+ txn->mc = SLIM_USR_MC_CONNECT_SRC;
+ else if (txn->mc == SLIM_MSG_MC_CONNECT_SINK)
+ txn->mc = SLIM_USR_MC_CONNECT_SINK;
+ else if (txn->mc == SLIM_MSG_MC_DISCONNECT_PORT)
+ txn->mc = SLIM_USR_MC_DISCONNECT_PORT;
+ if (txn->la == SLIM_LA_MGR) {
+ if (dev->pgdla == SLIM_LA_MGR) {
+ u8 ea[] = {0, QC_DEVID_PGD, 0, 0, QC_MFGID_MSB,
+ QC_MFGID_LSB};
+ ea[2] = (u8)(dev->pdata.eapc & 0xFF);
+ ea[3] = (u8)((dev->pdata.eapc & 0xFF00) >> 8);
+ mutex_unlock(&dev->tx_lock);
+ ret = dev->ctrl.get_laddr(&dev->ctrl, ea, 6,
+ &dev->pgdla);
+ SLIM_DBG(dev, "SLIM PGD LA:0x%x, ret:%d\n",
+ dev->pgdla, ret);
+ if (ret) {
+ SLIM_ERR(dev,
+ "Incorrect SLIM-PGD EAPC:0x%x\n",
+ dev->pdata.eapc);
+ return ret;
+ }
+ mutex_lock(&dev->tx_lock);
+ }
+ txn->la = dev->pgdla;
+ }
+ wbuf[i++] = txn->la;
+ la = SLIM_LA_MGR;
+ wbuf[i++] = txn->wbuf[0];
+ if (txn->mc != SLIM_USR_MC_DISCONNECT_PORT)
+ wbuf[i++] = txn->wbuf[1];
+ ret = ngd_get_tid(ctrl, txn, &wbuf[i++], &done);
+ if (ret) {
+ SLIM_ERR(dev, "TID for connect/disconnect fail:%d\n",
+ ret);
+ goto ngd_xfer_err;
+ }
+ txn->len = i;
+ txn->wbuf = wbuf;
+ txn->rl = txn->len + 4;
+ }
+ txn->rl--;
+
+ if (txn->len > SLIM_MSGQ_BUF_LEN || txn->rl > SLIM_MSGQ_BUF_LEN) {
+ SLIM_WARN(dev, "msg exeeds HW lim:%d, rl:%d, mc:0x%x, mt:0x%x",
+ txn->len, txn->rl, txn->mc, txn->mt);
+ ret = -EDQUOT;
+ goto ngd_xfer_err;
+ }
+
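+	/*
+	 * Decide between async and sync TX: transactions that carry a
+	 * completion may post asynchronously, except the request_* messages,
+	 * whose completion is signalled by the response rather than by
+	 * TX-done and so must be waited upon here
+	 */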
+ if (txn->mt == SLIM_MSG_MT_CORE && txn->comp &&
+ dev->use_tx_msgqs == MSM_MSGQ_ENABLED &&
+ (txn_mc != SLIM_MSG_MC_REQUEST_INFORMATION &&
+ txn_mc != SLIM_MSG_MC_REQUEST_VALUE &&
+ txn_mc != SLIM_MSG_MC_REQUEST_CHANGE_VALUE &&
+ txn_mc != SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION)) {
+ sync_wr = false;
+ pbuf = msm_get_msg_buf(dev, txn->rl, txn->comp);
+ } else if (txn->mt == SLIM_MSG_MT_DEST_REFERRED_USER &&
+ dev->use_tx_msgqs == MSM_MSGQ_ENABLED &&
+ txn->mc == SLIM_USR_MC_REPEAT_CHANGE_VALUE &&
+ txn->comp) {
+ sync_wr = false;
+ pbuf = msm_get_msg_buf(dev, txn->rl, txn->comp);
+ } else {
+ pbuf = msm_get_msg_buf(dev, txn->rl, &tx_sent);
+ }
+
+ if (!pbuf) {
+ SLIM_ERR(dev, "Message buffer unavailable\n");
+ ret = -ENOMEM;
+ goto ngd_xfer_err;
+ }
+ dev->err = 0;
+
+ if (txn->dt == SLIM_MSG_DEST_ENUMADDR) {
+ ret = -EPROTONOSUPPORT;
+ goto ngd_xfer_err;
+ }
+ if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
+ *pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, txn->mc, 0,
+ la);
+ else
+ *pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, txn->mc, 1,
+ la);
+ if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
+ puc = ((u8 *)pbuf) + 3;
+ else
+ puc = ((u8 *)pbuf) + 2;
+ if (txn->rbuf)
+ *(puc++) = txn->tid;
+ if (((txn->mt == SLIM_MSG_MT_CORE) &&
+ ((txn->mc >= SLIM_MSG_MC_REQUEST_INFORMATION &&
+ txn->mc <= SLIM_MSG_MC_REPORT_INFORMATION) ||
+ (txn->mc >= SLIM_MSG_MC_REQUEST_VALUE &&
+ txn->mc <= SLIM_MSG_MC_CHANGE_VALUE))) ||
+ (txn->mc == SLIM_USR_MC_REPEAT_CHANGE_VALUE &&
+ txn->mt == SLIM_MSG_MT_DEST_REFERRED_USER)) {
+ *(puc++) = (txn->ec & 0xFF);
+ *(puc++) = (txn->ec >> 8)&0xFF;
+ }
+ if (txn->wbuf)
+ memcpy(puc, txn->wbuf, txn->len);
+ if (txn->mt == SLIM_MSG_MT_DEST_REFERRED_USER &&
+ (txn->mc == SLIM_USR_MC_CONNECT_SRC ||
+ txn->mc == SLIM_USR_MC_CONNECT_SINK ||
+ txn->mc == SLIM_USR_MC_DISCONNECT_PORT) && txn->wbuf &&
+ wbuf[0] == dev->pgdla) {
+ if (txn->mc != SLIM_USR_MC_DISCONNECT_PORT)
+ dev->err = msm_slim_connect_pipe_port(dev, wbuf[1]);
+ else
+ writel_relaxed(0, PGD_PORT(PGD_PORT_CFGn,
+ (dev->pipes[wbuf[1]].port_b),
+ dev->ver));
+ if (dev->err) {
+ SLIM_ERR(dev, "pipe-port connect err:%d\n", dev->err);
+ goto ngd_xfer_err;
+ }
+ /* Add port-base to port number if this is manager side port */
+ puc[1] = (u8)dev->pipes[wbuf[1]].port_b;
+ }
+ dev->err = 0;
+	/*
+	 * If it's a read txn, it may be freed by the receive thread (once a
+	 * response arrives) before the end of this function is reached.
+	 * mc and mt may also have been changed to convert a standard slimbus
+	 * code/type into a satellite user-defined message, so cache them
+	 * again here
+	 */
+ txn_mc = txn->mc;
+ txn_mt = txn->mt;
+ ret = msm_send_msg_buf(dev, pbuf, txn->rl,
+ NGD_BASE(dev->ctrl.nr, dev->ver) + NGD_TX_MSG);
+ if (!ret && sync_wr) {
+ int i;
+ int timeout = wait_for_completion_timeout(&tx_sent, HZ);
+
+ if (!timeout && dev->use_tx_msgqs == MSM_MSGQ_ENABLED) {
+ struct msm_slim_endp *endpoint = &dev->tx_msgq;
+ struct sps_mem_buffer *mem = &endpoint->buf;
+ u32 idx = (u32) (((u8 *)pbuf - (u8 *)mem->base) /
+ SLIM_MSGQ_BUF_LEN);
+ phys_addr_t addr = mem->phys_base +
+ (idx * SLIM_MSGQ_BUF_LEN);
+ ret = -ETIMEDOUT;
+ SLIM_WARN(dev, "timeout, BAM desc_idx:%d, phys:%llx",
+ idx, (u64)addr);
+ for (i = 0; i < (SLIM_MSGQ_BUF_LEN >> 2) ; i++)
+ SLIM_WARN(dev, "timeout:bam-desc[%d]:0x%x",
+ i, *(pbuf + i));
+ if (idx < MSM_TX_BUFS)
+ dev->wr_comp[idx] = NULL;
+ slim_reinit_tx_msgq(dev);
+ } else if (!timeout) {
+ ret = -ETIMEDOUT;
+ SLIM_WARN(dev, "timeout non-BAM TX,len:%d", txn->rl);
+ for (i = 0; i < (SLIM_MSGQ_BUF_LEN >> 2) ; i++)
+ SLIM_WARN(dev, "timeout:txbuf[%d]:0x%x", i,
+ dev->tx_buf[i]);
+ } else {
+ ret = dev->err;
+ }
+ }
+ if (ret) {
+ u32 conf, stat, rx_msgq, int_stat, int_en, int_clr;
+ void __iomem *ngd = dev->base + NGD_BASE(dev->ctrl.nr,
+ dev->ver);
+ SLIM_WARN(dev, "TX failed :MC:0x%x,mt:0x%x, ret:%d, ver:%d\n",
+ txn_mc, txn_mt, ret, dev->ver);
+ conf = readl_relaxed(ngd);
+ stat = readl_relaxed(ngd + NGD_STATUS);
+ rx_msgq = readl_relaxed(ngd + NGD_RX_MSGQ_CFG);
+ int_stat = readl_relaxed(ngd + NGD_INT_STAT);
+ int_en = readl_relaxed(ngd + NGD_INT_EN);
+ int_clr = readl_relaxed(ngd + NGD_INT_CLR);
+
+ SLIM_WARN(dev, "conf:0x%x,stat:0x%x,rxmsgq:0x%x\n",
+ conf, stat, rx_msgq);
+ SLIM_ERR(dev, "int_stat:0x%x,int_en:0x%x,int_cll:0x%x\n",
+ int_stat, int_en, int_clr);
+ }
+
+ if (txn_mt == SLIM_MSG_MT_DEST_REFERRED_USER &&
+ (txn_mc == SLIM_USR_MC_CONNECT_SRC ||
+ txn_mc == SLIM_USR_MC_CONNECT_SINK ||
+ txn_mc == SLIM_USR_MC_DISCONNECT_PORT)) {
+ int timeout;
+ unsigned long flags;
+
+ mutex_unlock(&dev->tx_lock);
+ msm_slim_put_ctrl(dev);
+ if (!ret) {
+ timeout = wait_for_completion_timeout(txn->comp, HZ);
+ /* remote side did not acknowledge */
+ if (!timeout)
+ ret = -EREMOTEIO;
+ else
+ ret = txn->ec;
+ }
+ if (ret) {
+ SLIM_ERR(dev,
+ "connect/disconnect:0x%x,tid:%d err:%d\n",
+ txn->mc, txn->tid, ret);
+ spin_lock_irqsave(&ctrl->txn_lock, flags);
+ ctrl->txnt[txn->tid] = NULL;
+ spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+ }
+ return ret ? ret : dev->err;
+ }
+ngd_xfer_err:
+ if (!report_sat) {
+ mutex_unlock(&dev->tx_lock);
+ msm_slim_put_ctrl(dev);
+ }
+ return ret ? ret : dev->err;
+}
+
+static int ngd_get_ec(u16 start_offset, u8 len, u16 *ec)
+{
+ if (len > SLIM_MAX_VE_SLC_BYTES ||
+ start_offset > MSM_SLIM_VE_MAX_MAP_ADDR)
+ return -EINVAL;
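+	/*
+	 * Build the element code: slice-size code in bits 0-2 (1-4 bytes ->
+	 * 0-3, 6/8 bytes -> 4/5, 12/16 bytes -> 6/7), bit 3 set for
+	 * elemental access, and the 12-bit start offset in bits 4-15
+	 */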
+ if (len <= 4) {
+ *ec = len - 1;
+ } else if (len <= 8) {
+ if (len & 0x1)
+ return -EINVAL;
+ *ec = ((len >> 1) + 1);
+ } else {
+ if (len & 0x3)
+ return -EINVAL;
+ *ec = ((len >> 2) + 3);
+ }
+ *ec |= (0x8 | ((start_offset & 0xF) << 4));
+ *ec |= ((start_offset & 0xFF0) << 4);
+ return 0;
+}
+
+static int ngd_user_msg(struct slim_controller *ctrl, u8 la, u8 mt, u8 mc,
+ struct slim_ele_access *msg, u8 *buf, u8 len)
+{
+ int ret;
+ struct slim_msg_txn txn;
+
+ if (mt != SLIM_MSG_MT_DEST_REFERRED_USER ||
+ mc != SLIM_USR_MC_REPEAT_CHANGE_VALUE) {
+ return -EPROTONOSUPPORT;
+ }
+
+ ret = ngd_get_ec(msg->start_offset, len, &txn.ec);
+ if (ret)
+ return ret;
+ txn.la = la;
+ txn.mt = mt;
+ txn.mc = mc;
+ txn.dt = SLIM_MSG_DEST_LOGICALADDR;
+ txn.len = len;
+ txn.rl = len + 6;
+ txn.wbuf = buf;
+ txn.rbuf = NULL;
+ txn.comp = msg->comp;
+ return ngd_xfer_msg(ctrl, &txn);
+}
+
+static int ngd_bulk_cb(void *ctx, int err)
+{
+ if (ctx)
+ complete(ctx);
+ return err;
+}
+
+static int ngd_bulk_wr(struct slim_controller *ctrl, u8 la, u8 mt, u8 mc,
+ struct slim_val_inf msgs[], int n,
+ int (*comp_cb)(void *ctx, int err), void *ctx)
+{
+ struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
+ int i, ret;
+ struct msm_slim_endp *endpoint = &dev->tx_msgq;
+ u32 *header;
+ DECLARE_COMPLETION_ONSTACK(done);
+
+ ret = msm_slim_get_ctrl(dev);
+ mutex_lock(&dev->tx_lock);
+
+ if ((pm_runtime_enabled(dev->dev) && ret < 0) ||
+ dev->state >= MSM_CTRL_ASLEEP) {
+ SLIM_WARN(dev, "vote failed/SSR in-progress ret:%d, state:%d",
+ ret, dev->state);
+ pm_runtime_set_suspended(dev->dev);
+ mutex_unlock(&dev->tx_lock);
+ msm_slim_put_ctrl(dev);
+ return -EREMOTEIO;
+ }
+ if (!pm_runtime_enabled(dev->dev) && dev->state == MSM_CTRL_ASLEEP) {
+ mutex_unlock(&dev->tx_lock);
+ ret = ngd_slim_runtime_resume(dev->dev);
+
+ if (ret) {
+ SLIM_ERR(dev, "slim resume failed ret:%d, state:%d",
+ ret, dev->state);
+ return -EREMOTEIO;
+ }
+ mutex_lock(&dev->tx_lock);
+ }
+
+ ret = ngd_check_hw_status(dev);
+ if (ret) {
+ mutex_unlock(&dev->tx_lock);
+ msm_slim_put_ctrl(dev);
+ return ret;
+ }
+
+ if (dev->use_tx_msgqs != MSM_MSGQ_ENABLED) {
+ SLIM_WARN(dev, "bulk wr not supported");
+ ret = -EPROTONOSUPPORT;
+ goto retpath;
+ }
+ if (dev->bulk.in_progress) {
+ SLIM_WARN(dev, "bulk wr in progress:");
+ ret = -EAGAIN;
+ goto retpath;
+ }
+ dev->bulk.in_progress = true;
+ /* every txn has 5 bytes of overhead: la, mc, mt, ec, len */
+ dev->bulk.size = n * 5;
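+	/*
+	 * Also reserve padding to round each message up to a 32-bit boundary
+	 * (a full word is reserved even when already aligned), since the
+	 * packing loop below advances the header pointer word-by-word
+	 */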
+ for (i = 0; i < n; i++) {
+ dev->bulk.size += msgs[i].num_bytes;
+ dev->bulk.size += (4 - ((msgs[i].num_bytes + 1) & 0x3));
+ }
+
+ if (dev->bulk.size > 0xffff) {
+ SLIM_WARN(dev, "len exceeds limit, split bulk and retry");
+ ret = -EDQUOT;
+ goto retpath;
+ }
+ if (dev->bulk.size > dev->bulk.buf_sz) {
+ void *temp = krealloc(dev->bulk.base, dev->bulk.size,
+ GFP_KERNEL | GFP_DMA);
+ if (!temp) {
+ ret = -ENOMEM;
+ goto retpath;
+ }
+ dev->bulk.base = temp;
+ dev->bulk.buf_sz = dev->bulk.size;
+ }
+
+ header = dev->bulk.base;
+ for (i = 0; i < n; i++) {
+ u8 *buf = (u8 *)header;
+ int rl = msgs[i].num_bytes + 5;
+ u16 ec;
+
+ *header = SLIM_MSG_ASM_FIRST_WORD(rl, mt, mc, 0, la);
+ buf += 3;
+ ret = ngd_get_ec(msgs[i].start_offset, msgs[i].num_bytes, &ec);
+ if (ret)
+ goto retpath;
+ *(buf++) = (ec & 0xFF);
+ *(buf++) = (ec >> 8) & 0xFF;
+ memcpy(buf, msgs[i].wbuf, msgs[i].num_bytes);
+ buf += msgs[i].num_bytes;
+ header += (rl >> 2);
+ if (rl & 3) {
+ header++;
+ memset(buf, 0, ((u8 *)header - buf));
+ }
+ }
+ header = dev->bulk.base;
+ if (comp_cb) {
+ dev->bulk.cb = comp_cb;
+ dev->bulk.ctx = ctx;
+ } else {
+ dev->bulk.cb = ngd_bulk_cb;
+ dev->bulk.ctx = &done;
+ }
+ dev->bulk.wr_dma = dma_map_single(dev->dev, dev->bulk.base,
+ dev->bulk.size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev->dev, dev->bulk.wr_dma)) {
+ ret = -ENOMEM;
+ goto retpath;
+ }
+
+ ret = sps_transfer_one(endpoint->sps, dev->bulk.wr_dma, dev->bulk.size,
+ NULL, SPS_IOVEC_FLAG_EOT);
+ if (ret) {
+ SLIM_WARN(dev, "sps transfer one returned error:%d", ret);
+ goto retpath;
+ }
+ if (dev->bulk.cb == ngd_bulk_cb) {
+ int timeout = wait_for_completion_timeout(&done, HZ);
+
+ if (!timeout) {
+ SLIM_WARN(dev, "timeout for bulk wr");
+ dma_unmap_single(dev->dev, dev->bulk.wr_dma,
+ dev->bulk.size, DMA_TO_DEVICE);
+ ret = -ETIMEDOUT;
+ }
+ }
+retpath:
+ if (ret) {
+ dev->bulk.in_progress = false;
+ dev->bulk.ctx = NULL;
+ dev->bulk.wr_dma = 0;
+ slim_reinit_tx_msgq(dev);
+ }
+ mutex_unlock(&dev->tx_lock);
+ msm_slim_put_ctrl(dev);
+ return ret;
+}
+
+static int ngd_xferandwait_ack(struct slim_controller *ctrl,
+ struct slim_msg_txn *txn)
+{
+ struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
+ unsigned long flags;
+ int ret;
+
+ if (dev->state == MSM_CTRL_DOWN) {
+		/*
+		 * No need to send anything on the bus due to SSR;
+		 * transactions related to channel removal are marked as
+		 * success since the HW is down
+		 */
+ if ((txn->mt == SLIM_MSG_MT_DEST_REFERRED_USER) &&
+ ((txn->mc >= SLIM_USR_MC_CHAN_CTRL &&
+ txn->mc <= SLIM_USR_MC_REQ_BW) ||
+ txn->mc == SLIM_USR_MC_DISCONNECT_PORT)) {
+ spin_lock_irqsave(&ctrl->txn_lock, flags);
+ ctrl->txnt[txn->tid] = NULL;
+ spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+ return 0;
+ }
+ }
+
+ ret = ngd_xfer_msg(ctrl, txn);
+ if (!ret) {
+ int timeout;
+
+ timeout = wait_for_completion_timeout(txn->comp, HZ);
+ if (!timeout)
+ ret = -ETIMEDOUT;
+ else
+ ret = txn->ec;
+ }
+
+ if (ret) {
+ if (ret != -EREMOTEIO || txn->mc != SLIM_USR_MC_CHAN_CTRL)
+ SLIM_ERR(dev, "master msg:0x%x,tid:%d ret:%d\n",
+ txn->mc, txn->tid, ret);
+ spin_lock_irqsave(&ctrl->txn_lock, flags);
+ ctrl->txnt[txn->tid] = NULL;
+ spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+ }
+
+ return ret;
+}
+
+static int ngd_allocbw(struct slim_device *sb, int *subfrmc, int *clkgear)
+{
+ int ret = 0, num_chan = 0;
+ struct slim_pending_ch *pch;
+ struct slim_msg_txn txn;
+ struct slim_controller *ctrl = sb->ctrl;
+ DECLARE_COMPLETION_ONSTACK(done);
+ u8 wbuf[SLIM_MSGQ_BUF_LEN];
+ struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
+
+ *clkgear = ctrl->clkgear;
+ *subfrmc = 0;
+ txn.mt = SLIM_MSG_MT_DEST_REFERRED_USER;
+ txn.dt = SLIM_MSG_DEST_LOGICALADDR;
+ txn.la = SLIM_LA_MGR;
+ txn.len = 0;
+ txn.ec = 0;
+ txn.wbuf = wbuf;
+ txn.rbuf = NULL;
+
+ if (ctrl->sched.msgsl != ctrl->sched.pending_msgsl) {
+ SLIM_DBG(dev, "slim reserve BW for messaging: req: %d\n",
+ ctrl->sched.pending_msgsl);
+ txn.mc = SLIM_USR_MC_REQ_BW;
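+		/*
+		 * Payload packing: LA in the low 5 bits of byte 0, with the
+		 * pending message-slot count split across the top 3 bits of
+		 * byte 0 and all of byte 1
+		 */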
+ wbuf[txn.len++] = ((sb->laddr & 0x1f) |
+ ((u8)(ctrl->sched.pending_msgsl & 0x7) << 5));
+ wbuf[txn.len++] = (u8)(ctrl->sched.pending_msgsl >> 3);
+ ret = ngd_get_tid(ctrl, &txn, &wbuf[txn.len++], &done);
+ if (ret)
+ return ret;
+ txn.rl = txn.len + 4;
+ ret = ngd_xferandwait_ack(ctrl, &txn);
+ if (ret)
+ return ret;
+
+ txn.mc = SLIM_USR_MC_RECONFIG_NOW;
+ txn.len = 2;
+ wbuf[1] = sb->laddr;
+ txn.rl = txn.len + 4;
+ ret = ngd_get_tid(ctrl, &txn, &wbuf[0], &done);
+ if (ret)
+ return ret;
+ ret = ngd_xferandwait_ack(ctrl, &txn);
+ if (ret)
+ return ret;
+
+ txn.len = 0;
+ }
+ list_for_each_entry(pch, &sb->mark_define, pending) {
+ struct slim_ich *slc;
+
+ slc = &ctrl->chans[pch->chan];
+ if (!slc) {
+ SLIM_WARN(dev, "no channel in define?\n");
+ return -ENXIO;
+ }
+ if (txn.len == 0) {
+ /* Per protocol, only last 5 bits for client no. */
+ wbuf[txn.len++] = (u8) (slc->prop.dataf << 5) |
+ (sb->laddr & 0x1f);
+ wbuf[txn.len] = slc->prop.sampleszbits >> 2;
+ if (slc->srch && slc->prop.prot == SLIM_PUSH)
+ slc->prop.prot = SLIM_PULL;
+ if (slc->coeff == SLIM_COEFF_3)
+ wbuf[txn.len] |= 1 << 5;
+ wbuf[txn.len++] |= slc->prop.auxf << 6;
+ wbuf[txn.len++] = slc->rootexp << 4 | slc->prop.prot;
+ wbuf[txn.len++] = slc->prrate;
+ ret = ngd_get_tid(ctrl, &txn, &wbuf[txn.len++], &done);
+ if (ret) {
+ SLIM_WARN(dev, "no tid for channel define?\n");
+ return -ENXIO;
+ }
+ }
+ num_chan++;
+ wbuf[txn.len++] = slc->chan;
+ SLIM_INFO(dev, "slim activate chan:%d, laddr: 0x%x\n",
+ slc->chan, sb->laddr);
+ }
+ if (txn.len) {
+ txn.mc = SLIM_USR_MC_DEF_ACT_CHAN;
+ txn.rl = txn.len + 4;
+ ret = ngd_xferandwait_ack(ctrl, &txn);
+ if (ret)
+ return ret;
+
+ txn.mc = SLIM_USR_MC_RECONFIG_NOW;
+ txn.len = 2;
+ wbuf[1] = sb->laddr;
+ txn.rl = txn.len + 4;
+ ret = ngd_get_tid(ctrl, &txn, &wbuf[0], &done);
+ if (ret)
+ return ret;
+ ret = ngd_xferandwait_ack(ctrl, &txn);
+ if (ret)
+ return ret;
+ }
+ txn.len = 0;
+ list_for_each_entry(pch, &sb->mark_removal, pending) {
+ struct slim_ich *slc;
+
+ slc = &ctrl->chans[pch->chan];
+ if (!slc) {
+ SLIM_WARN(dev, "no channel in removal?\n");
+ return -ENXIO;
+ }
+ if (txn.len == 0) {
+ /* Per protocol, only last 5 bits for client no. */
+ wbuf[txn.len++] = (u8) (SLIM_CH_REMOVE << 6) |
+ (sb->laddr & 0x1f);
+ ret = ngd_get_tid(ctrl, &txn, &wbuf[txn.len++], &done);
+ if (ret) {
+ SLIM_WARN(dev, "no tid for channel define?\n");
+ return -ENXIO;
+ }
+ }
+ wbuf[txn.len++] = slc->chan;
+ SLIM_INFO(dev, "slim remove chan:%d, laddr: 0x%x\n",
+ slc->chan, sb->laddr);
+ }
+ if (txn.len) {
+ txn.mc = SLIM_USR_MC_CHAN_CTRL;
+ txn.rl = txn.len + 4;
+ ret = ngd_xferandwait_ack(ctrl, &txn);
+ /* HW restarting, channel removal should succeed */
+ if (ret == -EREMOTEIO)
+ return 0;
+ else if (ret)
+ return ret;
+
+ txn.mc = SLIM_USR_MC_RECONFIG_NOW;
+ txn.len = 2;
+ wbuf[1] = sb->laddr;
+ txn.rl = txn.len + 4;
+ ret = ngd_get_tid(ctrl, &txn, &wbuf[0], &done);
+ if (ret)
+ return ret;
+ ret = ngd_xferandwait_ack(ctrl, &txn);
+ if (ret)
+ return ret;
+ txn.len = 0;
+ }
+ return 0;
+}
+
+static int ngd_set_laddr(struct slim_controller *ctrl, const u8 *ea,
+ u8 elen, u8 laddr)
+{
+ return 0;
+}
+
+static int ngd_get_laddr(struct slim_controller *ctrl, const u8 *ea,
+ u8 elen, u8 *laddr)
+{
+ int ret;
+ u8 wbuf[10];
+ struct slim_msg_txn txn;
+ DECLARE_COMPLETION_ONSTACK(done);
+
+ txn.mt = SLIM_MSG_MT_DEST_REFERRED_USER;
+ txn.dt = SLIM_MSG_DEST_LOGICALADDR;
+ txn.la = SLIM_LA_MGR;
+ txn.ec = 0;
+ ret = ngd_get_tid(ctrl, &txn, &wbuf[0], &done);
+ if (ret)
+ return ret;
+ memcpy(&wbuf[1], ea, elen);
+ txn.mc = SLIM_USR_MC_ADDR_QUERY;
+ txn.rl = 11;
+ txn.len = 7;
+ txn.wbuf = wbuf;
+ txn.rbuf = NULL;
+ ret = ngd_xferandwait_ack(ctrl, &txn);
+ if (!ret && txn.la == 0xFF)
+ ret = -ENXIO;
+ else if (!ret)
+ *laddr = txn.la;
+ return ret;
+}
+
+static void ngd_slim_setup(struct msm_slim_ctrl *dev)
+{
+ u32 new_cfg = NGD_CFG_ENABLE;
+ u32 cfg = readl_relaxed(dev->base +
+ NGD_BASE(dev->ctrl.nr, dev->ver));
+ if (dev->state == MSM_CTRL_DOWN) {
+		/* if called after SSR, clean up and re-assign */
+ if (dev->use_tx_msgqs != MSM_MSGQ_RESET)
+ msm_slim_deinit_ep(dev, &dev->tx_msgq,
+ &dev->use_tx_msgqs);
+
+ if (dev->use_rx_msgqs != MSM_MSGQ_RESET)
+ msm_slim_deinit_ep(dev, &dev->rx_msgq,
+ &dev->use_rx_msgqs);
+
+ msm_slim_sps_init(dev, dev->bam_mem,
+ NGD_BASE(dev->ctrl.nr,
+ dev->ver) + NGD_STATUS, true);
+ } else {
+ if (dev->use_rx_msgqs == MSM_MSGQ_DISABLED)
+ goto setup_tx_msg_path;
+
+ if ((dev->use_rx_msgqs == MSM_MSGQ_ENABLED) &&
+ (cfg & NGD_CFG_RX_MSGQ_EN))
+ goto setup_tx_msg_path;
+
+ if (dev->use_rx_msgqs == MSM_MSGQ_ENABLED)
+ msm_slim_disconnect_endp(dev, &dev->rx_msgq,
+ &dev->use_rx_msgqs);
+ msm_slim_connect_endp(dev, &dev->rx_msgq);
+
+setup_tx_msg_path:
+ if (dev->use_tx_msgqs == MSM_MSGQ_DISABLED)
+ goto ngd_enable;
+ if (dev->use_tx_msgqs == MSM_MSGQ_ENABLED &&
+ cfg & NGD_CFG_TX_MSGQ_EN)
+ goto ngd_enable;
+
+ if (dev->use_tx_msgqs == MSM_MSGQ_ENABLED)
+ msm_slim_disconnect_endp(dev, &dev->tx_msgq,
+ &dev->use_tx_msgqs);
+ msm_slim_connect_endp(dev, &dev->tx_msgq);
+ }
+ngd_enable:
+
+ if (dev->use_rx_msgqs == MSM_MSGQ_ENABLED)
+ new_cfg |= NGD_CFG_RX_MSGQ_EN;
+ if (dev->use_tx_msgqs == MSM_MSGQ_ENABLED)
+ new_cfg |= NGD_CFG_TX_MSGQ_EN;
+
+ /* Enable NGD, and program MSGQs if not already */
+ if (cfg == new_cfg)
+ return;
+
+ writel_relaxed(new_cfg, dev->base + NGD_BASE(dev->ctrl.nr, dev->ver));
+ /* make sure NGD MSG-Q config goes through */
+ mb();
+}
+
+static void ngd_slim_rx(struct msm_slim_ctrl *dev, u8 *buf)
+{
+ unsigned long flags;
+ u8 mc, mt, len;
+
+ len = buf[0] & 0x1F;
+ mt = (buf[0] >> 5) & 0x7;
+ mc = buf[1];
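+	/*
+	 * Dispatch on message type/code: the master-capability report,
+	 * value/info replies, logical-address replies and generic ACKs
+	 * each complete a different waiter
+	 */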
+ if (mc == SLIM_USR_MC_MASTER_CAPABILITY &&
+ mt == SLIM_MSG_MT_SRC_REFERRED_USER)
+ complete(&dev->rx_msgq_notify);
+
+ if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
+ mc == SLIM_MSG_MC_REPLY_VALUE) {
+ u8 tid = buf[3];
+
+ dev_dbg(dev->dev, "tid:%d, len:%d\n", tid, len);
+ slim_msg_response(&dev->ctrl, &buf[4], tid,
+ len - 4);
+ pm_runtime_mark_last_busy(dev->dev);
+ }
+ if (mc == SLIM_USR_MC_ADDR_REPLY &&
+ mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
+ struct slim_msg_txn *txn;
+ u8 failed_ea[6] = {0, 0, 0, 0, 0, 0};
+
+ spin_lock_irqsave(&dev->ctrl.txn_lock, flags);
+ txn = dev->ctrl.txnt[buf[3]];
+ if (!txn) {
+ spin_unlock_irqrestore(&dev->ctrl.txn_lock, flags);
+ SLIM_WARN(dev,
+ "LADDR response after timeout, tid:0x%x\n",
+ buf[3]);
+ return;
+ }
+ if (memcmp(&buf[4], failed_ea, 6))
+ txn->la = buf[10];
+ dev->ctrl.txnt[buf[3]] = NULL;
+ complete(txn->comp);
+ spin_unlock_irqrestore(&dev->ctrl.txn_lock, flags);
+ }
+ if (mc == SLIM_USR_MC_GENERIC_ACK &&
+ mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
+ struct slim_msg_txn *txn;
+
+ spin_lock_irqsave(&dev->ctrl.txn_lock, flags);
+ txn = dev->ctrl.txnt[buf[3]];
+ if (!txn) {
+ spin_unlock_irqrestore(&dev->ctrl.txn_lock, flags);
+ SLIM_WARN(dev, "ACK received after timeout, tid:0x%x\n",
+ buf[3]);
+ return;
+ }
+ dev_dbg(dev->dev, "got response:tid:%d, response:0x%x",
+ (int)buf[3], buf[4]);
+ if (!(buf[4] & MSM_SAT_SUCCSS)) {
+ SLIM_WARN(dev, "TID:%d, NACK code:0x%x\n", (int)buf[3],
+ buf[4]);
+ txn->ec = -EIO;
+ }
+ dev->ctrl.txnt[buf[3]] = NULL;
+ complete(txn->comp);
+ spin_unlock_irqrestore(&dev->ctrl.txn_lock, flags);
+ }
+}
+
+static int ngd_slim_power_up(struct msm_slim_ctrl *dev, bool mdm_restart)
+{
+ void __iomem *ngd;
+ int timeout, retries = 0, ret = 0;
+ enum msm_ctrl_state cur_state = dev->state;
+ u32 laddr;
+ u32 rx_msgq;
+ u32 ngd_int = (NGD_INT_TX_NACKED_2 |
+ NGD_INT_MSG_BUF_CONTE | NGD_INT_MSG_TX_INVAL |
+ NGD_INT_IE_VE_CHG | NGD_INT_DEV_ERR |
+ NGD_INT_TX_MSG_SENT);
+
+ if (!mdm_restart && cur_state == MSM_CTRL_DOWN) {
+ int timeout = wait_for_completion_timeout(&dev->qmi.qmi_comp,
+ HZ);
+ if (!timeout) {
+ SLIM_ERR(dev, "slimbus QMI init timed out\n");
+ return -EREMOTEIO;
+ }
+ }
+
+hw_init_retry:
+	/* No need to vote if the controller is not in low-power mode */
+ if (!mdm_restart &&
+ (cur_state == MSM_CTRL_DOWN || cur_state == MSM_CTRL_ASLEEP)) {
+ ret = msm_slim_qmi_power_request(dev, true);
+ if (ret) {
+ SLIM_WARN(dev, "SLIM power req failed:%d, retry:%d\n",
+ ret, retries);
+ if (!atomic_read(&dev->ssr_in_progress))
+ msm_slim_qmi_power_request(dev, false);
+ if (retries < INIT_MX_RETRIES &&
+ !atomic_read(&dev->ssr_in_progress)) {
+ retries++;
+ goto hw_init_retry;
+ }
+ return ret;
+ }
+ }
+ retries = 0;
+
+ if (!dev->ver) {
+ dev->ver = readl_relaxed(dev->base);
+ /* Version info in 16 MSbits */
+ dev->ver >>= 16;
+ }
+ ngd = dev->base + NGD_BASE(dev->ctrl.nr, dev->ver);
+ laddr = readl_relaxed(ngd + NGD_STATUS);
+ if (laddr & NGD_LADDR) {
+ u32 int_en = readl_relaxed(ngd + NGD_INT_EN);
+
+ /*
+ * external MDM restart case where ADSP itself was active framer
+ * For example, modem restarted when playback was active
+ */
+ if (cur_state == MSM_CTRL_AWAKE) {
+ SLIM_INFO(dev, "Subsys restart: ADSP active framer\n");
+ return 0;
+ }
+ /*
+ * ADSP power collapse case, where HW wasn't reset.
+ */
+ if (int_en != 0)
+ return 0;
+
+ /* Retention */
+ if (dev->use_rx_msgqs == MSM_MSGQ_ENABLED)
+ msm_slim_disconnect_endp(dev, &dev->rx_msgq,
+ &dev->use_rx_msgqs);
+ if (dev->use_tx_msgqs == MSM_MSGQ_ENABLED)
+ msm_slim_disconnect_endp(dev, &dev->tx_msgq,
+ &dev->use_tx_msgqs);
+
+ writel_relaxed(ngd_int, (dev->base + NGD_INT_EN +
+ NGD_BASE(dev->ctrl.nr, dev->ver)));
+
+ rx_msgq = readl_relaxed(ngd + NGD_RX_MSGQ_CFG);
+		/*
+		 * Program with the minimum value so that the signal gets
+		 * triggered immediately after receiving the message
+		 */
+ writel_relaxed((rx_msgq | SLIM_RX_MSGQ_TIMEOUT_VAL),
+ (ngd + NGD_RX_MSGQ_CFG));
+ /* reconnect BAM pipes if needed and enable NGD */
+ ngd_slim_setup(dev);
+ return 0;
+ }
+
+ if (mdm_restart) {
+		/*
+		 * External MDM SSR while the MDM is the active framer:
+		 * the ADSP will reset the slimbus HW, so disconnect the BAM
+		 * pipes so that they can be reconnected after the capability
+		 * message is received. Treat the current state as DOWN to
+		 * stay synchronous with the HW
+		 */
+		cur_state = MSM_CTRL_DOWN;
+ SLIM_INFO(dev,
+ "SLIM MDM restart: MDM active framer: reinit HW\n");
+ /* disconnect BAM pipes */
+ msm_slim_sps_exit(dev, false);
+ dev->state = MSM_CTRL_DOWN;
+ }
+
+capability_retry:
+	/*
+	 * ADSP power-collapse (or SSR) case, where the HW was reset.
+	 * BAM programming will happen when the capability message is
+	 * received
+	 */
+ writel_relaxed(ngd_int, dev->base + NGD_INT_EN +
+ NGD_BASE(dev->ctrl.nr, dev->ver));
+
+ rx_msgq = readl_relaxed(ngd + NGD_RX_MSGQ_CFG);
+	/*
+	 * Program with the minimum value so that the signal gets
+	 * triggered immediately after receiving the message
+	 */
+ writel_relaxed(rx_msgq|SLIM_RX_MSGQ_TIMEOUT_VAL,
+ ngd + NGD_RX_MSGQ_CFG);
+ /* make sure register got updated */
+ mb();
+
+ /* reconnect BAM pipes if needed and enable NGD */
+ ngd_slim_setup(dev);
+
+ timeout = wait_for_completion_timeout(&dev->reconf, HZ);
+ if (!timeout) {
+ u32 cfg = readl_relaxed(dev->base +
+ NGD_BASE(dev->ctrl.nr, dev->ver));
+ laddr = readl_relaxed(ngd + NGD_STATUS);
+ SLIM_WARN(dev,
+ "slim capability time-out:%d, stat:0x%x,cfg:0x%x\n",
+ retries, laddr, cfg);
+ if ((retries < INIT_MX_RETRIES) &&
+ !atomic_read(&dev->ssr_in_progress)) {
+ retries++;
+ goto capability_retry;
+ }
+ return -ETIMEDOUT;
+ }
+	/* multiple transactions waiting on slimbus to power up? */
+ if (cur_state == MSM_CTRL_DOWN)
+ complete_all(&dev->ctrl_up);
+ /* Resetting the log level */
+ SLIM_RST_LOGLVL(dev);
+ return 0;
+}
+
+static int ngd_slim_enable(struct msm_slim_ctrl *dev, bool enable)
+{
+ int ret = 0;
+
+ if (enable) {
+ ret = msm_slim_qmi_init(dev, false);
+ /* controller state should be in sync with framework state */
+ if (!ret) {
+ complete(&dev->qmi.qmi_comp);
+ if (!pm_runtime_enabled(dev->dev) ||
+ !pm_runtime_suspended(dev->dev))
+ ngd_slim_runtime_resume(dev->dev);
+ else
+ pm_runtime_resume(dev->dev);
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put(dev->dev);
+ } else
+ SLIM_ERR(dev, "qmi init fail, ret:%d, state:%d\n",
+ ret, dev->state);
+ } else {
+ msm_slim_qmi_exit(dev);
+ }
+
+ return ret;
+}
+
+#ifdef CONFIG_PM
+static int ngd_slim_power_down(struct msm_slim_ctrl *dev)
+{
+ unsigned long flags;
+ int i;
+ struct slim_controller *ctrl = &dev->ctrl;
+
+ spin_lock_irqsave(&ctrl->txn_lock, flags);
+ /* Pending response for a message */
+ for (i = 0; i < ctrl->last_tid; i++) {
+ if (ctrl->txnt[i]) {
+ spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+ SLIM_INFO(dev, "NGD down:txn-rsp for %d pending", i);
+ return -EBUSY;
+ }
+ }
+ spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+ return msm_slim_qmi_power_request(dev, false);
+}
+#endif
+
+static int ngd_slim_rx_msgq_thread(void *data)
+{
+ struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)data;
+ struct completion *notify = &dev->rx_msgq_notify;
+ int ret = 0;
+
+ while (!kthread_should_stop()) {
+ struct slim_msg_txn txn;
+ int retries = 0;
+ u8 wbuf[8];
+
+ wait_for_completion_interruptible(notify);
+
+ txn.dt = SLIM_MSG_DEST_LOGICALADDR;
+ txn.ec = 0;
+ txn.rbuf = NULL;
+ txn.mc = SLIM_USR_MC_REPORT_SATELLITE;
+ txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
+ txn.la = SLIM_LA_MGR;
+ wbuf[0] = SAT_MAGIC_LSB;
+ wbuf[1] = SAT_MAGIC_MSB;
+ wbuf[2] = SAT_MSG_VER;
+ wbuf[3] = SAT_MSG_PROT;
+ txn.wbuf = wbuf;
+ txn.len = 4;
+ SLIM_INFO(dev, "SLIM SAT: Rcvd master capability\n");
+capability_retry:
+ txn.rl = 8;
+ ret = ngd_xfer_msg(&dev->ctrl, &txn);
+ if (!ret) {
+ enum msm_ctrl_state prev_state = dev->state;
+
+ SLIM_INFO(dev,
+ "SLIM SAT: capability exchange successful\n");
+ if (prev_state < MSM_CTRL_ASLEEP)
+ SLIM_WARN(dev,
+ "capability due to noise, state:%d\n",
+ prev_state);
+ complete(&dev->reconf);
+ /* ADSP SSR, send device_up notifications */
+ if (prev_state == MSM_CTRL_DOWN)
+ complete(&dev->qmi.slave_notify);
+ } else if (ret == -EIO) {
+ SLIM_WARN(dev, "capability message NACKed, retrying\n");
+ if (retries < INIT_MX_RETRIES) {
+ msleep(DEF_RETRY_MS);
+ retries++;
+ goto capability_retry;
+ }
+ } else {
+ SLIM_WARN(dev, "SLIM: capability TX failed:%d\n", ret);
+ }
+ }
+ return 0;
+}
+
+static int ngd_notify_slaves(void *data)
+{
+ struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)data;
+ struct slim_controller *ctrl = &dev->ctrl;
+ struct slim_device *sbdev;
+ struct list_head *pos, *next;
+ int ret, i = 0;
+
+ ret = qmi_svc_event_notifier_register(SLIMBUS_QMI_SVC_ID,
+ SLIMBUS_QMI_SVC_V1,
+ SLIMBUS_QMI_INS_ID, &dev->qmi.nb);
+ if (ret) {
+ pr_err("Slimbus QMI service registration failed:%d", ret);
+ return ret;
+ }
+
+ while (!kthread_should_stop()) {
+ wait_for_completion_interruptible(&dev->qmi.slave_notify);
+ /* Probe devices for first notification */
+ if (!i) {
+ i++;
+ dev->err = 0;
+ if (dev->dev->of_node)
+ of_register_slim_devices(&dev->ctrl);
+
+ /*
+ * Add devices registered with board-info now that
+ * controller is up
+ */
+ slim_ctrl_add_boarddevs(&dev->ctrl);
+ ngd_dom_init(dev);
+ } else {
+ slim_framer_booted(ctrl);
+ }
+ mutex_lock(&ctrl->m_ctrl);
+ list_for_each_safe(pos, next, &ctrl->devs) {
+ int j;
+
+ sbdev = list_entry(pos, struct slim_device, dev_list);
+ mutex_unlock(&ctrl->m_ctrl);
+ for (j = 0; j < LADDR_RETRY; j++) {
+ ret = slim_get_logical_addr(sbdev,
+ sbdev->e_addr,
+ 6, &sbdev->laddr);
+ if (!ret)
+ break;
+ /* time for ADSP to assign LA */
+ msleep(20);
+ }
+ mutex_lock(&ctrl->m_ctrl);
+ }
+ mutex_unlock(&ctrl->m_ctrl);
+ }
+ return 0;
+}
+
+static void ngd_dom_down(struct msm_slim_ctrl *dev)
+{
+ struct slim_controller *ctrl = &dev->ctrl;
+ struct slim_device *sbdev;
+
+ mutex_lock(&dev->ssr_lock);
+ ngd_slim_enable(dev, false);
+ /* device up should be called again after SSR */
+ list_for_each_entry(sbdev, &ctrl->devs, dev_list)
+ slim_report_absent(sbdev);
+ SLIM_INFO(dev, "SLIM ADSP SSR (DOWN) done\n");
+ mutex_unlock(&dev->ssr_lock);
+}
+
+static void ngd_dom_up(struct work_struct *work)
+{
+ struct msm_slim_ss *dsp =
+ container_of(work, struct msm_slim_ss, dom_up);
+ struct msm_slim_ctrl *dev =
+ container_of(dsp, struct msm_slim_ctrl, dsp);
+ mutex_lock(&dev->ssr_lock);
+ ngd_slim_enable(dev, true);
+ mutex_unlock(&dev->ssr_lock);
+}
+
+static ssize_t show_mask(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(device);
+ struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", dev->ipc_log_mask);
+}
+
+static ssize_t set_mask(struct device *device, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(device);
+ struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
+
+ dev->ipc_log_mask = buf[0] - '0';
+ if (dev->ipc_log_mask > DBG_LEV)
+ dev->ipc_log_mask = DBG_LEV;
+ return count;
+}
+
+static DEVICE_ATTR(debug_mask, 0644, show_mask, set_mask);
+
+static int ngd_slim_probe(struct platform_device *pdev)
+{
+ struct msm_slim_ctrl *dev;
+ int ret;
+ struct resource *bam_mem;
+ struct resource *slim_mem;
+ struct resource *irq, *bam_irq;
+ bool rxreg_access = false;
+ bool slim_mdm = false;
+ const char *ext_modem_id = NULL;
+
+ slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "slimbus_physical");
+ if (!slim_mem) {
+ dev_err(&pdev->dev, "no slimbus physical memory resource\n");
+ return -ENODEV;
+ }
+ bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "slimbus_bam_physical");
+ if (!bam_mem) {
+ dev_err(&pdev->dev, "no slimbus BAM memory resource\n");
+ return -ENODEV;
+ }
+ irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+ "slimbus_irq");
+ if (!irq) {
+ dev_err(&pdev->dev, "no slimbus IRQ resource\n");
+ return -ENODEV;
+ }
+ bam_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+ "slimbus_bam_irq");
+ if (!bam_irq) {
+ dev_err(&pdev->dev, "no slimbus BAM IRQ resource\n");
+ return -ENODEV;
+ }
+
+ dev = kzalloc(sizeof(struct msm_slim_ctrl), GFP_KERNEL);
+	if (!dev) {
+		dev_err(&pdev->dev, "no memory for MSM slimbus controller\n");
+		return -ENOMEM;
+	}
+ dev->wr_comp = kzalloc(sizeof(struct completion *) * MSM_TX_BUFS,
+ GFP_KERNEL);
+	if (!dev->wr_comp) {
+		ret = -ENOMEM;
+		goto err_nobulk;
+	}
+
+ /* typical txn numbers and size used in bulk operation */
+ dev->bulk.buf_sz = SLIM_MAX_TXNS * 8;
+ dev->bulk.base = kzalloc(dev->bulk.buf_sz, GFP_KERNEL | GFP_DMA);
+ if (!dev->bulk.base) {
+ ret = -ENOMEM;
+ goto err_nobulk;
+ }
+
+ dev->dev = &pdev->dev;
+ platform_set_drvdata(pdev, dev);
+ slim_set_ctrldata(&dev->ctrl, dev);
+
+ /* Create IPC log context */
+ dev->ipc_slimbus_log = ipc_log_context_create(IPC_SLIMBUS_LOG_PAGES,
+ dev_name(dev->dev), 0);
+ if (!dev->ipc_slimbus_log)
+ dev_err(&pdev->dev, "error creating ipc_logging context\n");
+ else {
+ /* Initialize the log mask */
+ dev->ipc_log_mask = INFO_LEV;
+ dev->default_ipc_log_mask = INFO_LEV;
+ SLIM_INFO(dev, "start logging for slim dev %s\n",
+ dev_name(dev->dev));
+ }
+ ret = sysfs_create_file(&dev->dev->kobj, &dev_attr_debug_mask.attr);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to create dev. attr\n");
+ dev->sysfs_created = false;
+ } else
+ dev->sysfs_created = true;
+
+ dev->base = ioremap(slim_mem->start, resource_size(slim_mem));
+ if (!dev->base) {
+ dev_err(&pdev->dev, "IOremap failed\n");
+ ret = -ENOMEM;
+ goto err_ioremap_failed;
+ }
+ dev->bam.base = ioremap(bam_mem->start, resource_size(bam_mem));
+ if (!dev->bam.base) {
+ dev_err(&pdev->dev, "BAM IOremap failed\n");
+ ret = -ENOMEM;
+ goto err_ioremap_bam_failed;
+ }
+ if (pdev->dev.of_node) {
+
+ ret = of_property_read_u32(pdev->dev.of_node, "cell-index",
+ &dev->ctrl.nr);
+ if (ret) {
+ dev_err(&pdev->dev, "Cell index not specified:%d", ret);
+ goto err_ctrl_failed;
+ }
+ rxreg_access = of_property_read_bool(pdev->dev.of_node,
+ "qcom,rxreg-access");
+ of_property_read_u32(pdev->dev.of_node, "qcom,apps-ch-pipes",
+ &dev->pdata.apps_pipes);
+ of_property_read_u32(pdev->dev.of_node, "qcom,ea-pc",
+ &dev->pdata.eapc);
+ ret = of_property_read_string(pdev->dev.of_node,
+ "qcom,slim-mdm", &ext_modem_id);
+ if (!ret)
+ slim_mdm = true;
+ } else {
+ dev->ctrl.nr = pdev->id;
+ }
+	/*
+	 * Keep PGD's logical address as the manager's. Query it when the
+	 * first data-channel request comes in
+	 */
+ dev->pgdla = SLIM_LA_MGR;
+ dev->ctrl.nchans = MSM_SLIM_NCHANS;
+ dev->ctrl.nports = MSM_SLIM_NPORTS;
+ dev->framer.rootfreq = SLIM_ROOT_FREQ >> 3;
+ dev->framer.superfreq =
+ dev->framer.rootfreq / SLIM_CL_PER_SUPERFRAME_DIV8;
+ dev->ctrl.a_framer = &dev->framer;
+ dev->ctrl.clkgear = SLIM_MAX_CLK_GEAR;
+ dev->ctrl.set_laddr = ngd_set_laddr;
+ dev->ctrl.get_laddr = ngd_get_laddr;
+ dev->ctrl.allocbw = ngd_allocbw;
+ dev->ctrl.xfer_msg = ngd_xfer_msg;
+ dev->ctrl.xfer_user_msg = ngd_user_msg;
+ dev->ctrl.xfer_bulk_wr = ngd_bulk_wr;
+ dev->ctrl.wakeup = NULL;
+ dev->ctrl.alloc_port = msm_alloc_port;
+ dev->ctrl.dealloc_port = msm_dealloc_port;
+ dev->ctrl.port_xfer = msm_slim_port_xfer;
+ dev->ctrl.port_xfer_status = msm_slim_port_xfer_status;
+ dev->bam_mem = bam_mem;
+ dev->rx_slim = ngd_slim_rx;
+
+ init_completion(&dev->reconf);
+ init_completion(&dev->ctrl_up);
+ mutex_init(&dev->tx_lock);
+ mutex_init(&dev->ssr_lock);
+ spin_lock_init(&dev->tx_buf_lock);
+ spin_lock_init(&dev->rx_lock);
+ dev->ee = 1;
+ dev->irq = irq->start;
+ dev->bam.irq = bam_irq->start;
+ atomic_set(&dev->ssr_in_progress, 0);
+
+ if (rxreg_access)
+ dev->use_rx_msgqs = MSM_MSGQ_DISABLED;
+ else
+ dev->use_rx_msgqs = MSM_MSGQ_RESET;
+
+ /* Enable TX message queues by default as recommended by HW */
+ dev->use_tx_msgqs = MSM_MSGQ_RESET;
+
+ init_completion(&dev->rx_msgq_notify);
+ init_completion(&dev->qmi.slave_notify);
+
+ /* Register with framework */
+ ret = slim_add_numbered_controller(&dev->ctrl);
+ if (ret) {
+ dev_err(dev->dev, "error adding controller\n");
+ goto err_ctrl_failed;
+ }
+
+ dev->ctrl.dev.parent = &pdev->dev;
+ dev->ctrl.dev.of_node = pdev->dev.of_node;
+ dev->state = MSM_CTRL_DOWN;
+
+	/*
+	 * As the interrupt handler does not perform expensive operations,
+	 * it can execute in interrupt context; this avoids context
+	 * switches and improves performance
+	 */
+ ret = request_irq(dev->irq,
+ ngd_slim_interrupt,
+ IRQF_TRIGGER_HIGH,
+ "ngd_slim_irq", dev);
+
+ if (ret) {
+ dev_err(&pdev->dev, "request IRQ failed\n");
+ goto err_request_irq_failed;
+ }
+
+ init_completion(&dev->qmi.qmi_comp);
+ dev->err = -EPROBE_DEFER;
+ pm_runtime_use_autosuspend(dev->dev);
+ pm_runtime_set_autosuspend_delay(dev->dev, MSM_SLIM_AUTOSUSPEND);
+ pm_runtime_set_suspended(dev->dev);
+ pm_runtime_enable(dev->dev);
+
+ if (slim_mdm) {
+ dev->ext_mdm.nb.notifier_call = mdm_ssr_notify_cb;
+ dev->ext_mdm.domr = subsys_notif_register_notifier(ext_modem_id,
+ &dev->ext_mdm.nb);
+ if (IS_ERR_OR_NULL(dev->ext_mdm.domr))
+ dev_err(dev->dev,
+ "subsys_notif_register_notifier failed %p",
+ dev->ext_mdm.domr);
+ }
+
+ INIT_WORK(&dev->dsp.dom_up, ngd_dom_up);
+ dev->qmi.nb.notifier_call = ngd_qmi_available;
+ pm_runtime_get_noresume(dev->dev);
+
+ /* Fire up the Rx message queue thread */
+ dev->rx_msgq_thread = kthread_run(ngd_slim_rx_msgq_thread, dev,
+ "ngd_rx_thread%d", dev->ctrl.nr);
+ if (IS_ERR(dev->rx_msgq_thread)) {
+ ret = PTR_ERR(dev->rx_msgq_thread);
+ dev_err(dev->dev, "Failed to start Rx thread:%d\n", ret);
+ goto err_rx_thread_create_failed;
+ }
+
+ /* Start thread to probe, and notify slaves */
+ dev->qmi.slave_thread = kthread_run(ngd_notify_slaves, dev,
+ "ngd_notify_sl%d", dev->ctrl.nr);
+ if (IS_ERR(dev->qmi.slave_thread)) {
+ ret = PTR_ERR(dev->qmi.slave_thread);
+ dev_err(dev->dev, "Failed to start notifier thread:%d\n", ret);
+ goto err_notify_thread_create_failed;
+ }
+ SLIM_INFO(dev, "NGD SB controller is up!\n");
+ return 0;
+
+err_notify_thread_create_failed:
+ kthread_stop(dev->rx_msgq_thread);
+err_rx_thread_create_failed:
+ free_irq(dev->irq, dev);
+err_request_irq_failed:
+err_ctrl_failed:
+ iounmap(dev->bam.base);
+err_ioremap_bam_failed:
+ iounmap(dev->base);
+err_ioremap_failed:
+ if (dev->sysfs_created)
+ sysfs_remove_file(&dev->dev->kobj,
+ &dev_attr_debug_mask.attr);
+ kfree(dev->bulk.base);
+err_nobulk:
+ kfree(dev->wr_comp);
+ kfree(dev);
+ return ret;
+}
+
+static int ngd_slim_remove(struct platform_device *pdev)
+{
+ struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
+
+ ngd_slim_enable(dev, false);
+ if (dev->sysfs_created)
+ sysfs_remove_file(&dev->dev->kobj,
+ &dev_attr_debug_mask.attr);
+ qmi_svc_event_notifier_unregister(SLIMBUS_QMI_SVC_ID,
+ SLIMBUS_QMI_SVC_V1,
+ SLIMBUS_QMI_INS_ID, &dev->qmi.nb);
+ pm_runtime_disable(&pdev->dev);
+ if (dev->dsp.dom_t == MSM_SLIM_DOM_SS)
+ subsys_notif_unregister_notifier(dev->dsp.domr,
+ &dev->dsp.nb);
+ if (dev->dsp.dom_t == MSM_SLIM_DOM_PD)
+ service_notif_unregister_notifier(dev->dsp.domr,
+ &dev->dsp.nb);
+ if (!IS_ERR_OR_NULL(dev->ext_mdm.domr))
+ subsys_notif_unregister_notifier(dev->ext_mdm.domr,
+ &dev->ext_mdm.nb);
+ kfree(dev->bulk.base);
+ free_irq(dev->irq, dev);
+ slim_del_controller(&dev->ctrl);
+ kthread_stop(dev->rx_msgq_thread);
+ iounmap(dev->bam.base);
+ iounmap(dev->base);
+ kfree(dev->wr_comp);
+ kfree(dev);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int ngd_slim_runtime_idle(struct device *device)
+{
+ struct platform_device *pdev = to_platform_device(device);
+ struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
+
+ mutex_lock(&dev->tx_lock);
+ if (dev->state == MSM_CTRL_AWAKE)
+ dev->state = MSM_CTRL_IDLE;
+ mutex_unlock(&dev->tx_lock);
+ dev_dbg(device, "pm_runtime: idle...\n");
+ pm_request_autosuspend(device);
+ return -EAGAIN;
+}
+#endif
+
+/*
+ * If PM_RUNTIME is not defined, these two functions become helpers
+ * called from system suspend/resume, so they are not inside
+ * #ifdef CONFIG_PM_RUNTIME
+ */
+static int ngd_slim_runtime_resume(struct device *device)
+{
+ struct platform_device *pdev = to_platform_device(device);
+ struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
+ int ret = 0;
+
+ mutex_lock(&dev->tx_lock);
+ if (dev->state >= MSM_CTRL_ASLEEP)
+ ret = ngd_slim_power_up(dev, false);
+ if (ret) {
+		/* Did SSR cause this power-up failure? */
+ if (dev->state != MSM_CTRL_DOWN)
+ dev->state = MSM_CTRL_ASLEEP;
+ else
+ SLIM_WARN(dev, "HW wakeup attempt during SSR\n");
+ } else {
+ dev->state = MSM_CTRL_AWAKE;
+ }
+ mutex_unlock(&dev->tx_lock);
+ SLIM_INFO(dev, "Slim runtime resume: ret %d\n", ret);
+ return ret;
+}
+
+#ifdef CONFIG_PM
+static int ngd_slim_runtime_suspend(struct device *device)
+{
+ struct platform_device *pdev = to_platform_device(device);
+ struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
+ int ret = 0;
+
+ mutex_lock(&dev->tx_lock);
+ ret = ngd_slim_power_down(dev);
+ if (ret && ret != -EBUSY)
+ SLIM_INFO(dev, "slim resource not idle:%d\n", ret);
+ if (!ret || ret == -ETIMEDOUT)
+ dev->state = MSM_CTRL_ASLEEP;
+ mutex_unlock(&dev->tx_lock);
+ SLIM_INFO(dev, "Slim runtime suspend: ret %d\n", ret);
+ return ret;
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int ngd_slim_suspend(struct device *dev)
+{
+ int ret = -EBUSY;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_slim_ctrl *cdev = platform_get_drvdata(pdev);
+
+ if (!pm_runtime_enabled(dev) ||
+ (!pm_runtime_suspended(dev) &&
+ cdev->state == MSM_CTRL_IDLE)) {
+ ret = ngd_slim_runtime_suspend(dev);
+		/*
+		 * If runtime-PM still thinks it's active, make sure its
+		 * status is in sync with the HW status.
+		 * Since this suspend calls the QMI API, it results in
+		 * holding a wakelock, which makes the first suspend attempt
+		 * fail. Subsequent suspends should not request the low-power
+		 * transition again since the HW is already suspended
+		 */
+ if (!ret) {
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_enable(dev);
+ }
+ }
+ if (ret == -EBUSY) {
+		/*
+		 * There is a possibility that some audio stream is active
+		 * during suspend. We don't want to return a suspend failure
+		 * in that case, so that display and other relevant
+		 * components can still go to suspend.
+		 * Any other error should be passed on to system-level
+		 * suspend
+		 */
+ ret = 0;
+ }
+ SLIM_INFO(cdev, "system suspend\n");
+ return ret;
+}
+
+static int ngd_slim_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_slim_ctrl *cdev = platform_get_drvdata(pdev);
+ /*
+ * Rely on runtime-PM to call resume in case it is enabled.
+ * Even if it's not enabled, rely on 1st client transaction to do
+ * clock/power on
+ */
+ SLIM_INFO(cdev, "system resume\n");
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops ngd_slim_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(
+ ngd_slim_suspend,
+ ngd_slim_resume
+ )
+ SET_RUNTIME_PM_OPS(
+ ngd_slim_runtime_suspend,
+ ngd_slim_runtime_resume,
+ ngd_slim_runtime_idle
+ )
+};
+
+static const struct of_device_id ngd_slim_dt_match[] = {
+ {
+ .compatible = "qcom,slim-ngd",
+ },
+ {}
+};
+
+static struct platform_driver ngd_slim_driver = {
+ .probe = ngd_slim_probe,
+ .remove = ngd_slim_remove,
+ .driver = {
+ .name = NGD_SLIM_NAME,
+ .owner = THIS_MODULE,
+ .pm = &ngd_slim_dev_pm_ops,
+ .of_match_table = ngd_slim_dt_match,
+ },
+};
+
+static int ngd_slim_init(void)
+{
+ return platform_driver_register(&ngd_slim_driver);
+}
+late_initcall(ngd_slim_init);
+
+static void ngd_slim_exit(void)
+{
+ platform_driver_unregister(&ngd_slim_driver);
+}
+module_exit(ngd_slim_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM Slimbus controller");
+MODULE_ALIAS("platform:msm-slim-ngd");
diff --git a/drivers/slimbus/slim-msm.c b/drivers/slimbus/slim-msm.c
new file mode 100644
index 0000000..5cc29e7
--- /dev/null
+++ b/drivers/slimbus/slim-msm.c
@@ -0,0 +1,1633 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/pm_runtime.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/slimbus/slimbus.h>
+#include <linux/msm-sps.h>
+#include <linux/gcd.h>
+#include "slim-msm.h"
+
+int msm_slim_rx_enqueue(struct msm_slim_ctrl *dev, u32 *buf, u8 len)
+{
+ spin_lock(&dev->rx_lock);
+ if ((dev->tail + 1) % MSM_CONCUR_MSG == dev->head) {
+ spin_unlock(&dev->rx_lock);
+ dev_err(dev->dev, "RX QUEUE full!");
+ return -EXFULL;
+ }
+ memcpy((u8 *)dev->rx_msgs[dev->tail], (u8 *)buf, len);
+ dev->tail = (dev->tail + 1) % MSM_CONCUR_MSG;
+ spin_unlock(&dev->rx_lock);
+ return 0;
+}
+
+int msm_slim_rx_dequeue(struct msm_slim_ctrl *dev, u8 *buf)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->rx_lock, flags);
+ if (dev->tail == dev->head) {
+ spin_unlock_irqrestore(&dev->rx_lock, flags);
+ return -ENODATA;
+ }
+ memcpy(buf, (u8 *)dev->rx_msgs[dev->head], 40);
+ dev->head = (dev->head + 1) % MSM_CONCUR_MSG;
+ spin_unlock_irqrestore(&dev->rx_lock, flags);
+ return 0;
+}
+
+int msm_slim_get_ctrl(struct msm_slim_ctrl *dev)
+{
+#ifdef CONFIG_PM
+ int ref = 0;
+ int ret = pm_runtime_get_sync(dev->dev);
+
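+	/*
+	 * Sanity-check the vote: a non-positive usage count right after a
+	 * successful pm_runtime_get_sync() means get/put calls are
+	 * unbalanced
+	 */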
+ if (ret >= 0) {
+ ref = atomic_read(&dev->dev->power.usage_count);
+ if (ref <= 0) {
+ SLIM_WARN(dev, "reference count -ve:%d", ref);
+ ret = -ENODEV;
+ }
+ }
+ return ret;
+#else
+ return -ENODEV;
+#endif
+}
+void msm_slim_put_ctrl(struct msm_slim_ctrl *dev)
+{
+#ifdef CONFIG_PM
+ int ref;
+
+ pm_runtime_mark_last_busy(dev->dev);
+ ref = atomic_read(&dev->dev->power.usage_count);
+ if (ref <= 0)
+ SLIM_WARN(dev, "reference count mismatch:%d", ref);
+ else
+ pm_runtime_put_sync(dev->dev);
+#endif
+}
+
+irqreturn_t msm_slim_port_irq_handler(struct msm_slim_ctrl *dev, u32 pstat)
+{
+ int i;
+ u32 int_en = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_EN_EEn,
+ dev->ver));
+ /*
+ * different port-interrupt than what we enabled, ignore.
+ * This may happen if overflow/underflow is reported, but
+ * was disabled due to unavailability of buffers provided by
+ * client.
+ */
+ if ((pstat & int_en) == 0)
+ return IRQ_HANDLED;
+ for (i = 0; i < dev->port_nums; i++) {
+ struct msm_slim_endp *endpoint = &dev->pipes[i];
+
+ if (pstat & (1 << endpoint->port_b)) {
+ u32 val = readl_relaxed(PGD_PORT(PGD_PORT_STATn,
+ endpoint->port_b, dev->ver));
+ if (val & MSM_PORT_OVERFLOW) {
+ dev->ctrl.ports[i].err =
+ SLIM_P_OVERFLOW;
+ } else if (val & MSM_PORT_UNDERFLOW) {
+ dev->ctrl.ports[i].err =
+ SLIM_P_UNDERFLOW;
+ }
+ }
+ }
+ /*
+ * Disable port interrupt here. Re-enable when more
+ * buffers are provided for this port.
+ */
+ writel_relaxed((int_en & (~pstat)),
+ PGD_THIS_EE(PGD_PORT_INT_EN_EEn,
+ dev->ver));
+ /* clear port interrupts */
+ writel_relaxed(pstat, PGD_THIS_EE(PGD_PORT_INT_CL_EEn,
+ dev->ver));
+ SLIM_INFO(dev, "disabled overflow/underflow for port 0x%x", pstat);
+
+ /*
+ * Guarantee that port interrupt bit(s) clearing writes go
+ * through before exiting ISR
+ */
+ mb();
+ return IRQ_HANDLED;
+}
+
+int msm_slim_init_endpoint(struct msm_slim_ctrl *dev, struct msm_slim_endp *ep)
+{
+ int ret;
+ struct sps_pipe *endpoint;
+ struct sps_connect *config = &ep->config;
+
+ /* Allocate the endpoint */
+ endpoint = sps_alloc_endpoint();
+ if (!endpoint) {
+ dev_err(dev->dev, "sps_alloc_endpoint failed\n");
+ return -ENOMEM;
+ }
+
+ /* Get default connection configuration for an endpoint */
+ ret = sps_get_config(endpoint, config);
+ if (ret) {
+ dev_err(dev->dev, "sps_get_config failed 0x%x\n", ret);
+ goto sps_config_failed;
+ }
+
+ ep->sps = endpoint;
+ return 0;
+
+sps_config_failed:
+ sps_free_endpoint(endpoint);
+ return ret;
+}
+
+void msm_slim_free_endpoint(struct msm_slim_endp *ep)
+{
+ sps_free_endpoint(ep->sps);
+ ep->sps = NULL;
+}
+
+int msm_slim_sps_mem_alloc(
+ struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem, u32 len)
+{
+ dma_addr_t phys;
+
+ mem->size = len;
+ mem->min_size = 0;
+ mem->base = dma_alloc_coherent(dev->dev, mem->size, &phys, GFP_KERNEL);
+
+ if (!mem->base) {
+ dev_err(dev->dev, "dma_alloc_coherent(%d) failed\n", len);
+ return -ENOMEM;
+ }
+
+ mem->phys_base = phys;
+ memset(mem->base, 0x00, mem->size);
+ return 0;
+}
+
+void
+msm_slim_sps_mem_free(struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem)
+{
+ if (mem->base && mem->phys_base)
+ dma_free_coherent(dev->dev, mem->size, mem->base,
+ mem->phys_base);
+ else
+ dev_err(dev->dev, "cant dma free. they are NULL\n");
+ mem->size = 0;
+ mem->base = NULL;
+ mem->phys_base = 0;
+}
+
+void msm_hw_set_port(struct msm_slim_ctrl *dev, u8 pipenum, u8 portnum)
+{
+ struct slim_controller *ctrl;
+ struct slim_ch *chan;
+ struct msm_slim_pshpull_parm *parm;
+ u32 set_cfg = 0;
+	struct slim_port_cfg cfg;
+
+	if (!dev) {
+		pr_err("%s:Dev node is null\n", __func__);
+		return;
+	}
+	if (portnum >= dev->port_nums) {
+		pr_err("%s:Invalid port\n", __func__);
+		return;
+	}
+	/* dereference dev only after the NULL/range checks above */
+	cfg = dev->ctrl.ports[portnum].cfg;
+ ctrl = &dev->ctrl;
+ chan = ctrl->ports[portnum].ch;
+ parm = &dev->pipes[portnum].psh_pull;
+
+ if (cfg.watermark)
+ set_cfg = (cfg.watermark << 1);
+ else
+ set_cfg = DEF_WATERMARK;
+
+ if (cfg.port_opts & SLIM_OPT_NO_PACK)
+ set_cfg |= DEF_NO_PACK;
+ else
+ set_cfg |= DEF_PACK;
+
+ if (cfg.port_opts & SLIM_OPT_ALIGN_MSB)
+ set_cfg |= DEF_ALIGN_MSB;
+ else
+ set_cfg |= DEF_ALIGN_LSB;
+
+ set_cfg |= ENABLE_PORT;
+
+ writel_relaxed(set_cfg, PGD_PORT(PGD_PORT_CFGn, pipenum, dev->ver));
+ writel_relaxed(DEF_BLKSZ, PGD_PORT(PGD_PORT_BLKn, pipenum, dev->ver));
+ writel_relaxed(DEF_TRANSZ, PGD_PORT(PGD_PORT_TRANn, pipenum, dev->ver));
+
+ if (chan->prot == SLIM_PUSH || chan->prot == SLIM_PULL) {
+ set_cfg = 0;
+ set_cfg |= ((0xFFFF & parm->num_samples)<<16);
+ set_cfg |= (0xFFFF & parm->rpt_period);
+ writel_relaxed(set_cfg, PGD_PORT(PGD_PORT_PSHPLLn,
+ pipenum, dev->ver));
+ }
+ /* Make sure that port registers are updated before returning */
+ mb();
+}
+
+static void msm_slim_disconn_pipe_port(struct msm_slim_ctrl *dev, u8 pn)
+{
+ struct msm_slim_endp *endpoint = &dev->pipes[pn];
+ struct sps_register_event sps_event;
+ u32 int_port = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_EN_EEn,
+ dev->ver));
+ writel_relaxed(0, PGD_PORT(PGD_PORT_CFGn, (endpoint->port_b),
+ dev->ver));
+ writel_relaxed((int_port & ~(1 << endpoint->port_b)),
+ PGD_THIS_EE(PGD_PORT_INT_EN_EEn, dev->ver));
+ /* Make sure port register is updated */
+ mb();
+ memset(&sps_event, 0, sizeof(sps_event));
+ sps_register_event(endpoint->sps, &sps_event);
+ sps_disconnect(endpoint->sps);
+ dev->pipes[pn].connected = false;
+}
+
+static void msm_slim_calc_pshpull_parm(struct msm_slim_ctrl *dev,
+ u8 pn, struct slim_ch *prop)
+{
+ struct msm_slim_endp *endpoint = &dev->pipes[pn];
+ struct msm_slim_pshpull_parm *parm = &endpoint->psh_pull;
+ int chan_freq, round_off, divisor, super_freq;
+
+ super_freq = dev->ctrl.a_framer->superfreq;
+
+ if (prop->baser == SLIM_RATE_4000HZ)
+ chan_freq = 4000 * prop->ratem;
+ else if (prop->baser == SLIM_RATE_11025HZ)
+ chan_freq = 11025 * prop->ratem;
+ else
+ chan_freq = prop->baser * prop->ratem;
+
+	/*
+	 * If the channel frequency is a multiple of the superframe
+	 * frequency, use the hard-ISO protocol
+	 */
+ if (!(chan_freq % super_freq)) {
+ prop->prot = SLIM_HARD_ISO;
+ return;
+ }
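+	/*
+	 * Otherwise reduce chan_freq / (round_off * super_freq) by their
+	 * GCD so the HW repeats the smallest pattern: num_samples samples
+	 * every rpt_period superframes
+	 */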
+ round_off = DIV_ROUND_UP(chan_freq, super_freq);
+ divisor = gcd(round_off * super_freq, chan_freq);
+ parm->num_samples = chan_freq/divisor;
+ parm->rpt_period = (round_off * super_freq)/divisor;
+}
+
+int msm_slim_connect_pipe_port(struct msm_slim_ctrl *dev, u8 pn)
+{
+ struct msm_slim_endp *endpoint;
+ struct sps_connect *cfg;
+ struct slim_ch *prop;
+ u32 stat;
+ int ret;
+
+ if (!dev || pn >= dev->port_nums)
+ return -ENODEV;
+ endpoint = &dev->pipes[pn];
+ cfg = &endpoint->config;
+ prop = dev->ctrl.ports[pn].ch;
+
+ ret = sps_get_config(dev->pipes[pn].sps, cfg);
+ if (ret) {
+ dev_err(dev->dev, "sps pipe-port get config error%x\n", ret);
+ return ret;
+ }
+ cfg->options = SPS_O_DESC_DONE | SPS_O_ERROR |
+ SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;
+
+ if (prop->prot == SLIM_PUSH || prop->prot == SLIM_PULL)
+ msm_slim_calc_pshpull_parm(dev, pn, prop);
+
+ if (dev->pipes[pn].connected &&
+ dev->ctrl.ports[pn].state == SLIM_P_CFG) {
+ return -EISCONN;
+ } else if (dev->pipes[pn].connected) {
+ writel_relaxed(0, PGD_PORT(PGD_PORT_CFGn,
+ (endpoint->port_b), dev->ver));
+ /* Make sure port disabling goes through */
+ mb();
+ /* Is pipe already connected in desired direction */
+ if ((dev->ctrl.ports[pn].flow == SLIM_SRC &&
+ cfg->mode == SPS_MODE_DEST) ||
+ (dev->ctrl.ports[pn].flow == SLIM_SINK &&
+ cfg->mode == SPS_MODE_SRC)) {
+ msm_hw_set_port(dev, endpoint->port_b, pn);
+ return 0;
+ }
+ msm_slim_disconn_pipe_port(dev, pn);
+ }
+
+ stat = readl_relaxed(PGD_PORT(PGD_PORT_STATn, endpoint->port_b,
+ dev->ver));
+ if (dev->ctrl.ports[pn].flow == SLIM_SRC) {
+ cfg->destination = dev->bam.hdl;
+ cfg->source = SPS_DEV_HANDLE_MEM;
+ cfg->dest_pipe_index = ((stat & (0xFF << 4)) >> 4);
+ cfg->src_pipe_index = 0;
+ dev_dbg(dev->dev, "flow src:pipe num:%d",
+ cfg->dest_pipe_index);
+ cfg->mode = SPS_MODE_DEST;
+ } else {
+ cfg->source = dev->bam.hdl;
+ cfg->destination = SPS_DEV_HANDLE_MEM;
+ cfg->src_pipe_index = ((stat & (0xFF << 4)) >> 4);
+ cfg->dest_pipe_index = 0;
+ dev_dbg(dev->dev, "flow dest:pipe num:%d",
+ cfg->src_pipe_index);
+ cfg->mode = SPS_MODE_SRC;
+ }
+ /* Space for descriptor FIFOs */
+ ret = msm_slim_sps_mem_alloc(dev, &cfg->desc,
+ MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec));
+ if (ret)
+ pr_err("mem alloc for descr failed:%d", ret);
+ else
+ ret = sps_connect(dev->pipes[pn].sps, cfg);
+
+ if (!ret) {
+ dev->pipes[pn].connected = true;
+ msm_hw_set_port(dev, endpoint->port_b, pn);
+ }
+ return ret;
+}
+
+int msm_alloc_port(struct slim_controller *ctrl, u8 pn)
+{
+ struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
+ struct msm_slim_endp *endpoint;
+ int ret = 0;
+
+ if (pn >= dev->port_nums)
+ return -ENODEV;
+ if (ctrl->ports[pn].req == SLIM_REQ_HALF_DUP ||
+ ctrl->ports[pn].req == SLIM_REQ_MULTI_CH)
+ return -EPROTONOSUPPORT;
+
+ endpoint = &dev->pipes[pn];
+ ret = msm_slim_init_endpoint(dev, endpoint);
+ if (ret)
+ dev_err(dev->dev, "sps endpoint init error code:%x\n", ret);
+ return ret;
+}
+
+void msm_dealloc_port(struct slim_controller *ctrl, u8 pn)
+{
+ struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
+ struct msm_slim_endp *endpoint;
+
+ if (pn >= dev->port_nums)
+ return;
+ endpoint = &dev->pipes[pn];
+ if (dev->pipes[pn].connected) {
+ struct sps_connect *config = &endpoint->config;
+
+ msm_slim_disconn_pipe_port(dev, pn);
+ msm_slim_sps_mem_free(dev, &config->desc);
+ }
+ if (endpoint->sps)
+ msm_slim_free_endpoint(endpoint);
+}
+
+enum slim_port_err msm_slim_port_xfer_status(struct slim_controller *ctr,
+ u8 pn, phys_addr_t *done_buf, u32 *done_len)
+{
+ struct msm_slim_ctrl *dev = slim_get_ctrldata(ctr);
+ struct sps_iovec sio;
+ int ret;
+
+ if (done_len)
+ *done_len = 0;
+ if (done_buf)
+ *done_buf = 0;
+ if (!dev->pipes[pn].connected)
+ return SLIM_P_DISCONNECT;
+ ret = sps_get_iovec(dev->pipes[pn].sps, &sio);
+ if (!ret) {
+ if (done_len)
+ *done_len = sio.size;
+ if (done_buf)
+ *done_buf = (phys_addr_t)sio.addr;
+ }
+ dev_dbg(dev->dev, "get iovec returned %d\n", ret);
+ return SLIM_P_INPROGRESS;
+}
+
+static void msm_slim_port_cb(struct sps_event_notify *ev)
+{
+ struct completion *comp = ev->data.transfer.user;
+ struct sps_iovec *iovec = &ev->data.transfer.iovec;
+
+ if (ev->event_id == SPS_EVENT_DESC_DONE) {
+ pr_debug("desc done iovec = (0x%x 0x%x 0x%x)\n",
+ iovec->addr, iovec->size, iovec->flags);
+ } else {
+ pr_err("%s: ERR event %d\n",
+ __func__, ev->event_id);
+ }
+ if (comp)
+ complete(comp);
+}
+
+int msm_slim_port_xfer(struct slim_controller *ctrl, u8 pn, phys_addr_t iobuf,
+ u32 len, struct completion *comp)
+{
+ struct sps_register_event sreg;
+ int ret;
+ struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
+
+ if (pn >= dev->port_nums)
+ return -ENODEV;
+
+ if (!dev->pipes[pn].connected)
+ return -ENOTCONN;
+
+ sreg.options = (SPS_EVENT_DESC_DONE|SPS_EVENT_ERROR);
+ sreg.mode = SPS_TRIGGER_WAIT;
+ sreg.xfer_done = NULL;
+ sreg.callback = msm_slim_port_cb;
+ sreg.user = NULL;
+ ret = sps_register_event(dev->pipes[pn].sps, &sreg);
+ if (ret) {
+ dev_dbg(dev->dev, "sps register event error:%x\n", ret);
+ return ret;
+ }
+ ret = sps_transfer_one(dev->pipes[pn].sps, iobuf, len, comp,
+ SPS_IOVEC_FLAG_INT);
+ dev_dbg(dev->dev, "sps submit xfer error code:%x\n", ret);
+ if (!ret) {
+ /* Enable port interrupts */
+ u32 int_port = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_EN_EEn,
+ dev->ver));
+ if (!(int_port & (1 << (dev->pipes[pn].port_b))))
+ writel_relaxed((int_port |
+ (1 << dev->pipes[pn].port_b)),
+ PGD_THIS_EE(PGD_PORT_INT_EN_EEn, dev->ver));
+ /* Make sure that port registers are updated before returning */
+ mb();
+ }
+
+ return ret;
+}
+
+/* Queue up Tx message buffer */
+static int msm_slim_post_tx_msgq(struct msm_slim_ctrl *dev, u8 *buf, int len)
+{
+ int ret;
+ struct msm_slim_endp *endpoint = &dev->tx_msgq;
+ struct sps_mem_buffer *mem = &endpoint->buf;
+ struct sps_pipe *pipe = endpoint->sps;
+ int ix = (buf - (u8 *)mem->base);
+
+ phys_addr_t phys_addr = mem->phys_base + ix;
+
+ for (ret = 0; ret < ((len + 3) >> 2); ret++)
+ pr_debug("BAM TX buf[%d]:0x%x", ret, ((u32 *)buf)[ret]);
+
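+ /* submit the message, rounding its length up to a 4-byte multiple */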
+ ret = sps_transfer_one(pipe, phys_addr, ((len + 3) & 0xFC), NULL,
+ SPS_IOVEC_FLAG_EOT);
+ if (ret)
+ dev_err(dev->dev, "transfer_one() failed 0x%x, %d\n", ret, ix);
+
+ return ret;
+}
+
+void msm_slim_tx_msg_return(struct msm_slim_ctrl *dev, int err)
+{
+ struct msm_slim_endp *endpoint = &dev->tx_msgq;
+ struct sps_mem_buffer *mem = &endpoint->buf;
+ struct sps_pipe *pipe = endpoint->sps;
+ struct sps_iovec iovec;
+ int idx, ret = 0;
+ phys_addr_t addr;
+
+ if (dev->use_tx_msgqs != MSM_MSGQ_ENABLED) {
+ /* use 1 buffer, non-blocking writes are not possible */
+ if (dev->wr_comp[0]) {
+ struct completion *comp = dev->wr_comp[0];
+
+ dev->wr_comp[0] = NULL;
+ complete(comp);
+ }
+ return;
+ }
+ while (!ret) {
+ ret = sps_get_iovec(pipe, &iovec);
+ if (ret) {
+ pr_err("SLIM TX get IOVEC failed:%d", ret);
+ return;
+ }
+ addr = DESC_FULL_ADDR(iovec.flags, iovec.addr);
+ if (addr == 0)
+ return;
+ if (addr == dev->bulk.wr_dma) {
+ dma_unmap_single(dev->dev, dev->bulk.wr_dma,
+ dev->bulk.size, DMA_TO_DEVICE);
+ if (!dev->bulk.cb)
+ SLIM_WARN(dev, "no callback for bulk WR?");
+ else
+ dev->bulk.cb(dev->bulk.ctx, err);
+ dev->bulk.in_progress = false;
+ pm_runtime_mark_last_busy(dev->dev);
+ return;
+ } else if (addr < mem->phys_base ||
+ (addr > (mem->phys_base +
+ (MSM_TX_BUFS * SLIM_MSGQ_BUF_LEN)))) {
+ SLIM_WARN(dev, "BUF out of bounds:base:0x%pa, io:0x%pa",
+ &mem->phys_base, &addr);
+ continue;
+ }
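+ /*
+ * Map the completed descriptor back to its TX buffer slot: slots
+ * are SLIM_MSGQ_BUF_LEN bytes apart starting at phys_base, and
+ * each slot may have a completion waiting on it.
+ */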
+ idx = (int) ((addr - mem->phys_base)
+ / SLIM_MSGQ_BUF_LEN);
+ if (dev->wr_comp[idx]) {
+ struct completion *comp = dev->wr_comp[idx];
+
+ dev->wr_comp[idx] = NULL;
+ complete(comp);
+ }
+ if (err) {
+ int i;
+ u32 *addr = (u32 *)mem->base +
+ (idx * (SLIM_MSGQ_BUF_LEN >> 2));
+ /* print the descriptor that resulted in error */
+ for (i = 0; i < (SLIM_MSGQ_BUF_LEN >> 2); i++)
+ SLIM_WARN(dev, "err desc[%d]:0x%x", i, addr[i]);
+ }
+ /* descriptors should complete in order; warn if one did not */
+ if (idx != dev->tx_head)
+ SLIM_WARN(dev, "SLIM OUT OF ORDER TX:idx:%d, head:%d",
+ idx, dev->tx_head);
+ dev->tx_head = (dev->tx_head + 1) % MSM_TX_BUFS;
+ }
+}
+
+static u32 *msm_slim_modify_tx_buf(struct msm_slim_ctrl *dev,
+ struct completion *comp)
+{
+ struct msm_slim_endp *endpoint = &dev->tx_msgq;
+ struct sps_mem_buffer *mem = &endpoint->buf;
+ u32 *retbuf = NULL;
+
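+ /*
+ * The TX message queue is a ring of MSM_TX_BUFS slots; one slot is
+ * kept unused so that head == tail unambiguously means "empty".
+ * If advancing the tail would collide with the head, the ring is
+ * full and completed descriptors must be reclaimed first.
+ */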
+ if ((dev->tx_tail + 1) % MSM_TX_BUFS == dev->tx_head)
+ return NULL;
+
+ retbuf = (u32 *)((u8 *)mem->base +
+ (dev->tx_tail * SLIM_MSGQ_BUF_LEN));
+ dev->wr_comp[dev->tx_tail] = comp;
+ dev->tx_tail = (dev->tx_tail + 1) % MSM_TX_BUFS;
+ return retbuf;
+}
+
+u32 *msm_slim_manage_tx_msgq(struct msm_slim_ctrl *dev, bool getbuf,
+ struct completion *comp, int err)
+{
+ int ret = 0;
+ int retries = 0;
+ u32 *retbuf = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->tx_buf_lock, flags);
+ if (!getbuf) {
+ msm_slim_tx_msg_return(dev, err);
+ spin_unlock_irqrestore(&dev->tx_buf_lock, flags);
+ return NULL;
+ }
+
+ retbuf = msm_slim_modify_tx_buf(dev, comp);
+ if (retbuf) {
+ spin_unlock_irqrestore(&dev->tx_buf_lock, flags);
+ return retbuf;
+ }
+
+ do {
+ msm_slim_tx_msg_return(dev, err);
+ retbuf = msm_slim_modify_tx_buf(dev, comp);
+ if (!retbuf)
+ ret = -EAGAIN;
+ else {
+ if (retries > 0)
+ SLIM_INFO(dev, "SLIM TX retrieved:%d retries",
+ retries);
+ spin_unlock_irqrestore(&dev->tx_buf_lock, flags);
+ return retbuf;
+ }
+
+ /*
+ * Superframe duration varies with the clock gear, but healthy
+ * hardware consumes at least one message per superframe. With
+ * INIT_MX_RETRIES retries, make sure we wait for ~2 superframes
+ * before deciding the hardware couldn't process descriptors.
+ */
+ udelay(50);
+ retries++;
+ } while (ret && (retries < INIT_MX_RETRIES));
+
+ spin_unlock_irqrestore(&dev->tx_buf_lock, flags);
+ return NULL;
+}
+
+int msm_send_msg_buf(struct msm_slim_ctrl *dev, u32 *buf, u8 len, u32 tx_reg)
+{
+ if (dev->use_tx_msgqs != MSM_MSGQ_ENABLED) {
+ int i;
+
+ for (i = 0; i < (len + 3) >> 2; i++) {
+ dev_dbg(dev->dev, "AHB TX data:0x%x\n", buf[i]);
+ writel_relaxed(buf[i], dev->base + tx_reg + (i * 4));
+ }
+ /* Guarantee that message is sent before returning */
+ mb();
+ return 0;
+ }
+ return msm_slim_post_tx_msgq(dev, (u8 *)buf, len);
+}
+
+u32 *msm_get_msg_buf(struct msm_slim_ctrl *dev, int len,
+ struct completion *comp)
+{
+ /*
+ * Without message queues, only one transaction can be outstanding:
+ * block each new transaction until the current one completes.
+ * The message-queue path supports multiple in-flight writes.
+ */
+ if (dev->use_tx_msgqs != MSM_MSGQ_ENABLED) {
+ dev->wr_comp[0] = comp;
+ return dev->tx_buf;
+ }
+
+ return msm_slim_manage_tx_msgq(dev, true, comp, 0);
+}
+
+static void
+msm_slim_rx_msgq_event(struct msm_slim_ctrl *dev, struct sps_event_notify *ev)
+{
+ if (ev->event_id == SPS_EVENT_DESC_DONE)
+ complete(&dev->rx_msgq_notify);
+ else
+ dev_err(dev->dev, "%s: unknown event %d\n",
+ __func__, ev->event_id);
+}
+
+static void
+msm_slim_handle_rx(struct msm_slim_ctrl *dev, struct sps_event_notify *ev)
+{
+ int ret = 0;
+ u32 mc = 0;
+ u32 mt = 0;
+ u8 msg_len = 0;
+
+ if (ev->event_id != SPS_EVENT_EOT) {
+ dev_err(dev->dev, "%s: unknown event %d\n",
+ __func__, ev->event_id);
+ return;
+ }
+
+ do {
+ ret = msm_slim_rx_msgq_get(dev, dev->current_rx_buf,
+ dev->current_count);
+ if (ret == -ENODATA) {
+ return;
+ } else if (ret) {
+ SLIM_ERR(dev, "rx_msgq_get() failed 0x%x\n",
+ ret);
+ return;
+ }
+
+ /* Parse the first word for message length, type (MT) and code (MC) */
+ if (dev->current_count++ == 0) {
+ msg_len = *(dev->current_rx_buf) & 0x1F;
+ mt = (*(dev->current_rx_buf) >> 5) & 0x7;
+ mc = (*(dev->current_rx_buf) >> 8) & 0xff;
+ dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt);
+ }
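+ /*
+ * For illustration: a first word of 0x25CC decodes as
+ * msg_len = 0xC (12 bytes), mt = 0x6 (source-referred user
+ * message) and mc = 0x25 (SLIM_USR_MC_GENERIC_ACK).
+ */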
+
+ msg_len = (msg_len < 4) ? 0 : (msg_len - 4);
+
+ if (!msg_len) {
+ dev->rx_slim(dev, (u8 *)dev->current_rx_buf);
+ dev->current_count = 0;
+ }
+
+ } while (1);
+}
+
+static void msm_slim_rx_msgq_cb(struct sps_event_notify *notify)
+{
+ struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)notify->user;
+ /* manager controllers set the wakeup op; NGD controllers do not */
+ if (dev->ctrl.wakeup)
+ msm_slim_rx_msgq_event(dev, notify);
+ else
+ msm_slim_handle_rx(dev, notify);
+}
+
+/* Queue up Rx message buffer */
+static int msm_slim_post_rx_msgq(struct msm_slim_ctrl *dev, int ix)
+{
+ int ret;
+ struct msm_slim_endp *endpoint = &dev->rx_msgq;
+ struct sps_mem_buffer *mem = &endpoint->buf;
+ struct sps_pipe *pipe = endpoint->sps;
+
+ /* Rx message queue buffers are 4 bytes in length */
+ u8 *virt_addr = mem->base + (4 * ix);
+ phys_addr_t phys_addr = mem->phys_base + (4 * ix);
+
+ ret = sps_transfer_one(pipe, phys_addr, 4, virt_addr, 0);
+ if (ret)
+ dev_err(dev->dev, "transfer_one() failed 0x%x, %d\n", ret, ix);
+
+ return ret;
+}
+
+int msm_slim_rx_msgq_get(struct msm_slim_ctrl *dev, u32 *data, int offset)
+{
+ struct msm_slim_endp *endpoint = &dev->rx_msgq;
+ struct sps_mem_buffer *mem = &endpoint->buf;
+ struct sps_pipe *pipe = endpoint->sps;
+ struct sps_iovec iovec;
+ phys_addr_t addr;
+ int index;
+ int ret;
+
+ ret = sps_get_iovec(pipe, &iovec);
+ if (ret) {
+ dev_err(dev->dev, "sps_get_iovec() failed 0x%x\n", ret);
+ goto err_exit;
+ }
+
+ addr = DESC_FULL_ADDR(iovec.flags, iovec.addr);
+ pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
+ iovec.addr, iovec.size, iovec.flags);
+
+ /* no more descriptors */
+ if ((iovec.addr == 0) && (iovec.size == 0)) {
+ ret = -ENODATA;
+ goto err_exit;
+ }
+
+ /* Calculate buffer index */
+ index = (addr - mem->phys_base) / 4;
+ *(data + offset) = *((u32 *)mem->base + index);
+
+ pr_debug("buf = 0x%p, data = 0x%x\n", (u32 *)mem->base + index,
+ *(data + offset));
+
+ /* Add buffer back to the queue */
+ (void)msm_slim_post_rx_msgq(dev, index);
+
+err_exit:
+ return ret;
+}
+
+int msm_slim_connect_endp(struct msm_slim_ctrl *dev,
+ struct msm_slim_endp *endpoint)
+{
+ int i, ret;
+ struct sps_register_event sps_error_event; /* SPS_ERROR */
+ struct sps_register_event sps_descr_event; /* DESCR_DONE */
+ struct sps_connect *config = &endpoint->config;
+ unsigned long flags;
+
+ ret = sps_connect(endpoint->sps, config);
+ if (ret) {
+ dev_err(dev->dev, "sps_connect failed 0x%x\n", ret);
+ return ret;
+ }
+
+ memset(&sps_descr_event, 0x00, sizeof(sps_descr_event));
+
+ if (endpoint == &dev->rx_msgq) {
+ sps_descr_event.mode = SPS_TRIGGER_CALLBACK;
+ sps_descr_event.options = SPS_O_EOT;
+ sps_descr_event.user = (void *)dev;
+ sps_descr_event.callback = msm_slim_rx_msgq_cb;
+ sps_descr_event.xfer_done = NULL;
+
+ ret = sps_register_event(endpoint->sps, &sps_descr_event);
+ if (ret) {
+ dev_err(dev->dev, "sps_register_event() failed 0x%x\n", ret);
+ goto sps_reg_event_failed;
+ }
+ }
+
+ /* Register callback for errors */
+ memset(&sps_error_event, 0x00, sizeof(sps_error_event));
+ sps_error_event.mode = SPS_TRIGGER_CALLBACK;
+ sps_error_event.options = SPS_O_ERROR;
+ sps_error_event.user = (void *)dev;
+ sps_error_event.callback = msm_slim_rx_msgq_cb;
+
+ ret = sps_register_event(endpoint->sps, &sps_error_event);
+ if (ret) {
+ dev_err(dev->dev, "sps_register_event() failed 0x%x\n", ret);
+ goto sps_reg_event_failed;
+ }
+
+ /*
+ * Queue one transfer per 4-byte Rx buffer, posting one fewer
+ * buffer (MSM_SLIM_DESC_NUM - 1) than there are descriptors so
+ * the descriptor FIFO never completely fills
+ */
+
+ if (endpoint == &dev->rx_msgq) {
+ /* Setup the transfer */
+ for (i = 0; i < (MSM_SLIM_DESC_NUM - 1); i++) {
+ ret = msm_slim_post_rx_msgq(dev, i);
+ if (ret) {
+ dev_err(dev->dev,
+ "post_rx_msgq() failed 0x%x\n", ret);
+ goto sps_transfer_failed;
+ }
+ }
+ dev->use_rx_msgqs = MSM_MSGQ_ENABLED;
+ } else {
+ spin_lock_irqsave(&dev->tx_buf_lock, flags);
+ dev->tx_tail = 0;
+ dev->tx_head = 0;
+ for (i = 0; i < MSM_TX_BUFS; i++)
+ dev->wr_comp[i] = NULL;
+ spin_unlock_irqrestore(&dev->tx_buf_lock, flags);
+ dev->use_tx_msgqs = MSM_MSGQ_ENABLED;
+ }
+
+ return 0;
+sps_transfer_failed:
+ memset(&sps_error_event, 0x00, sizeof(sps_error_event));
+ sps_register_event(endpoint->sps, &sps_error_event);
+sps_reg_event_failed:
+ sps_disconnect(endpoint->sps);
+ return ret;
+}
+
+static int msm_slim_init_rx_msgq(struct msm_slim_ctrl *dev, u32 pipe_reg)
+{
+ int ret;
+ u32 pipe_offset;
+ struct msm_slim_endp *endpoint = &dev->rx_msgq;
+ struct sps_connect *config = &endpoint->config;
+ struct sps_mem_buffer *descr = &config->desc;
+ struct sps_mem_buffer *mem = &endpoint->buf;
+
+ if (dev->use_rx_msgqs == MSM_MSGQ_DISABLED)
+ return 0;
+
+ /* Allocate the endpoint */
+ ret = msm_slim_init_endpoint(dev, endpoint);
+ if (ret) {
+ dev_err(dev->dev, "init_endpoint failed 0x%x\n", ret);
+ goto sps_init_endpoint_failed;
+ }
+
+ /* Get the pipe indices for the message queues */
+ pipe_offset = (readl_relaxed(dev->base + pipe_reg) & 0xfc) >> 2;
+ dev_dbg(dev->dev, "Message queue pipe offset %d\n", pipe_offset);
+
+ config->mode = SPS_MODE_SRC;
+ config->source = dev->bam.hdl;
+ config->destination = SPS_DEV_HANDLE_MEM;
+ config->src_pipe_index = pipe_offset;
+ config->options = SPS_O_EOT | SPS_O_ERROR |
+ SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;
+
+ /* Allocate memory for the FIFO descriptors */
+ ret = msm_slim_sps_mem_alloc(dev, descr,
+ MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec));
+ if (ret) {
+ dev_err(dev->dev, "unable to allocate SPS descriptors\n");
+ goto alloc_descr_failed;
+ }
+
+ /* Allocate memory for the message buffers: N descriptors, 4 bytes each */
+ ret = msm_slim_sps_mem_alloc(dev, mem, MSM_SLIM_DESC_NUM * 4);
+ if (ret) {
+ dev_err(dev->dev, "dma_alloc_coherent failed\n");
+ goto alloc_buffer_failed;
+ }
+
+ ret = msm_slim_connect_endp(dev, endpoint);
+
+ if (!ret)
+ return 0;
+
+ msm_slim_sps_mem_free(dev, mem);
+alloc_buffer_failed:
+ msm_slim_sps_mem_free(dev, descr);
+alloc_descr_failed:
+ msm_slim_free_endpoint(endpoint);
+sps_init_endpoint_failed:
+ dev->use_rx_msgqs = MSM_MSGQ_DISABLED;
+ return ret;
+}
+
+static int msm_slim_init_tx_msgq(struct msm_slim_ctrl *dev, u32 pipe_reg)
+{
+ int ret;
+ u32 pipe_offset;
+ struct msm_slim_endp *endpoint = &dev->tx_msgq;
+ struct sps_connect *config = &endpoint->config;
+ struct sps_mem_buffer *descr = &config->desc;
+ struct sps_mem_buffer *mem = &endpoint->buf;
+
+ if (dev->use_tx_msgqs == MSM_MSGQ_DISABLED)
+ return 0;
+
+ /* Allocate the endpoint */
+ ret = msm_slim_init_endpoint(dev, endpoint);
+ if (ret) {
+ dev_err(dev->dev, "init_endpoint failed 0x%x\n", ret);
+ goto sps_init_endpoint_failed;
+ }
+
+ /* Get the pipe indices for the message queues */
+ pipe_offset = (readl_relaxed(dev->base + pipe_reg) & 0xfc) >> 2;
+ pipe_offset += 1;
+ dev_dbg(dev->dev, "TX Message queue pipe offset %d\n", pipe_offset);
+
+ config->mode = SPS_MODE_DEST;
+ config->source = SPS_DEV_HANDLE_MEM;
+ config->destination = dev->bam.hdl;
+ config->dest_pipe_index = pipe_offset;
+ config->src_pipe_index = 0;
+ config->options = SPS_O_ERROR | SPS_O_NO_Q |
+ SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;
+
+ /* Desc and TX buf are circular queues */
+ /* Allocate memory for the FIFO descriptors */
+ ret = msm_slim_sps_mem_alloc(dev, descr,
+ (MSM_TX_BUFS + 1) * sizeof(struct sps_iovec));
+ if (ret) {
+ dev_err(dev->dev, "unable to allocate SPS descriptors\n");
+ goto alloc_descr_failed;
+ }
+
+ /* Allocate TX buffer from which descriptors are created */
+ ret = msm_slim_sps_mem_alloc(dev, mem, ((MSM_TX_BUFS + 1) *
+ SLIM_MSGQ_BUF_LEN));
+ if (ret) {
+ dev_err(dev->dev, "dma_alloc_coherent failed\n");
+ goto alloc_buffer_failed;
+ }
+ ret = msm_slim_connect_endp(dev, endpoint);
+
+ if (!ret)
+ return 0;
+
+ msm_slim_sps_mem_free(dev, mem);
+alloc_buffer_failed:
+ msm_slim_sps_mem_free(dev, descr);
+alloc_descr_failed:
+ msm_slim_free_endpoint(endpoint);
+sps_init_endpoint_failed:
+ dev->use_tx_msgqs = MSM_MSGQ_DISABLED;
+ return ret;
+}
+
+static int msm_slim_data_port_assign(struct msm_slim_ctrl *dev)
+{
+ int i, data_ports = 0;
+ /* Pipes 0-6 (first 7 bits) are reserved for message queues */
+ for (i = 7; i < 32; i++) {
+ /* Check what pipes are owned by Apps. */
+ if ((dev->pdata.apps_pipes >> i) & 0x1) {
+ if (dev->pipes)
+ dev->pipes[data_ports].port_b = i - 7;
+ data_ports++;
+ }
+ }
+ return data_ports;
+}
+
+/* Registers BAM h/w resource with SPS driver and initializes msgq endpoints */
+int msm_slim_sps_init(struct msm_slim_ctrl *dev, struct resource *bam_mem,
+ u32 pipe_reg, bool remote)
+{
+ int ret;
+ unsigned long bam_handle;
+ struct sps_bam_props bam_props = {0};
+
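+ /*
+ * Default BAM pipe ownership per execution environment (EE): each
+ * bit set in pipe_mask assigns the corresponding BAM pipe to that
+ * EE. The apps mask may be overridden by platform data below.
+ */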
+ static struct sps_bam_sec_config_props sec_props = {
+ .ees = {
+ [0] = { /* LPASS */
+ .vmid = 0,
+ .pipe_mask = 0xFFFF98,
+ },
+ [1] = { /* Krait Apps */
+ .vmid = 1,
+ .pipe_mask = 0x3F000007,
+ },
+ [2] = { /* Modem */
+ .vmid = 2,
+ .pipe_mask = 0x00000060,
+ },
+ },
+ };
+
+ if (dev->bam.hdl) {
+ bam_handle = dev->bam.hdl;
+ goto init_pipes;
+ }
+ bam_props.ee = dev->ee;
+ bam_props.virt_addr = dev->bam.base;
+ bam_props.phys_addr = bam_mem->start;
+ bam_props.irq = dev->bam.irq;
+ if (!remote) {
+ bam_props.manage = SPS_BAM_MGR_LOCAL;
+ bam_props.sec_config = SPS_BAM_SEC_DO_CONFIG;
+ } else {
+ bam_props.manage = SPS_BAM_MGR_DEVICE_REMOTE |
+ SPS_BAM_MGR_MULTI_EE;
+ bam_props.sec_config = SPS_BAM_SEC_DO_NOT_CONFIG;
+ }
+ bam_props.summing_threshold = MSM_SLIM_PERF_SUMM_THRESHOLD;
+
+ bam_props.p_sec_config_props = &sec_props;
+
+ bam_props.options = SPS_O_DESC_DONE | SPS_O_ERROR |
+ SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;
+
+ /* override apps channel pipes if specified in platform-data or DT */
+ if (dev->pdata.apps_pipes)
+ sec_props.ees[dev->ee].pipe_mask = dev->pdata.apps_pipes;
+
+ /* Register the BAM device with the SPS driver */
+ ret = sps_register_bam_device(&bam_props, &bam_handle);
+ if (ret) {
+ dev_err(dev->dev, "disabling BAM: reg-bam failed 0x%x\n", ret);
+ dev->use_rx_msgqs = MSM_MSGQ_DISABLED;
+ dev->use_tx_msgqs = MSM_MSGQ_DISABLED;
+ return ret;
+ }
+ dev->bam.hdl = bam_handle;
+ dev_dbg(dev->dev, "SLIM BAM registered, handle = 0x%lx\n", bam_handle);
+
+init_pipes:
+ if (dev->port_nums)
+ goto init_msgq;
+
+ /* get the # of ports first */
+ dev->port_nums = msm_slim_data_port_assign(dev);
+ if (dev->port_nums && !dev->pipes) {
+ dev->pipes = kcalloc(dev->port_nums,
+ sizeof(struct msm_slim_endp),
+ GFP_KERNEL);
+ if (!dev->pipes) {
+ dev_err(dev->dev, "no memory for data ports\n");
+ sps_deregister_bam_device(bam_handle);
+ return -ENOMEM;
+ }
+ /* assign the ports now */
+ msm_slim_data_port_assign(dev);
+ }
+
+init_msgq:
+ ret = msm_slim_init_rx_msgq(dev, pipe_reg);
+ if (ret)
+ dev_err(dev->dev, "msm_slim_init_rx_msgq failed 0x%x\n", ret);
+ if (ret && bam_handle)
+ dev->use_rx_msgqs = MSM_MSGQ_DISABLED;
+
+ ret = msm_slim_init_tx_msgq(dev, pipe_reg);
+ if (ret)
+ dev_err(dev->dev, "msm_slim_init_tx_msgq failed 0x%x\n", ret);
+ if (ret && bam_handle)
+ dev->use_tx_msgqs = MSM_MSGQ_DISABLED;
+
+ /*
+ * If the BAM message-queue interface failed above, the register
+ * interface is used for commands instead. Other BAM use cases
+ * (e.g. apps channels) may still need BAM; since BAM itself
+ * initialized successfully, keep using it for those non-command
+ * cases and return success here.
+ */
+
+ return 0;
+}
+
+void msm_slim_disconnect_endp(struct msm_slim_ctrl *dev,
+ struct msm_slim_endp *endpoint,
+ enum msm_slim_msgq *msgq_flag)
+{
+ if (*msgq_flag >= MSM_MSGQ_ENABLED) {
+ sps_disconnect(endpoint->sps);
+ *msgq_flag = MSM_MSGQ_RESET;
+ }
+}
+
+static int msm_slim_discard_rx_data(struct msm_slim_ctrl *dev,
+ struct msm_slim_endp *endpoint)
+{
+ struct sps_iovec sio;
+ int desc_num = 0, ret = 0;
+
+ ret = sps_get_unused_desc_num(endpoint->sps, &desc_num);
+ if (ret) {
+ dev_err(dev->dev, "sps_get_unused_desc_num() failed 0x%x\n", ret);
+ return ret;
+ }
+ while (desc_num--)
+ sps_get_iovec(endpoint->sps, &sio);
+ return ret;
+}
+
+static void msm_slim_remove_ep(struct msm_slim_ctrl *dev,
+ struct msm_slim_endp *endpoint,
+ enum msm_slim_msgq *msgq_flag)
+{
+ struct sps_connect *config = &endpoint->config;
+ struct sps_mem_buffer *descr = &config->desc;
+ struct sps_mem_buffer *mem = &endpoint->buf;
+
+ msm_slim_sps_mem_free(dev, mem);
+ msm_slim_sps_mem_free(dev, descr);
+ msm_slim_free_endpoint(endpoint);
+}
+
+void msm_slim_deinit_ep(struct msm_slim_ctrl *dev,
+ struct msm_slim_endp *endpoint,
+ enum msm_slim_msgq *msgq_flag)
+{
+ int ret = 0;
+ struct sps_connect *config = &endpoint->config;
+
+ if (*msgq_flag == MSM_MSGQ_ENABLED) {
+ if (config->mode == SPS_MODE_SRC) {
+ ret = msm_slim_discard_rx_data(dev, endpoint);
+ if (ret)
+ SLIM_WARN(dev, "discarding Rx data failed\n");
+ }
+ msm_slim_disconnect_endp(dev, endpoint, msgq_flag);
+ msm_slim_remove_ep(dev, endpoint, msgq_flag);
+ }
+}
+
+static void msm_slim_sps_unreg_event(struct sps_pipe *sps)
+{
+ struct sps_register_event sps_event;
+
+ memset(&sps_event, 0x00, sizeof(sps_event));
+ /* Disable interrupt and signal notification for Rx/Tx pipe */
+ sps_register_event(sps, &sps_event);
+}
+
+void msm_slim_sps_exit(struct msm_slim_ctrl *dev, bool dereg)
+{
+ int i;
+
+ if (dev->use_rx_msgqs >= MSM_MSGQ_ENABLED)
+ msm_slim_sps_unreg_event(dev->rx_msgq.sps);
+ if (dev->use_tx_msgqs >= MSM_MSGQ_ENABLED)
+ msm_slim_sps_unreg_event(dev->tx_msgq.sps);
+
+ for (i = 0; i < dev->port_nums; i++) {
+ if (dev->pipes[i].connected)
+ msm_slim_disconn_pipe_port(dev, i);
+ }
+ if (dereg) {
+ for (i = 0; i < dev->port_nums; i++) {
+ if (dev->pipes[i].connected)
+ msm_dealloc_port(&dev->ctrl, i);
+ }
+ sps_deregister_bam_device(dev->bam.hdl);
+ dev->bam.hdl = 0L;
+ kfree(dev->pipes);
+ dev->pipes = NULL;
+ }
+ dev->port_nums = 0;
+}
+
+/* Slimbus QMI Messaging */
+#define SLIMBUS_QMI_SELECT_INSTANCE_REQ_V01 0x0020
+#define SLIMBUS_QMI_SELECT_INSTANCE_RESP_V01 0x0020
+#define SLIMBUS_QMI_POWER_REQ_V01 0x0021
+#define SLIMBUS_QMI_POWER_RESP_V01 0x0021
+#define SLIMBUS_QMI_CHECK_FRAMER_STATUS_REQ 0x0022
+#define SLIMBUS_QMI_CHECK_FRAMER_STATUS_RESP 0x0022
+
+#define SLIMBUS_QMI_POWER_REQ_MAX_MSG_LEN 7
+#define SLIMBUS_QMI_POWER_RESP_MAX_MSG_LEN 7
+#define SLIMBUS_QMI_SELECT_INSTANCE_REQ_MAX_MSG_LEN 14
+#define SLIMBUS_QMI_SELECT_INSTANCE_RESP_MAX_MSG_LEN 7
+#define SLIMBUS_QMI_CHECK_FRAMER_STAT_RESP_MAX_MSG_LEN 7
+
+enum slimbus_mode_enum_type_v01 {
+ /* To force a 32-bit signed enum. Do not change or use */
+ SLIMBUS_MODE_ENUM_TYPE_MIN_ENUM_VAL_V01 = INT_MIN,
+ SLIMBUS_MODE_SATELLITE_V01 = 1,
+ SLIMBUS_MODE_MASTER_V01 = 2,
+ SLIMBUS_MODE_ENUM_TYPE_MAX_ENUM_VAL_V01 = INT_MAX,
+};
+
+enum slimbus_pm_enum_type_v01 {
+ /* To force a 32-bit signed enum. Do not change or use */
+ SLIMBUS_PM_ENUM_TYPE_MIN_ENUM_VAL_V01 = INT_MIN,
+ SLIMBUS_PM_INACTIVE_V01 = 1,
+ SLIMBUS_PM_ACTIVE_V01 = 2,
+ SLIMBUS_PM_ENUM_TYPE_MAX_ENUM_VAL_V01 = INT_MAX,
+};
+
+struct slimbus_select_inst_req_msg_v01 {
+ /* Mandatory */
+ /* Hardware Instance Selection */
+ uint32_t instance;
+
+ /* Optional */
+ /* Optional Mode Request Operation */
+ /* Must be set to true if mode is being passed */
+ uint8_t mode_valid;
+ enum slimbus_mode_enum_type_v01 mode;
+};
+
+struct slimbus_select_inst_resp_msg_v01 {
+ /* Mandatory */
+ /* Result Code */
+ struct qmi_response_type_v01 resp;
+};
+
+struct slimbus_power_req_msg_v01 {
+ /* Mandatory */
+ /* Power Request Operation */
+ enum slimbus_pm_enum_type_v01 pm_req;
+};
+
+struct slimbus_power_resp_msg_v01 {
+ /* Mandatory */
+ /* Result Code */
+ struct qmi_response_type_v01 resp;
+};
+
+struct slimbus_chkfrm_resp_msg {
+ /* Mandatory */
+ /* Result Code */
+ struct qmi_response_type_v01 resp;
+};
+
+
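+/*
+ * QMI encode/decode tables: each elem_info entry describes one TLV
+ * (type-length-value) element of the wire format - its C type and size,
+ * TLV id, and offset within the request/response structure. A QMI_EOTI
+ * entry terminates each table.
+ */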
+static struct elem_info slimbus_select_inst_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct slimbus_select_inst_req_msg_v01,
+ instance),
+ .ei_array = NULL,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct slimbus_select_inst_req_msg_v01,
+ mode_valid),
+ .ei_array = NULL,
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(enum slimbus_mode_enum_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct slimbus_select_inst_req_msg_v01,
+ mode),
+ .ei_array = NULL,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .elem_len = 0,
+ .elem_size = 0,
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x00,
+ .offset = 0,
+ .ei_array = NULL,
+ },
+};
+
+static struct elem_info slimbus_select_inst_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct slimbus_select_inst_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .elem_len = 0,
+ .elem_size = 0,
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x00,
+ .offset = 0,
+ .ei_array = NULL,
+ },
+};
+
+static struct elem_info slimbus_power_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(enum slimbus_pm_enum_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct slimbus_power_req_msg_v01, pm_req),
+ .ei_array = NULL,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .elem_len = 0,
+ .elem_size = 0,
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x00,
+ .offset = 0,
+ .ei_array = NULL,
+ },
+};
+
+static struct elem_info slimbus_power_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct slimbus_power_resp_msg_v01, resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .elem_len = 0,
+ .elem_size = 0,
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x00,
+ .offset = 0,
+ .ei_array = NULL,
+ },
+};
+
+static struct elem_info slimbus_chkfrm_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct slimbus_chkfrm_resp_msg, resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .elem_len = 0,
+ .elem_size = 0,
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x00,
+ .offset = 0,
+ .ei_array = NULL,
+ },
+};
+
+static void msm_slim_qmi_recv_msg(struct kthread_work *work)
+{
+ int rc;
+ struct msm_slim_qmi *qmi =
+ container_of(work, struct msm_slim_qmi, kwork);
+
+ /* Drain all packets received */
+ do {
+ rc = qmi_recv_msg(qmi->handle);
+ } while (rc == 0);
+ if (rc != -ENOMSG)
+ pr_err("%s: Error receiving QMI message:%d\n", __func__, rc);
+}
+
+static void msm_slim_qmi_notify(struct qmi_handle *handle,
+ enum qmi_event_type event, void *notify_priv)
+{
+ struct msm_slim_ctrl *dev = notify_priv;
+ struct msm_slim_qmi *qmi = &dev->qmi;
+
+ switch (event) {
+ case QMI_RECV_MSG:
+ queue_kthread_work(&qmi->kworker, &qmi->kwork);
+ break;
+ default:
+ break;
+ }
+}
+
+static const char *get_qmi_error(struct qmi_response_type_v01 *r)
+{
+ if (r->result == QMI_RESULT_SUCCESS_V01 || r->error == QMI_ERR_NONE_V01)
+ return "No Error";
+ else if (r->error == QMI_ERR_NO_MEMORY_V01)
+ return "Out of Memory";
+ else if (r->error == QMI_ERR_INTERNAL_V01)
+ return "Unexpected error occurred";
+ else if (r->error == QMI_ERR_INCOMPATIBLE_STATE_V01)
+ return "Slimbus s/w already configured to a different mode";
+ else if (r->error == QMI_ERR_INVALID_ID_V01)
+ return "Slimbus hardware instance is not valid";
+ else
+ return "Unknown error";
+}
+
+static int msm_slim_qmi_send_select_inst_req(struct msm_slim_ctrl *dev,
+ struct slimbus_select_inst_req_msg_v01 *req)
+{
+ struct slimbus_select_inst_resp_msg_v01 resp = { { 0, 0 } };
+ struct msg_desc req_desc, resp_desc;
+ int rc;
+
+ req_desc.msg_id = SLIMBUS_QMI_SELECT_INSTANCE_REQ_V01;
+ req_desc.max_msg_len = SLIMBUS_QMI_SELECT_INSTANCE_REQ_MAX_MSG_LEN;
+ req_desc.ei_array = slimbus_select_inst_req_msg_v01_ei;
+
+ resp_desc.msg_id = SLIMBUS_QMI_SELECT_INSTANCE_RESP_V01;
+ resp_desc.max_msg_len = SLIMBUS_QMI_SELECT_INSTANCE_RESP_MAX_MSG_LEN;
+ resp_desc.ei_array = slimbus_select_inst_resp_msg_v01_ei;
+
+ rc = qmi_send_req_wait(dev->qmi.handle, &req_desc, req, sizeof(*req),
+ &resp_desc, &resp, sizeof(resp), SLIM_QMI_RESP_TOUT);
+ if (rc < 0) {
+ SLIM_ERR(dev, "%s: QMI send req failed %d\n", __func__, rc);
+ return rc;
+ }
+
+ /* Check the response */
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ SLIM_ERR(dev, "%s: QMI request failed 0x%x (%s)\n", __func__,
+ resp.resp.result, get_qmi_error(&resp.resp));
+ return -EREMOTEIO;
+ }
+
+ return 0;
+}
+
+static int msm_slim_qmi_send_power_request(struct msm_slim_ctrl *dev,
+ struct slimbus_power_req_msg_v01 *req)
+{
+ struct slimbus_power_resp_msg_v01 resp = { { 0, 0 } };
+ struct msg_desc req_desc, resp_desc;
+ int rc;
+
+ req_desc.msg_id = SLIMBUS_QMI_POWER_REQ_V01;
+ req_desc.max_msg_len = SLIMBUS_QMI_POWER_REQ_MAX_MSG_LEN;
+ req_desc.ei_array = slimbus_power_req_msg_v01_ei;
+
+ resp_desc.msg_id = SLIMBUS_QMI_POWER_RESP_V01;
+ resp_desc.max_msg_len = SLIMBUS_QMI_POWER_RESP_MAX_MSG_LEN;
+ resp_desc.ei_array = slimbus_power_resp_msg_v01_ei;
+
+ rc = qmi_send_req_wait(dev->qmi.handle, &req_desc, req, sizeof(*req),
+ &resp_desc, &resp, sizeof(resp), SLIM_QMI_RESP_TOUT);
+ if (rc < 0) {
+ SLIM_ERR(dev, "%s: QMI send req failed %d\n", __func__, rc);
+ return rc;
+ }
+
+ /* Check the response */
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ SLIM_ERR(dev, "%s: QMI request failed 0x%x (%s)\n", __func__,
+ resp.resp.result, get_qmi_error(&resp.resp));
+ return -EREMOTEIO;
+ }
+
+ return 0;
+}
+
+int msm_slim_qmi_init(struct msm_slim_ctrl *dev, bool apps_is_master)
+{
+ int rc = 0;
+ struct qmi_handle *handle;
+ struct slimbus_select_inst_req_msg_v01 req;
+
+ init_kthread_worker(&dev->qmi.kworker);
+
+ dev->qmi.task = kthread_run(kthread_worker_fn,
+ &dev->qmi.kworker, "msm_slim_qmi_clnt%d", dev->ctrl.nr);
+
+ if (IS_ERR(dev->qmi.task)) {
+ pr_err("%s: Failed to create QMI client kthread\n", __func__);
+ return -ENOMEM;
+ }
+
+ init_kthread_work(&dev->qmi.kwork, msm_slim_qmi_recv_msg);
+
+ handle = qmi_handle_create(msm_slim_qmi_notify, dev);
+ if (!handle) {
+ rc = -ENOMEM;
+ pr_err("%s: QMI client handle alloc failed\n", __func__);
+ goto qmi_handle_create_failed;
+ }
+
+ rc = qmi_connect_to_service(handle, SLIMBUS_QMI_SVC_ID,
+ SLIMBUS_QMI_SVC_V1,
+ SLIMBUS_QMI_INS_ID);
+ if (rc < 0) {
+ SLIM_ERR(dev, "%s: QMI server not found\n", __func__);
+ goto qmi_connect_to_service_failed;
+ }
+
+ /* Instance is 0 based */
+ req.instance = (dev->ctrl.nr >> 1);
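+ /* e.g. controllers numbered 0 and 1 both select h/w instance 0 */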
+ req.mode_valid = 1;
+
+ /* Mode indicates the role of the ADSP */
+ if (apps_is_master)
+ req.mode = SLIMBUS_MODE_SATELLITE_V01;
+ else
+ req.mode = SLIMBUS_MODE_MASTER_V01;
+
+ dev->qmi.handle = handle;
+
+ rc = msm_slim_qmi_send_select_inst_req(dev, &req);
+ if (rc) {
+ pr_err("%s: failed to select h/w instance\n", __func__);
+ goto qmi_select_instance_failed;
+ }
+
+ return 0;
+
+qmi_select_instance_failed:
+ dev->qmi.handle = NULL;
+qmi_connect_to_service_failed:
+ qmi_handle_destroy(handle);
+qmi_handle_create_failed:
+ flush_kthread_worker(&dev->qmi.kworker);
+ kthread_stop(dev->qmi.task);
+ dev->qmi.task = NULL;
+ return rc;
+}
+
+void msm_slim_qmi_exit(struct msm_slim_ctrl *dev)
+{
+ if (!dev->qmi.handle || !dev->qmi.task)
+ return;
+ qmi_handle_destroy(dev->qmi.handle);
+ flush_kthread_worker(&dev->qmi.kworker);
+ kthread_stop(dev->qmi.task);
+ dev->qmi.task = NULL;
+ dev->qmi.handle = NULL;
+}
+
+int msm_slim_qmi_power_request(struct msm_slim_ctrl *dev, bool active)
+{
+ struct slimbus_power_req_msg_v01 req;
+
+ if (active)
+ req.pm_req = SLIMBUS_PM_ACTIVE_V01;
+ else
+ req.pm_req = SLIMBUS_PM_INACTIVE_V01;
+
+ return msm_slim_qmi_send_power_request(dev, &req);
+}
+
+int msm_slim_qmi_check_framer_request(struct msm_slim_ctrl *dev)
+{
+ struct slimbus_chkfrm_resp_msg resp = { { 0, 0 } };
+ struct msg_desc req_desc, resp_desc;
+ int rc;
+
+ req_desc.msg_id = SLIMBUS_QMI_CHECK_FRAMER_STATUS_REQ;
+ req_desc.max_msg_len = 0;
+ req_desc.ei_array = NULL;
+
+ resp_desc.msg_id = SLIMBUS_QMI_CHECK_FRAMER_STATUS_RESP;
+ resp_desc.max_msg_len = SLIMBUS_QMI_CHECK_FRAMER_STAT_RESP_MAX_MSG_LEN;
+ resp_desc.ei_array = slimbus_chkfrm_resp_msg_v01_ei;
+
+ rc = qmi_send_req_wait(dev->qmi.handle, &req_desc, NULL, 0,
+ &resp_desc, &resp, sizeof(resp), SLIM_QMI_RESP_TOUT);
+ if (rc < 0) {
+ SLIM_ERR(dev, "%s: QMI send req failed %d\n", __func__, rc);
+ return rc;
+ }
+ /* Check the response */
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ SLIM_ERR(dev, "%s: QMI request failed 0x%x (%s)\n",
+ __func__, resp.resp.result, get_qmi_error(&resp.resp));
+ return -EREMOTEIO;
+ }
+ return 0;
+}
diff --git a/drivers/slimbus/slim-msm.h b/drivers/slimbus/slim-msm.h
new file mode 100644
index 0000000..65b9fae
--- /dev/null
+++ b/drivers/slimbus/slim-msm.h
@@ -0,0 +1,440 @@
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SLIM_MSM_H
+#define _SLIM_MSM_H
+
+#include <linux/irq.h>
+#include <linux/kthread.h>
+#include <soc/qcom/msm_qmi_interface.h>
+#include <linux/ipc_logging.h>
+
+/* Per spec, max 40 bytes per received message */
+#define SLIM_MSGQ_BUF_LEN 40
+
+#define MSM_TX_BUFS 32
+
+#define SLIM_USR_MC_GENERIC_ACK 0x25
+#define SLIM_USR_MC_MASTER_CAPABILITY 0x0
+#define SLIM_USR_MC_REPORT_SATELLITE 0x1
+#define SLIM_USR_MC_ADDR_QUERY 0xD
+#define SLIM_USR_MC_ADDR_REPLY 0xE
+#define SLIM_USR_MC_DEFINE_CHAN 0x20
+#define SLIM_USR_MC_DEF_ACT_CHAN 0x21
+#define SLIM_USR_MC_CHAN_CTRL 0x23
+#define SLIM_USR_MC_RECONFIG_NOW 0x24
+#define SLIM_USR_MC_REQ_BW 0x28
+#define SLIM_USR_MC_CONNECT_SRC 0x2C
+#define SLIM_USR_MC_CONNECT_SINK 0x2D
+#define SLIM_USR_MC_DISCONNECT_PORT 0x2E
+
+#define SLIM_USR_MC_REPEAT_CHANGE_VALUE 0x0
+#define MSM_SLIM_VE_MAX_MAP_ADDR 0xFFF
+#define SLIM_MAX_VE_SLC_BYTES 16
+
+#define MSM_SLIM_AUTOSUSPEND MSEC_PER_SEC
+
+#define SLIM_RX_MSGQ_TIMEOUT_VAL 0x10000
+/*
+ * Messages that can be received simultaneously:
+ * Client reads, LPASS master responses, announcement messages
+ * Receive up to 10 messages simultaneously.
+ */
+#define MSM_SLIM_DESC_NUM 32
+
+/* MSM Slimbus peripheral settings */
+#define MSM_SLIM_PERF_SUMM_THRESHOLD 0x8000
+#define MSM_SLIM_NPORTS 24
+#define MSM_SLIM_NCHANS 32
+
+#define QC_MFGID_LSB 0x2
+#define QC_MFGID_MSB 0x17
+#define QC_CHIPID_SL 0x10
+#define QC_DEVID_SAT1 0x3
+#define QC_DEVID_SAT2 0x4
+#define QC_DEVID_PGD 0x5
+
+#define SLIM_MSG_ASM_FIRST_WORD(l, mt, mc, dt, ad) \
+ ((l) | ((mt) << 5) | ((mc) << 8) | ((dt) << 15) | ((ad) << 16))
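+/*
+ * e.g. SLIM_MSG_ASM_FIRST_WORD(12, 0x6, 0x25, 0, 0xEF) packs length 12,
+ * message-type 0x6, message-code 0x25 and destination address 0xEF into
+ * the word 0xEF25CC.
+ */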
+
+#define INIT_MX_RETRIES 3
+#define DEF_RETRY_MS 10
+#define MSM_CONCUR_MSG 8
+#define SAT_CONCUR_MSG 8
+
+#define DEF_WATERMARK (8 << 1)
+#define DEF_ALIGN_LSB 0
+#define DEF_ALIGN_MSB (1 << 7)
+#define DEF_PACK (1 << 6)
+#define DEF_NO_PACK 0
+#define ENABLE_PORT 1
+
+#define DEF_BLKSZ 0
+#define DEF_TRANSZ 0
+
+#define SAT_MAGIC_LSB 0xD9
+#define SAT_MAGIC_MSB 0xC5
+#define SAT_MSG_VER 0x1
+#define SAT_MSG_PROT 0x1
+#define MSM_SAT_SUCCSS 0x20
+#define MSM_MAX_NSATS 2
+#define MSM_MAX_SATCH 32
+
+/* Slimbus QMI service */
+#define SLIMBUS_QMI_SVC_ID 0x0301
+#define SLIMBUS_QMI_SVC_V1 1
+#define SLIMBUS_QMI_INS_ID 0
+
+/* QMI response timeout passed to qmi_send_req_wait(), in msecs */
+#define SLIM_QMI_RESP_TOUT 1000
+
+#define PGD_THIS_EE(r, v) ((v) ? PGD_THIS_EE_V2(r) : PGD_THIS_EE_V1(r))
+#define PGD_PORT(r, p, v) ((v) ? PGD_PORT_V2(r, p) : PGD_PORT_V1(r, p))
+#define CFG_PORT(r, v) ((v) ? CFG_PORT_V2(r) : CFG_PORT_V1(r))
+
+#define PGD_THIS_EE_V2(r) (dev->base + (r ## _V2) + (dev->ee * 0x1000))
+#define PGD_PORT_V2(r, p) (dev->base + (r ## _V2) + ((p) * 0x1000))
+#define CFG_PORT_V2(r) ((r ## _V2))
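+/*
+ * e.g. with V2 hardware, PGD_PORT(PGD_PORT_CFGn, 3, dev->ver) resolves to
+ * dev->base + 0x14000 + (3 * 0x1000): V2 spaces port registers 0x1000
+ * apart, whereas V1 spaces them 32 bytes apart.
+ */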
+/* Component registers */
+enum comp_reg_v2 {
+ COMP_CFG_V2 = 4,
+ COMP_TRUST_CFG_V2 = 0x3000,
+};
+
+/* Manager PGD registers */
+enum pgd_reg_v2 {
+ PGD_CFG_V2 = 0x800,
+ PGD_STAT_V2 = 0x804,
+ PGD_INT_EN_V2 = 0x810,
+ PGD_INT_STAT_V2 = 0x814,
+ PGD_INT_CLR_V2 = 0x818,
+ PGD_OWN_EEn_V2 = 0x300C,
+ PGD_PORT_INT_EN_EEn_V2 = 0x5000,
+ PGD_PORT_INT_ST_EEn_V2 = 0x5004,
+ PGD_PORT_INT_CL_EEn_V2 = 0x5008,
+ PGD_PORT_CFGn_V2 = 0x14000,
+ PGD_PORT_STATn_V2 = 0x14004,
+ PGD_PORT_PARAMn_V2 = 0x14008,
+ PGD_PORT_BLKn_V2 = 0x1400C,
+ PGD_PORT_TRANn_V2 = 0x14010,
+ PGD_PORT_MCHANn_V2 = 0x14014,
+ PGD_PORT_PSHPLLn_V2 = 0x14018,
+ PGD_PORT_PC_CFGn_V2 = 0x8000,
+ PGD_PORT_PC_VALn_V2 = 0x8004,
+ PGD_PORT_PC_VFR_TSn_V2 = 0x8008,
+ PGD_PORT_PC_VFR_STn_V2 = 0x800C,
+ PGD_PORT_PC_VFR_CLn_V2 = 0x8010,
+ PGD_IE_STAT_V2 = 0x820,
+ PGD_VE_STAT_V2 = 0x830,
+};
+
+#define PGD_THIS_EE_V1(r) (dev->base + (r ## _V1) + (dev->ee * 16))
+#define PGD_PORT_V1(r, p) (dev->base + (r ## _V1) + ((p) * 32))
+#define CFG_PORT_V1(r) ((r ## _V1))
+/* Component registers */
+enum comp_reg_v1 {
+ COMP_CFG_V1 = 0,
+ COMP_TRUST_CFG_V1 = 0x14,
+};
+
+/* Manager PGD registers */
+enum pgd_reg_v1 {
+ PGD_CFG_V1 = 0x1000,
+ PGD_STAT_V1 = 0x1004,
+ PGD_INT_EN_V1 = 0x1010,
+ PGD_INT_STAT_V1 = 0x1014,
+ PGD_INT_CLR_V1 = 0x1018,
+ PGD_OWN_EEn_V1 = 0x1020,
+ PGD_PORT_INT_EN_EEn_V1 = 0x1030,
+ PGD_PORT_INT_ST_EEn_V1 = 0x1034,
+ PGD_PORT_INT_CL_EEn_V1 = 0x1038,
+ PGD_PORT_CFGn_V1 = 0x1080,
+ PGD_PORT_STATn_V1 = 0x1084,
+ PGD_PORT_PARAMn_V1 = 0x1088,
+ PGD_PORT_BLKn_V1 = 0x108C,
+ PGD_PORT_TRANn_V1 = 0x1090,
+ PGD_PORT_MCHANn_V1 = 0x1094,
+ PGD_PORT_PSHPLLn_V1 = 0x1098,
+ PGD_PORT_PC_CFGn_V1 = 0x1600,
+ PGD_PORT_PC_VALn_V1 = 0x1604,
+ PGD_PORT_PC_VFR_TSn_V1 = 0x1608,
+ PGD_PORT_PC_VFR_STn_V1 = 0x160C,
+ PGD_PORT_PC_VFR_CLn_V1 = 0x1610,
+ PGD_IE_STAT_V1 = 0x1700,
+ PGD_VE_STAT_V1 = 0x1710,
+};
+
+enum msm_slim_port_status {
+ MSM_PORT_OVERFLOW = 1 << 2,
+ MSM_PORT_UNDERFLOW = 1 << 3,
+ MSM_PORT_DISCONNECT = 1 << 19,
+};
+
+enum msm_ctrl_state {
+ MSM_CTRL_AWAKE,
+ MSM_CTRL_IDLE,
+ MSM_CTRL_ASLEEP,
+ MSM_CTRL_DOWN,
+};
+
+enum msm_slim_msgq {
+ MSM_MSGQ_DISABLED,
+ MSM_MSGQ_RESET,
+ MSM_MSGQ_ENABLED,
+ MSM_MSGQ_DOWN,
+};
+
+struct msm_slim_sps_bam {
+ unsigned long hdl;
+ void __iomem *base;
+ int irq;
+};
+
+/*
+ * struct msm_slim_pshpull_parm: Structure to store push-pull protocol parameters
+ * @num_samples: Number of samples in a period
+ * @rpt_period: Repeat period value
+ */
+struct msm_slim_pshpull_parm {
+ int num_samples;
+ int rpt_period;
+};
+
+struct msm_slim_endp {
+ struct sps_pipe *sps;
+ struct sps_connect config;
+ struct sps_register_event event;
+ struct sps_mem_buffer buf;
+ bool connected;
+ int port_b;
+ struct msm_slim_pshpull_parm psh_pull;
+};
+
+struct msm_slim_qmi {
+ struct qmi_handle *handle;
+ struct task_struct *task;
+ struct task_struct *slave_thread;
+ struct completion slave_notify;
+ struct kthread_work kwork;
+ struct kthread_worker kworker;
+ struct completion qmi_comp;
+ struct notifier_block nb;
+};
+
+enum msm_slim_dom {
+ MSM_SLIM_DOM_NONE,
+ MSM_SLIM_DOM_PD,
+ MSM_SLIM_DOM_SS,
+};
+
+struct msm_slim_ss {
+ struct notifier_block nb;
+ void *domr;
+ enum msm_ctrl_state state;
+ struct work_struct dom_up;
+ enum msm_slim_dom dom_t;
+};
+
+struct msm_slim_pdata {
+ u32 apps_pipes;
+ u32 eapc;
+};
+
+struct msm_slim_bulk_wr {
+ dma_addr_t wr_dma;
+ void *base;
+ int size;
+ int buf_sz;
+ int (*cb)(void *ctx, int err);
+ void *ctx;
+ bool in_progress;
+};
+
+struct msm_slim_ctrl {
+ struct slim_controller ctrl;
+ struct slim_framer framer;
+ struct device *dev;
+ void __iomem *base;
+ struct resource *slew_mem;
+ struct resource *bam_mem;
+ u32 curr_bw;
+ u8 msg_cnt;
+ u32 tx_buf[10];
+ u8 rx_msgs[MSM_CONCUR_MSG][SLIM_MSGQ_BUF_LEN];
+ int tx_tail;
+ int tx_head;
+ spinlock_t rx_lock;
+ int head;
+ int tail;
+ int irq;
+ int err;
+ int ee;
+ struct completion **wr_comp;
+ struct msm_slim_sat *satd[MSM_MAX_NSATS];
+ struct msm_slim_endp *pipes;
+ struct msm_slim_sps_bam bam;
+ struct msm_slim_endp tx_msgq;
+ struct msm_slim_endp rx_msgq;
+ struct completion rx_msgq_notify;
+ struct task_struct *rx_msgq_thread;
+ struct clk *rclk;
+ struct clk *hclk;
+ struct mutex tx_lock;
+ struct mutex ssr_lock;
+ spinlock_t tx_buf_lock;
+ u8 pgdla;
+ enum msm_slim_msgq use_rx_msgqs;
+ enum msm_slim_msgq use_tx_msgqs;
+ int port_nums;
+ struct completion reconf;
+ bool reconf_busy;
+ bool chan_active;
+ enum msm_ctrl_state state;
+ struct completion ctrl_up;
+ int nsats;
+ u32 ver;
+ struct msm_slim_qmi qmi;
+ struct msm_slim_pdata pdata;
+ struct msm_slim_ss ext_mdm;
+ struct msm_slim_ss dsp;
+ struct msm_slim_bulk_wr bulk;
+ int default_ipc_log_mask;
+ int ipc_log_mask;
+ bool sysfs_created;
+ void *ipc_slimbus_log;
+ void (*rx_slim)(struct msm_slim_ctrl *dev, u8 *buf);
+ u32 current_rx_buf[10];
+ int current_count;
+ atomic_t ssr_in_progress;
+};
+
+struct msm_sat_chan {
+ u8 chan;
+ u16 chanh;
+ int req_rem;
+ int req_def;
+ bool reconf;
+};
+
+struct msm_slim_sat {
+ struct slim_device satcl;
+ struct msm_slim_ctrl *dev;
+ struct workqueue_struct *wq;
+ struct work_struct wd;
+ u8 sat_msgs[SAT_CONCUR_MSG][40];
+ struct msm_sat_chan *satch;
+ u8 nsatch;
+ bool sent_capability;
+ bool pending_reconf;
+ bool pending_capability;
+ int shead;
+ int stail;
+ spinlock_t lock;
+};
+
+enum rsc_grp {
+ EE_MGR_RSC_GRP = 1 << 10,
+ EE_NGD_2 = 2 << 6,
+ EE_NGD_1 = 0,
+};
+
+/* IPC logging stuff */
+#define IPC_SLIMBUS_LOG_PAGES 5
+
+/* Log levels */
+enum {
+ FATAL_LEV = 0U,
+ ERR_LEV = 1U,
+ WARN_LEV = 2U,
+ INFO_LEV = 3U,
+ DBG_LEV = 4U,
+};
+
+/* Default IPC log level INFO */
+#define SLIM_DBG(dev, x...) do { \
+ pr_debug(x); \
+ if (dev->ipc_slimbus_log && dev->ipc_log_mask >= DBG_LEV) { \
+ ipc_log_string(dev->ipc_slimbus_log, x); \
+ } \
+} while (0)
+
+#define SLIM_INFO(dev, x...) do { \
+ pr_debug(x); \
+ if (dev->ipc_slimbus_log && dev->ipc_log_mask >= INFO_LEV) {\
+ ipc_log_string(dev->ipc_slimbus_log, x); \
+ } \
+} while (0)
+
+/* warnings and errors always show up on the console */
+#define SLIM_WARN(dev, x...) do { \
+ pr_warn(x); \
+ if (dev->ipc_slimbus_log && dev->ipc_log_mask >= WARN_LEV) \
+ ipc_log_string(dev->ipc_slimbus_log, x); \
+} while (0)
+
+/*
+ * An ERROR condition in the driver lowers ipc_log_mask to FATAL_LEV so
+ * that only fatal messages are logged to IPC after the first error;
+ * SLIM_RST_LOGLVL() restores the default mask. Further errors continue
+ * to log on the console.
+ */
+#define SLIM_ERR(dev, x...) do { \
+ pr_err(x); \
+ if (dev->ipc_slimbus_log && dev->ipc_log_mask >= ERR_LEV) { \
+ ipc_log_string(dev->ipc_slimbus_log, x); \
+ dev->default_ipc_log_mask = dev->ipc_log_mask; \
+ dev->ipc_log_mask = FATAL_LEV; \
+ } \
+} while (0)
+
+#define SLIM_RST_LOGLVL(dev) do { \
+ dev->ipc_log_mask = dev->default_ipc_log_mask; \
+} while (0)
+
+int msm_slim_rx_enqueue(struct msm_slim_ctrl *dev, u32 *buf, u8 len);
+int msm_slim_rx_dequeue(struct msm_slim_ctrl *dev, u8 *buf);
+int msm_slim_get_ctrl(struct msm_slim_ctrl *dev);
+void msm_slim_put_ctrl(struct msm_slim_ctrl *dev);
+irqreturn_t msm_slim_port_irq_handler(struct msm_slim_ctrl *dev, u32 pstat);
+int msm_slim_init_endpoint(struct msm_slim_ctrl *dev, struct msm_slim_endp *ep);
+void msm_slim_free_endpoint(struct msm_slim_endp *ep);
+void msm_hw_set_port(struct msm_slim_ctrl *dev, u8 pipenum, u8 portnum);
+int msm_alloc_port(struct slim_controller *ctrl, u8 pn);
+void msm_dealloc_port(struct slim_controller *ctrl, u8 pn);
+int msm_slim_connect_pipe_port(struct msm_slim_ctrl *dev, u8 pn);
+enum slim_port_err msm_slim_port_xfer_status(struct slim_controller *ctr,
+ u8 pn, phys_addr_t *done_buf, u32 *done_len);
+int msm_slim_port_xfer(struct slim_controller *ctrl, u8 pn, phys_addr_t iobuf,
+ u32 len, struct completion *comp);
+int msm_send_msg_buf(struct msm_slim_ctrl *dev, u32 *buf, u8 len, u32 tx_reg);
+u32 *msm_get_msg_buf(struct msm_slim_ctrl *dev, int len,
+ struct completion *comp);
+u32 *msm_slim_manage_tx_msgq(struct msm_slim_ctrl *dev, bool getbuf,
+ struct completion *comp, int err);
+int msm_slim_rx_msgq_get(struct msm_slim_ctrl *dev, u32 *data, int offset);
+int msm_slim_sps_init(struct msm_slim_ctrl *dev, struct resource *bam_mem,
+ u32 pipe_reg, bool remote);
+void msm_slim_sps_exit(struct msm_slim_ctrl *dev, bool dereg);
+
+int msm_slim_connect_endp(struct msm_slim_ctrl *dev,
+ struct msm_slim_endp *endpoint);
+void msm_slim_disconnect_endp(struct msm_slim_ctrl *dev,
+ struct msm_slim_endp *endpoint,
+ enum msm_slim_msgq *msgq_flag);
+void msm_slim_deinit_ep(struct msm_slim_ctrl *dev,
+ struct msm_slim_endp *endpoint,
+ enum msm_slim_msgq *msgq_flag);
+
+void msm_slim_qmi_exit(struct msm_slim_ctrl *dev);
+int msm_slim_qmi_init(struct msm_slim_ctrl *dev, bool apps_is_master);
+int msm_slim_qmi_power_request(struct msm_slim_ctrl *dev, bool active);
+int msm_slim_qmi_check_framer_request(struct msm_slim_ctrl *dev);
+#endif
diff --git a/drivers/slimbus/slimbus.c b/drivers/slimbus/slimbus.c
new file mode 100644
index 0000000..ecb1056
--- /dev/null
+++ b/drivers/slimbus/slimbus.c
@@ -0,0 +1,3439 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/completion.h>
+#include <linux/idr.h>
+#include <linux/pm_runtime.h>
+#include <linux/slimbus/slimbus.h>
+
+#define SLIM_PORT_HDL(la, f, p) ((la)<<24 | (f) << 16 | (p))
+
+#define SLIM_HDL_TO_LA(hdl) ((u32)((hdl) & 0xFF000000) >> 24)
+#define SLIM_HDL_TO_FLOW(hdl) (((u32)(hdl) & 0xFF0000) >> 16)
+#define SLIM_HDL_TO_PORT(hdl) ((u32)(hdl) & 0xFF)
+
+#define SLIM_HDL_TO_CHIDX(hdl) ((u16)(hdl) & 0xFF)
+#define SLIM_GRP_TO_NCHAN(hdl) ((u16)(hdl >> 8) & 0xFF)
+
+#define SLIM_SLAVE_PORT(p, la) (((la)<<16) | (p))
+#define SLIM_MGR_PORT(p) ((0xFF << 16) | (p))
+#define SLIM_LA_MANAGER 0xFF
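+/*
+ * e.g. SLIM_PORT_HDL(0xE1, 0, 2) packs logical address 0xE1, flow 0 and
+ * port 2 into 0xE1000002; SLIM_HDL_TO_LA() and SLIM_HDL_TO_PORT() recover
+ * 0xE1 and 2 from it.
+ */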
+
+#define SLIM_START_GRP (1 << 8)
+#define SLIM_END_GRP (1 << 9)
+
+#define SLIM_MAX_INTR_COEFF_3 (SLIM_SL_PER_SUPERFRAME/3)
+#define SLIM_MAX_INTR_COEFF_1 SLIM_SL_PER_SUPERFRAME
+
+static DEFINE_MUTEX(slim_lock);
+static DEFINE_IDR(ctrl_idr);
+static struct device_type slim_dev_type;
+static struct device_type slim_ctrl_type;
+
+#define DEFINE_SLIM_LDEST_TXN(name, mc, len, rl, rbuf, wbuf, la) \
+ struct slim_msg_txn name = { rl, 0, mc, SLIM_MSG_DEST_LOGICALADDR, 0,\
+ len, 0, la, false, rbuf, wbuf, NULL, }
+
+#define DEFINE_SLIM_BCAST_TXN(name, mc, len, rl, rbuf, wbuf, la) \
+ struct slim_msg_txn name = { rl, 0, mc, SLIM_MSG_DEST_BROADCAST, 0,\
+ len, 0, la, false, rbuf, wbuf, NULL, }
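+/*
+ * These helpers declare an on-stack slim_msg_txn pre-initialized for a
+ * logical-address-destined or broadcast message with the given message
+ * code, payload length, read/write length and buffers, and destination.
+ */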
+
+static const struct slim_device_id *slim_match(const struct slim_device_id *id,
+ const struct slim_device *slim_dev)
+{
+ while (id->name[0]) {
+ if (strcmp(slim_dev->name, id->name) == 0)
+ return id;
+ id++;
+ }
+ return NULL;
+}
+
+const struct slim_device_id *slim_get_device_id(const struct slim_device *sdev)
+{
+ const struct slim_driver *sdrv = to_slim_driver(sdev->dev.driver);
+
+ return slim_match(sdrv->id_table, sdev);
+}
+EXPORT_SYMBOL(slim_get_device_id);
+
+static int slim_device_match(struct device *dev, struct device_driver *driver)
+{
+ struct slim_device *slim_dev;
+ struct slim_driver *drv = to_slim_driver(driver);
+
+ if (dev->type == &slim_dev_type)
+ slim_dev = to_slim_device(dev);
+ else
+ return 0;
+ if (drv->id_table)
+ return slim_match(drv->id_table, slim_dev) != NULL;
+
+ if (driver->name)
+ return strcmp(slim_dev->name, driver->name) == 0;
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int slim_legacy_suspend(struct device *dev, pm_message_t mesg)
+{
+ struct slim_device *slim_dev = NULL;
+ struct slim_driver *driver;
+
+ if (dev->type == &slim_dev_type)
+ slim_dev = to_slim_device(dev);
+
+ if (!slim_dev || !dev->driver)
+ return 0;
+
+ driver = to_slim_driver(dev->driver);
+ if (!driver->suspend)
+ return 0;
+
+ return driver->suspend(slim_dev, mesg);
+}
+
+static int slim_legacy_resume(struct device *dev)
+{
+ struct slim_device *slim_dev = NULL;
+ struct slim_driver *driver;
+
+ if (dev->type == &slim_dev_type)
+ slim_dev = to_slim_device(dev);
+
+ if (!slim_dev || !dev->driver)
+ return 0;
+
+ driver = to_slim_driver(dev->driver);
+ if (!driver->resume)
+ return 0;
+
+ return driver->resume(slim_dev);
+}
+
+static int slim_pm_suspend(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ if (pm)
+ return pm_generic_suspend(dev);
+ else
+ return slim_legacy_suspend(dev, PMSG_SUSPEND);
+}
+
+static int slim_pm_resume(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ if (pm)
+ return pm_generic_resume(dev);
+ else
+ return slim_legacy_resume(dev);
+}
+
+#else
+#define slim_pm_suspend NULL
+#define slim_pm_resume NULL
+#endif
+
+static const struct dev_pm_ops slimbus_pm = {
+ .suspend = slim_pm_suspend,
+ .resume = slim_pm_resume,
+ SET_RUNTIME_PM_OPS(
+ pm_generic_suspend,
+ pm_generic_resume,
+ NULL
+ )
+};
+
+struct bus_type slimbus_type = {
+ .name = "slimbus",
+ .match = slim_device_match,
+ .pm = &slimbus_pm,
+};
+EXPORT_SYMBOL(slimbus_type);
+
+struct device slimbus_dev = {
+ .init_name = "slimbus",
+};
+
+static void __exit slimbus_exit(void)
+{
+ device_unregister(&slimbus_dev);
+ bus_unregister(&slimbus_type);
+}
+
+static int __init slimbus_init(void)
+{
+ int retval;
+
+ retval = bus_register(&slimbus_type);
+ if (!retval)
+ retval = device_register(&slimbus_dev);
+
+ if (retval)
+ bus_unregister(&slimbus_type);
+
+ return retval;
+}
+postcore_initcall(slimbus_init);
+module_exit(slimbus_exit);
+
+static int slim_drv_probe(struct device *dev)
+{
+ const struct slim_driver *sdrv = to_slim_driver(dev->driver);
+ struct slim_device *sbdev = to_slim_device(dev);
+ struct slim_controller *ctrl = sbdev->ctrl;
+
+ if (sdrv->probe) {
+ int ret;
+
+ ret = sdrv->probe(sbdev);
+ if (ret)
+ return ret;
+ if (sdrv->device_up)
+ queue_work(ctrl->wq, &sbdev->wd);
+ return 0;
+ }
+ return -ENODEV;
+}
+
+static int slim_drv_remove(struct device *dev)
+{
+ const struct slim_driver *sdrv = to_slim_driver(dev->driver);
+ struct slim_device *sbdev = to_slim_device(dev);
+
+ sbdev->notified = false;
+ if (sdrv->remove)
+ return sdrv->remove(to_slim_device(dev));
+ return -ENODEV;
+}
+
+static void slim_drv_shutdown(struct device *dev)
+{
+ const struct slim_driver *sdrv = to_slim_driver(dev->driver);
+
+ if (sdrv->shutdown)
+ sdrv->shutdown(to_slim_device(dev));
+}
+
+/*
+ * slim_driver_register: Client driver registration with slimbus
+ * @drv: Client driver to be associated with client-device.
+ * This API will register the client driver with the slimbus core.
+ * It is called from the driver's module-init function.
+ */
+int slim_driver_register(struct slim_driver *drv)
+{
+ drv->driver.bus = &slimbus_type;
+ if (drv->probe)
+ drv->driver.probe = slim_drv_probe;
+
+ if (drv->remove)
+ drv->driver.remove = slim_drv_remove;
+
+ if (drv->shutdown)
+ drv->driver.shutdown = slim_drv_shutdown;
+
+ return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL(slim_driver_register);
+
+/*
+ * slim_driver_unregister: Undo effects of slim_driver_register
+ * @drv: Client driver to be unregistered
+ */
+void slim_driver_unregister(struct slim_driver *drv)
+{
+ if (drv)
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL(slim_driver_unregister);
+
+#define slim_ctrl_attr_gr NULL
+
+static void slim_ctrl_release(struct device *dev)
+{
+ struct slim_controller *ctrl = to_slim_controller(dev);
+
+ complete(&ctrl->dev_released);
+}
+
+static struct device_type slim_ctrl_type = {
+ .groups = slim_ctrl_attr_gr,
+ .release = slim_ctrl_release,
+};
+
+static struct slim_controller *slim_ctrl_get(struct slim_controller *ctrl)
+{
+ if (!ctrl || !get_device(&ctrl->dev))
+ return NULL;
+
+ return ctrl;
+}
+
+static void slim_ctrl_put(struct slim_controller *ctrl)
+{
+ if (ctrl)
+ put_device(&ctrl->dev);
+}
+
+#define slim_device_attr_gr NULL
+#define slim_device_uevent NULL
+static void slim_dev_release(struct device *dev)
+{
+ struct slim_device *sbdev = to_slim_device(dev);
+
+ slim_ctrl_put(sbdev->ctrl);
+}
+
+static struct device_type slim_dev_type = {
+ .groups = slim_device_attr_gr,
+ .uevent = slim_device_uevent,
+ .release = slim_dev_release,
+};
+
+static void slim_report(struct work_struct *work)
+{
+ struct slim_driver *sbdrv;
+ struct slim_device *sbdev =
+ container_of(work, struct slim_device, wd);
+ if (!sbdev->dev.driver)
+ return;
+ /* check if device-up or down needs to be called */
+ if ((!sbdev->reported && !sbdev->notified) ||
+ (sbdev->reported && sbdev->notified))
+ return;
+
+ sbdrv = to_slim_driver(sbdev->dev.driver);
+ /*
+ * An address that is no longer valid means the device reported
+ * absent; a valid address means it reported present
+ */
+ if (sbdev->notified && !sbdev->reported) {
+ sbdev->notified = false;
+ if (sbdrv->device_down)
+ sbdrv->device_down(sbdev);
+ } else if (!sbdev->notified && sbdev->reported) {
+ sbdev->notified = true;
+ if (sbdrv->device_up)
+ sbdrv->device_up(sbdev);
+ }
+}
+
+/*
+ * slim_add_device: Add a new device without registering board info.
+ * @ctrl: Controller to which this device is to be added.
+ * Called when device doesn't have an explicit client-driver to be probed, or
+ * the client-driver is a module installed dynamically.
+ */
+int slim_add_device(struct slim_controller *ctrl, struct slim_device *sbdev)
+{
+ sbdev->dev.bus = &slimbus_type;
+ sbdev->dev.parent = ctrl->dev.parent;
+ sbdev->dev.type = &slim_dev_type;
+ sbdev->dev.driver = NULL;
+ sbdev->ctrl = ctrl;
+ slim_ctrl_get(ctrl);
+ dev_set_name(&sbdev->dev, "%s", sbdev->name);
+ mutex_init(&sbdev->sldev_reconf);
+ INIT_LIST_HEAD(&sbdev->mark_define);
+ INIT_LIST_HEAD(&sbdev->mark_suspend);
+ INIT_LIST_HEAD(&sbdev->mark_removal);
+ INIT_WORK(&sbdev->wd, slim_report);
+ mutex_lock(&ctrl->m_ctrl);
+ list_add_tail(&sbdev->dev_list, &ctrl->devs);
+ mutex_unlock(&ctrl->m_ctrl);
+ /* probe slave on this controller */
+ return device_register(&sbdev->dev);
+}
+EXPORT_SYMBOL(slim_add_device);
+
+struct sbi_boardinfo {
+ struct list_head list;
+ struct slim_boardinfo board_info;
+};
+
+static LIST_HEAD(board_list);
+static LIST_HEAD(slim_ctrl_list);
+static DEFINE_MUTEX(board_lock);
+
+/* If controller is not present, only add to boards list */
+static void slim_match_ctrl_to_boardinfo(struct slim_controller *ctrl,
+ struct slim_boardinfo *bi)
+{
+ int ret;
+
+ if (ctrl->nr != bi->bus_num)
+ return;
+
+ ret = slim_add_device(ctrl, bi->slim_slave);
+ if (ret != 0)
+ dev_err(ctrl->dev.parent, "can't create new device for %s\n",
+ bi->slim_slave->name);
+}
+
+/*
+ * slim_register_board_info: Board-initialization routine.
+ * @info: List of all devices on all controllers present on the board.
+ * @n: number of entries.
+ * This API enumerates the respective devices on their corresponding
+ * controllers. Called from the board-init function.
+ */
+int slim_register_board_info(struct slim_boardinfo const *info, unsigned int n)
+{
+ struct sbi_boardinfo *bi;
+ int i;
+
+ bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
+ if (!bi)
+ return -ENOMEM;
+
+ for (i = 0; i < n; i++, bi++, info++) {
+ struct slim_controller *ctrl;
+
+ memcpy(&bi->board_info, info, sizeof(*info));
+ mutex_lock(&board_lock);
+ list_add_tail(&bi->list, &board_list);
+ list_for_each_entry(ctrl, &slim_ctrl_list, list)
+ slim_match_ctrl_to_boardinfo(ctrl, &bi->board_info);
+ mutex_unlock(&board_lock);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(slim_register_board_info);
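+
+/*
+ * Illustrative sketch (not part of this snapshot): registering one slave on
+ * bus 0 from board-init code. The device name and elemental address are
+ * hypothetical.
+ *
+ *   static struct slim_device board_codec = {
+ *       .name = "my-codec",
+ *       .e_addr = {0x00, 0x01, 0x60, 0x00, 0x17, 0x02},
+ *   };
+ *
+ *   static struct slim_boardinfo board_slim_devs[] = {
+ *       { .bus_num = 0, .slim_slave = &board_codec, },
+ *   };
+ *
+ *   slim_register_board_info(board_slim_devs, ARRAY_SIZE(board_slim_devs));
+ */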
+
+/*
+ * slim_ctrl_add_boarddevs: Add devices registered by board-info
+ * @ctrl: Controller to which these devices are to be added to.
+ * This API is called by controller when it is up and running.
+ * If devices on a controller were registered before controller,
+ * this will make sure that they get probed when controller is up.
+ */
+void slim_ctrl_add_boarddevs(struct slim_controller *ctrl)
+{
+ struct sbi_boardinfo *bi;
+
+ mutex_lock(&board_lock);
+ list_add_tail(&ctrl->list, &slim_ctrl_list);
+ list_for_each_entry(bi, &board_list, list)
+ slim_match_ctrl_to_boardinfo(ctrl, &bi->board_info);
+ mutex_unlock(&board_lock);
+}
+EXPORT_SYMBOL(slim_ctrl_add_boarddevs);
+
+/*
+ * slim_busnum_to_ctrl: Map bus number to controller
+ * @bus_num: Bus number
+ * Returns the controller representing this bus number
+ */
+struct slim_controller *slim_busnum_to_ctrl(u32 bus_num)
+{
+ struct slim_controller *ctrl;
+
+ mutex_lock(&board_lock);
+ list_for_each_entry(ctrl, &slim_ctrl_list, list)
+ if (bus_num == ctrl->nr) {
+ mutex_unlock(&board_lock);
+ return ctrl;
+ }
+ mutex_unlock(&board_lock);
+ return NULL;
+}
+EXPORT_SYMBOL(slim_busnum_to_ctrl);
+
+static int slim_register_controller(struct slim_controller *ctrl)
+{
+ int ret = 0;
+
+ /* Can't register until after driver model init */
+ if (WARN_ON(!slimbus_type.p)) {
+ ret = -EPROBE_DEFER;
+ goto out_list;
+ }
+
+ dev_set_name(&ctrl->dev, "sb-%d", ctrl->nr);
+ ctrl->dev.bus = &slimbus_type;
+ ctrl->dev.type = &slim_ctrl_type;
+ ctrl->num_dev = 0;
+ if (!ctrl->min_cg)
+ ctrl->min_cg = SLIM_MIN_CLK_GEAR;
+ if (!ctrl->max_cg)
+ ctrl->max_cg = SLIM_MAX_CLK_GEAR;
+ spin_lock_init(&ctrl->txn_lock);
+ mutex_init(&ctrl->m_ctrl);
+ mutex_init(&ctrl->sched.m_reconf);
+ ret = device_register(&ctrl->dev);
+ if (ret)
+ goto out_list;
+
+ dev_dbg(&ctrl->dev, "Bus [%s] registered:dev:%p\n", ctrl->name,
+ &ctrl->dev);
+
+ if (ctrl->nports) {
+ ctrl->ports = kcalloc(ctrl->nports, sizeof(struct slim_port),
+ GFP_KERNEL);
+ if (!ctrl->ports) {
+ ret = -ENOMEM;
+ goto err_port_failed;
+ }
+ }
+ if (ctrl->nchans) {
+ ctrl->chans = kcalloc(ctrl->nchans, sizeof(struct slim_ich),
+ GFP_KERNEL);
+ if (!ctrl->chans) {
+ ret = -ENOMEM;
+ goto err_chan_failed;
+ }
+
+ ctrl->sched.chc1 = kcalloc(ctrl->nchans,
+ sizeof(struct slim_ich *), GFP_KERNEL);
+ if (!ctrl->sched.chc1) {
+ kfree(ctrl->chans);
+ ret = -ENOMEM;
+ goto err_chan_failed;
+ }
+ ctrl->sched.chc3 = kcalloc(ctrl->nchans,
+ sizeof(struct slim_ich *), GFP_KERNEL);
+ if (!ctrl->sched.chc3) {
+ kfree(ctrl->sched.chc1);
+ kfree(ctrl->chans);
+ ret = -ENOMEM;
+ goto err_chan_failed;
+ }
+ }
+#ifdef DEBUG
+ ctrl->sched.slots = kzalloc(SLIM_SL_PER_SUPERFRAME, GFP_KERNEL);
+#endif
+ init_completion(&ctrl->pause_comp);
+
+ INIT_LIST_HEAD(&ctrl->devs);
+ ctrl->wq = create_singlethread_workqueue(dev_name(&ctrl->dev));
+ if (!ctrl->wq) {
+ ret = -ENOMEM;
+ goto err_workq_failed;
+ }
+
+ return 0;
+
+err_workq_failed:
+ kfree(ctrl->sched.chc3);
+ kfree(ctrl->sched.chc1);
+ kfree(ctrl->chans);
+err_chan_failed:
+ kfree(ctrl->ports);
+err_port_failed:
+ device_unregister(&ctrl->dev);
+out_list:
+ mutex_lock(&slim_lock);
+ idr_remove(&ctrl_idr, ctrl->nr);
+ mutex_unlock(&slim_lock);
+ return ret;
+}
+
+/* slim_remove_device: Remove the effect of slim_add_device() */
+void slim_remove_device(struct slim_device *sbdev)
+{
+ struct slim_controller *ctrl = sbdev->ctrl;
+
+ mutex_lock(&ctrl->m_ctrl);
+ list_del_init(&sbdev->dev_list);
+ mutex_unlock(&ctrl->m_ctrl);
+ device_unregister(&sbdev->dev);
+}
+EXPORT_SYMBOL(slim_remove_device);
+
+static void slim_ctrl_remove_device(struct slim_controller *ctrl,
+ struct slim_boardinfo *bi)
+{
+ if (ctrl->nr == bi->bus_num)
+ slim_remove_device(bi->slim_slave);
+}
+
+/*
+ * slim_del_controller: Controller tear-down.
+ * A controller added with slim_add_numbered_controller() is torn down using
+ * this API.
+ */
+int slim_del_controller(struct slim_controller *ctrl)
+{
+ struct slim_controller *found;
+ struct sbi_boardinfo *bi;
+
+ /* First make sure that this bus was added */
+ mutex_lock(&slim_lock);
+ found = idr_find(&ctrl_idr, ctrl->nr);
+ mutex_unlock(&slim_lock);
+ if (found != ctrl)
+ return -EINVAL;
+
+ /* Remove all clients */
+ mutex_lock(&board_lock);
+ list_for_each_entry(bi, &board_list, list)
+ slim_ctrl_remove_device(ctrl, &bi->board_info);
+ mutex_unlock(&board_lock);
+
+ init_completion(&ctrl->dev_released);
+ device_unregister(&ctrl->dev);
+
+ wait_for_completion(&ctrl->dev_released);
+ list_del(&ctrl->list);
+ destroy_workqueue(ctrl->wq);
+ /* free bus id */
+ mutex_lock(&slim_lock);
+ idr_remove(&ctrl_idr, ctrl->nr);
+ mutex_unlock(&slim_lock);
+
+ kfree(ctrl->sched.chc1);
+ kfree(ctrl->sched.chc3);
+#ifdef DEBUG
+ kfree(ctrl->sched.slots);
+#endif
+ kfree(ctrl->chans);
+ kfree(ctrl->ports);
+
+ return 0;
+}
+EXPORT_SYMBOL(slim_del_controller);
+
+/*
+ * slim_add_numbered_controller: Controller bring-up.
+ * @ctrl: Controller to be registered.
+ * A controller is registered with the framework using this API. ctrl->nr is
+ * the desired number with which the slimbus framework registers the
+ * controller. The function returns an error if that number is already in use.
+ */
+int slim_add_numbered_controller(struct slim_controller *ctrl)
+{
+ int id;
+
+ mutex_lock(&slim_lock);
+ id = idr_alloc(&ctrl_idr, ctrl, ctrl->nr, ctrl->nr + 1, GFP_KERNEL);
+ mutex_unlock(&slim_lock);
+
+ if (id < 0)
+ return id;
+
+ ctrl->nr = id;
+ return slim_register_controller(ctrl);
+}
+EXPORT_SYMBOL(slim_add_numbered_controller);
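+
+/*
+ * Illustrative sketch (not part of this snapshot): a controller driver's
+ * probe bringing a bus up with a fixed bus number. The field values and the
+ * my_set_laddr/my_xfer_msg callbacks are hypothetical.
+ *
+ *   ctrl->nr = 0;
+ *   ctrl->nchans = 32;
+ *   ctrl->nports = 8;
+ *   ctrl->set_laddr = my_set_laddr;
+ *   ctrl->xfer_msg = my_xfer_msg;
+ *   ret = slim_add_numbered_controller(ctrl);
+ *   if (ret)
+ *       return ret;
+ *   slim_ctrl_add_boarddevs(ctrl);
+ */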
+
+/*
+ * slim_report_absent: Controller calls this function when a device reports
+ * absent, or when the device cannot be communicated with.
+ * @sbdev: Device that cannot be reached, or that sent a report-absent message
+ */
+void slim_report_absent(struct slim_device *sbdev)
+{
+ struct slim_controller *ctrl;
+ int i;
+
+ if (!sbdev)
+ return;
+ ctrl = sbdev->ctrl;
+ if (!ctrl)
+ return;
+ /* invalidate logical addresses */
+ mutex_lock(&ctrl->m_ctrl);
+ for (i = 0; i < ctrl->num_dev; i++) {
+ if (sbdev->laddr == ctrl->addrt[i].laddr)
+ ctrl->addrt[i].valid = false;
+ }
+ mutex_unlock(&ctrl->m_ctrl);
+ sbdev->reported = false;
+ queue_work(ctrl->wq, &sbdev->wd);
+}
+EXPORT_SYMBOL(slim_report_absent);
+
+static int slim_remove_ch(struct slim_controller *ctrl, struct slim_ich *slc);
+/*
+ * slim_framer_booted: This function is called by the controller after the
+ * active framer has booted (using the Bus Reset sequence, or after it has
+ * shut down and come back up). Components and devices on the bus may be in
+ * an undefined state, and this function triggers their drivers to bring
+ * them back to the Reset state so that they can acquire sync, report
+ * present and be operational again.
+ */
+void slim_framer_booted(struct slim_controller *ctrl)
+{
+ struct slim_device *sbdev;
+ struct list_head *pos, *next;
+ int i;
+
+ if (!ctrl)
+ return;
+
+ /* Since framer has rebooted, reset all data channels */
+ mutex_lock(&ctrl->sched.m_reconf);
+ for (i = 0; i < ctrl->nchans; i++) {
+ struct slim_ich *slc = &ctrl->chans[i];
+
+ if (slc->state > SLIM_CH_DEFINED)
+ slim_remove_ch(ctrl, slc);
+ }
+ mutex_unlock(&ctrl->sched.m_reconf);
+ mutex_lock(&ctrl->m_ctrl);
+ list_for_each_safe(pos, next, &ctrl->devs) {
+ struct slim_driver *sbdrv;
+
+ sbdev = list_entry(pos, struct slim_device, dev_list);
+ mutex_unlock(&ctrl->m_ctrl);
+ if (sbdev && sbdev->dev.driver) {
+ sbdrv = to_slim_driver(sbdev->dev.driver);
+ if (sbdrv->reset_device)
+ sbdrv->reset_device(sbdev);
+ }
+ mutex_lock(&ctrl->m_ctrl);
+ }
+ mutex_unlock(&ctrl->m_ctrl);
+}
+EXPORT_SYMBOL(slim_framer_booted);
+
+/*
+ * slim_msg_response: Deliver a message response received from a device to
+ * the framework.
+ * @ctrl: Controller handle
+ * @reply: Reply received from the device
+ * @tid: Transaction ID with which the framework can associate the reply
+ * @len: Length of the reply
+ * Called by the controller to inform the framework about the response
+ * received. This keeps the API asynchronous; the controller driver doesn't
+ * need to manage a TID-to-buffer table in addition to the one already
+ * maintained by the framework.
+ */
+void slim_msg_response(struct slim_controller *ctrl, u8 *reply, u8 tid, u8 len)
+{
+ int i;
+ unsigned long flags;
+ bool async;
+ struct slim_msg_txn *txn;
+
+ spin_lock_irqsave(&ctrl->txn_lock, flags);
+ txn = ctrl->txnt[tid];
+ if (txn == NULL || txn->rbuf == NULL) {
+ spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+ if (txn == NULL)
+ dev_err(&ctrl->dev, "Got response to invalid TID:%d, len:%d",
+ tid, len);
+ else
+ dev_err(&ctrl->dev, "Invalid client buffer passed\n");
+ return;
+ }
+ async = txn->async;
+ for (i = 0; i < len; i++)
+ txn->rbuf[i] = reply[i];
+ if (txn->comp)
+ complete(txn->comp);
+ ctrl->txnt[tid] = NULL;
+ spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+ if (async)
+ kfree(txn);
+}
+EXPORT_SYMBOL(slim_msg_response);
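+
+/*
+ * Illustrative sketch (not part of this snapshot): a controller's RX path
+ * handing a reply to the framework. The buffer layout (TID in byte 3,
+ * payload following it) is hypothetical and depends on the controller's
+ * message format.
+ *
+ *   u8 *rx = my_rx_buf;
+ *   u8 tid = rx[3];
+ *
+ *   slim_msg_response(ctrl, &rx[4], tid, reply_len);
+ */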
+
+/*
+ * Process a transaction: for transactions expecting a reply, allocate a
+ * free TID and record the transaction, then hand the message to the
+ * controller for transfer.
+ */
+static int slim_processtxn(struct slim_controller *ctrl,
+ struct slim_msg_txn *txn, bool need_tid)
+{
+ u8 i = 0;
+ int ret = 0;
+ unsigned long flags;
+
+ if (need_tid) {
+ spin_lock_irqsave(&ctrl->txn_lock, flags);
+ for (i = 0; i < ctrl->last_tid; i++) {
+ if (ctrl->txnt[i] == NULL)
+ break;
+ }
+ if (i >= ctrl->last_tid) {
+ if (ctrl->last_tid == 255) {
+ spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+ return -ENOMEM;
+ }
+ ctrl->last_tid++;
+ }
+ ctrl->txnt[i] = txn;
+ txn->tid = i;
+ spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+ }
+
+ ret = ctrl->xfer_msg(ctrl, txn);
+ return ret;
+}
+
+static int ctrl_getlogical_addr(struct slim_controller *ctrl, const u8 *eaddr,
+ u8 e_len, u8 *entry)
+{
+ u8 i;
+
+ for (i = 0; i < ctrl->num_dev; i++) {
+ if (ctrl->addrt[i].valid &&
+ memcmp(ctrl->addrt[i].eaddr, eaddr, e_len) == 0) {
+ *entry = i;
+ return 0;
+ }
+ }
+ return -ENXIO;
+}
+
+/*
+ * slim_assign_laddr: Assign logical address to a device enumerated.
+ * @ctrl: Controller with which device is enumerated.
+ * @e_addr: 6-byte elemental address of the device.
+ * @e_len: buffer length for e_addr
+ * @laddr: Return logical address (if valid flag is false)
+ * @valid: true if laddr holds a valid address that controller wants to
+ * set for this enumeration address. Otherwise framework sets index into
+ * address table as logical address.
+ * Called by controller in response to REPORT_PRESENT. Framework will assign
+ * a logical address to this enumeration address.
+ * Function returns -EXFULL to indicate that all logical addresses are already
+ * taken.
+ */
+int slim_assign_laddr(struct slim_controller *ctrl, const u8 *e_addr,
+ u8 e_len, u8 *laddr, bool valid)
+{
+ int ret;
+ u8 i = 0;
+ bool exists = false;
+ struct slim_device *sbdev;
+ struct list_head *pos, *next;
+ void *new_addrt = NULL;
+
+ mutex_lock(&ctrl->m_ctrl);
+ /* already assigned */
+ if (ctrl_getlogical_addr(ctrl, e_addr, e_len, &i) == 0) {
+ *laddr = ctrl->addrt[i].laddr;
+ exists = true;
+ } else {
+ if (ctrl->num_dev >= 254) {
+ ret = -EXFULL;
+ goto ret_assigned_laddr;
+ }
+ for (i = 0; i < ctrl->num_dev; i++) {
+ if (ctrl->addrt[i].valid == false)
+ break;
+ }
+ if (i == ctrl->num_dev) {
+ new_addrt = krealloc(ctrl->addrt,
+ (ctrl->num_dev + 1) *
+ sizeof(struct slim_addrt),
+ GFP_KERNEL);
+ if (!new_addrt) {
+ ret = -ENOMEM;
+ goto ret_assigned_laddr;
+ }
+ ctrl->addrt = new_addrt;
+ ctrl->num_dev++;
+ }
+ memcpy(ctrl->addrt[i].eaddr, e_addr, e_len);
+ ctrl->addrt[i].valid = true;
+ /* Preferred address is index into table */
+ if (!valid)
+ *laddr = i;
+ }
+
+ ret = ctrl->set_laddr(ctrl, (const u8 *)&ctrl->addrt[i].eaddr, 6,
+ *laddr);
+ if (ret) {
+ ctrl->addrt[i].valid = false;
+ goto ret_assigned_laddr;
+ }
+ ctrl->addrt[i].laddr = *laddr;
+
+ dev_dbg(&ctrl->dev, "setting slimbus l-addr:%x\n", *laddr);
+ret_assigned_laddr:
+ mutex_unlock(&ctrl->m_ctrl);
+ if (exists || ret)
+ return ret;
+
+ pr_info("slimbus:%d laddr:0x%x, EAPC:0x%x:0x%x", ctrl->nr, *laddr,
+ e_addr[1], e_addr[2]);
+ mutex_lock(&ctrl->m_ctrl);
+ list_for_each_safe(pos, next, &ctrl->devs) {
+ sbdev = list_entry(pos, struct slim_device, dev_list);
+ if (memcmp(sbdev->e_addr, e_addr, 6) == 0) {
+ struct slim_driver *sbdrv;
+
+ sbdev->laddr = *laddr;
+ sbdev->reported = true;
+ if (sbdev->dev.driver) {
+ sbdrv = to_slim_driver(sbdev->dev.driver);
+ if (sbdrv->device_up)
+ queue_work(ctrl->wq, &sbdev->wd);
+ }
+ break;
+ }
+ }
+ mutex_unlock(&ctrl->m_ctrl);
+ return 0;
+}
+EXPORT_SYMBOL(slim_assign_laddr);
+
+/*
+ * slim_get_logical_addr: Return the logical address of a slimbus device.
+ * @sb: client handle requesting the address.
+ * @e_addr: Elemental address of the device.
+ * @e_len: Length of e_addr
+ * @laddr: output buffer to store the address
+ * context: can sleep
+ * -EINVAL is returned in case of invalid parameters, and -ENXIO is returned if
+ * the device with this elemental address is not found.
+ */
+int slim_get_logical_addr(struct slim_device *sb, const u8 *e_addr,
+ u8 e_len, u8 *laddr)
+{
+ int ret = 0;
+ u8 entry;
+ struct slim_controller *ctrl = sb->ctrl;
+
+ if (!ctrl || !laddr || !e_addr || e_len != 6)
+ return -EINVAL;
+ mutex_lock(&ctrl->m_ctrl);
+ ret = ctrl_getlogical_addr(ctrl, e_addr, e_len, &entry);
+ if (!ret)
+ *laddr = ctrl->addrt[entry].laddr;
+ mutex_unlock(&ctrl->m_ctrl);
+ if (ret == -ENXIO && ctrl->get_laddr) {
+ ret = ctrl->get_laddr(ctrl, e_addr, e_len, laddr);
+ if (!ret)
+ ret = slim_assign_laddr(ctrl, e_addr, e_len, laddr,
+ true);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(slim_get_logical_addr);
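+
+/*
+ * Illustrative sketch (not part of this snapshot): a client looking up its
+ * logical address after enumeration. The elemental address is hypothetical.
+ *
+ *   const u8 e_addr[6] = {0x00, 0x01, 0x60, 0x00, 0x17, 0x02};
+ *   u8 laddr;
+ *   int ret;
+ *
+ *   ret = slim_get_logical_addr(sb, e_addr, 6, &laddr);
+ *   if (!ret)
+ *       dev_dbg(&sb->dev, "laddr:0x%x\n", laddr);
+ */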
+
+static int slim_ele_access_sanity(struct slim_ele_access *msg, int oper,
+ u8 *rbuf, const u8 *wbuf, u8 len)
+{
+ if (!msg || msg->num_bytes > 16 || msg->start_offset + len > 0xC00)
+ return -EINVAL;
+ switch (oper) {
+ case SLIM_MSG_MC_REQUEST_VALUE:
+ case SLIM_MSG_MC_REQUEST_INFORMATION:
+ if (rbuf == NULL)
+ return -EINVAL;
+ return 0;
+ case SLIM_MSG_MC_CHANGE_VALUE:
+ case SLIM_MSG_MC_CLEAR_INFORMATION:
+ if (wbuf == NULL)
+ return -EINVAL;
+ return 0;
+ case SLIM_MSG_MC_REQUEST_CHANGE_VALUE:
+ case SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION:
+ if (rbuf == NULL || wbuf == NULL)
+ return -EINVAL;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+/* Map a 3-bit slice code (from the EC field) back to the slice size */
+static u16 slim_slicecodefromsize(u32 req)
+{
+ u8 codetosize[8] = {1, 2, 3, 4, 6, 8, 12, 16};
+
+ if (req >= 8)
+ return 0;
+ else
+ return codetosize[req];
+}
+
+/* Map a transfer size in bytes to the slice code used in the EC field */
+static u16 slim_slicesize(u32 code)
+{
+ u8 sizetocode[16] = {0, 1, 2, 3, 3, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7};
+
+ if (code == 0)
+ code = 1;
+ if (code > 16)
+ code = 16;
+ return sizetocode[code - 1];
+}
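+
+/*
+ * Worked example for the two helpers above: an 8-byte element access maps
+ * to slice code 5 (sizetocode[8 - 1]), and slim_slicecodefromsize(5)
+ * recovers the 8-byte slice length.
+ */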
+
+
+/* Message APIs: Unicast message APIs used by slimbus slave drivers */
+
+/*
+ * Message API access routines.
+ * @sb: client handle requesting elemental message reads, writes.
+ * @msg: Input structure for start-offset, number of bytes to read.
+ * @rbuf: data buffer to be filled with values read.
+ * @len: data buffer size
+ * @wbuf: data buffer containing value/information to be written
+ * context: can sleep
+ * Returns:
+ * -EINVAL: Invalid parameters
+ * -ETIMEDOUT: If the controller could not complete the request. This may
+ * happen if the bus lines are not clocked, the controller is not powered
+ * up, or the slave with the given address is not enumerated/responding.
+ */
+int slim_request_val_element(struct slim_device *sb,
+ struct slim_ele_access *msg, u8 *buf, u8 len)
+{
+ struct slim_controller *ctrl = sb->ctrl;
+
+ if (!ctrl)
+ return -EINVAL;
+ return slim_xfer_msg(ctrl, sb, msg, SLIM_MSG_MC_REQUEST_VALUE, buf,
+ NULL, len);
+}
+EXPORT_SYMBOL(slim_request_val_element);
+
+int slim_request_inf_element(struct slim_device *sb,
+ struct slim_ele_access *msg, u8 *buf, u8 len)
+{
+ struct slim_controller *ctrl = sb->ctrl;
+
+ if (!ctrl)
+ return -EINVAL;
+ return slim_xfer_msg(ctrl, sb, msg, SLIM_MSG_MC_REQUEST_INFORMATION,
+ buf, NULL, len);
+}
+EXPORT_SYMBOL(slim_request_inf_element);
+
+int slim_change_val_element(struct slim_device *sb, struct slim_ele_access *msg,
+ const u8 *buf, u8 len)
+{
+ struct slim_controller *ctrl = sb->ctrl;
+
+ if (!ctrl)
+ return -EINVAL;
+ return slim_xfer_msg(ctrl, sb, msg, SLIM_MSG_MC_CHANGE_VALUE, NULL, buf,
+ len);
+}
+EXPORT_SYMBOL(slim_change_val_element);
+
+int slim_clear_inf_element(struct slim_device *sb, struct slim_ele_access *msg,
+ u8 *buf, u8 len)
+{
+ struct slim_controller *ctrl = sb->ctrl;
+
+ if (!ctrl)
+ return -EINVAL;
+ return slim_xfer_msg(ctrl, sb, msg, SLIM_MSG_MC_CLEAR_INFORMATION, NULL,
+ buf, len);
+}
+EXPORT_SYMBOL(slim_clear_inf_element);
+
+int slim_request_change_val_element(struct slim_device *sb,
+ struct slim_ele_access *msg, u8 *rbuf,
+ const u8 *wbuf, u8 len)
+{
+ struct slim_controller *ctrl = sb->ctrl;
+
+ if (!ctrl)
+ return -EINVAL;
+ return slim_xfer_msg(ctrl, sb, msg, SLIM_MSG_MC_REQUEST_CHANGE_VALUE,
+ rbuf, wbuf, len);
+}
+EXPORT_SYMBOL(slim_request_change_val_element);
+
+int slim_request_clear_inf_element(struct slim_device *sb,
+ struct slim_ele_access *msg, u8 *rbuf,
+ const u8 *wbuf, u8 len)
+{
+ struct slim_controller *ctrl = sb->ctrl;
+
+ if (!ctrl)
+ return -EINVAL;
+ return slim_xfer_msg(ctrl, sb, msg,
+ SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION,
+ rbuf, wbuf, len);
+}
+EXPORT_SYMBOL(slim_request_clear_inf_element);
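+
+/*
+ * Illustrative sketch (not part of this snapshot): a synchronous 1-byte
+ * value-element read-modify-write using the wrappers above, assuming a
+ * probed client handle sb. The element offset 0x400 is hypothetical.
+ *
+ *   struct slim_ele_access msg = {
+ *       .start_offset = 0x400,
+ *       .num_bytes = 1,
+ *       .comp = NULL,
+ *   };
+ *   u8 val;
+ *   int ret;
+ *
+ *   ret = slim_request_val_element(sb, &msg, &val, 1);
+ *   if (!ret) {
+ *       val |= 0x1;
+ *       ret = slim_change_val_element(sb, &msg, &val, 1);
+ *   }
+ */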
+
+/*
+ * Broadcast message API:
+ * call this API directly with sbdev = NULL.
+ * For broadcast reads, make sure that buffers are big enough to incorporate
+ * replies from all logical addresses.
+ * Not all controllers support broadcast
+ */
+int slim_xfer_msg(struct slim_controller *ctrl, struct slim_device *sbdev,
+ struct slim_ele_access *msg, u16 mc, u8 *rbuf,
+ const u8 *wbuf, u8 len)
+{
+ DECLARE_COMPLETION_ONSTACK(complete);
+ DEFINE_SLIM_LDEST_TXN(txn_stack, mc, len, 6, rbuf, wbuf, sbdev->laddr);
+ struct slim_msg_txn *txn;
+ int ret;
+ u16 sl, cur;
+
+ if (msg->comp && rbuf) {
+ txn = kmalloc(sizeof(struct slim_msg_txn),
+ GFP_KERNEL);
+ if (IS_ERR_OR_NULL(txn))
+ return PTR_ERR(txn);
+ *txn = txn_stack;
+ txn->async = true;
+ txn->comp = msg->comp;
+ } else {
+ txn = &txn_stack;
+ if (rbuf)
+ txn->comp = &complete;
+ }
+
+ ret = slim_ele_access_sanity(msg, mc, rbuf, wbuf, len);
+ if (ret)
+ goto xfer_err;
+
+ sl = slim_slicesize(len);
+ dev_dbg(&ctrl->dev, "SB xfer msg:os:%x, len:%d, MC:%x, sl:%x\n",
+ msg->start_offset, len, mc, sl);
+
+ cur = slim_slicecodefromsize(sl);
+ txn->ec = ((sl | (1 << 3)) | ((msg->start_offset & 0xFFF) << 4));
+
+ if (wbuf)
+ txn->rl += len;
+ if (rbuf) {
+ unsigned long flags;
+
+ txn->rl++;
+ ret = slim_processtxn(ctrl, txn, true);
+
+ /* sync read */
+ if (!ret && !msg->comp) {
+ ret = wait_for_completion_timeout(&complete, HZ);
+ if (!ret) {
+ dev_err(&ctrl->dev, "slimbus Read timed out");
+ spin_lock_irqsave(&ctrl->txn_lock, flags);
+ /* Invalidate the transaction */
+ ctrl->txnt[txn->tid] = NULL;
+ spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+ ret = -ETIMEDOUT;
+ } else
+ ret = 0;
+ } else if (ret < 0 && !msg->comp) {
+ dev_err(&ctrl->dev, "slimbus Read error");
+ spin_lock_irqsave(&ctrl->txn_lock, flags);
+ /* Invalidate the transaction */
+ ctrl->txnt[txn->tid] = NULL;
+ spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+ }
+
+ } else
+ ret = slim_processtxn(ctrl, txn, false);
+xfer_err:
+ return ret;
+}
+EXPORT_SYMBOL(slim_xfer_msg);
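+
+/*
+ * Illustrative sketch (not part of this snapshot): an asynchronous read via
+ * slim_xfer_msg(). With msg->comp set, the call returns once the transaction
+ * is queued, and the completion is signalled from slim_msg_response() when
+ * the reply arrives. The offset is hypothetical.
+ *
+ *   DECLARE_COMPLETION_ONSTACK(done);
+ *   struct slim_ele_access msg = {
+ *       .start_offset = 0x400,
+ *       .num_bytes = 2,
+ *       .comp = &done,
+ *   };
+ *   u8 rbuf[2];
+ *   int ret;
+ *
+ *   ret = slim_xfer_msg(sb->ctrl, sb, &msg, SLIM_MSG_MC_REQUEST_VALUE,
+ *                       rbuf, NULL, 2);
+ *   if (!ret)
+ *       wait_for_completion(&done);
+ */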
+
+/*
+ * User message:
+ * slim_user_msg: Send a user message that is interpreted by the destination
+ * device
+ * @sb: Client handle sending the message
+ * @la: Destination device for this user message
+ * @mt: Message Type (Source-referred, or Destination-referred)
+ * @mc: Message Code
+ * @msg: Message structure (start offset, number of bytes) to be sent
+ * @buf: data buffer to be sent
+ * @len: data buffer size in bytes
+ */
+int slim_user_msg(struct slim_device *sb, u8 la, u8 mt, u8 mc,
+ struct slim_ele_access *msg, u8 *buf, u8 len)
+{
+ if (!sb || !sb->ctrl || !msg || mt == SLIM_MSG_MT_CORE)
+ return -EINVAL;
+ if (!sb->ctrl->xfer_user_msg)
+ return -EPROTONOSUPPORT;
+ return sb->ctrl->xfer_user_msg(sb->ctrl, la, mt, mc, msg, buf, len);
+}
+EXPORT_SYMBOL(slim_user_msg);
+
+/*
+ * Queue a batch of message writes:
+ * slim_bulk_msg_write: Write messages in bulk (e.g. downloading FW)
+ * @sb: Client handle sending these messages
+ * @mt: Message Type
+ * @mc: Message Code
+ * @msgs: List of messages to be written in bulk
+ * @n: Number of messages in the list
+ * @comp_cb: Callback if the client needs this to be non-blocking
+ * @ctx: Context for this callback
+ * If supported by the controller, this message list is sent to the HW in
+ * bulk. If the client specifies this to be non-blocking, the callback will
+ * be called from atomic context.
+ */
+int slim_bulk_msg_write(struct slim_device *sb, u8 mt, u8 mc,
+ struct slim_val_inf msgs[], int n,
+ int (*comp_cb)(void *ctx, int err), void *ctx)
+{
+ int i, ret = 0;
+
+ if (!sb || !sb->ctrl || !msgs)
+ return -EINVAL;
+ if (!sb->ctrl->xfer_bulk_wr) {
+ pr_warn("controller does not support bulk WR, serializing");
+ for (i = 0; i < n; i++) {
+ struct slim_ele_access ele;
+
+ ele.comp = NULL;
+ ele.start_offset = msgs[i].start_offset;
+ ele.num_bytes = msgs[i].num_bytes;
+ ret = slim_xfer_msg(sb->ctrl, sb, &ele, mc,
+ msgs[i].rbuf, msgs[i].wbuf,
+ ele.num_bytes);
+ if (ret)
+ return ret;
+ }
+ return ret;
+ }
+ return sb->ctrl->xfer_bulk_wr(sb->ctrl, sb->laddr, mt, mc, msgs, n,
+ comp_cb, ctx);
+}
+EXPORT_SYMBOL(slim_bulk_msg_write);
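+
+/*
+ * Illustrative sketch (not part of this snapshot): queueing two value writes
+ * in one bulk call; a NULL callback is assumed to make the call blocking.
+ * Offsets and payloads are hypothetical.
+ *
+ *   u8 d0[4] = {0x01, 0x02, 0x03, 0x04};
+ *   u8 d1[4] = {0x05, 0x06, 0x07, 0x08};
+ *   struct slim_val_inf msgs[] = {
+ *       { .start_offset = 0x400, .num_bytes = 4, .wbuf = d0, },
+ *       { .start_offset = 0x404, .num_bytes = 4, .wbuf = d1, },
+ *   };
+ *
+ *   ret = slim_bulk_msg_write(sb, SLIM_MSG_MT_CORE,
+ *                             SLIM_MSG_MC_CHANGE_VALUE, msgs,
+ *                             ARRAY_SIZE(msgs), NULL, NULL);
+ */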
+
+/*
+ * slim_alloc_mgrports: Allocate port on manager side.
+ * @sb: device/client handle.
+ * @req: Port request type.
+ * @nports: Number of ports requested
+ * @rh: output buffer to store the port handles
+ * @hsz: size of buffer storing handles
+ * context: can sleep
+ * This port will typically be used by SW, e.g. a client driver wants to
+ * receive some data from audio codec HW using a data channel.
+ * A port allocated using this API will be used to receive the data.
+ * If half-duplex ports are requested, two adjacent ports are allocated for
+ * 1 half-duplex port. So the handle-buffer size should be twice the number
+ * of half-duplex ports to be allocated.
+ * -EDQUOT is returned if all ports are in use.
+ */
+int slim_alloc_mgrports(struct slim_device *sb, enum slim_port_req req,
+ int nports, u32 *rh, int hsz)
+{
+ int i, j;
+ int ret = -EINVAL;
+ int nphysp = nports;
+ struct slim_controller *ctrl = sb->ctrl;
+
+ if (!rh || !ctrl)
+ return -EINVAL;
+ if (req == SLIM_REQ_HALF_DUP)
+ nphysp *= 2;
+ if (hsz/sizeof(u32) < nphysp)
+ return -EINVAL;
+ mutex_lock(&ctrl->m_ctrl);
+
+ for (i = 0; i < ctrl->nports; i++) {
+ bool multiok = true;
+
+ if (ctrl->ports[i].state != SLIM_P_FREE)
+ continue;
+ /* Start half duplex channel at even port */
+ if (req == SLIM_REQ_HALF_DUP && (i % 2))
+ continue;
+ /* Allocate ports contiguously for multi-ch */
+ if (ctrl->nports < (i + nphysp)) {
+ i = ctrl->nports;
+ break;
+ }
+ if (req == SLIM_REQ_MULTI_CH) {
+ multiok = true;
+ for (j = i; j < i + nphysp; j++) {
+ if (ctrl->ports[j].state != SLIM_P_FREE) {
+ multiok = false;
+ break;
+ }
+ }
+ if (!multiok)
+ continue;
+ }
+ break;
+ }
+ if (i >= ctrl->nports) {
+ ret = -EDQUOT;
+ goto alloc_err;
+ }
+ ret = 0;
+ for (j = i; j < i + nphysp; j++) {
+ ctrl->ports[j].state = SLIM_P_UNCFG;
+ ctrl->ports[j].req = req;
+ if (req == SLIM_REQ_HALF_DUP && (j % 2))
+ ctrl->ports[j].flow = SLIM_SINK;
+ else
+ ctrl->ports[j].flow = SLIM_SRC;
+ if (ctrl->alloc_port)
+ ret = ctrl->alloc_port(ctrl, j);
+ if (ret) {
+ for (; j >= i; j--)
+ ctrl->ports[j].state = SLIM_P_FREE;
+ goto alloc_err;
+ }
+ *rh++ = SLIM_PORT_HDL(SLIM_LA_MANAGER, 0, j);
+ }
+alloc_err:
+ mutex_unlock(&ctrl->m_ctrl);
+ return ret;
+}
+EXPORT_SYMBOL(slim_alloc_mgrports);
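+
+/*
+ * Illustrative sketch (not part of this snapshot): allocating one manager
+ * port for sinking a data channel, assuming SLIM_REQ_DEFAULT is the basic
+ * request type.
+ *
+ *   u32 porth;
+ *   int ret;
+ *
+ *   ret = slim_alloc_mgrports(sb, SLIM_REQ_DEFAULT, 1, &porth,
+ *                             sizeof(porth));
+ */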
+
+/* Deallocate the port(s) allocated using the API above */
+int slim_dealloc_mgrports(struct slim_device *sb, u32 *hdl, int nports)
+{
+ int i;
+ struct slim_controller *ctrl = sb->ctrl;
+
+ if (!ctrl || !hdl)
+ return -EINVAL;
+
+ mutex_lock(&ctrl->m_ctrl);
+
+ for (i = 0; i < nports; i++) {
+ u8 pn;
+
+ pn = SLIM_HDL_TO_PORT(hdl[i]);
+
+ if (pn >= ctrl->nports || ctrl->ports[pn].state == SLIM_P_CFG) {
+ int j, ret;
+
+ if (pn >= ctrl->nports) {
+ dev_err(&ctrl->dev, "invalid port number");
+ ret = -EINVAL;
+ } else {
+ dev_err(&ctrl->dev,
+ "Can't dealloc connected port:%d", i);
+ ret = -EISCONN;
+ }
+ for (j = i - 1; j >= 0; j--) {
+ pn = SLIM_HDL_TO_PORT(hdl[j]);
+ ctrl->ports[pn].state = SLIM_P_UNCFG;
+ }
+ mutex_unlock(&ctrl->m_ctrl);
+ return ret;
+ }
+ if (ctrl->dealloc_port)
+ ctrl->dealloc_port(ctrl, pn);
+ ctrl->ports[pn].state = SLIM_P_FREE;
+ }
+ mutex_unlock(&ctrl->m_ctrl);
+ return 0;
+}
+EXPORT_SYMBOL(slim_dealloc_mgrports);
+
+/*
+ * slim_config_mgrports: Configure manager side ports
+ * @sb: device/client handle.
+ * @ph: array of port handles for which this configuration is valid
+ * @nports: Number of ports in ph
+ * @cfg: configuration requested for port(s)
+ * Configure port settings if they are different than the default ones.
+ * Returns success if the config could be applied. Returns -EISCONN if the
+ * port is in use
+ */
+int slim_config_mgrports(struct slim_device *sb, u32 *ph, int nports,
+ struct slim_port_cfg *cfg)
+{
+ int i;
+ struct slim_controller *ctrl;
+
+ if (!sb || !ph || !nports || !sb->ctrl || !cfg)
+ return -EINVAL;
+
+ ctrl = sb->ctrl;
+ mutex_lock(&ctrl->sched.m_reconf);
+ for (i = 0; i < nports; i++) {
+ u8 pn = SLIM_HDL_TO_PORT(ph[i]);
+
+ if (ctrl->ports[pn].state == SLIM_P_CFG) {
+ mutex_unlock(&ctrl->sched.m_reconf);
+ return -EISCONN;
+ }
+ ctrl->ports[pn].cfg = *cfg;
+ }
+ mutex_unlock(&ctrl->sched.m_reconf);
+ return 0;
+}
+EXPORT_SYMBOL(slim_config_mgrports);
+
+/*
+ * slim_get_slaveport: Get slave port handle
+ * @la: slave device logical address.
+ * @idx: port index at slave
+ * @rh: return handle
+ * @flw: Flow type (source or destination)
+ * This API only returns a slave port's representation as expected by slimbus
+ * driver. This port is not managed by the slimbus driver. Caller is expected
+ * to have visibility of this port since it's a device-port.
+ */
+int slim_get_slaveport(u8 la, int idx, u32 *rh, enum slim_port_flow flw)
+{
+ if (rh == NULL)
+ return -EINVAL;
+ *rh = SLIM_PORT_HDL(la, flw, idx);
+ return 0;
+}
+EXPORT_SYMBOL(slim_get_slaveport);
+
+static int connect_port_ch(struct slim_controller *ctrl, u8 ch, u32 ph,
+ enum slim_port_flow flow)
+{
+ int ret;
+ u8 buf[2];
+ u32 la = SLIM_HDL_TO_LA(ph);
+ u8 pn = (u8)SLIM_HDL_TO_PORT(ph);
+ DEFINE_SLIM_LDEST_TXN(txn, 0, 2, 6, NULL, buf, la);
+
+ if (flow == SLIM_SRC)
+ txn.mc = SLIM_MSG_MC_CONNECT_SOURCE;
+ else
+ txn.mc = SLIM_MSG_MC_CONNECT_SINK;
+ buf[0] = pn;
+ buf[1] = ctrl->chans[ch].chan;
+ if (la == SLIM_LA_MANAGER)
+ ctrl->ports[pn].flow = flow;
+ ret = slim_processtxn(ctrl, &txn, false);
+ if (!ret && la == SLIM_LA_MANAGER)
+ ctrl->ports[pn].state = SLIM_P_CFG;
+ return ret;
+}
+
+static int disconnect_port_ch(struct slim_controller *ctrl, u32 ph)
+{
+ int ret;
+ u32 la = SLIM_HDL_TO_LA(ph);
+ u8 pn = (u8)SLIM_HDL_TO_PORT(ph);
+ DEFINE_SLIM_LDEST_TXN(txn, 0, 1, 5, NULL, &pn, la);
+
+ txn.mc = SLIM_MSG_MC_DISCONNECT_PORT;
+ ret = slim_processtxn(ctrl, &txn, false);
+ if (ret)
+ return ret;
+ if (la == SLIM_LA_MANAGER) {
+ ctrl->ports[pn].state = SLIM_P_UNCFG;
+ ctrl->ports[pn].cfg.watermark = 0;
+ ctrl->ports[pn].cfg.port_opts = 0;
+ ctrl->ports[pn].ch = NULL;
+ }
+ return 0;
+}
+
+/*
+ * slim_connect_src: Connect source port to channel.
+ * @sb: client handle
+ * @srch: source handle to be connected to this channel
+ * @chanh: Channel with which the ports need to be associated with.
+ * Per slimbus specification, a channel may have 1 source port.
+ * Channel specified in chanh needs to be allocated first.
+ * Returns -EALREADY if source is already configured for this channel.
+ * Returns -ENOTCONN if channel is not allocated
+ * Returns -EINVAL if invalid direction is specified for non-manager port,
+ * or if the manager side port number is out of bounds, or in incorrect state
+ */
+int slim_connect_src(struct slim_device *sb, u32 srch, u16 chanh)
+{
+ struct slim_controller *ctrl = sb->ctrl;
+ int ret;
+ u8 chan = SLIM_HDL_TO_CHIDX(chanh);
+ struct slim_ich *slc = &ctrl->chans[chan];
+ enum slim_port_flow flow = SLIM_HDL_TO_FLOW(srch);
+ u8 la = SLIM_HDL_TO_LA(srch);
+ u8 pn = SLIM_HDL_TO_PORT(srch);
+
+ /* manager ports don't have direction when they are allocated */
+ if (la != SLIM_LA_MANAGER && flow != SLIM_SRC)
+ return -EINVAL;
+
+ mutex_lock(&ctrl->sched.m_reconf);
+
+ if (la == SLIM_LA_MANAGER) {
+ if (pn >= ctrl->nports ||
+ ctrl->ports[pn].state != SLIM_P_UNCFG) {
+ ret = -EINVAL;
+ goto connect_src_err;
+ }
+ }
+
+ if (slc->state == SLIM_CH_FREE) {
+ ret = -ENOTCONN;
+ goto connect_src_err;
+ }
+ /*
+ * Once a channel is removed, its ports can be considered disconnected,
+ * so its ports can be reassigned. The source port is zeroed
+ * when the channel is deallocated.
+ */
+ if (slc->srch) {
+ ret = -EALREADY;
+ goto connect_src_err;
+ }
+ /* only manager ports are tracked in the controller's port table */
+ if (la == SLIM_LA_MANAGER)
+ ctrl->ports[pn].ch = &slc->prop;
+ ret = connect_port_ch(ctrl, chan, srch, SLIM_SRC);
+
+ if (!ret)
+ slc->srch = srch;
+
+connect_src_err:
+ mutex_unlock(&ctrl->sched.m_reconf);
+ return ret;
+}
+EXPORT_SYMBOL(slim_connect_src);
+
+/*
+ * slim_connect_sink: Connect sink port(s) to channel.
+ * @sb: client handle
+ * @sinkh: sink handle(s) to be connected to this channel
+ * @nsink: number of sinks
+ * @chanh: Channel with which the ports need to be associated with.
+ * Per slimbus specification, a channel may have multiple sink-ports.
+ * Channel specified in chanh needs to be allocated first.
+ * Returns -EALREADY if sink is already configured for this channel.
+ * Returns -ENOTCONN if channel is not allocated
+ * Returns -EINVAL if invalid parameters are passed, or invalid direction is
+ * specified for non-manager port, or if the manager side port number is out of
+ * bounds, or in incorrect state
+ */
+int slim_connect_sink(struct slim_device *sb, u32 *sinkh, int nsink, u16 chanh)
+{
+ struct slim_controller *ctrl = sb->ctrl;
+ int j;
+ int ret = 0;
+ u8 chan = SLIM_HDL_TO_CHIDX(chanh);
+ struct slim_ich *slc = &ctrl->chans[chan];
+ void *new_sinkh = NULL;
+
+ if (!sinkh || !nsink)
+ return -EINVAL;
+
+ mutex_lock(&ctrl->sched.m_reconf);
+
+ /*
+ * Once a channel is removed, its ports can be considered disconnected,
+ * so its ports can be reassigned. Sink ports are freed when the channel
+ * is deallocated.
+ */
+ if (slc->state == SLIM_CH_FREE) {
+ ret = -ENOTCONN;
+ goto connect_sink_err;
+ }
+
+ for (j = 0; j < nsink; j++) {
+ enum slim_port_flow flow = SLIM_HDL_TO_FLOW(sinkh[j]);
+ u8 la = SLIM_HDL_TO_LA(sinkh[j]);
+ u8 pn = SLIM_HDL_TO_PORT(sinkh[j]);
+
+ if (la != SLIM_LA_MANAGER && flow != SLIM_SINK) {
+ ret = -EINVAL;
+ } else if (la == SLIM_LA_MANAGER &&
+ (pn >= ctrl->nports ||
+ ctrl->ports[pn].state != SLIM_P_UNCFG)) {
+ ret = -EINVAL;
+ } else {
+ /* only manager ports are tracked in the port table */
+ if (la == SLIM_LA_MANAGER)
+ ctrl->ports[pn].ch = &slc->prop;
+ ret = connect_port_ch(ctrl, chan, sinkh[j], SLIM_SINK);
+ }
+ if (ret) {
+ for (j = j - 1; j >= 0; j--)
+ disconnect_port_ch(ctrl, sinkh[j]);
+ goto connect_sink_err;
+ }
+ }
+
+ new_sinkh = krealloc(slc->sinkh, (sizeof(u32) * (slc->nsink + nsink)),
+ GFP_KERNEL);
+ if (!new_sinkh) {
+ ret = -ENOMEM;
+ for (j = 0; j < nsink; j++)
+ disconnect_port_ch(ctrl, sinkh[j]);
+ goto connect_sink_err;
+ }
+
+ slc->sinkh = new_sinkh;
+ memcpy(slc->sinkh + slc->nsink, sinkh, (sizeof(u32) * nsink));
+ slc->nsink += nsink;
+
+connect_sink_err:
+ mutex_unlock(&ctrl->sched.m_reconf);
+ return ret;
+}
+EXPORT_SYMBOL(slim_connect_sink);
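+
+/*
+ * Illustrative sketch (not part of this snapshot): wiring a slave's port 0
+ * as the source of a channel and a manager port as its sink. chanh is
+ * assumed to come from slim_alloc_ch()/slim_query_ch() and porth from
+ * slim_alloc_mgrports().
+ *
+ *   u32 srch, sinkh = porth;
+ *   int ret;
+ *
+ *   ret = slim_get_slaveport(sb->laddr, 0, &srch, SLIM_SRC);
+ *   if (!ret)
+ *       ret = slim_connect_src(sb, srch, chanh);
+ *   if (!ret)
+ *       ret = slim_connect_sink(sb, &sinkh, 1, chanh);
+ */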
+
+/*
+ * slim_disconnect_ports: Disconnect port(s) from channel
+ * @sb: client handle
+ * @ph: ports to be disconnected
+ * @nph: number of ports.
+ * Disconnects ports from a channel.
+ */
+int slim_disconnect_ports(struct slim_device *sb, u32 *ph, int nph)
+{
+ struct slim_controller *ctrl = sb->ctrl;
+ int i;
+
+ mutex_lock(&ctrl->sched.m_reconf);
+
+ for (i = 0; i < nph; i++)
+ disconnect_port_ch(ctrl, ph[i]);
+ mutex_unlock(&ctrl->sched.m_reconf);
+ return 0;
+}
+EXPORT_SYMBOL(slim_disconnect_ports);
+
+/*
+ * slim_port_xfer: Schedule buffer to be transferred/received using port-handle.
+ * @sb: client handle
+ * @ph: port-handle
+ * @iobuf: buffer to be transferred or populated
+ * @len: buffer size.
+ * @comp: completion signal to indicate transfer done or error.
+ * context: can sleep
+ * Returns number of bytes transferred/received if used synchronously.
+ * Will return 0 if used asynchronously.
+ * Client will call slim_port_get_xfer_status to get error and/or number of
+ * bytes transferred if used asynchronously.
+ */
+int slim_port_xfer(struct slim_device *sb, u32 ph, phys_addr_t iobuf, u32 len,
+ struct completion *comp)
+{
+ struct slim_controller *ctrl = sb->ctrl;
+ u8 pn = SLIM_HDL_TO_PORT(ph);
+
+ dev_dbg(&ctrl->dev, "port xfer: num:%d", pn);
+ return ctrl->port_xfer(ctrl, pn, iobuf, len, comp);
+}
+EXPORT_SYMBOL(slim_port_xfer);
+
+/*
+ * slim_port_get_xfer_status: Poll for port transfers, or get transfer status
+ * after completion is done.
+ * @sb: client handle
+ * @ph: port-handle
+ * @done_buf: return pointer (iobuf from slim_port_xfer) which is processed.
+ * @done_len: Number of bytes transferred.
+ * This can be called when the port_xfer completion is signalled.
+ * The API will return the port transfer error (underflow/overflow/disconnect)
+ * and/or done_len will reflect the number of bytes transferred. Note that
+ * done_len may be valid even if a port error (overflow/underflow) has happened.
+ * e.g. If the transfer was scheduled with a few bytes to be transferred and
+ * client has not supplied more data to be transferred, done_len will indicate
+ * number of bytes transferred with underflow error. To avoid frequent underflow
+ * errors, multiple transfers can be queued (e.g. ping-pong buffers) so that
+ * channel has data to be transferred even if client is not ready to transfer
+ * data all the time. done_buf will indicate address of the last buffer
+ * processed from the multiple transfers.
+ */
+enum slim_port_err slim_port_get_xfer_status(struct slim_device *sb, u32 ph,
+ phys_addr_t *done_buf, u32 *done_len)
+{
+ struct slim_controller *ctrl = sb->ctrl;
+ u8 pn = SLIM_HDL_TO_PORT(ph);
+ u32 la = SLIM_HDL_TO_LA(ph);
+ enum slim_port_err err;
+
+ dev_dbg(&ctrl->dev, "get status port num:%d", pn);
+ /*
+ * Framework only has insight into ports managed by ported device
+ * used by the manager and not slave
+ */
+ if (la != SLIM_LA_MANAGER) {
+ if (done_buf)
+ *done_buf = 0;
+ if (done_len)
+ *done_len = 0;
+ return SLIM_P_NOT_OWNED;
+ }
+ err = ctrl->port_xfer_status(ctrl, pn, done_buf, done_len);
+ if (err == SLIM_P_INPROGRESS)
+ err = ctrl->ports[pn].err;
+ return err;
+}
+EXPORT_SYMBOL(slim_port_get_xfer_status);
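+
+/*
+ * Illustrative sketch (not part of this snapshot): queueing a DMA buffer on
+ * a manager port and checking the status after completion. phys, len and
+ * porth are hypothetical.
+ *
+ *   DECLARE_COMPLETION_ONSTACK(tx_done);
+ *   phys_addr_t done_buf;
+ *   u32 done_len;
+ *   enum slim_port_err perr;
+ *   int ret;
+ *
+ *   ret = slim_port_xfer(sb, porth, phys, len, &tx_done);
+ *   if (!ret) {
+ *       wait_for_completion(&tx_done);
+ *       perr = slim_port_get_xfer_status(sb, porth, &done_buf, &done_len);
+ *   }
+ */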
+
+static void slim_add_ch(struct slim_controller *ctrl, struct slim_ich *slc)
+{
+ struct slim_ich **arr;
+ int i, j;
+ int *len;
+ int sl = slc->seglen << slc->rootexp;
+ /* Channel is already active and other end is transmitting data */
+ if (slc->state >= SLIM_CH_ACTIVE)
+ return;
+ if (slc->coeff == SLIM_COEFF_1) {
+ arr = ctrl->sched.chc1;
+ len = &ctrl->sched.num_cc1;
+ } else {
+ arr = ctrl->sched.chc3;
+ len = &ctrl->sched.num_cc3;
+ sl *= 3;
+ }
+
+ *len += 1;
+
+ /* Insert the channel based on rootexp and seglen */
+ for (i = 0; i < *len - 1; i++) {
+ /*
+ * Primary key: exp low to high.
+ * Secondary key: seglen: high to low
+ */
+ if ((slc->rootexp > arr[i]->rootexp) ||
+ ((slc->rootexp == arr[i]->rootexp) &&
+ (slc->seglen < arr[i]->seglen)))
+ continue;
+ else
+ break;
+ }
+ for (j = *len - 1; j > i; j--)
+ arr[j] = arr[j - 1];
+ arr[i] = slc;
+ if (!ctrl->allocbw)
+ ctrl->sched.usedslots += sl;
+}
+
+static int slim_remove_ch(struct slim_controller *ctrl, struct slim_ich *slc)
+{
+ struct slim_ich **arr;
+ int i;
+ u32 la, ph;
+ int *len;
+
+ if (slc->coeff == SLIM_COEFF_1) {
+ arr = ctrl->sched.chc1;
+ len = &ctrl->sched.num_cc1;
+ } else {
+ arr = ctrl->sched.chc3;
+ len = &ctrl->sched.num_cc3;
+ }
+
+ for (i = 0; i < *len; i++) {
+ if (arr[i] == slc)
+ break;
+ }
+ if (i >= *len)
+ return -EXFULL;
+ for (; i < *len - 1; i++)
+ arr[i] = arr[i + 1];
+ *len -= 1;
+ arr[*len] = NULL;
+
+ slc->state = SLIM_CH_ALLOCATED;
+ slc->def = 0;
+ slc->newintr = 0;
+ slc->newoff = 0;
+ for (i = 0; i < slc->nsink; i++) {
+ ph = slc->sinkh[i];
+ la = SLIM_HDL_TO_LA(ph);
+ /*
+ * For ports managed by manager's ported device, no need to send
+ * disconnect. It is client's responsibility to call disconnect
+ * on ports owned by the slave device
+ */
+ if (la == SLIM_LA_MANAGER) {
+ ctrl->ports[SLIM_HDL_TO_PORT(ph)].state = SLIM_P_UNCFG;
+ ctrl->ports[SLIM_HDL_TO_PORT(ph)].ch = NULL;
+ }
+ }
+
+ ph = slc->srch;
+ la = SLIM_HDL_TO_LA(ph);
+ if (la == SLIM_LA_MANAGER) {
+ u8 pn = SLIM_HDL_TO_PORT(ph);
+
+ ctrl->ports[pn].state = SLIM_P_UNCFG;
+ ctrl->ports[pn].cfg.watermark = 0;
+ ctrl->ports[pn].cfg.port_opts = 0;
+ }
+
+ kfree(slc->sinkh);
+ slc->sinkh = NULL;
+ slc->srch = 0;
+ slc->nsink = 0;
+ return 0;
+}
+
+static u32 slim_calc_prrate(struct slim_controller *ctrl, struct slim_ch *prop)
+{
+ u32 rate = 0, rate4k = 0, rate11k = 0;
+ u32 exp = 0;
+ u32 pr = 0;
+ bool exact = true;
+ bool done = false;
+ enum slim_ch_rate ratefam;
+
+ if (prop->prot >= SLIM_ASYNC_SMPLX)
+ return 0;
+ if (prop->baser == SLIM_RATE_1HZ) {
+ rate = prop->ratem / 4000;
+ rate4k = rate;
+ if (rate * 4000 == prop->ratem)
+ ratefam = SLIM_RATE_4000HZ;
+ else {
+ rate = prop->ratem / 11025;
+ rate11k = rate;
+ if (rate * 11025 == prop->ratem)
+ ratefam = SLIM_RATE_11025HZ;
+ else
+ ratefam = SLIM_RATE_1HZ;
+ }
+ } else {
+ ratefam = prop->baser;
+ rate = prop->ratem;
+ }
+ if (ratefam == SLIM_RATE_1HZ) {
+ exact = false;
+ if ((rate4k + 1) * 4000 < (rate11k + 1) * 11025) {
+ rate = rate4k + 1;
+ ratefam = SLIM_RATE_4000HZ;
+ } else {
+ rate = rate11k + 1;
+ ratefam = SLIM_RATE_11025HZ;
+ }
+ }
+ /* convert rate to coeff-exp */
+ while (!done) {
+ while ((rate & 0x1) != 0x1) {
+ rate >>= 1;
+ exp++;
+ }
+ if (rate > 3) {
+ /* roundup if not exact */
+ rate++;
+ exact = false;
+ } else
+ done = true;
+ }
+ if (ratefam == SLIM_RATE_4000HZ) {
+ if (rate == 1)
+ pr = 0x10;
+ else {
+ pr = 0;
+ exp++;
+ }
+ } else {
+ pr = 8;
+ exp++;
+ }
+ if (exp <= 7) {
+ pr |= exp;
+ if (exact)
+ pr |= 0x80;
+ } else
+ pr = 0;
+ return pr;
+}
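+
+/*
+ * Worked example (following the code above): 48 kHz expressed as
+ * baser = SLIM_RATE_4000HZ, ratem = 12. Since 12 = 3 << 2, rate becomes 3
+ * with exp = 2 and the value is exact; the 4000 Hz family with rate != 1
+ * sets pr = 0 and bumps exp to 3, giving pr = 0x03, and the exact flag
+ * adds 0x80 for a final presence-rate code of 0x83.
+ */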
+
+static int slim_nextdefine_ch(struct slim_device *sb, u8 chan)
+{
+ struct slim_controller *ctrl = sb->ctrl;
+ u32 chrate = 0;
+ u32 exp = 0;
+ u32 coeff = 0;
+ bool exact = true;
+ bool done = false;
+ int ret = 0;
+ struct slim_ich *slc = &ctrl->chans[chan];
+ struct slim_ch *prop = &slc->prop;
+
+ slc->prrate = slim_calc_prrate(ctrl, prop);
+ dev_dbg(&ctrl->dev, "ch:%d, chan PR rate:%x\n", chan, slc->prrate);
+ if (prop->baser == SLIM_RATE_4000HZ)
+ chrate = 4000 * prop->ratem;
+ else if (prop->baser == SLIM_RATE_11025HZ)
+ chrate = 11025 * prop->ratem;
+ else
+ chrate = prop->ratem;
+ /* max allowed sample freq = 768 seg/frame */
+ if (chrate > 3600000)
+ return -EDQUOT;
+ if (prop->baser == SLIM_RATE_4000HZ &&
+ ctrl->a_framer->superfreq == 4000)
+ coeff = prop->ratem;
+ else if (prop->baser == SLIM_RATE_11025HZ &&
+ ctrl->a_framer->superfreq == 3675)
+ coeff = 3 * prop->ratem;
+ else {
+ u32 tempr = 0;
+
+ tempr = chrate * SLIM_CL_PER_SUPERFRAME_DIV8;
+ coeff = tempr / ctrl->a_framer->rootfreq;
+ if (coeff * ctrl->a_framer->rootfreq != tempr) {
+ coeff++;
+ exact = false;
+ }
+ }
+
+ /* convert coeff to coeff-exponent */
+ exp = 0;
+ while (!done) {
+ while ((coeff & 0x1) != 0x1) {
+ coeff >>= 1;
+ exp++;
+ }
+ if (coeff > 3) {
+ coeff++;
+ exact = false;
+ } else
+ done = true;
+ }
+ if (prop->prot == SLIM_HARD_ISO && !exact)
+ return -EPROTONOSUPPORT;
+ else if (prop->prot == SLIM_AUTO_ISO) {
+ if (exact)
+ prop->prot = SLIM_HARD_ISO;
+ else
+ prop->prot = SLIM_PUSH;
+ }
+ slc->rootexp = exp;
+ slc->seglen = prop->sampleszbits/SLIM_CL_PER_SL;
+ if (prop->prot != SLIM_HARD_ISO)
+ slc->seglen++;
+ if (prop->prot >= SLIM_EXT_SMPLX)
+ slc->seglen++;
+ /* convert coeff to enum */
+ if (coeff == 1) {
+ if (exp > 9)
+ ret = -EIO;
+ coeff = SLIM_COEFF_1;
+ } else {
+ if (exp > 8)
+ ret = -EIO;
+ coeff = SLIM_COEFF_3;
+ }
+ slc->coeff = coeff;
+
+ return ret;
+}
+
+/*
+ * slim_alloc_ch: Allocate a slimbus channel and return its handle.
+ * @sb: client handle.
+ * @chanh: return channel handle
+ * Slimbus channels are limited to 256 per specification.
+ * -EXFULL is returned if all channels are in use.
+ * Although slimbus specification supports 256 channels, a controller may not
+ * support that many channels.
+ */
+int slim_alloc_ch(struct slim_device *sb, u16 *chanh)
+{
+ struct slim_controller *ctrl = sb->ctrl;
+ u16 i;
+
+ if (!ctrl)
+ return -EINVAL;
+ mutex_lock(&ctrl->sched.m_reconf);
+ for (i = 0; i < ctrl->nchans; i++) {
+ if (ctrl->chans[i].state == SLIM_CH_FREE)
+ break;
+ }
+ if (i >= ctrl->nchans) {
+ mutex_unlock(&ctrl->sched.m_reconf);
+ return -EXFULL;
+ }
+ *chanh = i;
+ ctrl->chans[i].nextgrp = 0;
+ ctrl->chans[i].state = SLIM_CH_ALLOCATED;
+ ctrl->chans[i].chan = (u8)(ctrl->reserved + i);
+
+ mutex_unlock(&ctrl->sched.m_reconf);
+ return 0;
+}
+EXPORT_SYMBOL(slim_alloc_ch);
+
+/*
+ * slim_query_ch: Get a reference-counted handle for a channel number. Every
+ * channel is reference counted (by up to one user as the producer, and by
+ * the others as consumers).
+ * @sb: client handle
+ * @ch: slimbus channel number
+ * @chanh: return channel handle
+ * If the requested channel number is not in use, it is allocated and its
+ * reference count is set to one. If the channel was already allocated, this
+ * API returns a handle to that channel and increments its reference count.
+ * -EXFULL is returned if all channels are in use
+ */
+int slim_query_ch(struct slim_device *sb, u8 ch, u16 *chanh)
+{
+ struct slim_controller *ctrl = sb->ctrl;
+ u16 i, j;
+ int ret = 0;
+
+ if (!ctrl || !chanh)
+ return -EINVAL;
+ mutex_lock(&ctrl->sched.m_reconf);
+ /* start with modulo number */
+ i = ch % ctrl->nchans;
+
+ for (j = 0; j < ctrl->nchans; j++) {
+ if (ctrl->chans[i].chan == ch) {
+ *chanh = i;
+ ctrl->chans[i].ref++;
+ if (ctrl->chans[i].state == SLIM_CH_FREE)
+ ctrl->chans[i].state = SLIM_CH_ALLOCATED;
+ goto query_out;
+ }
+ i = (i + 1) % ctrl->nchans;
+ }
+
+ /* Channel not in table yet */
+ ret = -EXFULL;
+ for (j = 0; j < ctrl->nchans; j++) {
+ if (ctrl->chans[i].state == SLIM_CH_FREE) {
+ ctrl->chans[i].state =
+ SLIM_CH_ALLOCATED;
+ *chanh = i;
+ ctrl->chans[i].ref++;
+ ctrl->chans[i].chan = ch;
+ ctrl->chans[i].nextgrp = 0;
+ ret = 0;
+ break;
+ }
+ i = (i + 1) % ctrl->nchans;
+ }
+query_out:
+ mutex_unlock(&ctrl->sched.m_reconf);
+ dev_dbg(&ctrl->dev, "query ch:%d,hdl:%d,ref:%d,ret:%d",
+ ch, i, ctrl->chans[i].ref, ret);
+ return ret;
+}
+EXPORT_SYMBOL(slim_query_ch);
+
+/*
+ * slim_dealloc_ch: Deallocate a channel allocated using the API above.
+ * -EISCONN is returned if an attempt is made to deallocate the channel
+ * without removing it first.
+ * -ENOTCONN is returned if deallocation is tried on a channel that's not
+ * allocated.
+ */
+int slim_dealloc_ch(struct slim_device *sb, u16 chanh)
+{
+ struct slim_controller *ctrl = sb->ctrl;
+ u8 chan = SLIM_HDL_TO_CHIDX(chanh);
+ struct slim_ich *slc;
+
+ if (!ctrl)
+ return -EINVAL;
+ slc = &ctrl->chans[chan];
+
+ mutex_lock(&ctrl->sched.m_reconf);
+ if (slc->state == SLIM_CH_FREE) {
+ mutex_unlock(&ctrl->sched.m_reconf);
+ return -ENOTCONN;
+ }
+ if (slc->ref > 1) {
+ slc->ref--;
+ mutex_unlock(&ctrl->sched.m_reconf);
+ dev_dbg(&ctrl->dev, "remove chan:%d,hdl:%d,ref:%d",
+ slc->chan, chanh, slc->ref);
+ return 0;
+ }
+ if (slc->state >= SLIM_CH_PENDING_ACTIVE) {
+ dev_err(&ctrl->dev, "Channel:%d should be removed first", chan);
+ mutex_unlock(&ctrl->sched.m_reconf);
+ return -EISCONN;
+ }
+ slc->ref--;
+ slc->state = SLIM_CH_FREE;
+ mutex_unlock(&ctrl->sched.m_reconf);
+ dev_dbg(&ctrl->dev, "remove chan:%d,hdl:%d,ref:%d",
+ slc->chan, chanh, slc->ref);
+ return 0;
+}
+EXPORT_SYMBOL(slim_dealloc_ch);
+
+/*
+ * slim_get_ch_state: Channel state.
+ * This API returns the channel's state (active, suspended, inactive, etc.)
+ */
+enum slim_ch_state slim_get_ch_state(struct slim_device *sb, u16 chanh)
+{
+ u8 chan = SLIM_HDL_TO_CHIDX(chanh);
+ struct slim_ich *slc = &sb->ctrl->chans[chan];
+
+ return slc->state;
+}
+EXPORT_SYMBOL(slim_get_ch_state);
+
+/*
+ * slim_define_ch: Define a channel. This API defines channel parameters for
+ * a given channel.
+ * @sb: client handle.
+ * @prop: slim_ch structure with channel parameters desired to be used.
+ * @chanh: list of channels to be defined.
+ * @nchan: number of channels in a group (1 if grp is false)
+ * @grp: Are the channels grouped
+ * @grph: return group handle if grouping of channels is desired.
+ * Channels can be grouped if multiple channels use the same parameters
+ * (e.g. 5.1 audio has 6 channels with the same parameters; they are all
+ * grouped and given one handle, for simplicity and to avoid calling this API
+ * repeatedly).
+ * -EISCONN is returned if a channel is already used with different parameters.
+ * -ENXIO is returned if the channel is not yet allocated.
+ */
+int slim_define_ch(struct slim_device *sb, struct slim_ch *prop, u16 *chanh,
+ u8 nchan, bool grp, u16 *grph)
+{
+ struct slim_controller *ctrl = sb->ctrl;
+ int i, ret = 0;
+
+ if (!ctrl || !chanh || !prop || !nchan)
+ return -EINVAL;
+ mutex_lock(&ctrl->sched.m_reconf);
+ for (i = 0; i < nchan; i++) {
+ u8 chan = SLIM_HDL_TO_CHIDX(chanh[i]);
+ struct slim_ich *slc = &ctrl->chans[chan];
+
+ dev_dbg(&ctrl->dev, "define_ch: ch:%d, state:%d", chan,
+ (int)ctrl->chans[chan].state);
+ if (slc->state < SLIM_CH_ALLOCATED) {
+ ret = -ENXIO;
+ goto err_define_ch;
+ }
+ if (slc->state >= SLIM_CH_DEFINED && slc->ref >= 2) {
+ if (prop->ratem != slc->prop.ratem ||
+ prop->sampleszbits != slc->prop.sampleszbits ||
+ prop->baser != slc->prop.baser) {
+ ret = -EISCONN;
+ goto err_define_ch;
+ }
+ } else if (slc->state > SLIM_CH_DEFINED) {
+ ret = -EISCONN;
+ goto err_define_ch;
+ } else {
+ ctrl->chans[chan].prop = *prop;
+ ret = slim_nextdefine_ch(sb, chan);
+ if (ret)
+ goto err_define_ch;
+ }
+ if (i < (nchan - 1))
+ ctrl->chans[chan].nextgrp = chanh[i + 1];
+ if (i == 0)
+ ctrl->chans[chan].nextgrp |= SLIM_START_GRP;
+ if (i == (nchan - 1))
+ ctrl->chans[chan].nextgrp |= SLIM_END_GRP;
+ }
+
+ if (grp)
+ *grph = ((nchan << 8) | SLIM_HDL_TO_CHIDX(chanh[0]));
+ for (i = 0; i < nchan; i++) {
+ u8 chan = SLIM_HDL_TO_CHIDX(chanh[i]);
+ struct slim_ich *slc = &ctrl->chans[chan];
+
+ if (slc->state == SLIM_CH_ALLOCATED)
+ slc->state = SLIM_CH_DEFINED;
+ }
+err_define_ch:
+ dev_dbg(&ctrl->dev, "define_ch: ch:%d, ret:%d", *chanh, ret);
+ mutex_unlock(&ctrl->sched.m_reconf);
+ return ret;
+}
+EXPORT_SYMBOL(slim_define_ch);
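+
+/*
+ * Illustrative sketch (not part of this snapshot): defining two 48 kHz,
+ * 16-bit channels as one group; chanh[] entries are assumed to come from
+ * prior slim_alloc_ch()/slim_query_ch() calls.
+ *
+ *   struct slim_ch prop = {
+ *       .prot = SLIM_AUTO_ISO,
+ *       .baser = SLIM_RATE_4000HZ,
+ *       .ratem = 12,
+ *       .sampleszbits = 16,
+ *   };
+ *   u16 chanh[2], grph;
+ *   int ret;
+ *
+ *   ret = slim_define_ch(sb, &prop, chanh, 2, true, &grph);
+ */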
+
+/*
+ * Compute the subframe-mode coding from the chosen control-space width and
+ * subframe length, clamping both to representable values and returning the
+ * per-superframe message-slot budget via msgsl.
+ */
+static u32 getsubfrmcoding(u32 *ctrlw, u32 *subfrml, u32 *msgsl)
+{
+ u32 code = 0;
+
+ if (*ctrlw == *subfrml) {
+ *ctrlw = 8;
+ *subfrml = 8;
+ *msgsl = SLIM_SL_PER_SUPERFRAME - SLIM_FRM_SLOTS_PER_SUPERFRAME
+ - SLIM_GDE_SLOTS_PER_SUPERFRAME;
+ return 0;
+ }
+ if (*subfrml == 6) {
+ code = 0;
+ *msgsl = 256;
+ } else if (*subfrml == 8) {
+ code = 1;
+ *msgsl = 192;
+ } else if (*subfrml == 24) {
+ code = 2;
+ *msgsl = 64;
+ } else { /* 32 */
+ code = 3;
+ *msgsl = 48;
+ }
+
+ if (*ctrlw < 8) {
+ if (*ctrlw >= 6) {
+ *ctrlw = 6;
+ code |= 0x14;
+ } else {
+ if (*ctrlw == 5)
+ *ctrlw = 4;
+ code |= (*ctrlw << 2);
+ }
+ } else {
+ code -= 2;
+ if (*ctrlw >= 24) {
+ *ctrlw = 24;
+ code |= 0x1e;
+ } else if (*ctrlw >= 16) {
+ *ctrlw = 16;
+ code |= 0x1c;
+ } else if (*ctrlw >= 12) {
+ *ctrlw = 12;
+ code |= 0x1a;
+ } else {
+ *ctrlw = 8;
+ code |= 0x18;
+ }
+ }
+
+ *msgsl = (*msgsl * *ctrlw) - SLIM_FRM_SLOTS_PER_SUPERFRAME -
+ SLIM_GDE_SLOTS_PER_SUPERFRAME;
+ return code;
+}
+
+/*
+ * Shift the new segment offsets of scheduled channels by shft slots,
+ * wrapping at each channel's interval
+ */
+static void shiftsegoffsets(struct slim_controller *ctrl, struct slim_ich **ach,
+ int sz, u32 shft)
+{
+ int i;
+
+ for (i = 0; i < sz; i++) {
+ struct slim_ich *slc;
+
+ if (ach[i] == NULL)
+ continue;
+ slc = ach[i];
+ if (slc->state == SLIM_CH_PENDING_REMOVAL)
+ continue;
+ slc->newoff += shft;
+ /* seg. offset must be <= interval */
+ if (slc->newoff >= slc->newintr)
+ slc->newoff -= slc->newintr;
+ }
+}
+
+static inline int slim_sched_4k_coeff1_chans(struct slim_controller *ctrl,
+ struct slim_ich **slc, int *coeff, int *opensl1,
+ u32 expshft, u32 curintr, u32 curmaxsl,
+ int curexp, int finalexp)
+{
+ int coeff1;
+ struct slim_ich *slc1;
+
+ if (unlikely(!coeff || !slc || !ctrl || !opensl1))
+ return -EINVAL;
+
+ coeff1 = *coeff;
+ slc1 = *slc;
+ while ((coeff1 < ctrl->sched.num_cc1) &&
+ (curexp == (int)(slc1->rootexp + expshft))) {
+ if (slc1->state == SLIM_CH_PENDING_REMOVAL) {
+ coeff1++;
+ slc1 = ctrl->sched.chc1[coeff1];
+ continue;
+ }
+ if (opensl1[1] >= opensl1[0] ||
+ (finalexp == (int)slc1->rootexp &&
+ curintr <= 24 && opensl1[0] == curmaxsl)) {
+ opensl1[1] -= slc1->seglen;
+ slc1->newoff = curmaxsl + opensl1[1];
+ if (opensl1[1] < 0 && opensl1[0] == curmaxsl) {
+ opensl1[0] += opensl1[1];
+ opensl1[1] = 0;
+ if (opensl1[0] < 0) {
+ dev_dbg(&ctrl->dev,
+ "reconfig failed:%d\n",
+ __LINE__);
+ return -EXFULL;
+ }
+ }
+ } else {
+ if (slc1->seglen > opensl1[0]) {
+ dev_dbg(&ctrl->dev,
+ "reconfig failed:%d\n", __LINE__);
+ return -EXFULL;
+ }
+ slc1->newoff = opensl1[0] - slc1->seglen;
+ opensl1[0] = slc1->newoff;
+ }
+ slc1->newintr = curintr;
+ coeff1++;
+ slc1 = ctrl->sched.chc1[coeff1];
+ }
+ *coeff = coeff1;
+ *slc = slc1;
+ return 0;
+}
+
+static int slim_sched_chans(struct slim_device *sb, u32 clkgear,
+ u32 *ctrlw, u32 *subfrml)
+{
+ int coeff1, coeff3;
+ enum slim_ch_coeff bias;
+ struct slim_controller *ctrl = sb->ctrl;
+ int last1 = ctrl->sched.num_cc1 - 1;
+ int last3 = ctrl->sched.num_cc3 - 1;
+
+ /*
+ * Find first channels with coeff 1 & 3 as starting points for
+ * scheduling
+ */
+ for (coeff3 = 0; coeff3 < ctrl->sched.num_cc3; coeff3++) {
+ struct slim_ich *slc = ctrl->sched.chc3[coeff3];
+
+ if (slc->state == SLIM_CH_PENDING_REMOVAL)
+ continue;
+ else
+ break;
+ }
+ for (coeff1 = 0; coeff1 < ctrl->sched.num_cc1; coeff1++) {
+ struct slim_ich *slc = ctrl->sched.chc1[coeff1];
+
+ if (slc->state == SLIM_CH_PENDING_REMOVAL)
+ continue;
+ else
+ break;
+ }
+ if (coeff3 == ctrl->sched.num_cc3 && coeff1 == ctrl->sched.num_cc1) {
+ *ctrlw = 8;
+ *subfrml = 8;
+ return 0;
+ } else if (coeff3 == ctrl->sched.num_cc3)
+ bias = SLIM_COEFF_1;
+ else
+ bias = SLIM_COEFF_3;
+
+ /*
+ * Find last chan in coeff1, 3 list, we will use to know when we
+ * have done scheduling all coeff1 channels
+ */
+ while (last1 >= 0) {
+ if (ctrl->sched.chc1[last1] != NULL &&
+ (ctrl->sched.chc1[last1])->state !=
+ SLIM_CH_PENDING_REMOVAL)
+ break;
+ last1--;
+ }
+ while (last3 >= 0) {
+ if (ctrl->sched.chc3[last3] != NULL &&
+ (ctrl->sched.chc3[last3])->state !=
+ SLIM_CH_PENDING_REMOVAL)
+ break;
+ last3--;
+ }
+
+ if (bias == SLIM_COEFF_1) {
+ struct slim_ich *slc1 = ctrl->sched.chc1[coeff1];
+ u32 expshft = SLIM_MAX_CLK_GEAR - clkgear;
+ int curexp, finalexp;
+ u32 curintr, curmaxsl;
+ int opensl1[2];
+ int maxctrlw1;
+ int ret;
+
+ finalexp = (ctrl->sched.chc1[last1])->rootexp;
+ curexp = (int)expshft - 1;
+
+ curintr = (SLIM_MAX_INTR_COEFF_1 * 2) >> (curexp + 1);
+ curmaxsl = curintr >> 1;
+ opensl1[0] = opensl1[1] = curmaxsl;
+
+ while ((coeff1 < ctrl->sched.num_cc1) || (curintr > 24)) {
+ curintr >>= 1;
+ curmaxsl >>= 1;
+
+ /* update 4K family open slot records */
+ if (opensl1[1] < opensl1[0])
+ opensl1[1] -= curmaxsl;
+ else
+ opensl1[1] = opensl1[0] - curmaxsl;
+ opensl1[0] = curmaxsl;
+ if (opensl1[1] < 0) {
+ opensl1[0] += opensl1[1];
+ opensl1[1] = 0;
+ }
+ if (opensl1[0] <= 0) {
+ dev_dbg(&ctrl->dev, "reconfig failed:%d\n",
+ __LINE__);
+ return -EXFULL;
+ }
+ curexp++;
+ /* schedule 4k family channels */
+ ret = slim_sched_4k_coeff1_chans(ctrl, &slc1, &coeff1,
+ opensl1, expshft, curintr, curmaxsl,
+ curexp, finalexp);
+ if (ret)
+ return ret;
+ }
+ /* Leave some slots for messaging space */
+ if (opensl1[1] <= 0 && opensl1[0] <= 0)
+ return -EXFULL;
+ if (opensl1[1] > opensl1[0]) {
+ int temp = opensl1[0];
+
+ opensl1[0] = opensl1[1];
+ opensl1[1] = temp;
+ shiftsegoffsets(ctrl, ctrl->sched.chc1,
+ ctrl->sched.num_cc1, curmaxsl);
+ }
+ /* choose subframe mode to maximize bw */
+ maxctrlw1 = opensl1[0];
+ if (opensl1[0] == curmaxsl)
+ maxctrlw1 += opensl1[1];
+ if (curintr >= 24) {
+ *subfrml = 24;
+ *ctrlw = maxctrlw1;
+ } else if (curintr == 12) {
+ if (maxctrlw1 > opensl1[1] * 4) {
+ *subfrml = 24;
+ *ctrlw = maxctrlw1;
+ } else {
+ *subfrml = 6;
+ *ctrlw = opensl1[1];
+ }
+ } else {
+ *subfrml = 6;
+ *ctrlw = maxctrlw1;
+ }
+ } else {
+ struct slim_ich *slc1 = NULL;
+ struct slim_ich *slc3 = ctrl->sched.chc3[coeff3];
+ u32 expshft = SLIM_MAX_CLK_GEAR - clkgear;
+ int curexp, finalexp, exp1;
+ u32 curintr, curmaxsl;
+ int opensl3[2];
+ int opensl1[6];
+ bool opensl1valid = false;
+ int maxctrlw1, maxctrlw3, i;
+
+ finalexp = (ctrl->sched.chc3[last3])->rootexp;
+ if (last1 >= 0) {
+ slc1 = ctrl->sched.chc1[coeff1];
+ exp1 = (ctrl->sched.chc1[last1])->rootexp;
+ if (exp1 > finalexp)
+ finalexp = exp1;
+ }
+ curexp = (int)expshft - 1;
+
+ curintr = (SLIM_MAX_INTR_COEFF_3 * 2) >> (curexp + 1);
+ curmaxsl = curintr >> 1;
+ opensl3[0] = opensl3[1] = curmaxsl;
+
+ while (coeff1 < ctrl->sched.num_cc1 ||
+ coeff3 < ctrl->sched.num_cc3 ||
+ curintr > 32) {
+ curintr >>= 1;
+ curmaxsl >>= 1;
+
+ /* update 12k family open slot records */
+ if (opensl3[1] < opensl3[0])
+ opensl3[1] -= curmaxsl;
+ else
+ opensl3[1] = opensl3[0] - curmaxsl;
+ opensl3[0] = curmaxsl;
+ if (opensl3[1] < 0) {
+ opensl3[0] += opensl3[1];
+ opensl3[1] = 0;
+ }
+ if (opensl3[0] <= 0) {
+ dev_dbg(&ctrl->dev, "reconfig failed:%d\n",
+ __LINE__);
+ return -EXFULL;
+ }
+ curexp++;
+
+ /* schedule 12k family channels */
+ while (coeff3 < ctrl->sched.num_cc3 &&
+ curexp == (int)slc3->rootexp + expshft) {
+ if (slc3->state == SLIM_CH_PENDING_REMOVAL) {
+ coeff3++;
+ slc3 = ctrl->sched.chc3[coeff3];
+ continue;
+ }
+ opensl1valid = false;
+ if (opensl3[1] >= opensl3[0] ||
+ (finalexp == (int)slc3->rootexp &&
+ curintr <= 32 &&
+ opensl3[0] == curmaxsl &&
+ last1 < 0)) {
+ opensl3[1] -= slc3->seglen;
+ slc3->newoff = curmaxsl + opensl3[1];
+ if (opensl3[1] < 0 &&
+ opensl3[0] == curmaxsl) {
+ opensl3[0] += opensl3[1];
+ opensl3[1] = 0;
+ }
+ if (opensl3[0] < 0) {
+ dev_dbg(&ctrl->dev,
+ "reconfig failed:%d\n",
+ __LINE__);
+ return -EXFULL;
+ }
+ } else {
+ if (slc3->seglen > opensl3[0]) {
+ dev_dbg(&ctrl->dev,
+ "reconfig failed:%d\n",
+ __LINE__);
+ return -EXFULL;
+ }
+ slc3->newoff = opensl3[0] -
+ slc3->seglen;
+ opensl3[0] = slc3->newoff;
+ }
+ slc3->newintr = curintr;
+ coeff3++;
+ slc3 = ctrl->sched.chc3[coeff3];
+ }
+ /* update 4k openslot records */
+ if (opensl1valid == false) {
+ for (i = 0; i < 3; i++) {
+ opensl1[i * 2] = opensl3[0];
+ opensl1[(i * 2) + 1] = opensl3[1];
+ }
+ } else {
+ int opensl1p[6];
+
+ memcpy(opensl1p, opensl1, sizeof(opensl1));
+ for (i = 0; i < 3; i++) {
+ if (opensl1p[i] < opensl1p[i + 3])
+ opensl1[(i * 2) + 1] =
+ opensl1p[i];
+ else
+ opensl1[(i * 2) + 1] =
+ opensl1p[i + 3];
+ }
+ for (i = 0; i < 3; i++) {
+ opensl1[(i * 2) + 1] -= curmaxsl;
+ opensl1[i * 2] = curmaxsl;
+ if (opensl1[(i * 2) + 1] < 0) {
+ opensl1[i * 2] +=
+ opensl1[(i * 2) + 1];
+ opensl1[(i * 2) + 1] = 0;
+ }
+ if (opensl1[i * 2] < 0) {
+ dev_dbg(&ctrl->dev,
+ "reconfig failed:%d\n",
+ __LINE__);
+ return -EXFULL;
+ }
+ }
+ }
+ /* schedule 4k family channels */
+ while (coeff1 < ctrl->sched.num_cc1 &&
+ curexp == (int)slc1->rootexp + expshft) {
+ /* searchorder effective when opensl valid */
+ static const int srcho[] = { 5, 2, 4, 1, 3, 0 };
+ int maxopensl = 0;
+ int maxi = 0;
+
+ if (slc1->state == SLIM_CH_PENDING_REMOVAL) {
+ coeff1++;
+ slc1 = ctrl->sched.chc1[coeff1];
+ continue;
+ }
+ opensl1valid = true;
+ for (i = 0; i < 6; i++) {
+ if (opensl1[srcho[i]] > maxopensl) {
+ maxopensl = opensl1[srcho[i]];
+ maxi = srcho[i];
+ }
+ }
+ opensl1[maxi] -= slc1->seglen;
+ slc1->newoff = (curmaxsl * maxi) +
+ opensl1[maxi];
+ if (opensl1[maxi] < 0 && (maxi & 1) == 1 &&
+ opensl1[maxi - 1] == curmaxsl) {
+ opensl1[maxi - 1] += opensl1[maxi];
+ if (opensl3[0] > opensl1[maxi - 1])
+ opensl3[0] = opensl1[maxi - 1];
+ opensl3[1] = 0;
+ opensl1[maxi] = 0;
+ if (opensl1[maxi - 1] < 0) {
+ dev_dbg(&ctrl->dev,
+ "reconfig failed:%d\n",
+ __LINE__);
+ return -EXFULL;
+ }
+ } else if (opensl1[maxi] < 0) {
+ dev_dbg(&ctrl->dev,
+ "reconfig failed:%d\n",
+ __LINE__);
+ return -EXFULL;
+ } else if (opensl3[maxi & 1] > opensl1[maxi]) {
+ opensl3[maxi & 1] = opensl1[maxi];
+ }
+ slc1->newintr = curintr * 3;
+ coeff1++;
+ slc1 = ctrl->sched.chc1[coeff1];
+ }
+ }
+ /* Leave some slots for messaging space */
+ if (opensl3[1] <= 0 && opensl3[0] <= 0)
+ return -EXFULL;
+ /* swap 1st and 2nd bucket if 2nd bucket has more open slots */
+ if (opensl3[1] > opensl3[0]) {
+ int temp = opensl3[0];
+
+ opensl3[0] = opensl3[1];
+ opensl3[1] = temp;
+ temp = opensl1[5];
+ opensl1[5] = opensl1[4];
+ opensl1[4] = opensl1[3];
+ opensl1[3] = opensl1[2];
+ opensl1[2] = opensl1[1];
+ opensl1[1] = opensl1[0];
+ opensl1[0] = temp;
+ shiftsegoffsets(ctrl, ctrl->sched.chc1,
+ ctrl->sched.num_cc1, curmaxsl);
+ shiftsegoffsets(ctrl, ctrl->sched.chc3,
+ ctrl->sched.num_cc3, curmaxsl);
+ }
+ /* subframe mode to maximize BW */
+ maxctrlw3 = opensl3[0];
+ maxctrlw1 = opensl1[0];
+ if (opensl3[0] == curmaxsl)
+ maxctrlw3 += opensl3[1];
+ for (i = 0; i < 5 && opensl1[i] == curmaxsl; i++)
+ maxctrlw1 += opensl1[i + 1];
+ if (curintr >= 32) {
+ *subfrml = 32;
+ *ctrlw = maxctrlw3;
+ } else if (curintr == 16) {
+ if (maxctrlw3 > (opensl3[1] * 4)) {
+ *subfrml = 32;
+ *ctrlw = maxctrlw3;
+ } else {
+ *subfrml = 8;
+ *ctrlw = opensl3[1];
+ }
+ } else {
+ if ((maxctrlw1 * 8) >= (maxctrlw3 * 24)) {
+ *subfrml = 24;
+ *ctrlw = maxctrlw1;
+ } else {
+ *subfrml = 8;
+ *ctrlw = maxctrlw3;
+ }
+ }
+ }
+ return 0;
+}
+
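+/*
+ * slim_verifychansched: DEBUG-only sanity check: replays every scheduled
+ * channel segment into a superframe-long slot map and fails with -EXFULL if
+ * any segment overlaps another channel or the reserved control space.
+ */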
+#ifdef DEBUG
+static int slim_verifychansched(struct slim_controller *ctrl, u32 ctrlw,
+ u32 subfrml, u32 clkgear)
+{
+ int sl, i;
+ int cc1 = 0;
+ int cc3 = 0;
+ struct slim_ich *slc = NULL;
+
+ if (!ctrl->sched.slots)
+ return 0;
+ memset(ctrl->sched.slots, 0, SLIM_SL_PER_SUPERFRAME);
+ dev_dbg(&ctrl->dev, "Clock gear is:%d\n", clkgear);
+ for (sl = 0; sl < SLIM_SL_PER_SUPERFRAME; sl += subfrml) {
+ for (i = 0; i < ctrlw; i++)
+ ctrl->sched.slots[sl + i] = 33;
+ }
+ while (cc1 < ctrl->sched.num_cc1) {
+ slc = ctrl->sched.chc1[cc1];
+ if (slc == NULL) {
+ dev_err(&ctrl->dev, "SLC1 null in verify: chan%d\n",
+ cc1);
+ return -EIO;
+ }
+ dev_dbg(&ctrl->dev, "chan:%d, offset:%d, intr:%d, seglen:%d\n",
+ (slc - ctrl->chans), slc->newoff,
+ slc->newintr, slc->seglen);
+
+ if (slc->state != SLIM_CH_PENDING_REMOVAL) {
+ for (sl = slc->newoff;
+ sl < SLIM_SL_PER_SUPERFRAME;
+ sl += slc->newintr) {
+ for (i = 0; i < slc->seglen; i++) {
+ if (ctrl->sched.slots[sl + i])
+ return -EXFULL;
+ ctrl->sched.slots[sl + i] = cc1 + 1;
+ }
+ }
+ }
+ cc1++;
+ }
+ while (cc3 < ctrl->sched.num_cc3) {
+ slc = ctrl->sched.chc3[cc3];
+ if (slc == NULL) {
+ dev_err(&ctrl->dev, "SLC3 null in verify: chan%d\n",
+ cc3);
+ return -EIO;
+ }
+ dev_dbg(&ctrl->dev, "chan:%d, offset:%d, intr:%d, seglen:%d\n",
+ (slc - ctrl->chans), slc->newoff,
+ slc->newintr, slc->seglen);
+ if (slc->state != SLIM_CH_PENDING_REMOVAL) {
+ for (sl = slc->newoff;
+ sl < SLIM_SL_PER_SUPERFRAME;
+ sl += slc->newintr) {
+ for (i = 0; i < slc->seglen; i++) {
+ if (ctrl->sched.slots[sl + i])
+ return -EXFULL;
+ ctrl->sched.slots[sl + i] = cc3 + 1;
+ }
+ }
+ }
+ cc3++;
+ }
+
+ return 0;
+}
+#else
+static int slim_verifychansched(struct slim_controller *ctrl, u32 ctrlw,
+ u32 subfrml, u32 clkgear)
+{
+ return 0;
+}
+#endif
+
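+/*
+ * slim_sort_chan_grp: Bubble-sort the channels of a group by their newly
+ * assigned offsets, following nextgrp links until SLIM_END_GRP, so the
+ * group's segments are in increasing slot order.
+ */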
+static void slim_sort_chan_grp(struct slim_controller *ctrl,
+ struct slim_ich *slc)
+{
+ u8 last = (u8)-1;
+ u8 second = 0;
+
+ for (; last > 0; last--) {
+ struct slim_ich *slc1 = slc;
+ struct slim_ich *slc2;
+ u8 next = SLIM_HDL_TO_CHIDX(slc1->nextgrp);
+
+ slc2 = &ctrl->chans[next];
+ for (second = 1; second <= last && slc2 &&
+ (slc2->state == SLIM_CH_ACTIVE ||
+ slc2->state == SLIM_CH_PENDING_ACTIVE); second++) {
+ if (slc1->newoff > slc2->newoff) {
+ u32 temp = slc2->newoff;
+
+ slc2->newoff = slc1->newoff;
+ slc1->newoff = temp;
+ }
+ if (slc2->nextgrp & SLIM_END_GRP) {
+ last = second;
+ break;
+ }
+ slc1 = slc2;
+ next = SLIM_HDL_TO_CHIDX(slc1->nextgrp);
+ slc2 = &ctrl->chans[next];
+ }
+ if (slc2 == NULL)
+ last = second - 1;
+ }
+}
+
+
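+/*
+ * slim_allocbw: Default bandwidth allocation: lower the clock gear while the
+ * (doubled) slot usage still fits in the available superframe, then walk
+ * gears upward until slim_sched_chans() succeeds and the reserved message
+ * bandwidth also fits.
+ */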
+static int slim_allocbw(struct slim_device *sb, int *subfrmc, int *clkgear)
+{
+ u32 msgsl = 0;
+ u32 ctrlw = 0;
+ u32 subfrml = 0;
+ int ret = -EIO;
+ struct slim_controller *ctrl = sb->ctrl;
+ u32 usedsl = ctrl->sched.usedslots + ctrl->sched.pending_msgsl;
+ u32 availsl = SLIM_SL_PER_SUPERFRAME - SLIM_FRM_SLOTS_PER_SUPERFRAME -
+ SLIM_GDE_SLOTS_PER_SUPERFRAME;
+ *clkgear = SLIM_MAX_CLK_GEAR;
+
+ dev_dbg(&ctrl->dev, "used sl:%u, availlable sl:%u\n", usedsl, availsl);
+ dev_dbg(&ctrl->dev, "pending:chan sl:%u, :msg sl:%u, clkgear:%u\n",
+ ctrl->sched.usedslots,
+ ctrl->sched.pending_msgsl, *clkgear);
+ /*
+ * If the number of used slots is 0, all channels are inactive and the
+ * manager is very likely to request clock pause soon. Keeping the bus
+ * at MAX_GEAR lets the clock pause sequence take the minimum time.
+ */
+ if (ctrl->sched.usedslots != 0) {
+ while ((usedsl * 2 <= availsl) && (*clkgear > ctrl->min_cg)) {
+ *clkgear -= 1;
+ usedsl *= 2;
+ }
+ }
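+ /*
+ * Illustration (hypothetical numbers): with usedsl = 60 and
+ * availsl = 1472, the gear drops 4 steps while usedsl doubles
+ * 60 -> 120 -> 240 -> 480 -> 960; a 5th step would need 1920
+ * slots and no longer fit, so the loop stops there.
+ */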
+
+ /*
+ * Try scheduling data channels at the current clock gear. If not all
+ * channels can be scheduled, or the reserved message bandwidth cannot
+ * be satisfied, increase the clock gear and try again.
+ */
+ for (; *clkgear <= ctrl->max_cg; (*clkgear)++) {
+ ret = slim_sched_chans(sb, *clkgear, &ctrlw, &subfrml);
+
+ if (ret == 0) {
+ *subfrmc = getsubfrmcoding(&ctrlw, &subfrml, &msgsl);
+ if ((msgsl >> (ctrl->max_cg - *clkgear) <
+ ctrl->sched.pending_msgsl) &&
+ (*clkgear < ctrl->max_cg))
+ continue;
+ else
+ break;
+ }
+ }
+ if (ret == 0) {
+ int i;
+ /* Sort channel-groups */
+ for (i = 0; i < ctrl->sched.num_cc1; i++) {
+ struct slim_ich *slc = ctrl->sched.chc1[i];
+
+ if (slc->state == SLIM_CH_PENDING_REMOVAL)
+ continue;
+ if ((slc->nextgrp & SLIM_START_GRP) &&
+ !(slc->nextgrp & SLIM_END_GRP)) {
+ slim_sort_chan_grp(ctrl, slc);
+ }
+ }
+ for (i = 0; i < ctrl->sched.num_cc3; i++) {
+ struct slim_ich *slc = ctrl->sched.chc3[i];
+
+ if (slc->state == SLIM_CH_PENDING_REMOVAL)
+ continue;
+ if ((slc->nextgrp & SLIM_START_GRP) &&
+ !(slc->nextgrp & SLIM_END_GRP)) {
+ slim_sort_chan_grp(ctrl, slc);
+ }
+ }
+
+ ret = slim_verifychansched(ctrl, ctrlw, subfrml, *clkgear);
+ }
+
+ return ret;
+}
+
+static void slim_change_existing_chans(struct slim_controller *ctrl, int coeff)
+{
+ struct slim_ich **arr;
+ int len, i;
+
+ if (coeff == SLIM_COEFF_1) {
+ arr = ctrl->sched.chc1;
+ len = ctrl->sched.num_cc1;
+ } else {
+ arr = ctrl->sched.chc3;
+ len = ctrl->sched.num_cc3;
+ }
+ for (i = 0; i < len; i++) {
+ struct slim_ich *slc = arr[i];
+
+ if (slc->state == SLIM_CH_ACTIVE ||
+ slc->state == SLIM_CH_SUSPENDED)
+ slc->offset = slc->newoff;
+ slc->interval = slc->newintr;
+ }
+}
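+
+/*
+ * slim_chan_changes: Commit (revert == false) or roll back (revert == true)
+ * the client's pending define/removal/suspend lists after the reconfiguration
+ * sequence, freeing the pending entries either way.
+ */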
+static void slim_chan_changes(struct slim_device *sb, bool revert)
+{
+ struct slim_controller *ctrl = sb->ctrl;
+
+ while (!list_empty(&sb->mark_define)) {
+ struct slim_ich *slc;
+ struct slim_pending_ch *pch =
+ list_entry(sb->mark_define.next,
+ struct slim_pending_ch, pending);
+ slc = &ctrl->chans[pch->chan];
+ if (revert) {
+ if (slc->state == SLIM_CH_PENDING_ACTIVE) {
+ u32 sl = slc->seglen << slc->rootexp;
+
+ if (slc->coeff == SLIM_COEFF_3)
+ sl *= 3;
+ if (!ctrl->allocbw)
+ ctrl->sched.usedslots -= sl;
+ slim_remove_ch(ctrl, slc);
+ slc->state = SLIM_CH_DEFINED;
+ }
+ } else {
+ slc->state = SLIM_CH_ACTIVE;
+ slc->def++;
+ }
+ list_del_init(&pch->pending);
+ kfree(pch);
+ }
+
+ while (!list_empty(&sb->mark_removal)) {
+ struct slim_pending_ch *pch =
+ list_entry(sb->mark_removal.next,
+ struct slim_pending_ch, pending);
+ struct slim_ich *slc = &ctrl->chans[pch->chan];
+ u32 sl = slc->seglen << slc->rootexp;
+
+ if (revert || slc->def > 0) {
+ if (slc->coeff == SLIM_COEFF_3)
+ sl *= 3;
+ if (!ctrl->allocbw)
+ ctrl->sched.usedslots += sl;
+ if (revert)
+ slc->def++;
+ slc->state = SLIM_CH_ACTIVE;
+ } else
+ slim_remove_ch(ctrl, slc);
+ list_del_init(&pch->pending);
+ kfree(pch);
+ }
+
+ while (!list_empty(&sb->mark_suspend)) {
+ struct slim_pending_ch *pch =
+ list_entry(sb->mark_suspend.next,
+ struct slim_pending_ch, pending);
+ struct slim_ich *slc = &ctrl->chans[pch->chan];
+
+ if (revert)
+ slc->state = SLIM_CH_ACTIVE;
+ list_del_init(&pch->pending);
+ kfree(pch);
+ }
+ /* Change already active channel if reconfig succeeded */
+ if (!revert) {
+ slim_change_existing_chans(ctrl, SLIM_COEFF_1);
+ slim_change_existing_chans(ctrl, SLIM_COEFF_3);
+ }
+}
+
+/*
+ * slim_reconfigure_now: Request reconfiguration now.
+ * @sb: client handle
+ * This API does what the commit flag in other scheduling APIs does.
+ * -EXFULL is returned if there is no space in TDM to reserve the
+ * bandwidth. -EBUSY is returned if a reconfiguration request is already in
+ * progress.
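+ *
+ * The sequence sent on the bus mirrors this function: a
+ * BEGIN_RECONFIGURATION broadcast, optional NEXT_SUBFRAME_MODE and
+ * NEXT_CLOCK_GEAR messages, NEXT_(DEFINE/ACTIVATE/REMOVE/DEACTIVATE)_CHANNEL
+ * messages for each pending change, and a final RECONFIGURE_NOW.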
+ */
+int slim_reconfigure_now(struct slim_device *sb)
+{
+ u8 i;
+ u8 wbuf[4];
+ u32 clkgear, subframe;
+ u32 curexp;
+ int ret;
+ struct slim_controller *ctrl = sb->ctrl;
+ u32 expshft;
+ u32 segdist;
+ struct slim_pending_ch *pch;
+ DEFINE_SLIM_BCAST_TXN(txn, SLIM_MSG_MC_BEGIN_RECONFIGURATION, 0, 3,
+ NULL, NULL, sb->laddr);
+
+ mutex_lock(&ctrl->sched.m_reconf);
+ /*
+ * If there are no pending changes from this client, avoid sending
+ * the reconfiguration sequence
+ */
+ if (sb->pending_msgsl == sb->cur_msgsl &&
+ list_empty(&sb->mark_define) &&
+ list_empty(&sb->mark_suspend)) {
+ struct list_head *pos, *next;
+
+ list_for_each_safe(pos, next, &sb->mark_removal) {
+ struct slim_ich *slc;
+
+ pch = list_entry(pos, struct slim_pending_ch, pending);
+ slc = &ctrl->chans[pch->chan];
+ if (slc->def > 0)
+ slc->def--;
+ /* Disconnect source port to free it up */
+ if (SLIM_HDL_TO_LA(slc->srch) == sb->laddr)
+ slc->srch = 0;
+ /*
+ * If the controller overrides BW allocation, this
+ * entry is instead deleted during channel removal
+ */
+ if (slc->def != 0 && !ctrl->allocbw) {
+ list_del(&pch->pending);
+ kfree(pch);
+ }
+ }
+ if (list_empty(&sb->mark_removal)) {
+ mutex_unlock(&ctrl->sched.m_reconf);
+ pr_info("SLIM_CL: skip reconfig sequence");
+ return 0;
+ }
+ }
+
+ ctrl->sched.pending_msgsl += sb->pending_msgsl - sb->cur_msgsl;
+ list_for_each_entry(pch, &sb->mark_define, pending) {
+ struct slim_ich *slc = &ctrl->chans[pch->chan];
+
+ slim_add_ch(ctrl, slc);
+ if (slc->state < SLIM_CH_ACTIVE)
+ slc->state = SLIM_CH_PENDING_ACTIVE;
+ }
+
+ list_for_each_entry(pch, &sb->mark_removal, pending) {
+ struct slim_ich *slc = &ctrl->chans[pch->chan];
+ u32 sl = slc->seglen << slc->rootexp;
+
+ if (slc->coeff == SLIM_COEFF_3)
+ sl *= 3;
+ if (!ctrl->allocbw)
+ ctrl->sched.usedslots -= sl;
+ slc->state = SLIM_CH_PENDING_REMOVAL;
+ }
+ list_for_each_entry(pch, &sb->mark_suspend, pending) {
+ struct slim_ich *slc = &ctrl->chans[pch->chan];
+
+ slc->state = SLIM_CH_SUSPENDED;
+ }
+
+ /*
+ * The controller can override the default channel scheduling algorithm
+ * (e.g. if it needs to use fixed channel scheduling based on the
+ * number of channels).
+ */
+ if (ctrl->allocbw)
+ ret = ctrl->allocbw(sb, &subframe, &clkgear);
+ else
+ ret = slim_allocbw(sb, &subframe, &clkgear);
+
+ if (!ret) {
+ ret = slim_processtxn(ctrl, &txn, false);
+ dev_dbg(&ctrl->dev, "sending begin_reconfig:ret:%d\n", ret);
+ }
+
+ if (!ret && subframe != ctrl->sched.subfrmcode) {
+ wbuf[0] = (u8)(subframe & 0xFF);
+ txn.mc = SLIM_MSG_MC_NEXT_SUBFRAME_MODE;
+ txn.len = 1;
+ txn.rl = 4;
+ txn.wbuf = wbuf;
+ ret = slim_processtxn(ctrl, &txn, false);
+ dev_dbg(&ctrl->dev, "sending subframe:%d,ret:%d\n",
+ (int)wbuf[0], ret);
+ }
+ if (!ret && clkgear != ctrl->clkgear) {
+ wbuf[0] = (u8)(clkgear & 0xFF);
+ txn.mc = SLIM_MSG_MC_NEXT_CLOCK_GEAR;
+ txn.len = 1;
+ txn.rl = 4;
+ txn.wbuf = wbuf;
+ ret = slim_processtxn(ctrl, &txn, false);
+ dev_dbg(&ctrl->dev, "sending clkgear:%d,ret:%d\n",
+ (int)wbuf[0], ret);
+ }
+ if (ret)
+ goto revert_reconfig;
+
+ expshft = SLIM_MAX_CLK_GEAR - clkgear;
+ /* define/activate, remove, or suspend the pending channels */
+ list_for_each_entry(pch, &sb->mark_define, pending) {
+ struct slim_ich *slc = &ctrl->chans[pch->chan];
+ /* Define content */
+ wbuf[0] = slc->chan;
+ wbuf[1] = slc->prrate;
+ wbuf[2] = slc->prop.dataf | (slc->prop.auxf << 4);
+ wbuf[3] = slc->prop.sampleszbits / SLIM_CL_PER_SL;
+ txn.mc = SLIM_MSG_MC_NEXT_DEFINE_CONTENT;
+ txn.len = 4;
+ txn.rl = 7;
+ txn.wbuf = wbuf;
+ dev_dbg(&ctrl->dev, "define content, activate:%x, %x, %x, %x\n",
+ wbuf[0], wbuf[1], wbuf[2], wbuf[3]);
+ /* Right now, channel link bit is not supported */
+ ret = slim_processtxn(ctrl, &txn, false);
+ if (ret)
+ goto revert_reconfig;
+
+ txn.mc = SLIM_MSG_MC_NEXT_ACTIVATE_CHANNEL;
+ txn.len = 1;
+ txn.rl = 4;
+ ret = slim_processtxn(ctrl, &txn, false);
+ if (ret)
+ goto revert_reconfig;
+ }
+
+ list_for_each_entry(pch, &sb->mark_removal, pending) {
+ struct slim_ich *slc = &ctrl->chans[pch->chan];
+
+ dev_dbg(&ctrl->dev, "remove chan:%x\n", pch->chan);
+ wbuf[0] = slc->chan;
+ txn.mc = SLIM_MSG_MC_NEXT_REMOVE_CHANNEL;
+ txn.len = 1;
+ txn.rl = 4;
+ txn.wbuf = wbuf;
+ ret = slim_processtxn(ctrl, &txn, false);
+ if (ret)
+ goto revert_reconfig;
+ }
+ list_for_each_entry(pch, &sb->mark_suspend, pending) {
+ struct slim_ich *slc = &ctrl->chans[pch->chan];
+
+ dev_dbg(&ctrl->dev, "suspend chan:%x\n", pch->chan);
+ wbuf[0] = slc->chan;
+ txn.mc = SLIM_MSG_MC_NEXT_DEACTIVATE_CHANNEL;
+ txn.len = 1;
+ txn.rl = 4;
+ txn.wbuf = wbuf;
+ ret = slim_processtxn(ctrl, &txn, false);
+ if (ret)
+ goto revert_reconfig;
+ }
+
+ /* Define CC1 channels */
+ for (i = 0; i < ctrl->sched.num_cc1; i++) {
+ struct slim_ich *slc = ctrl->sched.chc1[i];
+
+ if (slc->state == SLIM_CH_PENDING_REMOVAL)
+ continue;
+ curexp = slc->rootexp + expshft;
+ segdist = (slc->newoff << curexp) & 0x1FF;
+ expshft = SLIM_MAX_CLK_GEAR - clkgear;
+ dev_dbg(&ctrl->dev, "new-intr:%d, old-intr:%d, dist:%d\n",
+ slc->newintr, slc->interval, segdist);
+ dev_dbg(&ctrl->dev, "new-off:%d, old-off:%d\n",
+ slc->newoff, slc->offset);
+
+ if (slc->state < SLIM_CH_ACTIVE || slc->def < slc->ref ||
+ slc->newintr != slc->interval ||
+ slc->newoff != slc->offset) {
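+ /*
+ * Build the segment-distribution field for
+ * NEXT_DEFINE_CHANNEL: the low bits carry the segment
+ * offset, the 0x200 marker (shifted down by the
+ * interval exponent) encodes the repeat interval, and
+ * bits 0xC00 complete the coefficient-1 code.
+ */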
+ segdist |= 0x200;
+ segdist >>= curexp;
+ segdist |= (slc->newoff << (curexp + 1)) & 0xC00;
+ wbuf[0] = slc->chan;
+ wbuf[1] = (u8)(segdist & 0xFF);
+ wbuf[2] = (u8)((segdist & 0xF00) >> 8) |
+ (slc->prop.prot << 4);
+ wbuf[3] = slc->seglen;
+ txn.mc = SLIM_MSG_MC_NEXT_DEFINE_CHANNEL;
+ txn.len = 4;
+ txn.rl = 7;
+ txn.wbuf = wbuf;
+ ret = slim_processtxn(ctrl, &txn, false);
+ if (ret)
+ goto revert_reconfig;
+ }
+ }
+
+ /* Define CC3 channels */
+ for (i = 0; i < ctrl->sched.num_cc3; i++) {
+ struct slim_ich *slc = ctrl->sched.chc3[i];
+
+ if (slc->state == SLIM_CH_PENDING_REMOVAL)
+ continue;
+ curexp = slc->rootexp + expshft;
+ segdist = (slc->newoff << curexp) & 0x1FF;
+ expshft = SLIM_MAX_CLK_GEAR - clkgear;
+ dev_dbg(&ctrl->dev, "new-intr:%d, old-intr:%d, dist:%d\n",
+ slc->newintr, slc->interval, segdist);
+ dev_dbg(&ctrl->dev, "new-off:%d, old-off:%d\n",
+ slc->newoff, slc->offset);
+
+ if (slc->state < SLIM_CH_ACTIVE || slc->def < slc->ref ||
+ slc->newintr != slc->interval ||
+ slc->newoff != slc->offset) {
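+ /*
+ * Same packing as the coefficient-1 case above, except
+ * the top bits are always set to 0xC00 for
+ * coefficient-3 channels.
+ */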
+ segdist |= 0x200;
+ segdist >>= curexp;
+ segdist |= 0xC00;
+ wbuf[0] = slc->chan;
+ wbuf[1] = (u8)(segdist & 0xFF);
+ wbuf[2] = (u8)((segdist & 0xF00) >> 8) |
+ (slc->prop.prot << 4);
+ wbuf[3] = (u8)(slc->seglen);
+ txn.mc = SLIM_MSG_MC_NEXT_DEFINE_CHANNEL;
+ txn.len = 4;
+ txn.rl = 7;
+ txn.wbuf = wbuf;
+ ret = slim_processtxn(ctrl, &txn, false);
+ if (ret)
+ goto revert_reconfig;
+ }
+ }
+ txn.mc = SLIM_MSG_MC_RECONFIGURE_NOW;
+ txn.len = 0;
+ txn.rl = 3;
+ txn.wbuf = NULL;
+ ret = slim_processtxn(ctrl, &txn, false);
+ dev_dbg(&ctrl->dev, "reconfig now:ret:%d\n", ret);
+ if (!ret) {
+ ctrl->sched.subfrmcode = subframe;
+ ctrl->clkgear = clkgear;
+ ctrl->sched.msgsl = ctrl->sched.pending_msgsl;
+ sb->cur_msgsl = sb->pending_msgsl;
+ slim_chan_changes(sb, false);
+ mutex_unlock(&ctrl->sched.m_reconf);
+ return 0;
+ }
+
+revert_reconfig:
+ /* Revert channel changes */
+ slim_chan_changes(sb, true);
+ mutex_unlock(&ctrl->sched.m_reconf);
+ return ret;
+}
+EXPORT_SYMBOL(slim_reconfigure_now);
+
+static int add_pending_ch(struct list_head *listh, u8 chan)
+{
+ struct slim_pending_ch *pch;
+
+ pch = kmalloc(sizeof(*pch), GFP_KERNEL);
+ if (!pch)
+ return -ENOMEM;
+ pch->chan = chan;
+ list_add_tail(&pch->pending, listh);
+ return 0;
+}
+
+/*
+ * slim_control_ch: Channel control API.
+ * @sb: client handle
+ * @chanh: group or channel handle to be controlled
+ * @chctrl: Control command (activate/suspend/remove)
+ * @commit: flag to indicate whether the control should take effect right away.
+ * This API activates, removes or suspends a channel (or group of channels).
+ * chanh indicates the channel or group handle (returned by the define_ch API).
+ * Reconfiguration may be time-consuming since it can change all other active
+ * channel allocations on the bus, the clock gear used by the slimbus, and the
+ * control space width used for messaging.
+ * commit makes sure that multiple channels can be activated/deactivated before
+ * reconfiguration is started.
+ * -EXFULL is returned if there is no space in TDM to reserve the bandwidth.
+ * -EISCONN/-ENOTCONN is returned if the channel is already connected or not
+ * yet defined.
+ * -EINVAL is returned if individual control of a grouped-channel is attempted.
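+ *
+ * A typical grouped usage (illustrative): call this API with commit set to
+ * false for each channel change, then let one final call, e.g.
+ * slim_reservemsg_bw() with commit true, trigger a single reconfiguration.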
+ */
+int slim_control_ch(struct slim_device *sb, u16 chanh,
+ enum slim_ch_control chctrl, bool commit)
+{
+ struct slim_controller *ctrl = sb->ctrl;
+ int ret = 0;
+ /* Get rid of the group flag in MSB if any */
+ u8 chan = SLIM_HDL_TO_CHIDX(chanh);
+ u8 nchan = 0;
+ struct slim_ich *slc = &ctrl->chans[chan];
+
+ if (!(slc->nextgrp & SLIM_START_GRP))
+ return -EINVAL;
+
+ mutex_lock(&sb->sldev_reconf);
+ mutex_lock(&ctrl->sched.m_reconf);
+ do {
+ struct slim_pending_ch *pch;
+ bool add_mark_removal = true;
+
+ slc = &ctrl->chans[chan];
+ dev_dbg(&ctrl->dev, "chan:%d,ctrl:%d,def:%d", chan, chctrl,
+ slc->def);
+ if (slc->state < SLIM_CH_DEFINED) {
+ ret = -ENOTCONN;
+ break;
+ }
+ if (chctrl == SLIM_CH_SUSPEND) {
+ ret = add_pending_ch(&sb->mark_suspend, chan);
+ if (ret)
+ break;
+ } else if (chctrl == SLIM_CH_ACTIVATE) {
+ if (slc->state > SLIM_CH_ACTIVE) {
+ ret = -EISCONN;
+ break;
+ }
+ ret = add_pending_ch(&sb->mark_define, chan);
+ if (ret)
+ break;
+ } else {
+ if (slc->state < SLIM_CH_ACTIVE) {
+ ret = -ENOTCONN;
+ break;
+ }
+ /*
+ * If a channel removal request arrives while the channel
+ * is still pending in mark_define, remove it from the
+ * define list instead of adding it to the removal list
+ */
+ if (!list_empty(&sb->mark_define)) {
+ struct list_head *pos, *next;
+
+ list_for_each_safe(pos, next,
+ &sb->mark_define) {
+ pch = list_entry(pos,
+ struct slim_pending_ch,
+ pending);
+ if (pch->chan == chan) {
+ list_del(&pch->pending);
+ kfree(pch);
+ add_mark_removal = false;
+ break;
+ }
+ }
+ }
+ if (add_mark_removal) {
+ ret = add_pending_ch(&sb->mark_removal, chan);
+ if (ret)
+ break;
+ }
+ }
+
+ nchan++;
+ if (nchan < SLIM_GRP_TO_NCHAN(chanh))
+ chan = SLIM_HDL_TO_CHIDX(slc->nextgrp);
+ } while (nchan < SLIM_GRP_TO_NCHAN(chanh));
+ mutex_unlock(&ctrl->sched.m_reconf);
+ if (!ret && commit)
+ ret = slim_reconfigure_now(sb);
+ mutex_unlock(&sb->sldev_reconf);
+ return ret;
+}
+EXPORT_SYMBOL(slim_control_ch);
+
+/*
+ * slim_reservemsg_bw: Request to reserve bandwidth for messages.
+ * @sb: client handle
+ * @bw_bps: message bandwidth in bits per second to be requested
+ * @commit: indicates whether the reconfiguration needs to be acted upon.
+ * This API call can be grouped with slim_control_ch API call with only one of
+ * the APIs specifying the commit flag to avoid reconfiguration being called too
+ * frequently. -EXFULL is returned if there is no space in TDM to reserve the
+ * bandwidth. -EBUSY is returned if reconfiguration is requested, but a request
+ * is already in progress.
+ */
+int slim_reservemsg_bw(struct slim_device *sb, u32 bw_bps, bool commit)
+{
+ struct slim_controller *ctrl = sb->ctrl;
+ int ret = 0;
+ int sl;
+
+ mutex_lock(&sb->sldev_reconf);
+ if ((bw_bps >> 3) >= ctrl->a_framer->rootfreq)
+ sl = SLIM_SL_PER_SUPERFRAME;
+ else {
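+ /*
+ * Slots needed per superframe, rounded up:
+ * sl = ceil(bw_bps * K / (rootfreq / 2)), where
+ * K = SLIM_CL_PER_SUPERFRAME_DIV8 / SLIM_CL_PER_SL / 2
+ */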
+ sl = (bw_bps * (SLIM_CL_PER_SUPERFRAME_DIV8/SLIM_CL_PER_SL/2) +
+ (ctrl->a_framer->rootfreq/2 - 1)) /
+ (ctrl->a_framer->rootfreq/2);
+ }
+ dev_dbg(&ctrl->dev, "request:bw:%d, slots:%d, current:%d\n", bw_bps, sl,
+ sb->cur_msgsl);
+ sb->pending_msgsl = sl;
+ if (commit)
+ ret = slim_reconfigure_now(sb);
+ mutex_unlock(&sb->sldev_reconf);
+ return ret;
+}
+EXPORT_SYMBOL(slim_reservemsg_bw);
+
+/*
+ * slim_ctrl_clk_pause: Called by slimbus controller to request that the
+ * clock be paused, or to wake the bus up from clock pause
+ * @ctrl: controller requesting bus to be paused or woken up
+ * @wakeup: Wakeup this controller from clock pause.
+ * @restart: Restart time value per spec used for clock pause. This value
+ * isn't used when the controller is to be woken up.
+ * This API executes the clock pause reconfiguration sequence if wakeup is
+ * false. If wakeup is true, the controller's wakeup op is called. Once
+ * paused, the slimbus clock is idle and can be disabled by the controller
+ * later.
+ */
+int slim_ctrl_clk_pause(struct slim_controller *ctrl, bool wakeup, u8 restart)
+{
+ int ret = 0;
+ int i;
+ DEFINE_SLIM_BCAST_TXN(txn, SLIM_MSG_CLK_PAUSE_SEQ_FLG |
+ SLIM_MSG_MC_BEGIN_RECONFIGURATION, 0, 3,
+ NULL, NULL, 0);
+
+ if (!wakeup && restart > SLIM_CLK_UNSPECIFIED)
+ return -EINVAL;
+ mutex_lock(&ctrl->m_ctrl);
+ if (wakeup) {
+ if (ctrl->clk_state == SLIM_CLK_ACTIVE) {
+ mutex_unlock(&ctrl->m_ctrl);
+ return 0;
+ }
+ wait_for_completion(&ctrl->pause_comp);
+ /*
+ * The slimbus framework calls the controller's wakeup op here.
+ * The controller must bring the active framer out of clock
+ * pause by applying the appropriate settings.
+ */
+ if (ctrl->clk_state == SLIM_CLK_PAUSED && ctrl->wakeup)
+ ret = ctrl->wakeup(ctrl);
+ /*
+ * If wakeup fails, make sure that the next attempt can succeed.
+ * Since we already consumed pause_comp, complete it so
+ * that the next wakeup isn't blocked forever
+ */
+ if (!ret)
+ ctrl->clk_state = SLIM_CLK_ACTIVE;
+ else
+ complete(&ctrl->pause_comp);
+ mutex_unlock(&ctrl->m_ctrl);
+ return ret;
+ }
+
+ switch (ctrl->clk_state) {
+ case SLIM_CLK_ENTERING_PAUSE:
+ case SLIM_CLK_PAUSE_FAILED:
+ /*
+ * If the controller is already trying to enter clock pause,
+ * let it finish; in case of error, retry. In both cases, the
+ * previous clock pause attempt has signalled completion.
+ */
+ wait_for_completion(&ctrl->pause_comp);
+ /* retry upon failure */
+ if (ctrl->clk_state == SLIM_CLK_PAUSE_FAILED) {
+ ctrl->clk_state = SLIM_CLK_ACTIVE;
+ } else {
+ mutex_unlock(&ctrl->m_ctrl);
+ /*
+ * Signal completion so that wakeup can wait on
+ * it.
+ */
+ complete(&ctrl->pause_comp);
+ return 0;
+ }
+ break;
+ case SLIM_CLK_PAUSED:
+ /* already paused */
+ mutex_unlock(&ctrl->m_ctrl);
+ return 0;
+ case SLIM_CLK_ACTIVE:
+ default:
+ break;
+ }
+ /* A transaction is still awaiting its response: cannot pause yet */
+ for (i = 0; i < ctrl->last_tid; i++) {
+ if (ctrl->txnt[i]) {
+ pr_info("slim_clk_pause: txn-rsp for %d pending\n", i);
+ mutex_unlock(&ctrl->m_ctrl);
+ return -EBUSY;
+ }
+ }
+ ctrl->clk_state = SLIM_CLK_ENTERING_PAUSE;
+ mutex_unlock(&ctrl->m_ctrl);
+
+ mutex_lock(&ctrl->sched.m_reconf);
+ /* Data channels active */
+ if (ctrl->sched.usedslots) {
+ pr_info("slim_clk_pause: data channel active");
+ ret = -EBUSY;
+ goto clk_pause_ret;
+ }
+
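+ /*
+ * Clock pause is itself a reconfiguration sequence:
+ * BEGIN_RECONFIGURATION, then NEXT_PAUSE_CLOCK carrying the
+ * restart mode, then RECONFIGURE_NOW, each tagged with
+ * SLIM_MSG_CLK_PAUSE_SEQ_FLG.
+ */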
+ ret = slim_processtxn(ctrl, &txn, false);
+ if (ret)
+ goto clk_pause_ret;
+
+ txn.mc = SLIM_MSG_CLK_PAUSE_SEQ_FLG | SLIM_MSG_MC_NEXT_PAUSE_CLOCK;
+ txn.len = 1;
+ txn.rl = 4;
+ txn.wbuf = &restart;
+ ret = slim_processtxn(ctrl, &txn, false);
+ if (ret)
+ goto clk_pause_ret;
+
+ txn.mc = SLIM_MSG_CLK_PAUSE_SEQ_FLG | SLIM_MSG_MC_RECONFIGURE_NOW;
+ txn.len = 0;
+ txn.rl = 3;
+ txn.wbuf = NULL;
+ ret = slim_processtxn(ctrl, &txn, false);
+
+clk_pause_ret:
+ if (ret)
+ ctrl->clk_state = SLIM_CLK_PAUSE_FAILED;
+ else
+ ctrl->clk_state = SLIM_CLK_PAUSED;
+ complete(&ctrl->pause_comp);
+ mutex_unlock(&ctrl->sched.m_reconf);
+ return ret;
+}
+EXPORT_SYMBOL(slim_ctrl_clk_pause);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Slimbus module");
+MODULE_ALIAS("platform:slimbus");