esoc: Snapshot esoc drivers
Snapshot esoc components, headers and UAPI headers from
msm-3.18@24d0c1f91eb2850
(Merge "msm: mdss: dp: handle fast attention events")
Change-Id: I55e7ea4359c1f5b855f082e66d5816316da2fd48
Signed-off-by: Abhimanyu Kapur <abhimany@codeaurora.org>
(cherry picked from commit 5d8ee90e7f7e576b877813535d08a4123bedc49e)
Signed-off-by: Channagoud Kadabi <ckadabi@codeaurora.org>
diff --git a/Documentation/devicetree/bindings/arm/msm/mdm-modem.txt b/Documentation/devicetree/bindings/arm/msm/mdm-modem.txt
new file mode 100644
index 0000000..6ddc725
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/mdm-modem.txt
@@ -0,0 +1,148 @@
+Attached MDM Modem Devices
+
+External modems are devices that are attached to the msm and controlled by gpios.
+There is also a data channel between the msm and the external modem that sometimes needs
+to be reset.
+
+Required Properties:
+- compatible: The bus devices need to be compatible with
+ "qcom,mdm2-modem", "qcom,ext-mdm9x25", "qcom,ext-mdm9x35", "qcom,ext-mdm9x45",
+ "qcom,ext-mdm9x55".
+
+Required named gpio properties:
+- qcom,mdm2ap-errfatal-gpio: gpio for the external modem to indicate to the apps processor
+ of an error fatal condition on the modem.
+- qcom,ap2mdm-errfatal-gpio: gpio for the apps processor to indicate to the external modem
+ of an error fatal condition on the apps processor.
+- qcom,mdm2ap-status-gpio: gpio to indicate to the apps processor when there is a watchdog
+ bite on the external modem.
+- qcom,ap2mdm-status-gpio: gpio for the apps processor to indicate to the modem that an apps
+ processor watchdog bite has occurred.
+- qcom,ap2mdm-soft-reset-gpio: gpio for the apps processor to use to soft-reset the external
+ modem. If the flags parameter has a value of 0x1 then the gpio is active LOW.
+
+Required Interrupts:
+- "err_fatal_irq": Interrupt generated on the apps processor when the error fatal gpio is pulled
+ high by the external modem.
+- "status_irq": Interrupt generated on the apps processor when the mdm2ap-status gpio falls low
+ on the external modem. This usually indicates a watchdog bite on the modem.
+- "plbrdy_irq": Interrupt generated on the apps processor when the mdm2ap-pblrdy gpio is pulled
+ either high or low by the external modem. This is an indication that the modem
+ has rebooted.
+- "mdm2ap_vddmin_irq": Interrupt generated on the apps processor when the external modem goes
+ into vddmin power state.
+
+Optional named gpio properties:
+- qcom,mdm2ap-pblrdy-gpio: gpio used by some external modems to indicate when the modem has
+ booted into the PBL bootloader.
+- qcom,ap2mdm-wakeup-gpio: gpio used by the apps processor to wake the external modem
+ out of a low power state.
+- qcom,ap2mdm-chnl-rdy-gpio: gpio used by the apps processor to inform the external modem
+ that data link is ready.
+- qcom,mdm2ap-wakeup-gpio: gpio from the external modem to the apps processor to wake it
+ out of a low power state.
+- qcom,ap2mdm-vddmin-gpio: gpio to indicate to the external modem when the apps processor
+ is about to enter vddmin power state.
+- qcom,mdm2ap-vddmin-gpio: gpio used by the external modem to inform the apps processor
+ when it is about to enter vddmin power state.
+- qcom,ap2mdm-kpdpwr-gpio: gpio used to simulate a power button press on the external
+ modem. Some modems use this as part of their initial power-up sequence.
+ If the "flags" parameter has a value of 0x1 then it is active LOW.
+- qcom,ap2mdm-pmic-pwr-en-gpio: Some modems need this gpio for the apps processor to enable
+ the pmic on the external modem.
+- qcom,use-usb-port-gpio: some modems use this gpio to switch a port connection from uart to usb.
+ This is used during firmware upgrade of some modems.
+- qcom,mdm-link-detect-gpio: some modems may support two interfaces. This gpio
+ indicates whether only one or both links can be used.
+
+Optional driver parameters:
+- qcom,ramdump-delay-ms: time in milliseconds to wait before starting to collect ramdumps.
+ This interval is the time to wait after an error on the external modem is
+ signaled to the apps processor before starting to collect ramdumps. Its
+ value depends on the type of external modem (e.g. MDM vs QSC), and how
+ error fatal handling is done on the modem.
+ The default value is 2 seconds (2000 milliseconds) as specified by the
+ mdm9x15 software developer. Consultation with the developer of the modem
+ software is required to determine this value for that modem.
+- qcom,ps-hold-delay-ms: minimum delay in milliseconds between consecutive PS_HOLD toggles.
+ SGLTE targets that use a QSC1215 modem require a minimum delay between consecutive
+ toggling of the PS_HOLD pmic input. For one target it is 500 milliseconds but it
+ may vary depending on the target and how the external modem is connected. The value
+ is specified by the hardware designers.
+- qcom,early-power-on: boolean flag to indicate whether to power on the modem when the device is probed.
+- qcom,sfr-query: boolean flag to indicate whether to query the modem for a reset reason.
+- qcom,no-powerdown-after-ramdumps: boolean flag to indicate whether to power down the modem after ramdumps.
+- qcom,no-a2m-errfatal-on-ssr: boolean to tell driver not to raise ap2mdm errfatal during SSR.
+- qcom,no-reset-on-first-powerup: boolean to tell driver not to reset the modem when first
+ powering up the modem.
+- qcom,ramdump-timeout-ms: ramdump timeout interval in milliseconds.
+ This interval is the time to wait for collection of the external modem's ramdump
+ to complete. Its value depends on the speed of the data connection between the
+ external modem and the apps processor on the platform. If the connection is a
+ UART port then this delay needs to be longer in order to avoid premature timeout
+ of the ramdump collection.
+ The default value is 2 minutes (120000 milliseconds) which is based on the
+ measured time it takes over a UART connection. It is reduced when the data
+ connection is an HSIC port. The value is usually tuned empirically for a
+ particular target.
+- qcom,image-upgrade-supported: boolean flag to indicate if software upgrade is supported.
+- qcom,support-shutdown: boolean flag to indicate if graceful shutdown is supported.
+- qcom,vddmin-drive-strength: drive strength in milliamps of the ap2mdm-vddmin gpio.
+ The ap2mdm_vddmin gpio is controlled by the RPM processor. It is pulled low
+ to indicate to the external modem that the apps processor has entered vddmin
+ state, and high to indicate the reverse. Its parameters are passed to the RPM
+ software from the HLOS because the RPM software has no way of saving this type
+ of configuration when an external modem is attached.
+ The value of the drive strength is specified by the hardware designers. A value
+ of 8 milliamps is typical.
+ This property is ignored if the property "qcom,ap2mdm-vddmin-gpio" is
+ not set.
+- qcom,vddmin-modes: a string indicating the "modes" requested for the ap2mdm-vddmin gpio.
+ This value is passed to RPM and is used by the RPM module to determine the
+ gpio mux function. The only currently supported modes string is "normal" and
+ corresponds to the value 0x03 that is passed to RPM.
+- qcom,restart-group: List of subsystems that will need to restart together.
+- qcom,mdm-dual-link: Boolean indicates whether both links can be used for
+ communication.
+- qcom,ssctl-instance-id: Instance id used by the subsystem to connect with the SSCTL service.
+- qcom,sysmon-id: platform device id that sysmon is probed with for the subsystem.
+- qcom,pil-force-shutdown: Boolean. If set, the SSR framework will not trigger graceful shutdown
+ on behalf of the subsystem driver.
+
+Example:
+ mdm0: qcom,mdm0 {
+ compatible = "qcom,mdm2-modem";
+ cell-index = <0>;
+ #address-cells = <0>;
+ interrupt-parent = <&mdm0>;
+ interrupts = <0 1 2 3>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0xffffffff>;
+ interrupt-map =
+ <0 &msmgpio 82 0x3
+ 1 &msmgpio 46 0x3
+ 2 &msmgpio 80 0x3
+ 3 &msmgpio 27 0x3>;
+ interrupt-names =
+ "err_fatal_irq",
+ "status_irq",
+ "plbrdy_irq",
+ "mdm2ap_vddmin_irq";
+
+ qcom,mdm2ap-errfatal-gpio = <&msmgpio 82 0x00>;
+ qcom,ap2mdm-errfatal-gpio = <&msmgpio 106 0x00>;
+ qcom,mdm2ap-status-gpio = <&msmgpio 46 0x00>;
+ qcom,ap2mdm-status-gpio = <&msmgpio 105 0x00>;
+ qcom,ap2mdm-soft-reset-gpio = <&msmgpio 24 0x00>;
+ qcom,mdm2ap-pblrdy-gpio = <&msmgpio 80 0x00>;
+ qcom,ap2mdm-wakeup-gpio = <&msmgpio 104 0x00>;
+ qcom,ap2mdm-vddmin-gpio = <&msmgpio 108 0x00>;
+ qcom,mdm2ap-vddmin-gpio = <&msmgpio 27 0x00>;
+
+ qcom,ramdump-delay-ms = <2000>;
+ qcom,ramdump-timeout-ms = <120000>;
+ qcom,vddmin-modes = "normal";
+ qcom,vddmin-drive-strength = <8>;
+ qcom,ssctl-instance-id = <10>;
+ qcom,sysmon-id = <20>;
+ };
diff --git a/drivers/Kconfig b/drivers/Kconfig
index e1e2066..cc11302 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -112,6 +112,8 @@
source "drivers/rtc/Kconfig"
+source "drivers/esoc/Kconfig"
+
source "drivers/dma/Kconfig"
source "drivers/dma-buf/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 194d20b..cf40194 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -172,4 +172,5 @@
obj-$(CONFIG_STM) += hwtracing/stm/
obj-$(CONFIG_ANDROID) += android/
obj-$(CONFIG_NVMEM) += nvmem/
+obj-$(CONFIG_ESOC) += esoc/
obj-$(CONFIG_FPGA) += fpga/
diff --git a/drivers/esoc/Kconfig b/drivers/esoc/Kconfig
new file mode 100644
index 0000000..35b2082
--- /dev/null
+++ b/drivers/esoc/Kconfig
@@ -0,0 +1,55 @@
+#
+# External soc control infrastructure and drivers
+#
+menuconfig ESOC
+ bool "External SOCs Control"
+ help
+ External SOCs can be powered on and monitored by user
+ space or kernel drivers. Additionally they can be controlled
+ to respond to control commands. This framework provides an
+ interface to track events related to the external slave socs.
+
+if ESOC
+
+config ESOC_DEV
+ bool "ESOC userspace interface"
+ help
+ Say yes here to enable a userspace representation of the control
+ link. Userspace can register a request engine or a command engine
+ for the external soc. It can receive event notifications from the
+ control link.
+
+config ESOC_CLIENT
+ bool "ESOC client interface"
+ depends on OF
+ help
+ Say yes here to enable client interface for external socs.
+ Clients can specify the external soc that they are interested in
+ by using device tree phandles. Based on this, clients can register
+ for notifications from a specific soc.
+
+config ESOC_DEBUG
+ bool "ESOC debug support"
+ help
+ Say yes here to enable debugging support in the ESOC framework
+ and individual esoc drivers.
+ If you wish to debug the esoc driver and enable more logging enable
+ this option. Based on this, DEBUG macro would be defined which will
+ allow logging of different esoc driver traces.
+
+config ESOC_MDM_4x
+ bool "Add support for external mdm9x25/mdm9x35/mdm9x45/mdm9x55"
+ help
+ In some Qualcomm Technologies, Inc. boards, an external modem such as
+ mdm9x25 or mdm9x35 is connected to a primary msm. The primary soc can
+ control/monitor the modem via gpios. The data communication with such
+ modems can occur over PCIE or HSIC.
+
+config ESOC_MDM_DRV
+ tristate "Command engine for 4x series external modems"
+ help
+ Provides a command engine to control the behavior of an external modem
+ such as mdm9x25/mdm9x35/mdm9x45/mdm9x55/QSC. Allows the primary soc to put the
+ external modem in a specific mode. Also listens for events on the
+ external modem.
+endif
diff --git a/drivers/esoc/Makefile b/drivers/esoc/Makefile
new file mode 100644
index 0000000..0987215
--- /dev/null
+++ b/drivers/esoc/Makefile
@@ -0,0 +1,8 @@
+# generic external soc control support
+
+ccflags-$(CONFIG_ESOC_DEBUG) := -DDEBUG
+obj-$(CONFIG_ESOC) += esoc_bus.o
+obj-$(CONFIG_ESOC_DEV) += esoc_dev.o
+obj-$(CONFIG_ESOC_CLIENT) += esoc_client.o
+obj-$(CONFIG_ESOC_MDM_4x) += esoc-mdm-pon.o esoc-mdm-4x.o
+obj-$(CONFIG_ESOC_MDM_DRV) += esoc-mdm-drv.o
diff --git a/drivers/esoc/esoc-mdm-4x.c b/drivers/esoc/esoc-mdm-4x.c
new file mode 100644
index 0000000..b1834e2
--- /dev/null
+++ b/drivers/esoc/esoc-mdm-4x.c
@@ -0,0 +1,1033 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/coresight.h>
+#include <linux/coresight-cti.h>
+#include <linux/workqueue.h>
+#include <soc/qcom/sysmon.h>
+#include "esoc-mdm.h"
+
+enum gpio_update_config {
+ GPIO_UPDATE_BOOTING_CONFIG = 1,
+ GPIO_UPDATE_RUNNING_CONFIG,
+};
+
+enum irq_mask {
+ IRQ_ERRFATAL = 0x1,
+ IRQ_STATUS = 0x2,
+ IRQ_PBLRDY = 0x4,
+};
+
+
+static struct gpio_map {
+ const char *name;
+ int index;
+} gpio_map[] = {
+ {"qcom,mdm2ap-errfatal-gpio", MDM2AP_ERRFATAL},
+ {"qcom,ap2mdm-errfatal-gpio", AP2MDM_ERRFATAL},
+ {"qcom,mdm2ap-status-gpio", MDM2AP_STATUS},
+ {"qcom,ap2mdm-status-gpio", AP2MDM_STATUS},
+ {"qcom,mdm2ap-pblrdy-gpio", MDM2AP_PBLRDY},
+ {"qcom,ap2mdm-wakeup-gpio", AP2MDM_WAKEUP},
+ {"qcom,ap2mdm-chnlrdy-gpio", AP2MDM_CHNLRDY},
+ {"qcom,mdm2ap-wakeup-gpio", MDM2AP_WAKEUP},
+ {"qcom,ap2mdm-vddmin-gpio", AP2MDM_VDDMIN},
+ {"qcom,mdm2ap-vddmin-gpio", MDM2AP_VDDMIN},
+ {"qcom,ap2mdm-pmic-pwr-en-gpio", AP2MDM_PMIC_PWR_EN},
+ {"qcom,mdm-link-detect-gpio", MDM_LINK_DETECT},
+};
+
+/* Required gpios */
+static const int required_gpios[] = {
+ MDM2AP_ERRFATAL,
+ AP2MDM_ERRFATAL,
+ MDM2AP_STATUS,
+ AP2MDM_STATUS,
+};
+
+static void mdm_debug_gpio_show(struct mdm_ctrl *mdm)
+{
+ struct device *dev = mdm->dev;
+
+ dev_dbg(dev, "%s: MDM2AP_ERRFATAL gpio = %d\n",
+ __func__, MDM_GPIO(mdm, MDM2AP_ERRFATAL));
+ dev_dbg(dev, "%s: AP2MDM_ERRFATAL gpio = %d\n",
+ __func__, MDM_GPIO(mdm, AP2MDM_ERRFATAL));
+ dev_dbg(dev, "%s: MDM2AP_STATUS gpio = %d\n",
+ __func__, MDM_GPIO(mdm, MDM2AP_STATUS));
+ dev_dbg(dev, "%s: AP2MDM_STATUS gpio = %d\n",
+ __func__, MDM_GPIO(mdm, AP2MDM_STATUS));
+ dev_dbg(dev, "%s: AP2MDM_SOFT_RESET gpio = %d\n",
+ __func__, MDM_GPIO(mdm, AP2MDM_SOFT_RESET));
+ dev_dbg(dev, "%s: MDM2AP_WAKEUP gpio = %d\n",
+ __func__, MDM_GPIO(mdm, MDM2AP_WAKEUP));
+ dev_dbg(dev, "%s: AP2MDM_WAKEUP gpio = %d\n",
+ __func__, MDM_GPIO(mdm, AP2MDM_WAKEUP));
+ dev_dbg(dev, "%s: AP2MDM_PMIC_PWR_EN gpio = %d\n",
+ __func__, MDM_GPIO(mdm, AP2MDM_PMIC_PWR_EN));
+ dev_dbg(dev, "%s: MDM2AP_PBLRDY gpio = %d\n",
+ __func__, MDM_GPIO(mdm, MDM2AP_PBLRDY));
+ dev_dbg(dev, "%s: AP2MDM_VDDMIN gpio = %d\n",
+ __func__, MDM_GPIO(mdm, AP2MDM_VDDMIN));
+ dev_dbg(dev, "%s: MDM2AP_VDDMIN gpio = %d\n",
+ __func__, MDM_GPIO(mdm, MDM2AP_VDDMIN));
+}
+
+static void mdm_enable_irqs(struct mdm_ctrl *mdm)
+{
+ if (!mdm)
+ return;
+ if (mdm->irq_mask & IRQ_ERRFATAL) {
+ enable_irq(mdm->errfatal_irq);
+ irq_set_irq_wake(mdm->errfatal_irq, 1);
+ mdm->irq_mask &= ~IRQ_ERRFATAL;
+ }
+ if (mdm->irq_mask & IRQ_STATUS) {
+ enable_irq(mdm->status_irq);
+ irq_set_irq_wake(mdm->status_irq, 1);
+ mdm->irq_mask &= ~IRQ_STATUS;
+ }
+ if (mdm->irq_mask & IRQ_PBLRDY) {
+ enable_irq(mdm->pblrdy_irq);
+ mdm->irq_mask &= ~IRQ_PBLRDY;
+ }
+}
+
+static void mdm_disable_irqs(struct mdm_ctrl *mdm)
+{
+ if (!mdm)
+ return;
+ if (!(mdm->irq_mask & IRQ_ERRFATAL)) {
+ irq_set_irq_wake(mdm->errfatal_irq, 0);
+ disable_irq_nosync(mdm->errfatal_irq);
+ mdm->irq_mask |= IRQ_ERRFATAL;
+ }
+ if (!(mdm->irq_mask & IRQ_STATUS)) {
+ irq_set_irq_wake(mdm->status_irq, 0);
+ disable_irq_nosync(mdm->status_irq);
+ mdm->irq_mask |= IRQ_STATUS;
+ }
+ if (!(mdm->irq_mask & IRQ_PBLRDY)) {
+ disable_irq_nosync(mdm->pblrdy_irq);
+ mdm->irq_mask |= IRQ_PBLRDY;
+ }
+}
+
+static void mdm_deconfigure_ipc(struct mdm_ctrl *mdm)
+{
+ int i;
+
+ for (i = 0; i < NUM_GPIOS; ++i) {
+ if (gpio_is_valid(MDM_GPIO(mdm, i)))
+ gpio_free(MDM_GPIO(mdm, i));
+ }
+ if (mdm->mdm_queue) {
+ destroy_workqueue(mdm->mdm_queue);
+ mdm->mdm_queue = NULL;
+ }
+}
+
+static void mdm_update_gpio_configs(struct mdm_ctrl *mdm,
+ enum gpio_update_config gpio_config)
+{
+ struct pinctrl_state *pins_state = NULL;
+ /* Some gpio configuration may need updating after modem bootup.*/
+ switch (gpio_config) {
+ case GPIO_UPDATE_RUNNING_CONFIG:
+ pins_state = mdm->gpio_state_running;
+ break;
+ case GPIO_UPDATE_BOOTING_CONFIG:
+ pins_state = mdm->gpio_state_booting;
+ break;
+ default:
+ pins_state = NULL;
+ dev_err(mdm->dev, "%s: called with no config\n", __func__);
+ break;
+ }
+ if (pins_state != NULL) {
+ if (pinctrl_select_state(mdm->pinctrl, pins_state))
+ dev_err(mdm->dev, "switching gpio config failed\n");
+ }
+}
+
+static void mdm_trigger_dbg(struct mdm_ctrl *mdm)
+{
+ int ret;
+
+ if (mdm->dbg_mode && !mdm->trig_cnt) {
+ ret = coresight_cti_pulse_trig(mdm->cti, MDM_CTI_CH);
+ mdm->trig_cnt++;
+ if (ret)
+ dev_err(mdm->dev, "unable to trigger cti pulse on\n");
+ }
+}
+
+static int mdm_cmd_exe(enum esoc_cmd cmd, struct esoc_clink *esoc)
+{
+ unsigned long end_time;
+ bool status_down = false;
+ struct mdm_ctrl *mdm = get_esoc_clink_data(esoc);
+ struct device *dev = mdm->dev;
+ int ret;
+ bool graceful_shutdown = false;
+
+ switch (cmd) {
+ case ESOC_PWR_ON:
+ gpio_set_value(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 0);
+ mdm_enable_irqs(mdm);
+ mdm->init = 1;
+ mdm_do_first_power_on(mdm);
+ break;
+ case ESOC_PWR_OFF:
+ mdm_disable_irqs(mdm);
+ mdm->debug = 0;
+ mdm->ready = false;
+ mdm->trig_cnt = 0;
+ graceful_shutdown = true;
+ ret = sysmon_send_shutdown(&esoc->subsys);
+ if (ret) {
+ dev_err(mdm->dev, "sysmon shutdown fail, ret = %d\n",
+ ret);
+ graceful_shutdown = false;
+ goto force_poff;
+ }
+ dev_dbg(mdm->dev, "Waiting for status gpio go low\n");
+ status_down = false;
+ end_time = jiffies + msecs_to_jiffies(10000);
+ while (time_before(jiffies, end_time)) {
+ if (gpio_get_value(MDM_GPIO(mdm, MDM2AP_STATUS))
+ == 0) {
+ dev_dbg(dev, "Status went low\n");
+ status_down = true;
+ break;
+ }
+ msleep(100);
+ }
+ if (status_down)
+ dev_dbg(dev, "shutdown successful\n");
+ else
+ dev_err(mdm->dev, "graceful poff ipc fail\n");
+ break;
+force_poff:
+ case ESOC_FORCE_PWR_OFF:
+ if (!graceful_shutdown) {
+ mdm_disable_irqs(mdm);
+ mdm->debug = 0;
+ mdm->ready = false;
+ mdm->trig_cnt = 0;
+
+ dev_err(mdm->dev, "Graceful shutdown fail, ret = %d\n",
+ esoc->subsys.sysmon_shutdown_ret);
+ }
+
+ /*
+ * Force a shutdown of the mdm. This is required in order
+ * to prevent the mdm from immediately powering back on
+ * after the shutdown
+ */
+ gpio_set_value(MDM_GPIO(mdm, AP2MDM_STATUS), 0);
+ esoc_clink_queue_request(ESOC_REQ_SHUTDOWN, esoc);
+ mdm_power_down(mdm);
+ mdm_update_gpio_configs(mdm, GPIO_UPDATE_BOOTING_CONFIG);
+ break;
+ case ESOC_RESET:
+ mdm_toggle_soft_reset(mdm, false);
+ break;
+ case ESOC_PREPARE_DEBUG:
+ /*
+ * disable all irqs except request irq (pblrdy)
+ * force a reset of the mdm by signaling
+ * an APQ crash, wait till mdm is ready for ramdumps.
+ */
+ mdm->ready = false;
+ cancel_delayed_work(&mdm->mdm2ap_status_check_work);
+ gpio_set_value(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 1);
+ dev_dbg(mdm->dev, "set ap2mdm errfatal to force reset\n");
+ msleep(mdm->ramdump_delay_ms);
+ break;
+ case ESOC_EXE_DEBUG:
+ mdm->debug = 1;
+ mdm->trig_cnt = 0;
+ mdm_toggle_soft_reset(mdm, false);
+ /*
+ * wait for ramdumps to be collected
+ * then power down the mdm and switch gpios to booting
+ * config
+ */
+ wait_for_completion(&mdm->debug_done);
+ if (mdm->debug_fail) {
+ dev_err(mdm->dev, "unable to collect ramdumps\n");
+ mdm->debug = 0;
+ return -EIO;
+ }
+ dev_dbg(mdm->dev, "ramdump collection done\n");
+ mdm->debug = 0;
+ init_completion(&mdm->debug_done);
+ break;
+ case ESOC_EXIT_DEBUG:
+ /*
+ * Deassert APQ to mdm err fatal
+ * Power on the mdm
+ */
+ gpio_set_value(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 0);
+ dev_dbg(mdm->dev, "exiting debug state after power on\n");
+ mdm->get_restart_reason = true;
+ break;
+ default:
+ return -EINVAL;
+ };
+ return 0;
+}
+
+static void mdm2ap_status_check(struct work_struct *work)
+{
+ struct mdm_ctrl *mdm =
+ container_of(work, struct mdm_ctrl,
+ mdm2ap_status_check_work.work);
+ struct device *dev = mdm->dev;
+ struct esoc_clink *esoc = mdm->esoc;
+
+ if (gpio_get_value(MDM_GPIO(mdm, MDM2AP_STATUS)) == 0) {
+ dev_dbg(dev, "MDM2AP_STATUS did not go high\n");
+ esoc_clink_evt_notify(ESOC_UNEXPECTED_RESET, esoc);
+ }
+}
+
+static void mdm_status_fn(struct work_struct *work)
+{
+ struct mdm_ctrl *mdm =
+ container_of(work, struct mdm_ctrl, mdm_status_work);
+ struct device *dev = mdm->dev;
+ int value = gpio_get_value(MDM_GPIO(mdm, MDM2AP_STATUS));
+
+ dev_dbg(dev, "%s: status:%d\n", __func__, value);
+ /* Update gpio configuration to "running" config. */
+ mdm_update_gpio_configs(mdm, GPIO_UPDATE_RUNNING_CONFIG);
+}
+
+static void mdm_get_restart_reason(struct work_struct *work)
+{
+ int ret, ntries = 0;
+ char sfr_buf[RD_BUF_SIZE];
+ struct mdm_ctrl *mdm =
+ container_of(work, struct mdm_ctrl, restart_reason_work);
+ struct device *dev = mdm->dev;
+
+ do {
+ ret = sysmon_get_reason(&mdm->esoc->subsys, sfr_buf,
+ sizeof(sfr_buf));
+ if (!ret) {
+ dev_err(dev, "mdm restart reason is %s\n", sfr_buf);
+ break;
+ }
+ msleep(SFR_RETRY_INTERVAL);
+ } while (++ntries < SFR_MAX_RETRIES);
+ if (ntries == SFR_MAX_RETRIES)
+ dev_dbg(dev, "%s: Error retrieving restart reason: %d\n",
+ __func__, ret);
+ mdm->get_restart_reason = false;
+}
+
+static void mdm_notify(enum esoc_notify notify, struct esoc_clink *esoc)
+{
+ bool status_down;
+ uint64_t timeout;
+ uint64_t now;
+ struct mdm_ctrl *mdm = get_esoc_clink_data(esoc);
+ struct device *dev = mdm->dev;
+
+ switch (notify) {
+ case ESOC_IMG_XFER_DONE:
+ if (gpio_get_value(MDM_GPIO(mdm, MDM2AP_STATUS)) == 0)
+ schedule_delayed_work(&mdm->mdm2ap_status_check_work,
+ msecs_to_jiffies(MDM2AP_STATUS_TIMEOUT_MS));
+ break;
+ case ESOC_BOOT_DONE:
+ esoc_clink_evt_notify(ESOC_RUN_STATE, esoc);
+ break;
+ case ESOC_IMG_XFER_RETRY:
+ mdm->init = 1;
+ mdm_toggle_soft_reset(mdm, false);
+ break;
+ case ESOC_IMG_XFER_FAIL:
+ esoc_clink_evt_notify(ESOC_INVALID_STATE, esoc);
+ break;
+ case ESOC_BOOT_FAIL:
+ esoc_clink_evt_notify(ESOC_INVALID_STATE, esoc);
+ break;
+ case ESOC_UPGRADE_AVAILABLE:
+ break;
+ case ESOC_DEBUG_DONE:
+ mdm->debug_fail = false;
+ mdm_update_gpio_configs(mdm, GPIO_UPDATE_BOOTING_CONFIG);
+ complete(&mdm->debug_done);
+ break;
+ case ESOC_DEBUG_FAIL:
+ mdm->debug_fail = true;
+ complete(&mdm->debug_done);
+ break;
+ case ESOC_PRIMARY_CRASH:
+ mdm_disable_irqs(mdm);
+ status_down = false;
+ dev_dbg(dev, "signal apq err fatal for graceful restart\n");
+ gpio_set_value(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 1);
+ timeout = local_clock();
+ do_div(timeout, NSEC_PER_MSEC);
+ timeout += MDM_MODEM_TIMEOUT;
+ do {
+ if (gpio_get_value(MDM_GPIO(mdm,
+ MDM2AP_STATUS)) == 0) {
+ status_down = true;
+ break;
+ }
+ now = local_clock();
+ do_div(now, NSEC_PER_MSEC);
+ } while (!time_after64(now, timeout));
+
+ if (!status_down) {
+ dev_err(mdm->dev, "%s MDM2AP status did not go low\n",
+ __func__);
+ mdm_toggle_soft_reset(mdm, true);
+ }
+ break;
+ case ESOC_PRIMARY_REBOOT:
+ mdm_disable_irqs(mdm);
+ mdm->debug = 0;
+ mdm->ready = false;
+ mdm_cold_reset(mdm);
+ break;
+ };
+}
+
+static irqreturn_t mdm_errfatal(int irq, void *dev_id)
+{
+ struct mdm_ctrl *mdm = (struct mdm_ctrl *)dev_id;
+ struct esoc_clink *esoc;
+ struct device *dev;
+
+ if (!mdm)
+ goto no_mdm_irq;
+ dev = mdm->dev;
+ if (!mdm->ready)
+ goto mdm_pwroff_irq;
+ esoc = mdm->esoc;
+ dev_err(dev, "%s: mdm sent errfatal interrupt\n",
+ __func__);
+ /* disable irq ?*/
+ esoc_clink_evt_notify(ESOC_ERR_FATAL, esoc);
+ return IRQ_HANDLED;
+mdm_pwroff_irq:
+ dev_info(dev, "errfatal irq when in pwroff\n");
+no_mdm_irq:
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mdm_status_change(int irq, void *dev_id)
+{
+ int value;
+ struct esoc_clink *esoc;
+ struct mdm_ctrl *mdm = (struct mdm_ctrl *)dev_id;
+ struct device *dev = mdm->dev;
+
+ if (!mdm)
+ return IRQ_HANDLED;
+ esoc = mdm->esoc;
+ value = gpio_get_value(MDM_GPIO(mdm, MDM2AP_STATUS));
+ if (value == 0 && mdm->ready) {
+ dev_err(dev, "unexpected reset external modem\n");
+ esoc_clink_evt_notify(ESOC_UNEXPECTED_RESET, esoc);
+ } else if (value == 1) {
+ cancel_delayed_work(&mdm->mdm2ap_status_check_work);
+ dev_dbg(dev, "status = 1: mdm is now ready\n");
+ mdm->ready = true;
+ mdm_trigger_dbg(mdm);
+ queue_work(mdm->mdm_queue, &mdm->mdm_status_work);
+ if (mdm->get_restart_reason)
+ queue_work(mdm->mdm_queue, &mdm->restart_reason_work);
+ }
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mdm_pblrdy_change(int irq, void *dev_id)
+{
+ struct mdm_ctrl *mdm;
+ struct device *dev;
+ struct esoc_clink *esoc;
+
+ mdm = (struct mdm_ctrl *)dev_id;
+ if (!mdm)
+ return IRQ_HANDLED;
+ esoc = mdm->esoc;
+ dev = mdm->dev;
+ dev_dbg(dev, "pbl ready %d:\n",
+ gpio_get_value(MDM_GPIO(mdm, MDM2AP_PBLRDY)));
+ if (mdm->init) {
+ mdm->init = 0;
+ mdm_trigger_dbg(mdm);
+ esoc_clink_queue_request(ESOC_REQ_IMG, esoc);
+ return IRQ_HANDLED;
+ }
+ if (mdm->debug)
+ esoc_clink_queue_request(ESOC_REQ_DEBUG, esoc);
+ return IRQ_HANDLED;
+}
+
+static int mdm_get_status(u32 *status, struct esoc_clink *esoc)
+{
+ struct mdm_ctrl *mdm = get_esoc_clink_data(esoc);
+
+ if (gpio_get_value(MDM_GPIO(mdm, MDM2AP_STATUS)) == 0)
+ *status = 0;
+ else
+ *status = 1;
+ return 0;
+}
+
+static void mdm_configure_debug(struct mdm_ctrl *mdm)
+{
+ void __iomem *addr;
+ unsigned int val;
+ int ret;
+ struct device_node *node = mdm->dev->of_node;
+
+ addr = of_iomap(node, 0);
+ if (IS_ERR(addr)) {
+ dev_err(mdm->dev, "failed to get debug base address\n");
+ return;
+ }
+ mdm->dbg_addr = addr + MDM_DBG_OFFSET;
+ val = readl_relaxed(mdm->dbg_addr);
+ if (val == MDM_DBG_MODE) {
+ mdm->dbg_mode = true;
+ mdm->cti = coresight_cti_get(MDM_CTI_NAME);
+ if (IS_ERR(mdm->cti)) {
+ dev_err(mdm->dev, "unable to get cti handle\n");
+ goto cti_get_err;
+ }
+ ret = coresight_cti_map_trigout(mdm->cti, MDM_CTI_TRIG,
+ MDM_CTI_CH);
+ if (ret) {
+ dev_err(mdm->dev, "unable to map trig to channel\n");
+ goto cti_map_err;
+ }
+ mdm->trig_cnt = 0;
+ } else {
+ dev_dbg(mdm->dev, "Not in debug mode. debug mode = %u\n", val);
+ mdm->dbg_mode = false;
+ }
+ return;
+cti_map_err:
+ coresight_cti_put(mdm->cti);
+cti_get_err:
+ mdm->dbg_mode = false;
+}
+
+/* Fail if any of the required gpios is absent. */
+static int mdm_dt_parse_gpios(struct mdm_ctrl *mdm)
+{
+ int i, val, rc = 0;
+ struct device_node *node = mdm->dev->of_node;
+
+ for (i = 0; i < NUM_GPIOS; i++)
+ mdm->gpios[i] = INVALID_GPIO;
+
+ for (i = 0; i < ARRAY_SIZE(gpio_map); i++) {
+ val = of_get_named_gpio(node, gpio_map[i].name, 0);
+ if (val >= 0)
+ MDM_GPIO(mdm, gpio_map[i].index) = val;
+ }
+ /* These two are special because they can be inverted. */
+ /* Verify that the required gpios have valid values */
+ for (i = 0; i < ARRAY_SIZE(required_gpios); i++) {
+ if (MDM_GPIO(mdm, required_gpios[i]) == INVALID_GPIO) {
+ rc = -ENXIO;
+ break;
+ }
+ }
+ mdm_debug_gpio_show(mdm);
+ return rc;
+}
+
+static int mdm_configure_ipc(struct mdm_ctrl *mdm, struct platform_device *pdev)
+{
+ int ret = -1;
+ int irq;
+ struct device *dev = mdm->dev;
+ struct device_node *node = pdev->dev.of_node;
+
+ ret = of_property_read_u32(node, "qcom,ramdump-timeout-ms",
+ &mdm->dump_timeout_ms);
+ if (ret)
+ mdm->dump_timeout_ms = DEF_RAMDUMP_TIMEOUT;
+ ret = of_property_read_u32(node, "qcom,ramdump-delay-ms",
+ &mdm->ramdump_delay_ms);
+ if (ret)
+ mdm->ramdump_delay_ms = DEF_RAMDUMP_DELAY;
+ /* Multilple gpio_request calls are allowed */
+ if (gpio_request(MDM_GPIO(mdm, AP2MDM_STATUS), "AP2MDM_STATUS"))
+ dev_err(dev, "Failed to configure AP2MDM_STATUS gpio\n");
+ /* Multilple gpio_request calls are allowed */
+ if (gpio_request(MDM_GPIO(mdm, AP2MDM_ERRFATAL), "AP2MDM_ERRFATAL"))
+ dev_err(dev, "%s Failed to configure AP2MDM_ERRFATAL gpio\n",
+ __func__);
+ if (gpio_request(MDM_GPIO(mdm, MDM2AP_STATUS), "MDM2AP_STATUS")) {
+ dev_err(dev, "%s Failed to configure MDM2AP_STATUS gpio\n",
+ __func__);
+ goto fatal_err;
+ }
+ if (gpio_request(MDM_GPIO(mdm, MDM2AP_ERRFATAL), "MDM2AP_ERRFATAL")) {
+ dev_err(dev, "%s Failed to configure MDM2AP_ERRFATAL gpio\n",
+ __func__);
+ goto fatal_err;
+ }
+ if (gpio_is_valid(MDM_GPIO(mdm, MDM2AP_PBLRDY))) {
+ if (gpio_request(MDM_GPIO(mdm, MDM2AP_PBLRDY),
+ "MDM2AP_PBLRDY")) {
+ dev_err(dev, "Cannot configure MDM2AP_PBLRDY gpio\n");
+ goto fatal_err;
+ }
+ }
+ if (gpio_is_valid(MDM_GPIO(mdm, AP2MDM_WAKEUP))) {
+ if (gpio_request(MDM_GPIO(mdm, AP2MDM_WAKEUP),
+ "AP2MDM_WAKEUP")) {
+ dev_err(dev, "Cannot configure AP2MDM_WAKEUP gpio\n");
+ goto fatal_err;
+ }
+ }
+ if (gpio_is_valid(MDM_GPIO(mdm, AP2MDM_CHNLRDY))) {
+ if (gpio_request(MDM_GPIO(mdm, AP2MDM_CHNLRDY),
+ "AP2MDM_CHNLRDY")) {
+ dev_err(dev, "Cannot configure AP2MDM_CHNLRDY gpio\n");
+ goto fatal_err;
+ }
+ }
+
+ gpio_direction_output(MDM_GPIO(mdm, AP2MDM_STATUS), 0);
+ gpio_direction_output(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 0);
+
+ if (gpio_is_valid(MDM_GPIO(mdm, AP2MDM_CHNLRDY)))
+ gpio_direction_output(MDM_GPIO(mdm, AP2MDM_CHNLRDY), 0);
+
+ gpio_direction_input(MDM_GPIO(mdm, MDM2AP_STATUS));
+ gpio_direction_input(MDM_GPIO(mdm, MDM2AP_ERRFATAL));
+
+ /* ERR_FATAL irq. */
+ irq = gpio_to_irq(MDM_GPIO(mdm, MDM2AP_ERRFATAL));
+ if (irq < 0) {
+ dev_err(dev, "bad MDM2AP_ERRFATAL IRQ resource\n");
+ goto errfatal_err;
+
+ }
+ ret = request_irq(irq, mdm_errfatal,
+ IRQF_TRIGGER_RISING, "mdm errfatal", mdm);
+
+ if (ret < 0) {
+ dev_err(dev, "%s: MDM2AP_ERRFATAL IRQ#%d request failed,\n",
+ __func__, irq);
+ goto errfatal_err;
+ }
+ mdm->errfatal_irq = irq;
+
+errfatal_err:
+ /* status irq */
+ irq = gpio_to_irq(MDM_GPIO(mdm, MDM2AP_STATUS));
+ if (irq < 0) {
+ dev_err(dev, "%s: bad MDM2AP_STATUS IRQ resource, err = %d\n",
+ __func__, irq);
+ goto status_err;
+ }
+ ret = request_threaded_irq(irq, NULL, mdm_status_change,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "mdm status", mdm);
+ if (ret < 0) {
+ dev_err(dev, "%s: MDM2AP_STATUS IRQ#%d request failed, err=%d",
+ __func__, irq, ret);
+ goto status_err;
+ }
+ mdm->status_irq = irq;
+status_err:
+ if (gpio_is_valid(MDM_GPIO(mdm, MDM2AP_PBLRDY))) {
+ irq = platform_get_irq_byname(pdev, "plbrdy_irq");
+ if (irq < 0) {
+ dev_err(dev, "%s: MDM2AP_PBLRDY IRQ request failed\n",
+ __func__);
+ goto pblrdy_err;
+ }
+
+ ret = request_threaded_irq(irq, NULL, mdm_pblrdy_change,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "mdm pbl ready", mdm);
+ if (ret < 0) {
+ dev_err(dev, "MDM2AP_PBL IRQ#%d request failed %d\n",
+ irq, ret);
+ goto pblrdy_err;
+ }
+ mdm->pblrdy_irq = irq;
+ }
+ mdm_disable_irqs(mdm);
+pblrdy_err:
+ return 0;
+fatal_err:
+ mdm_deconfigure_ipc(mdm);
+ return ret;
+
+}
+
+/*
+ * mdm_pinctrl_init() - acquire pinctrl and look up the modem gpio states.
+ * "mdm_booting"/"mdm_running" states are optional; "mdm_active" and
+ * "mdm_suspend" are mandatory. Selects the active state on success.
+ * Returns 0 on success or a negative errno.
+ */
+static int mdm_pinctrl_init(struct mdm_ctrl *mdm)
+{
+	int retval = 0;
+
+	mdm->pinctrl = devm_pinctrl_get(mdm->dev);
+	if (IS_ERR_OR_NULL(mdm->pinctrl)) {
+		/*
+		 * PTR_ERR(NULL) is 0 and would be reported as success;
+		 * map a NULL handle to -ENODEV explicitly.
+		 */
+		retval = mdm->pinctrl ? PTR_ERR(mdm->pinctrl) : -ENODEV;
+		goto err_state_suspend;
+	}
+	mdm->gpio_state_booting =
+		pinctrl_lookup_state(mdm->pinctrl,
+				"mdm_booting");
+	if (IS_ERR_OR_NULL(mdm->gpio_state_booting)) {
+		/* Optional pair of states: run without them if absent */
+		mdm->gpio_state_running = NULL;
+		mdm->gpio_state_booting = NULL;
+	} else {
+		mdm->gpio_state_running =
+			pinctrl_lookup_state(mdm->pinctrl,
+					"mdm_running");
+		if (IS_ERR_OR_NULL(mdm->gpio_state_running)) {
+			mdm->gpio_state_booting = NULL;
+			mdm->gpio_state_running = NULL;
+		}
+	}
+	mdm->gpio_state_active =
+		pinctrl_lookup_state(mdm->pinctrl,
+				"mdm_active");
+	if (IS_ERR_OR_NULL(mdm->gpio_state_active)) {
+		retval = mdm->gpio_state_active ?
+			PTR_ERR(mdm->gpio_state_active) : -ENODEV;
+		goto err_state_active;
+	}
+	mdm->gpio_state_suspend =
+		pinctrl_lookup_state(mdm->pinctrl,
+				"mdm_suspend");
+	if (IS_ERR_OR_NULL(mdm->gpio_state_suspend)) {
+		retval = mdm->gpio_state_suspend ?
+			PTR_ERR(mdm->gpio_state_suspend) : -ENODEV;
+		goto err_state_suspend;
+	}
+	retval = pinctrl_select_state(mdm->pinctrl, mdm->gpio_state_active);
+	return retval;
+
+err_state_suspend:
+	mdm->gpio_state_active = NULL;
+err_state_active:
+	mdm->gpio_state_suspend = NULL;
+	mdm->gpio_state_booting = NULL;
+	mdm->gpio_state_running = NULL;
+	return retval;
+}
+/*
+ * mdm9x25_setup_hw() - one-time hardware/IPC setup for an MDM9x25 modem.
+ * Parses gpios, power-on and pinctrl data from DT, configures the IPC
+ * irqs and registers the esoc control link.
+ * Returns 0 on success or a negative errno.
+ */
+static int mdm9x25_setup_hw(struct mdm_ctrl *mdm,
+					const struct mdm_ops *ops,
+					struct platform_device *pdev)
+{
+	int ret;
+	struct esoc_clink *esoc;
+	const struct esoc_clink_ops *clink_ops = ops->clink_ops;
+	const struct mdm_pon_ops *pon_ops = ops->pon_ops;
+
+	mdm->dev = &pdev->dev;
+	mdm->pon_ops = pon_ops;
+	esoc = devm_kzalloc(mdm->dev, sizeof(*esoc), GFP_KERNEL);
+	/* devm_kzalloc() returns NULL on failure, never an ERR_PTR */
+	if (!esoc) {
+		dev_err(mdm->dev, "cannot allocate esoc device\n");
+		return -ENOMEM;
+	}
+	mdm->mdm_queue = alloc_workqueue("mdm_queue", 0, 0);
+	if (!mdm->mdm_queue) {
+		dev_err(mdm->dev, "could not create mdm_queue\n");
+		return -ENOMEM;
+	}
+	mdm->irq_mask = 0;
+	mdm->ready = false;
+	ret = mdm_dt_parse_gpios(mdm);
+	if (ret)
+		return ret;
+	/* progress logs demoted to dev_dbg, matching the 9x35/9x55 variants */
+	dev_dbg(mdm->dev, "parsing gpio done\n");
+	ret = mdm_pon_dt_init(mdm);
+	if (ret)
+		return ret;
+	dev_dbg(mdm->dev, "pon dt init done\n");
+	ret = mdm_pinctrl_init(mdm);
+	if (ret)
+		return ret;
+	dev_dbg(mdm->dev, "pinctrl init done\n");
+	ret = mdm_pon_setup(mdm);
+	if (ret)
+		return ret;
+	dev_dbg(mdm->dev, "pon setup done\n");
+	ret = mdm_configure_ipc(mdm, pdev);
+	if (ret)
+		return ret;
+	mdm_configure_debug(mdm);
+	dev_dbg(mdm->dev, "ipc configure done\n");
+	esoc->name = MDM9x25_LABEL;
+	esoc->link_name = MDM9x25_HSIC;
+	esoc->clink_ops = clink_ops;
+	esoc->parent = mdm->dev;
+	esoc->owner = THIS_MODULE;
+	esoc->np = pdev->dev.of_node;
+	set_esoc_clink_data(esoc, mdm);
+	ret = esoc_clink_register(esoc);
+	if (ret) {
+		dev_err(mdm->dev, "esoc registration failed\n");
+		return ret;
+	}
+	dev_dbg(mdm->dev, "esoc registration done\n");
+	init_completion(&mdm->debug_done);
+	INIT_WORK(&mdm->mdm_status_work, mdm_status_fn);
+	INIT_WORK(&mdm->restart_reason_work, mdm_get_restart_reason);
+	INIT_DELAYED_WORK(&mdm->mdm2ap_status_check_work, mdm2ap_status_check);
+	mdm->get_restart_reason = false;
+	mdm->debug_fail = false;
+	mdm->esoc = esoc;
+	mdm->init = 0;
+	return 0;
+}
+
+/*
+ * mdm9x35_setup_hw() - one-time hardware/IPC setup for an MDM9x35 modem.
+ * Same flow as the 9x25 variant, plus selection of the physical link
+ * (HSIC / PCIe / dual) from the link-detect gpio and DT.
+ * Returns 0 on success or a negative errno.
+ */
+static int mdm9x35_setup_hw(struct mdm_ctrl *mdm,
+					const struct mdm_ops *ops,
+					struct platform_device *pdev)
+{
+	int ret;
+	struct device_node *node;
+	struct esoc_clink *esoc;
+	const struct esoc_clink_ops *clink_ops = ops->clink_ops;
+	const struct mdm_pon_ops *pon_ops = ops->pon_ops;
+
+	mdm->dev = &pdev->dev;
+	mdm->pon_ops = pon_ops;
+	node = pdev->dev.of_node;
+	esoc = devm_kzalloc(mdm->dev, sizeof(*esoc), GFP_KERNEL);
+	/* devm_kzalloc() returns NULL on failure, never an ERR_PTR */
+	if (!esoc) {
+		dev_err(mdm->dev, "cannot allocate esoc device\n");
+		return -ENOMEM;
+	}
+	mdm->mdm_queue = alloc_workqueue("mdm_queue", 0, 0);
+	if (!mdm->mdm_queue) {
+		dev_err(mdm->dev, "could not create mdm_queue\n");
+		return -ENOMEM;
+	}
+	mdm->irq_mask = 0;
+	mdm->ready = false;
+	ret = mdm_dt_parse_gpios(mdm);
+	if (ret)
+		return ret;
+	dev_dbg(mdm->dev, "parsing gpio done\n");
+	ret = mdm_pon_dt_init(mdm);
+	if (ret)
+		return ret;
+	dev_dbg(mdm->dev, "pon dt init done\n");
+	ret = mdm_pinctrl_init(mdm);
+	if (ret)
+		return ret;
+	dev_dbg(mdm->dev, "pinctrl init done\n");
+	ret = mdm_pon_setup(mdm);
+	if (ret)
+		return ret;
+	dev_dbg(mdm->dev, "pon setup done\n");
+	ret = mdm_configure_ipc(mdm, pdev);
+	if (ret)
+		return ret;
+	mdm_configure_debug(mdm);
+	dev_dbg(mdm->dev, "ipc configure done\n");
+	esoc->name = MDM9x35_LABEL;
+	mdm->dual_interface = of_property_read_bool(node,
+						"qcom,mdm-dual-link");
+	/* Check if link gpio is available */
+	if (gpio_is_valid(MDM_GPIO(mdm, MDM_LINK_DETECT))) {
+		if (mdm->dual_interface) {
+			if (gpio_get_value(MDM_GPIO(mdm, MDM_LINK_DETECT)))
+				esoc->link_name = MDM9x35_DUAL_LINK;
+			else
+				esoc->link_name = MDM9x35_PCIE;
+		} else {
+			if (gpio_get_value(MDM_GPIO(mdm, MDM_LINK_DETECT)))
+				esoc->link_name = MDM9x35_HSIC;
+			else
+				esoc->link_name = MDM9x35_PCIE;
+		}
+	} else if (mdm->dual_interface)
+		esoc->link_name = MDM9x35_DUAL_LINK;
+	else
+		esoc->link_name = MDM9x35_HSIC;
+	esoc->clink_ops = clink_ops;
+	esoc->parent = mdm->dev;
+	esoc->owner = THIS_MODULE;
+	esoc->np = pdev->dev.of_node;
+	set_esoc_clink_data(esoc, mdm);
+	ret = esoc_clink_register(esoc);
+	if (ret) {
+		dev_err(mdm->dev, "esoc registration failed\n");
+		return ret;
+	}
+	dev_dbg(mdm->dev, "esoc registration done\n");
+	init_completion(&mdm->debug_done);
+	INIT_WORK(&mdm->mdm_status_work, mdm_status_fn);
+	INIT_WORK(&mdm->restart_reason_work, mdm_get_restart_reason);
+	INIT_DELAYED_WORK(&mdm->mdm2ap_status_check_work, mdm2ap_status_check);
+	mdm->get_restart_reason = false;
+	mdm->debug_fail = false;
+	mdm->esoc = esoc;
+	mdm->init = 0;
+	return 0;
+}
+
+/*
+ * mdm9x55_setup_hw() - one-time hardware/IPC setup for an MDM9x55 modem.
+ * Same flow as the other variants; the link is always PCIe.
+ * Returns 0 on success or a negative errno.
+ */
+static int mdm9x55_setup_hw(struct mdm_ctrl *mdm,
+					const struct mdm_ops *ops,
+					struct platform_device *pdev)
+{
+	int ret;
+	struct device_node *node;
+	struct esoc_clink *esoc;
+	const struct esoc_clink_ops *clink_ops = ops->clink_ops;
+	const struct mdm_pon_ops *pon_ops = ops->pon_ops;
+
+	mdm->dev = &pdev->dev;
+	mdm->pon_ops = pon_ops;
+	node = pdev->dev.of_node;
+	esoc = devm_kzalloc(mdm->dev, sizeof(*esoc), GFP_KERNEL);
+	/* devm_kzalloc() returns NULL on failure, never an ERR_PTR */
+	if (!esoc) {
+		dev_err(mdm->dev, "cannot allocate esoc device\n");
+		return -ENOMEM;
+	}
+	mdm->mdm_queue = alloc_workqueue("mdm_queue", 0, 0);
+	if (!mdm->mdm_queue) {
+		dev_err(mdm->dev, "could not create mdm_queue\n");
+		return -ENOMEM;
+	}
+	mdm->irq_mask = 0;
+	mdm->ready = false;
+	ret = mdm_dt_parse_gpios(mdm);
+	if (ret)
+		return ret;
+	dev_dbg(mdm->dev, "parsing gpio done\n");
+	ret = mdm_pon_dt_init(mdm);
+	if (ret)
+		return ret;
+	dev_dbg(mdm->dev, "pon dt init done\n");
+	ret = mdm_pinctrl_init(mdm);
+	if (ret)
+		return ret;
+	dev_dbg(mdm->dev, "pinctrl init done\n");
+	ret = mdm_pon_setup(mdm);
+	if (ret)
+		return ret;
+	dev_dbg(mdm->dev, "pon setup done\n");
+	ret = mdm_configure_ipc(mdm, pdev);
+	if (ret)
+		return ret;
+	dev_dbg(mdm->dev, "ipc configure done\n");
+	esoc->name = MDM9x55_LABEL;
+	mdm->dual_interface = of_property_read_bool(node,
+						"qcom,mdm-dual-link");
+	esoc->link_name = MDM9x55_PCIE;
+	esoc->clink_ops = clink_ops;
+	esoc->parent = mdm->dev;
+	esoc->owner = THIS_MODULE;
+	esoc->np = pdev->dev.of_node;
+	set_esoc_clink_data(esoc, mdm);
+	ret = esoc_clink_register(esoc);
+	if (ret) {
+		dev_err(mdm->dev, "esoc registration failed\n");
+		return ret;
+	}
+	dev_dbg(mdm->dev, "esoc registration done\n");
+	init_completion(&mdm->debug_done);
+	INIT_WORK(&mdm->mdm_status_work, mdm_status_fn);
+	INIT_WORK(&mdm->restart_reason_work, mdm_get_restart_reason);
+	INIT_DELAYED_WORK(&mdm->mdm2ap_status_check_work, mdm2ap_status_check);
+	mdm->get_restart_reason = false;
+	mdm->debug_fail = false;
+	mdm->esoc = esoc;
+	mdm->init = 0;
+	return 0;
+}
+
+/* Control-link operations shared by all external mdm variants. */
+static struct esoc_clink_ops mdm_cops = {
+	.cmd_exe = mdm_cmd_exe,
+	.get_status = mdm_get_status,
+	.notify = mdm_notify,
+};
+
+/* Per-variant setup and power-on hooks, selected via the OF match table. */
+static struct mdm_ops mdm9x25_ops = {
+	.clink_ops = &mdm_cops,
+	.config_hw = mdm9x25_setup_hw,
+	.pon_ops = &mdm9x25_pon_ops,
+};
+
+static struct mdm_ops mdm9x35_ops = {
+	.clink_ops = &mdm_cops,
+	.config_hw = mdm9x35_setup_hw,
+	.pon_ops = &mdm9x35_pon_ops,
+};
+
+static struct mdm_ops mdm9x55_ops = {
+	.clink_ops = &mdm_cops,
+	.config_hw = mdm9x55_setup_hw,
+	.pon_ops = &mdm9x55_pon_ops,
+};
+
+/* Device-tree compatibles mapped to the variant ops above. */
+static const struct of_device_id mdm_dt_match[] = {
+	{ .compatible = "qcom,ext-mdm9x25",
+		.data = &mdm9x25_ops, },
+	{ .compatible = "qcom,ext-mdm9x35",
+		.data = &mdm9x35_ops, },
+	{ .compatible = "qcom,ext-mdm9x55",
+		.data = &mdm9x55_ops, },
+	{},
+};
+MODULE_DEVICE_TABLE(of, mdm_dt_match);
+
+/*
+ * mdm_probe() - platform probe: pick the variant ops from the OF match
+ * table, allocate the controller state and run the variant setup.
+ * Returns 0 on success or a negative errno.
+ */
+static int mdm_probe(struct platform_device *pdev)
+{
+	const struct of_device_id *match;
+	const struct mdm_ops *mdm_ops;
+	struct device_node *node = pdev->dev.of_node;
+	struct mdm_ctrl *mdm;
+
+	match = of_match_node(mdm_dt_match, node);
+	/* of_match_node() returns NULL on no match, not an ERR_PTR */
+	if (!match)
+		return -ENODEV;
+	mdm_ops = match->data;
+	mdm = devm_kzalloc(&pdev->dev, sizeof(*mdm), GFP_KERNEL);
+	/* devm_kzalloc() returns NULL on failure, never an ERR_PTR */
+	if (!mdm)
+		return -ENOMEM;
+	return mdm_ops->config_hw(mdm, mdm_ops, pdev);
+}
+
+/* Platform driver for externally attached mdm modems ("ext-mdm"). */
+static struct platform_driver mdm_driver = {
+	.probe		= mdm_probe,
+	.driver = {
+		.name = "ext-mdm",
+		.owner = THIS_MODULE,
+		.of_match_table = of_match_ptr(mdm_dt_match),
+	},
+};
+
+/* Module init: register the ext-mdm platform driver. */
+static int __init mdm_register(void)
+{
+	return platform_driver_register(&mdm_driver);
+}
+module_init(mdm_register);
+
+/* Module exit: unregister the ext-mdm platform driver. */
+static void __exit mdm_unregister(void)
+{
+	platform_driver_unregister(&mdm_driver);
+}
+module_exit(mdm_unregister);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/esoc/esoc-mdm-drv.c b/drivers/esoc/esoc-mdm-drv.c
new file mode 100644
index 0000000..c62efe4
--- /dev/null
+++ b/drivers/esoc/esoc-mdm-drv.c
@@ -0,0 +1,280 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/reboot.h>
+#include "esoc.h"
+
+/* Driver-visible states of the external modem's lifecycle. */
+enum {
+	PWR_OFF = 0x1,
+	PWR_ON,
+	BOOT,
+	RUN,
+	CRASH,
+	IN_DEBUG,
+	SHUTDOWN,
+	RESET,
+	PEER_CRASH,
+};
+
+/* Per-link state for the SSR driver bound to one esoc control link. */
+struct mdm_drv {
+	unsigned int mode;		/* current lifecycle state (enum above) */
+	struct esoc_eng cmd_eng;	/* command engine registered with esoc */
+	struct completion boot_done;	/* signalled on boot success/failure */
+	struct completion req_eng_wait;	/* signalled when req engine appears */
+	struct esoc_clink *esoc_clink;
+	bool boot_fail;			/* set when boot ended in failure */
+	struct workqueue_struct *mdm_queue;
+	struct work_struct ssr_work;	/* defers SSR out of event context */
+	struct notifier_block esoc_restart;	/* reboot notifier hook */
+};
+/* Recover the mdm_drv from its embedded command engine. */
+#define to_mdm_drv(d)	container_of(d, struct mdm_drv, cmd_eng)
+
+/* Reboot notifier: warn the external modem of an impending cold reboot. */
+static int esoc_msm_restart_handler(struct notifier_block *nb,
+		unsigned long action, void *data)
+{
+	struct mdm_drv *mdm_drv = container_of(nb, struct mdm_drv,
+					esoc_restart);
+	struct esoc_clink *esoc_clink = mdm_drv->esoc_clink;
+	const struct esoc_clink_ops * const clink_ops = esoc_clink->clink_ops;
+
+	if (action != SYS_RESTART)
+		return NOTIFY_OK;
+
+	dev_dbg(&esoc_clink->dev, "Notifying esoc of cold reboot\n");
+	clink_ops->notify(ESOC_PRIMARY_REBOOT, esoc_clink);
+	return NOTIFY_OK;
+}
+/*
+ * Command-engine event callback: track boot and crash state transitions
+ * signalled by the esoc control link.
+ */
+static void mdm_handle_clink_evt(enum esoc_evt evt,
+					struct esoc_eng *eng)
+{
+	struct mdm_drv *mdm_drv = to_mdm_drv(eng);
+
+	switch (evt) {
+	case ESOC_INVALID_STATE:
+		mdm_drv->boot_fail = true;
+		complete(&mdm_drv->boot_done);
+		break;
+	case ESOC_RUN_STATE:
+		mdm_drv->boot_fail = false;
+		/* was "mdm_drv->mode = RUN," -- stray comma operator */
+		mdm_drv->mode = RUN;
+		complete(&mdm_drv->boot_done);
+		break;
+	case ESOC_UNEXPECTED_RESET:
+	case ESOC_ERR_FATAL:
+		/* Only the first fatal report triggers SSR */
+		if (mdm_drv->mode == CRASH)
+			return;
+		mdm_drv->mode = CRASH;
+		queue_work(mdm_drv->mdm_queue, &mdm_drv->ssr_work);
+		break;
+	case ESOC_REQ_ENG_ON:
+		complete(&mdm_drv->req_eng_wait);
+		break;
+	default:
+		break;
+	}
+}
+
+/* Worker: request subsystem restart of the esoc from process context. */
+static void mdm_ssr_fn(struct work_struct *work)
+{
+	struct mdm_drv *mdm_drv = container_of(work, struct mdm_drv, ssr_work);
+
+	/*
+	 * If restarting esoc fails, the SSR framework triggers a kernel panic
+	 */
+	esoc_clink_request_ssr(mdm_drv->esoc_clink);
+}
+
+/* Panic path: tell the modem that the primary processor has crashed. */
+static void mdm_crash_shutdown(const struct subsys_desc *mdm_subsys)
+{
+	struct esoc_clink *esoc_clink = container_of(mdm_subsys,
+						struct esoc_clink, subsys);
+
+	esoc_clink->clink_ops->notify(ESOC_PRIMARY_CRASH, esoc_clink);
+}
+
+/*
+ * SSR shutdown hook for the modem subsystem.
+ * After a crash, move the modem into debug mode so ramdumps can be
+ * collected later; for a regular shutdown, power it off (forcibly when
+ * the sysmon-initiated shutdown already failed).
+ * Returns 0 on success or a negative errno.
+ */
+static int mdm_subsys_shutdown(const struct subsys_desc *crashed_subsys,
+							bool force_stop)
+{
+	int ret;
+	struct esoc_clink *esoc_clink =
+	 container_of(crashed_subsys, struct esoc_clink, subsys);
+	struct mdm_drv *mdm_drv = esoc_get_drv_data(esoc_clink);
+	const struct esoc_clink_ops * const clink_ops = esoc_clink->clink_ops;
+
+	/* Crashed: keep the modem up but switch it to debug/ramdump mode */
+	if (mdm_drv->mode == CRASH || mdm_drv->mode == PEER_CRASH) {
+		ret = clink_ops->cmd_exe(ESOC_PREPARE_DEBUG,
+							esoc_clink);
+		if (ret) {
+			dev_err(&esoc_clink->dev, "failed to enter debug\n");
+			return ret;
+		}
+		mdm_drv->mode = IN_DEBUG;
+	} else if (!force_stop) {
+		/* Graceful shutdown: force power-off only if sysmon failed */
+		if (esoc_clink->subsys.sysmon_shutdown_ret)
+			ret = clink_ops->cmd_exe(ESOC_FORCE_PWR_OFF,
+							esoc_clink);
+		else
+			ret = clink_ops->cmd_exe(ESOC_PWR_OFF, esoc_clink);
+		if (ret) {
+			dev_err(&esoc_clink->dev, "failed to exe power off\n");
+			return ret;
+		}
+		mdm_drv->mode = PWR_OFF;
+	}
+	return 0;
+}
+
+/*
+ * SSR powerup hook: bring the modem back up after shutdown or debug.
+ * Blocks until a request engine has registered, powers the modem on
+ * (exiting debug mode first if needed) and waits for boot completion.
+ * Returns 0 on success, -EIO on boot failure, or a command errno.
+ */
+static int mdm_subsys_powerup(const struct subsys_desc *crashed_subsys)
+{
+	int ret;
+	struct esoc_clink *esoc_clink =
+				container_of(crashed_subsys, struct esoc_clink,
+								subsys);
+	struct mdm_drv *mdm_drv = esoc_get_drv_data(esoc_clink);
+	const struct esoc_clink_ops * const clink_ops = esoc_clink->clink_ops;
+
+	/* Boot cannot proceed until a request engine is available */
+	if (!esoc_req_eng_enabled(esoc_clink)) {
+		dev_dbg(&esoc_clink->dev, "Wait for req eng registration\n");
+		wait_for_completion(&mdm_drv->req_eng_wait);
+	}
+	if (mdm_drv->mode == PWR_OFF) {
+		ret = clink_ops->cmd_exe(ESOC_PWR_ON, esoc_clink);
+		if (ret) {
+			dev_err(&esoc_clink->dev, "pwr on fail\n");
+			return ret;
+		}
+	} else if (mdm_drv->mode == IN_DEBUG) {
+		/* Leave debug mode before powering the modem back on */
+		ret = clink_ops->cmd_exe(ESOC_EXIT_DEBUG, esoc_clink);
+		if (ret) {
+			dev_err(&esoc_clink->dev, "cannot exit debug mode\n");
+			return ret;
+		}
+		mdm_drv->mode = PWR_OFF;
+		ret = clink_ops->cmd_exe(ESOC_PWR_ON, esoc_clink);
+		if (ret) {
+			dev_err(&esoc_clink->dev, "pwr on fail\n");
+			return ret;
+		}
+	}
+	/* boot_done is completed by mdm_handle_clink_evt() */
+	wait_for_completion(&mdm_drv->boot_done);
+	if (mdm_drv->boot_fail) {
+		dev_err(&esoc_clink->dev, "booting failed\n");
+		return -EIO;
+	}
+	return 0;
+}
+
+/* SSR ramdump hook: collect modem debug dumps when requested. */
+static int mdm_subsys_ramdumps(int want_dumps,
+				const struct subsys_desc *crashed_subsys)
+{
+	struct esoc_clink *esoc_clink = container_of(crashed_subsys,
+						struct esoc_clink, subsys);
+	const struct esoc_clink_ops * const clink_ops = esoc_clink->clink_ops;
+	int ret = 0;
+
+	if (want_dumps) {
+		ret = clink_ops->cmd_exe(ESOC_EXE_DEBUG, esoc_clink);
+		if (ret)
+			dev_err(&esoc_clink->dev, "debugging failed\n");
+	}
+	return ret;
+}
+
+/* Wire the subsystem-restart callbacks and register the link with SSR. */
+static int mdm_register_ssr(struct esoc_clink *esoc_clink)
+{
+	struct subsys_desc *subsys = &esoc_clink->subsys;
+
+	subsys->shutdown = mdm_subsys_shutdown;
+	subsys->ramdump = mdm_subsys_ramdumps;
+	subsys->powerup = mdm_subsys_powerup;
+	subsys->crash_shutdown = mdm_crash_shutdown;
+	return esoc_clink_register_ssr(esoc_clink);
+}
+
+/*
+ * esoc_ssr_probe() - bind the SSR driver to an esoc control link.
+ * Registers a command engine, hooks into subsystem restart, creates the
+ * driver workqueue and installs a reboot notifier.
+ * Returns 0 on success or a negative errno; partially acquired
+ * resources are released on failure.
+ */
+int esoc_ssr_probe(struct esoc_clink *esoc_clink, struct esoc_drv *drv)
+{
+	int ret;
+	struct mdm_drv *mdm_drv;
+	struct esoc_eng *esoc_eng;
+
+	mdm_drv = devm_kzalloc(&esoc_clink->dev, sizeof(*mdm_drv), GFP_KERNEL);
+	/* devm_kzalloc() returns NULL on failure, never an ERR_PTR */
+	if (!mdm_drv)
+		return -ENOMEM;
+	esoc_eng = &mdm_drv->cmd_eng;
+	esoc_eng->handle_clink_evt = mdm_handle_clink_evt;
+	ret = esoc_clink_register_cmd_eng(esoc_clink, esoc_eng);
+	if (ret) {
+		dev_err(&esoc_clink->dev, "failed to register cmd engine\n");
+		return ret;
+	}
+	ret = mdm_register_ssr(esoc_clink);
+	if (ret)
+		goto ssr_err;
+	mdm_drv->mdm_queue = alloc_workqueue("mdm_drv_queue", 0, 0);
+	if (!mdm_drv->mdm_queue) {
+		dev_err(&esoc_clink->dev, "could not create mdm_queue\n");
+		/* was falling through with ret == 0, reporting success */
+		ret = -ENOMEM;
+		goto queue_err;
+	}
+	esoc_set_drv_data(esoc_clink, mdm_drv);
+	init_completion(&mdm_drv->boot_done);
+	init_completion(&mdm_drv->req_eng_wait);
+	INIT_WORK(&mdm_drv->ssr_work, mdm_ssr_fn);
+	mdm_drv->esoc_clink = esoc_clink;
+	mdm_drv->mode = PWR_OFF;
+	mdm_drv->boot_fail = false;
+	mdm_drv->esoc_restart.notifier_call = esoc_msm_restart_handler;
+	ret = register_reboot_notifier(&mdm_drv->esoc_restart);
+	if (ret)
+		/* non-fatal: operate without cold-reboot notification */
+		dev_err(&esoc_clink->dev, "register for reboot failed\n");
+	return 0;
+queue_err:
+	esoc_clink_unregister_ssr(esoc_clink);
+ssr_err:
+	esoc_clink_unregister_cmd_eng(esoc_clink, esoc_eng);
+	return ret;
+}
+
+/* esoc links this SSR driver can bind to, matched by clink name. */
+static struct esoc_compat compat_table[] = {
+	{	.name = "MDM9x25",
+		.data = NULL,
+	},
+	{
+		.name = "MDM9x35",
+		.data = NULL,
+	},
+	{
+		.name = "MDM9x55",
+		.data = NULL,
+	},
+};
+
+/* The "mdm-4x" driver registered on the esoc bus. */
+static struct esoc_drv esoc_ssr_drv = {
+	.owner = THIS_MODULE,
+	.probe = esoc_ssr_probe,
+	.compat_table = compat_table,
+	.compat_entries = ARRAY_SIZE(compat_table),
+	.driver = {
+		.name = "mdm-4x",
+	},
+};
+
+/* Module init: register the SSR driver with the esoc bus. */
+int __init esoc_ssr_init(void)
+{
+	return esoc_drv_register(&esoc_ssr_drv);
+}
+module_init(esoc_ssr_init);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/esoc/esoc-mdm-pon.c b/drivers/esoc/esoc-mdm-pon.c
new file mode 100644
index 0000000..47d54db
--- /dev/null
+++ b/drivers/esoc/esoc-mdm-pon.c
@@ -0,0 +1,220 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "esoc-mdm.h"
+
+/* This function can be called from atomic context. */
+/* Pulse AP2MDM_SOFT_RESET; safe to call from atomic context. */
+static int mdm4x_toggle_soft_reset(struct mdm_ctrl *mdm, bool atomic)
+{
+	int assert_val = 0;
+	int deassert_val = 1;
+
+	/* The reset line may be wired active-low */
+	if (mdm->soft_reset_inverted) {
+		assert_val = 1;
+		deassert_val = 0;
+	}
+	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET), assert_val);
+	/*
+	 * Hold the line long enough for the PS hold assert to be detected
+	 */
+	if (atomic)
+		mdelay(6);
+	else
+		usleep_range(8000, 9000);
+	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET), deassert_val);
+	return 0;
+}
+
+/* This function can be called from atomic context. */
+/* Pulse AP2MDM_SOFT_RESET with the longer 9x55 hold; atomic-safe. */
+static int mdm9x55_toggle_soft_reset(struct mdm_ctrl *mdm, bool atomic)
+{
+	int assert_val = 0;
+	int deassert_val = 1;
+
+	/* The reset line may be wired active-low */
+	if (mdm->soft_reset_inverted) {
+		assert_val = 1;
+		deassert_val = 0;
+	}
+	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET), assert_val);
+	/*
+	 * Hold the line long enough for the PS hold assert to be detected
+	 */
+	if (atomic)
+		mdelay(203);
+	else
+		usleep_range(203000, 300000);
+	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET), deassert_val);
+	return 0;
+}
+
+
+/* First power-on: reset the modem, raise STATUS and wait for PBL ready. */
+static int mdm4x_do_first_power_on(struct mdm_ctrl *mdm)
+{
+	struct device *dev = mdm->dev;
+	int pblrdy = 0;
+	int i;
+
+	dev_dbg(dev, "Powering on modem for the first time\n");
+	mdm_toggle_soft_reset(mdm, false);
+	/* Add a delay to allow PON sequence to complete */
+	mdelay(50);
+	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_STATUS), 1);
+	if (gpio_is_valid(MDM_GPIO(mdm, MDM2AP_PBLRDY))) {
+		/* Poll PBLRDY until the boot loader signals readiness */
+		for (i = 0; i < MDM_PBLRDY_CNT; i++) {
+			pblrdy = gpio_get_value(MDM_GPIO(mdm, MDM2AP_PBLRDY));
+			if (pblrdy)
+				break;
+			usleep_range(5000, 6000);
+		}
+		dev_dbg(dev, "pblrdy i:%d\n", i);
+		mdelay(200);
+	} else {
+		/*
+		 * No PBLRDY gpio associated with this modem.
+		 * Send request for image. Let userspace confirm
+		 * establishment of link to external modem.
+		 */
+		esoc_clink_queue_request(ESOC_REQ_IMG, mdm->esoc);
+	}
+	return 0;
+}
+
+/* Force the modem off by holding its reset line asserted for 400 ms. */
+static int mdm4x_power_down(struct mdm_ctrl *mdm)
+{
+	struct device *dev = mdm->dev;
+	int soft_reset_direction = mdm->soft_reset_inverted ? 1 : 0;
+	/* Assert the soft reset line whether mdm2ap_status went low or not */
+	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+					soft_reset_direction);
+	dev_dbg(dev, "Doing a hard reset\n");
+	/*
+	 * NOTE(review): this second write drives the same level as the
+	 * assert above and looks redundant -- confirm against hardware
+	 * sequencing requirements before removing.
+	 */
+	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+					soft_reset_direction);
+	/*
+	 * Currently, there is a debounce timer on the charm PMIC. It is
+	 * necessary to hold the PMIC RESET low for 400ms
+	 * for the reset to fully take place. Sleep here to ensure the
+	 * reset has occurred before the function exits.
+	 */
+	mdelay(400);
+	return 0;
+}
+
+/* Force the 9x55 modem off by holding its reset line asserted for 406 ms. */
+static int mdm9x55_power_down(struct mdm_ctrl *mdm)
+{
+	struct device *dev = mdm->dev;
+	int soft_reset_direction = mdm->soft_reset_inverted ? 1 : 0;
+	/* Assert the soft reset line whether mdm2ap_status went low or not */
+	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+					soft_reset_direction);
+	dev_dbg(dev, "Doing a hard reset\n");
+	/*
+	 * NOTE(review): this second write drives the same level as the
+	 * assert above and looks redundant -- confirm against hardware
+	 * sequencing requirements before removing.
+	 */
+	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+					soft_reset_direction);
+	/*
+	 * Currently, there is a debounce timer on the charm PMIC. It is
+	 * necessary to hold the PMIC RESET low for 406ms
+	 * for the reset to fully take place. Sleep here to ensure the
+	 * reset has occurred before the function exits.
+	 */
+	mdelay(406);
+	return 0;
+}
+
+/* Cold reset: hard-toggle the reset line with a 300 ms assert hold. */
+static void mdm4x_cold_reset(struct mdm_ctrl *mdm)
+{
+	unsigned int reset_gpio = MDM_GPIO(mdm, AP2MDM_SOFT_RESET);
+
+	dev_dbg(mdm->dev, "Triggering mdm cold reset");
+	gpio_direction_output(reset_gpio, !!mdm->soft_reset_inverted);
+	mdelay(300);
+	gpio_direction_output(reset_gpio, !mdm->soft_reset_inverted);
+}
+
+/* Cold reset for 9x55: hard-toggle the reset line with a 334 ms hold. */
+static void mdm9x55_cold_reset(struct mdm_ctrl *mdm)
+{
+	unsigned int reset_gpio = MDM_GPIO(mdm, AP2MDM_SOFT_RESET);
+
+	dev_dbg(mdm->dev, "Triggering mdm cold reset");
+	gpio_direction_output(reset_gpio, !!mdm->soft_reset_inverted);
+	mdelay(334);
+	gpio_direction_output(reset_gpio, !mdm->soft_reset_inverted);
+}
+
+/* Read the soft-reset gpio and its polarity from the device tree. */
+static int mdm4x_pon_dt_init(struct mdm_ctrl *mdm)
+{
+	struct device_node *node = mdm->dev->of_node;
+	enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW;
+	int val;
+
+	val = of_get_named_gpio_flags(node, "qcom,ap2mdm-soft-reset-gpio",
+						0, &flags);
+	if (val < 0)
+		return -EIO;
+
+	MDM_GPIO(mdm, AP2MDM_SOFT_RESET) = val;
+	/* Remember active-low wiring so resets assert the correct level */
+	if (flags & OF_GPIO_ACTIVE_LOW)
+		mdm->soft_reset_inverted = 1;
+	return 0;
+}
+
+/* Claim the soft-reset gpio, if one was provided in the device tree. */
+static int mdm4x_pon_setup(struct mdm_ctrl *mdm)
+{
+	struct device *dev = mdm->dev;
+
+	if (!gpio_is_valid(MDM_GPIO(mdm, AP2MDM_SOFT_RESET)))
+		return 0;
+
+	if (gpio_request(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+				"AP2MDM_SOFT_RESET")) {
+		dev_err(dev, "Cannot config AP2MDM_SOFT_RESET gpio\n");
+		return -EIO;
+	}
+	return 0;
+}
+
+/* Power-on/reset hooks per modem variant; 9x25/9x35/9x45 share the
+ * generic mdm4x timings, 9x55 uses longer reset holds. */
+struct mdm_pon_ops mdm9x25_pon_ops = {
+	.pon = mdm4x_do_first_power_on,
+	.soft_reset = mdm4x_toggle_soft_reset,
+	.poff_force = mdm4x_power_down,
+	.cold_reset = mdm4x_cold_reset,
+	.dt_init = mdm4x_pon_dt_init,
+	.setup = mdm4x_pon_setup,
+};
+
+struct mdm_pon_ops mdm9x35_pon_ops = {
+	.pon = mdm4x_do_first_power_on,
+	.soft_reset = mdm4x_toggle_soft_reset,
+	.poff_force = mdm4x_power_down,
+	.cold_reset = mdm4x_cold_reset,
+	.dt_init = mdm4x_pon_dt_init,
+	.setup = mdm4x_pon_setup,
+};
+
+struct mdm_pon_ops mdm9x45_pon_ops = {
+	.pon = mdm4x_do_first_power_on,
+	.soft_reset = mdm4x_toggle_soft_reset,
+	.poff_force = mdm4x_power_down,
+	.cold_reset = mdm4x_cold_reset,
+	.dt_init = mdm4x_pon_dt_init,
+	.setup = mdm4x_pon_setup,
+};
+
+struct mdm_pon_ops mdm9x55_pon_ops = {
+	.pon = mdm4x_do_first_power_on,
+	.soft_reset = mdm9x55_toggle_soft_reset,
+	.poff_force = mdm9x55_power_down,
+	.cold_reset = mdm9x55_cold_reset,
+	.dt_init = mdm4x_pon_dt_init,
+	.setup = mdm4x_pon_setup,
+};
diff --git a/drivers/esoc/esoc-mdm.h b/drivers/esoc/esoc-mdm.h
new file mode 100644
index 0000000..fa3a576
--- /dev/null
+++ b/drivers/esoc/esoc-mdm.h
@@ -0,0 +1,156 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ESOC_MDM_H__
+#define __ESOC_MDM_H__
+
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_gpio.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include "esoc.h"
+
+#define MDM_PBLRDY_CNT 20
+#define INVALID_GPIO (-1)
+#define MDM_GPIO(mdm, i) (mdm->gpios[i])
+#define MDM9x25_LABEL "MDM9x25"
+#define MDM9x25_HSIC "HSIC"
+#define MDM9x35_LABEL "MDM9x35"
+#define MDM9x35_PCIE "PCIe"
+#define MDM9x35_DUAL_LINK "HSIC+PCIe"
+#define MDM9x35_HSIC "HSIC"
+#define MDM9x45_LABEL "MDM9x45"
+#define MDM9x45_PCIE "PCIe"
+#define MDM9x55_LABEL "MDM9x55"
+#define MDM9x55_PCIE "PCIe"
+#define MDM2AP_STATUS_TIMEOUT_MS 120000L
+#define MDM_MODEM_TIMEOUT 3000
+#define DEF_RAMDUMP_TIMEOUT 120000
+#define DEF_RAMDUMP_DELAY 2000
+#define RD_BUF_SIZE 100
+#define SFR_MAX_RETRIES 10
+#define SFR_RETRY_INTERVAL 1000
+#define MDM_DBG_OFFSET 0x934
+#define MDM_DBG_MODE 0x53444247
+#define MDM_CTI_NAME "coresight-cti-rpm-cpu0"
+#define MDM_CTI_TRIG 0
+#define MDM_CTI_CH 0
+
+/*
+ * Indices into mdm_ctrl.gpios[]. AP2MDM_* lines are driven by the apps
+ * processor; MDM2AP_* lines are driven by the external modem.
+ */
+enum mdm_gpio {
+	AP2MDM_WAKEUP = 0,
+	AP2MDM_STATUS,
+	AP2MDM_SOFT_RESET,
+	AP2MDM_VDD_MIN,
+	AP2MDM_CHNLRDY,
+	AP2MDM_ERRFATAL,
+	AP2MDM_VDDMIN,
+	AP2MDM_PMIC_PWR_EN,
+	MDM2AP_WAKEUP,
+	MDM2AP_ERRFATAL,
+	MDM2AP_PBLRDY,
+	MDM2AP_STATUS,
+	MDM2AP_VDDMIN,
+	MDM_LINK_DETECT,
+	NUM_GPIOS,	/* must remain last: sizes the gpios[] array */
+};
+
+struct mdm_pon_ops;
+
+/* Runtime state of one external mdm controller instance. */
+struct mdm_ctrl {
+	unsigned int gpios[NUM_GPIOS];	/* indexed by enum mdm_gpio */
+	spinlock_t status_lock;
+	struct workqueue_struct *mdm_queue;
+	struct delayed_work mdm2ap_status_check_work;
+	struct work_struct mdm_status_work;
+	struct work_struct restart_reason_work;
+	struct completion debug_done;
+	struct device *dev;
+	struct pinctrl *pinctrl;
+	struct pinctrl_state *gpio_state_booting;	/* optional */
+	struct pinctrl_state *gpio_state_running;	/* optional */
+	struct pinctrl_state *gpio_state_active;
+	struct pinctrl_state *gpio_state_suspend;
+	int mdm2ap_status_valid_old_config;
+	int soft_reset_inverted;	/* 1 when reset line is active-low */
+	int errfatal_irq;
+	int status_irq;
+	int pblrdy_irq;
+	int debug;
+	int init;
+	bool debug_fail;
+	unsigned int dump_timeout_ms;
+	unsigned int ramdump_delay_ms;
+	struct esoc_clink *esoc;	/* registered control link */
+	bool get_restart_reason;
+	unsigned long irq_mask;
+	bool ready;
+	bool dual_interface;	/* from "qcom,mdm-dual-link" DT property */
+	u32 status;
+	void __iomem *dbg_addr;
+	bool dbg_mode;
+	struct coresight_cti *cti;
+	int trig_cnt;
+	const struct mdm_pon_ops *pon_ops;	/* variant power-on hooks */
+};
+
+/* Variant-specific power-on/reset operations (see esoc-mdm-pon.c). */
+struct mdm_pon_ops {
+	int (*pon)(struct mdm_ctrl *mdm);
+	int (*soft_reset)(struct mdm_ctrl *mdm, bool atomic);
+	int (*poff_force)(struct mdm_ctrl *mdm);
+	int (*poff_cleanup)(struct mdm_ctrl *mdm);
+	void (*cold_reset)(struct mdm_ctrl *mdm);
+	int (*dt_init)(struct mdm_ctrl *mdm);
+	int (*setup)(struct mdm_ctrl *mdm);
+};
+
+/* Bundle of ops identifying one supported modem variant. */
+struct mdm_ops {
+	struct esoc_clink_ops *clink_ops;
+	struct mdm_pon_ops *pon_ops;
+	int (*config_hw)(struct mdm_ctrl *mdm, const struct mdm_ops *ops,
+					struct platform_device *pdev);
+};
+
+/* Thin wrappers dispatching to the variant's pon_ops. */
+static inline int mdm_toggle_soft_reset(struct mdm_ctrl *mdm, bool atomic)
+{
+	return mdm->pon_ops->soft_reset(mdm, atomic);
+}
+static inline int mdm_do_first_power_on(struct mdm_ctrl *mdm)
+{
+	return mdm->pon_ops->pon(mdm);
+}
+static inline int mdm_power_down(struct mdm_ctrl *mdm)
+{
+	return mdm->pon_ops->poff_force(mdm);
+}
+static inline void mdm_cold_reset(struct mdm_ctrl *mdm)
+{
+	mdm->pon_ops->cold_reset(mdm);
+}
+static inline int mdm_pon_dt_init(struct mdm_ctrl *mdm)
+{
+	return mdm->pon_ops->dt_init(mdm);
+}
+static inline int mdm_pon_setup(struct mdm_ctrl *mdm)
+{
+	return mdm->pon_ops->setup(mdm);
+}
+
+extern struct mdm_pon_ops mdm9x25_pon_ops;
+extern struct mdm_pon_ops mdm9x35_pon_ops;
+extern struct mdm_pon_ops mdm9x45_pon_ops;
+extern struct mdm_pon_ops mdm9x55_pon_ops;
+#endif
diff --git a/drivers/esoc/esoc.h b/drivers/esoc/esoc.h
new file mode 100644
index 0000000..0cec985
--- /dev/null
+++ b/drivers/esoc/esoc.h
@@ -0,0 +1,165 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __ESOC_H__
+#define __ESOC_H__
+
+#include <linux/cdev.h>
+#include <linux/completion.h>
+#include <linux/esoc_ctrl.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/subsystem_notif.h>
+
+#define ESOC_DEV_MAX 4
+#define ESOC_NAME_LEN 20
+#define ESOC_LINK_LEN 20
+
+struct esoc_clink;
+/**
+ * struct esoc_eng: Engine of the esoc control link
+ * @handle_clink_req: handle incoming esoc requests.
+ * @handle_clink_evt: handle for esoc events.
+ * @esoc_clink: pointer to esoc control link.
+ */
+struct esoc_eng {
+ void (*handle_clink_req)(enum esoc_req req,
+ struct esoc_eng *eng);
+ void (*handle_clink_evt)(enum esoc_evt evt,
+ struct esoc_eng *eng);
+ struct esoc_clink *esoc_clink;
+};
+
+/**
+ * struct esoc_clink: Representation of external esoc device
+ * @name: Name of the external esoc.
+ * @link_name: name of the physical link.
+ * @parent: parent device.
+ * @dev: device for userspace interface.
+ * @id: id of the external device.
+ * @owner: owner of the device.
+ * @clink_ops: control operations for the control link
+ * @req_eng: handle for request engine.
+ * @cmd_eng: handle for command engine.
+ * @notify_lock: serializes event notification on the link.
+ * @clink_data: private data of esoc control link.
+ * @compat_data: compat data of esoc driver.
+ * @subsys_desc: descriptor for subsystem restart
+ * @subsys_dev: ssr device handle.
+ * @np: device tree node for esoc_clink.
+ */
+struct esoc_clink {
+	const char *name;
+	const char *link_name;
+	struct device *parent;
+	struct device dev;
+	unsigned int id;
+	struct module *owner;
+	const struct esoc_clink_ops *clink_ops;
+	struct esoc_eng *req_eng;
+	struct esoc_eng *cmd_eng;
+	spinlock_t notify_lock;
+	void *clink_data;
+	void *compat_data;
+	struct subsys_desc subsys;
+	struct subsys_device *subsys_dev;
+	struct device_node *np;
+};
+
+/**
+ * struct esoc_clink_ops: Operations to control external soc
+ * @cmd_exe: Execute control command
+ * @get_status: Get current status, or response to previous command
+ * @notify: notify external soc of events
+ */
+struct esoc_clink_ops {
+	int (*cmd_exe)(enum esoc_cmd cmd, struct esoc_clink *dev);
+	int (*get_status)(u32 *status, struct esoc_clink *dev);
+	void (*notify)(enum esoc_notify notify, struct esoc_clink *dev);
+};
+
+/**
+ * struct esoc_compat: Compatibility of esoc drivers.
+ * @name: esoc link that driver is compatible with.
+ * @data: driver data associated with esoc clink.
+ */
+struct esoc_compat {
+ const char *name;
+ void *data;
+};
+
+/**
+ * struct esoc_drv: Driver for an esoc clink
+ * @driver: drivers for esoc.
+ * @owner: module owner of esoc driver.
+ * @compat_table: compatible table for driver.
+ * @compat_entries: number of entries in @compat_table.
+ * @probe: probe function for esoc driver.
+ */
+struct esoc_drv {
+	struct device_driver driver;
+	struct module *owner;
+	struct esoc_compat *compat_table;
+	unsigned int compat_entries;
+	int (*probe)(struct esoc_clink *esoc_clink,
+				struct esoc_drv *drv);
+};
+
+#define to_esoc_clink(d) container_of(d, struct esoc_clink, dev)
+#define to_esoc_drv(d) container_of(d, struct esoc_drv, driver)
+
+extern struct bus_type esoc_bus_type;
+
+
+/* Exported apis */
+void esoc_dev_exit(void);
+int esoc_dev_init(void);
+void esoc_clink_unregister(struct esoc_clink *esoc_dev);
+int esoc_clink_register(struct esoc_clink *esoc_dev);
+struct esoc_clink *get_esoc_clink(int id);
+struct esoc_clink *get_esoc_clink_by_node(struct device_node *node);
+void put_esoc_clink(struct esoc_clink *esoc_clink);
+void *get_esoc_clink_data(struct esoc_clink *esoc);
+void set_esoc_clink_data(struct esoc_clink *esoc, void *data);
+void esoc_clink_evt_notify(enum esoc_evt, struct esoc_clink *esoc_dev);
+void esoc_clink_queue_request(enum esoc_req req, struct esoc_clink *esoc_dev);
+void esoc_for_each_dev(void *data, int (*fn)(struct device *dev,
+ void *data));
+int esoc_clink_register_cmd_eng(struct esoc_clink *esoc_clink,
+ struct esoc_eng *eng);
+void esoc_clink_unregister_cmd_eng(struct esoc_clink *esoc_clink,
+ struct esoc_eng *eng);
+int esoc_clink_register_req_eng(struct esoc_clink *esoc_clink,
+ struct esoc_eng *eng);
+void esoc_clink_unregister_req_eng(struct esoc_clink *esoc_clink,
+ struct esoc_eng *eng);
+int esoc_drv_register(struct esoc_drv *driver);
+void esoc_set_drv_data(struct esoc_clink *esoc_clink, void *data);
+void *esoc_get_drv_data(struct esoc_clink *esoc_clink);
+/* ssr operations */
+int esoc_clink_register_ssr(struct esoc_clink *esoc_clink);
+int esoc_clink_request_ssr(struct esoc_clink *esoc_clink);
+void esoc_clink_unregister_ssr(struct esoc_clink *esoc_clink);
+/* client notification */
+#ifdef CONFIG_ESOC_CLIENT
+void notify_esoc_clients(struct esoc_clink *esoc_clink, unsigned long evt);
+#else
+static inline void notify_esoc_clients(struct esoc_clink *esoc_clink,
+ unsigned long evt)
+{
+}
+#endif
+bool esoc_req_eng_enabled(struct esoc_clink *esoc_clink);
+bool esoc_cmd_eng_enabled(struct esoc_clink *esoc_clink);
+#endif
diff --git a/drivers/esoc/esoc_bus.c b/drivers/esoc/esoc_bus.c
new file mode 100644
index 0000000..4807e2b
--- /dev/null
+++ b/drivers/esoc/esoc_bus.c
@@ -0,0 +1,386 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/idr.h>
+#include <linux/slab.h>
+#include "esoc.h"
+
+static DEFINE_IDA(esoc_ida);
+
+/* SYSFS */
+static ssize_t
+esoc_name_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, ESOC_NAME_LEN, "%s", to_esoc_clink(dev)->name);
+}
+
+static ssize_t
+esoc_link_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, ESOC_LINK_LEN, "%s",
+ to_esoc_clink(dev)->link_name);
+}
+
+/* Default sysfs attributes published for every device on the esoc bus. */
+static struct device_attribute esoc_clink_attrs[] = {
+
+	__ATTR_RO(esoc_name),
+	__ATTR_RO(esoc_link),
+	__ATTR_NULL,
+};
+
+/*
+ * Match an esoc device against a driver by case-insensitive name lookup
+ * in the driver's compatibility table.  Returns 1 on match, 0 otherwise.
+ * (The constant `match` local of the original served no purpose.)
+ */
+static int esoc_bus_match(struct device *dev, struct device_driver *drv)
+{
+	int i;
+	struct esoc_clink *esoc_clink = to_esoc_clink(dev);
+	struct esoc_drv *esoc_drv = to_esoc_drv(drv);
+	int entries = esoc_drv->compat_entries;
+	struct esoc_compat *table = esoc_drv->compat_table;
+
+	for (i = 0; i < entries; i++) {
+		if (strcasecmp(esoc_clink->name, table[i].name) == 0)
+			return 1;
+	}
+	return 0;
+}
+
+/* Generic bus probe: delegate to the matched esoc driver's probe(). */
+static int esoc_bus_probe(struct device *dev)
+{
+	struct esoc_clink *esoc_clink = to_esoc_clink(dev);
+	struct esoc_drv *esoc_drv = to_esoc_drv(dev->driver);
+	int ret;
+
+	ret = esoc_drv->probe(esoc_clink, esoc_drv);
+	if (ret)
+		pr_err("failed to probe %s dev\n", esoc_clink->name);
+	return ret;
+}
+
+/* The esoc bus: matches clink devices to esoc drivers by name. */
+struct bus_type esoc_bus_type = {
+	.name = "esoc",
+	.match = esoc_bus_match,
+	.dev_attrs = esoc_clink_attrs,
+};
+EXPORT_SYMBOL(esoc_bus_type);
+
+/* Fallback parent for esoc devices registered without an explicit parent. */
+struct device esoc_bus = {
+	.init_name = "esoc-bus"
+};
+EXPORT_SYMBOL(esoc_bus);
+
+/* bus accessor */
+/*
+ * Device release callback: runs when the last reference to the clink's
+ * device is dropped; reclaims the id and frees the clink itself.
+ */
+static void esoc_clink_release(struct device *dev)
+{
+	struct esoc_clink *esoc_clink = to_esoc_clink(dev);
+
+	ida_simple_remove(&esoc_ida, esoc_clink->id);
+	kfree(esoc_clink);
+}
+
+/* bus_find_device() callback: match on clink id, pinning the owner module. */
+static int esoc_clink_match_id(struct device *dev, void *id)
+{
+	struct esoc_clink *esoc_clink = to_esoc_clink(dev);
+	int *esoc_id = id;
+
+	if (esoc_clink->id != *esoc_id)
+		return 0;
+	/* Only report a match if the owning module could be pinned. */
+	return try_module_get(esoc_clink->owner) ? 1 : 0;
+}
+
+/* bus_find_device() callback: match on DT node, pinning the owner module. */
+static int esoc_clink_match_node(struct device *dev, void *id)
+{
+	struct esoc_clink *esoc_clink = to_esoc_clink(dev);
+	struct device_node *node = id;
+
+	if (esoc_clink->np != node)
+		return 0;
+	/* Only report a match if the owning module could be pinned. */
+	return try_module_get(esoc_clink->owner) ? 1 : 0;
+}
+
+/*
+ * Iterate over all devices on the esoc bus, invoking @fn with @data.
+ * bus_for_each_dev()'s result is intentionally discarded to preserve the
+ * existing void interface (the original stored it in an unused local,
+ * drawing a set-but-unused warning).
+ */
+void esoc_for_each_dev(void *data, int (*fn)(struct device *dev, void *))
+{
+	bus_for_each_dev(&esoc_bus_type, NULL, data, fn);
+}
+EXPORT_SYMBOL(esoc_for_each_dev);
+
+/*
+ * Look up a clink by id, taking a reference on its owner module.
+ * Returns NULL if no such device exists; balance with put_esoc_clink().
+ */
+struct esoc_clink *get_esoc_clink(int id)
+{
+	struct device *dev;
+
+	dev = bus_find_device(&esoc_bus_type, NULL, &id, esoc_clink_match_id);
+	/*
+	 * bus_find_device() returns NULL (never an ERR_PTR) when nothing
+	 * matches; the original IS_ERR() check let a NULL through.
+	 */
+	if (!dev)
+		return NULL;
+	return to_esoc_clink(dev);
+}
+EXPORT_SYMBOL(get_esoc_clink);
+
+/*
+ * Look up a clink by its device-tree node, taking a reference on its
+ * owner module.  Returns NULL if absent; balance with put_esoc_clink().
+ */
+struct esoc_clink *get_esoc_clink_by_node(struct device_node *node)
+{
+	struct device *dev;
+
+	dev = bus_find_device(&esoc_bus_type, NULL, node,
+							esoc_clink_match_node);
+	/* bus_find_device() returns NULL, never an ERR_PTR, on no match. */
+	if (!dev)
+		return NULL;
+	return to_esoc_clink(dev);
+}
+
+/*
+ * Drop the module reference taken when the clink was obtained via
+ * get_esoc_clink() / get_esoc_clink_by_node().
+ */
+void put_esoc_clink(struct esoc_clink *esoc_clink)
+{
+	module_put(esoc_clink->owner);
+}
+EXPORT_SYMBOL(put_esoc_clink);
+
+/* A request engine is enabled iff one is currently registered. */
+bool esoc_req_eng_enabled(struct esoc_clink *esoc_clink)
+{
+	return esoc_clink->req_eng != NULL;
+}
+EXPORT_SYMBOL(esoc_req_eng_enabled);
+
+/* A command engine is enabled iff one is currently registered. */
+bool esoc_cmd_eng_enabled(struct esoc_clink *esoc_clink)
+{
+	return esoc_clink->cmd_eng != NULL;
+}
+EXPORT_SYMBOL(esoc_cmd_eng_enabled);
+/* ssr operations */
+/*
+ * Register this clink with the subsystem-restart framework under the
+ * name "esoc<id>".  Returns 0 on success or a negative errno.
+ */
+int esoc_clink_register_ssr(struct esoc_clink *esoc_clink)
+{
+	int ret;
+	char *subsys_name;
+
+	/*
+	 * kasprintf() sizes the buffer exactly; the original computed
+	 * strlen("esoc") + sizeof(id), which can truncate the id, and
+	 * tested kzalloc()'s result with IS_ERR() even though kzalloc()
+	 * returns NULL on failure (so the check never fired).
+	 */
+	subsys_name = kasprintf(GFP_KERNEL, "esoc%d", esoc_clink->id);
+	if (!subsys_name)
+		return -ENOMEM;
+	esoc_clink->subsys.name = subsys_name;
+	esoc_clink->dev.of_node = esoc_clink->np;
+	esoc_clink->subsys.dev = &esoc_clink->dev;
+	esoc_clink->subsys_dev = subsys_register(&esoc_clink->subsys);
+	if (IS_ERR(esoc_clink->subsys_dev)) {
+		dev_err(&esoc_clink->dev, "failed to register ssr node\n");
+		ret = PTR_ERR(esoc_clink->subsys_dev);
+		kfree(subsys_name);
+		return ret;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(esoc_clink_register_ssr);
+
+/* Unregister the clink from subsystem restart and free its ssr name. */
+void esoc_clink_unregister_ssr(struct esoc_clink *esoc_clink)
+{
+	subsys_unregister(esoc_clink->subsys_dev);
+	kfree(esoc_clink->subsys.name);
+}
+EXPORT_SYMBOL(esoc_clink_unregister_ssr);
+
+/* Trigger a subsystem restart of the external soc.  Always returns 0. */
+int esoc_clink_request_ssr(struct esoc_clink *esoc_clink)
+{
+	subsystem_restart_dev(esoc_clink->subsys_dev);
+	return 0;
+}
+EXPORT_SYMBOL(esoc_clink_request_ssr);
+
+/* bus operations */
+/*
+ * Broadcast @evt to registered clients and to this clink's request and
+ * command engines.  notify_lock keeps the engine pointers stable while
+ * the handlers run, so engine handlers execute in atomic context.
+ */
+void esoc_clink_evt_notify(enum esoc_evt evt, struct esoc_clink *esoc_clink)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&esoc_clink->notify_lock, flags);
+	notify_esoc_clients(esoc_clink, evt);
+	if (esoc_clink->req_eng && esoc_clink->req_eng->handle_clink_evt)
+		esoc_clink->req_eng->handle_clink_evt(evt, esoc_clink->req_eng);
+	if (esoc_clink->cmd_eng && esoc_clink->cmd_eng->handle_clink_evt)
+		esoc_clink->cmd_eng->handle_clink_evt(evt, esoc_clink->cmd_eng);
+	spin_unlock_irqrestore(&esoc_clink->notify_lock, flags);
+}
+EXPORT_SYMBOL(esoc_clink_evt_notify);
+
+/* Return the link-specific private data attached to @esoc. */
+void *get_esoc_clink_data(struct esoc_clink *esoc)
+{
+	return esoc->clink_data;
+}
+EXPORT_SYMBOL(get_esoc_clink_data);
+
+/* Attach link-specific private data to @esoc. */
+void set_esoc_clink_data(struct esoc_clink *esoc, void *data)
+{
+	esoc->clink_data = data;
+}
+EXPORT_SYMBOL(set_esoc_clink_data);
+
+/*
+ * Forward a clink request to the registered request engine, if any.
+ * notify_lock pins the engine pointer for the duration of the call.
+ */
+void esoc_clink_queue_request(enum esoc_req req, struct esoc_clink *esoc_clink)
+{
+	struct esoc_eng *eng;
+	unsigned long flags;
+
+	spin_lock_irqsave(&esoc_clink->notify_lock, flags);
+	eng = esoc_clink->req_eng;
+	if (eng)
+		eng->handle_clink_req(req, eng);
+	spin_unlock_irqrestore(&esoc_clink->notify_lock, flags);
+}
+EXPORT_SYMBOL(esoc_clink_queue_request);
+
+/* Store driver-private data on the clink's device. */
+void esoc_set_drv_data(struct esoc_clink *esoc_clink, void *data)
+{
+	dev_set_drvdata(&esoc_clink->dev, data);
+}
+EXPORT_SYMBOL(esoc_set_drv_data);
+
+/* Retrieve driver-private data from the clink's device. */
+void *esoc_get_drv_data(struct esoc_clink *esoc_clink)
+{
+	return dev_get_drvdata(&esoc_clink->dev);
+}
+EXPORT_SYMBOL(esoc_get_drv_data);
+
+/* bus registration functions */
+/*
+ * Remove the clink device from the bus.  The surrounding get/put pair
+ * holds the device alive across device_unregister(); final cleanup
+ * happens in esoc_clink_release() when the last reference drops.
+ */
+void esoc_clink_unregister(struct esoc_clink *esoc_clink)
+{
+	if (get_device(&esoc_clink->dev) != NULL) {
+		device_unregister(&esoc_clink->dev);
+		put_device(&esoc_clink->dev);
+	}
+}
+EXPORT_SYMBOL(esoc_clink_unregister);
+
+/*
+ * Register a control link on the esoc bus.  The caller must have filled
+ * in name, link_name and clink_ops.  Returns 0 on success or a negative
+ * errno; on failure nothing (id or device) is left registered.
+ */
+int esoc_clink_register(struct esoc_clink *esoc_clink)
+{
+	int id, err;
+	struct device *dev;
+
+	if (!esoc_clink->name || !esoc_clink->link_name ||
+					!esoc_clink->clink_ops) {
+		dev_err(esoc_clink->parent, "invalid esoc arguments\n");
+		return -EINVAL;
+	}
+	id = ida_simple_get(&esoc_ida, 0, ESOC_DEV_MAX, GFP_KERNEL);
+	if (id < 0) {
+		/*
+		 * No id was allocated, so do NOT fall into the removal
+		 * path: the original jumped to ida_simple_remove() with a
+		 * negative id here.
+		 */
+		pr_err("unable to register %s, err = %d\n",
+						esoc_clink->name, id);
+		return id;
+	}
+	esoc_clink->id = id;
+	/* Initialise before device_register(): events may fire right away. */
+	spin_lock_init(&esoc_clink->notify_lock);
+	dev = &esoc_clink->dev;
+	dev->bus = &esoc_bus_type;
+	dev->release = esoc_clink_release;
+	dev->parent = esoc_clink->parent ? esoc_clink->parent : &esoc_bus;
+	dev_set_name(dev, "esoc%d", id);
+	err = device_register(dev);
+	if (err) {
+		dev_err(esoc_clink->parent, "esoc device register failed\n");
+		ida_simple_remove(&esoc_ida, id);
+		pr_err("unable to register %s, err = %d\n",
+						esoc_clink->name, err);
+		return err;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(esoc_clink_register);
+
+/*
+ * Bind @eng as the clink's single request engine and announce
+ * ESOC_REQ_ENG_ON.  Returns -EBUSY if one is already bound, -EINVAL if
+ * @eng lacks a request handler.
+ */
+int esoc_clink_register_req_eng(struct esoc_clink *esoc_clink,
+						struct esoc_eng *eng)
+{
+	if (esoc_clink->req_eng)
+		return -EBUSY;
+	if (!eng->handle_clink_req)
+		return -EINVAL;
+	eng->esoc_clink = esoc_clink;
+	esoc_clink->req_eng = eng;
+	esoc_clink_evt_notify(ESOC_REQ_ENG_ON, esoc_clink);
+	return 0;
+}
+EXPORT_SYMBOL(esoc_clink_register_req_eng);
+
+/*
+ * Bind @eng as the clink's single command engine and announce
+ * ESOC_CMD_ENG_ON.  Returns -EBUSY if one is already bound.
+ */
+int esoc_clink_register_cmd_eng(struct esoc_clink *esoc_clink,
+						struct esoc_eng *eng)
+{
+	if (esoc_clink->cmd_eng)
+		return -EBUSY;
+	eng->esoc_clink = esoc_clink;
+	esoc_clink->cmd_eng = eng;
+	esoc_clink_evt_notify(ESOC_CMD_ENG_ON, esoc_clink);
+	return 0;
+}
+EXPORT_SYMBOL(esoc_clink_register_cmd_eng);
+
+/* Detach the request engine and announce ESOC_REQ_ENG_OFF. */
+void esoc_clink_unregister_req_eng(struct esoc_clink *esoc_clink,
+						struct esoc_eng *eng)
+{
+	esoc_clink->req_eng = NULL;
+	esoc_clink_evt_notify(ESOC_REQ_ENG_OFF, esoc_clink);
+}
+EXPORT_SYMBOL(esoc_clink_unregister_req_eng);
+
+/* Detach the command engine and announce ESOC_CMD_ENG_OFF. */
+void esoc_clink_unregister_cmd_eng(struct esoc_clink *esoc_clink,
+						struct esoc_eng *eng)
+{
+	esoc_clink->cmd_eng = NULL;
+	esoc_clink_evt_notify(ESOC_CMD_ENG_OFF, esoc_clink);
+}
+EXPORT_SYMBOL(esoc_clink_unregister_cmd_eng);
+
+/*
+ * Register an esoc driver on the esoc bus.  The generic probe wrapper is
+ * installed so the driver's own probe() runs on a match.  Returns
+ * driver_register()'s result directly (the original round-tripped it
+ * through a redundant if/return-0 sequence).
+ */
+int esoc_drv_register(struct esoc_drv *driver)
+{
+	driver->driver.bus = &esoc_bus_type;
+	driver->driver.probe = esoc_bus_probe;
+	return driver_register(&driver->driver);
+}
+EXPORT_SYMBOL(esoc_drv_register);
+
+/*
+ * Register the esoc bus device and bus type.  On bus_register() failure
+ * the bus device registered just above is now unwound (the original
+ * leaked it).
+ */
+static int __init esoc_init(void)
+{
+	int ret;
+
+	ret = device_register(&esoc_bus);
+	if (ret) {
+		pr_err("esoc bus device register fail\n");
+		return ret;
+	}
+	ret = bus_register(&esoc_bus_type);
+	if (ret) {
+		pr_err("esoc bus register fail\n");
+		device_unregister(&esoc_bus);
+		return ret;
+	}
+	pr_debug("esoc bus registration done\n");
+	return 0;
+}
+
+subsys_initcall(esoc_init);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/esoc/esoc_client.c b/drivers/esoc/esoc_client.c
new file mode 100644
index 0000000..5b194e31
--- /dev/null
+++ b/drivers/esoc/esoc_client.c
@@ -0,0 +1,132 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/esoc_client.h>
+#include <linux/of.h>
+#include <linux/spinlock.h>
+#include "esoc.h"
+
+static DEFINE_SPINLOCK(notify_lock);
+static ATOMIC_NOTIFIER_HEAD(client_notify);
+
+/*
+ * devres release callback: free the descriptor's strings and drop the
+ * module reference held on the underlying clink.  Runs automatically
+ * when the client device is unbound.
+ */
+static void devm_esoc_desc_release(struct device *dev, void *res)
+{
+	struct esoc_desc *esoc_desc = res;
+
+	kfree(esoc_desc->name);
+	kfree(esoc_desc->link);
+	put_esoc_clink(esoc_desc->priv);
+}
+
+/* devres match callback: true when @res is the descriptor to release. */
+static int devm_esoc_desc_match(struct device *dev, void *res, void *data)
+{
+	return res == data;
+}
+
+/*
+ * devm_register_esoc_client() - bind a client device to its esoc device.
+ * @dev: client device; its DT node supplies "esoc-<n>" phandles and
+ *       matching "esoc-names" strings.
+ * @name: entry in "esoc-names" selecting which esoc to bind to.
+ *
+ * Returns a devres-managed descriptor, NULL when the DT has no matching
+ * entry, or an ERR_PTR (-EPROBE_DEFER when the esoc device is not yet
+ * registered, -ENOMEM on allocation failure).
+ *
+ * Fixes over the original: kasprintf() results are NULL-checked (they
+ * are never ERR_PTRs), the node reference from of_find_node_by_phandle()
+ * is dropped, and the clink module reference is released on every
+ * allocation-failure path instead of leaking.
+ */
+struct esoc_desc *devm_register_esoc_client(struct device *dev,
+							const char *name)
+{
+	int ret, index;
+	const char *client_desc;
+	char *esoc_prop;
+	const __be32 *parp;
+	struct device_node *esoc_node;
+	struct device_node *np = dev->of_node;
+	struct esoc_clink *esoc_clink;
+	struct esoc_desc *desc;
+	char *esoc_name, *esoc_link;
+
+	for (index = 0;; index++) {
+		esoc_prop = kasprintf(GFP_KERNEL, "esoc-%d", index);
+		if (!esoc_prop)
+			return ERR_PTR(-ENOMEM);
+		parp = of_get_property(np, esoc_prop, NULL);
+		if (parp == NULL) {
+			dev_err(dev, "esoc device not present\n");
+			kfree(esoc_prop);
+			return NULL;
+		}
+		ret = of_property_read_string_index(np, "esoc-names", index,
+								&client_desc);
+		if (ret) {
+			dev_err(dev, "cannot find matching string\n");
+			kfree(esoc_prop);
+			return NULL;
+		}
+		if (strcmp(client_desc, name)) {
+			kfree(esoc_prop);
+			continue;
+		}
+		kfree(esoc_prop);
+		esoc_node = of_find_node_by_phandle(be32_to_cpup(parp));
+		esoc_clink = get_esoc_clink_by_node(esoc_node);
+		/* Drop the ref taken by of_find_node_by_phandle(). */
+		of_node_put(esoc_node);
+		if (IS_ERR_OR_NULL(esoc_clink)) {
+			dev_err(dev, "matching esoc clink not present\n");
+			return ERR_PTR(-EPROBE_DEFER);
+		}
+		esoc_name = kasprintf(GFP_KERNEL, "esoc%d", esoc_clink->id);
+		if (!esoc_name) {
+			dev_err(dev, "unable to allocate esoc name\n");
+			ret = -ENOMEM;
+			goto put_clink;
+		}
+		esoc_link = kasprintf(GFP_KERNEL, "%s", esoc_clink->link_name);
+		if (!esoc_link) {
+			dev_err(dev, "unable to allocate esoc link name\n");
+			ret = -ENOMEM;
+			goto free_name;
+		}
+		desc = devres_alloc(devm_esoc_desc_release,
+						sizeof(*desc), GFP_KERNEL);
+		if (!desc) {
+			dev_err(dev, "unable to allocate esoc descriptor\n");
+			ret = -ENOMEM;
+			goto free_link;
+		}
+		desc->name = esoc_name;
+		desc->link = esoc_link;
+		desc->priv = esoc_clink;
+		devres_add(dev, desc);
+		return desc;
+free_link:
+		kfree(esoc_link);
+free_name:
+		kfree(esoc_name);
+put_clink:
+		/* Balance the module ref from get_esoc_clink_by_node(). */
+		put_esoc_clink(esoc_clink);
+		return ERR_PTR(ret);
+	}
+	return NULL;
+}
+EXPORT_SYMBOL(devm_register_esoc_client);
+
+/*
+ * Explicitly release a descriptor obtained from
+ * devm_register_esoc_client() ahead of automatic devres cleanup.
+ */
+void devm_unregister_esoc_client(struct device *dev,
+					struct esoc_desc *esoc_desc)
+{
+	int ret;
+
+	ret = devres_release(dev, devm_esoc_desc_release,
+				devm_esoc_desc_match, esoc_desc);
+	/* Firing means @esoc_desc was never devres-managed on @dev. */
+	WARN_ON(ret);
+}
+EXPORT_SYMBOL(devm_unregister_esoc_client);
+
+/* Subscribe @nb to esoc client events; payload is the clink id. */
+int esoc_register_client_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_register(&client_notify, nb);
+}
+EXPORT_SYMBOL(esoc_register_client_notifier);
+
+/*
+ * Broadcast @evt to all registered client notifiers, passing the clink
+ * id as the notifier payload.
+ *
+ * NOTE(review): atomic notifier chains are internally safe; notify_lock
+ * here seems to only serialize concurrent broadcasts -- confirm intent
+ * before removing it.
+ */
+void notify_esoc_clients(struct esoc_clink *esoc_clink, unsigned long evt)
+{
+	unsigned int id;
+	unsigned long flags;
+
+	spin_lock_irqsave(&notify_lock, flags);
+	id = esoc_clink->id;
+	atomic_notifier_call_chain(&client_notify, evt, &id);
+	spin_unlock_irqrestore(&notify_lock, flags);
+}
+EXPORT_SYMBOL(notify_esoc_clients);
diff --git a/drivers/esoc/esoc_dev.c b/drivers/esoc/esoc_dev.c
new file mode 100644
index 0000000..17a30b8
--- /dev/null
+++ b/drivers/esoc/esoc_dev.c
@@ -0,0 +1,392 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kfifo.h>
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+#include "esoc.h"
+
+/**
+ * struct esoc_udev: Userspace char interface
+ * @dev: interface device.
+ * @req_fifo: fifo for clink requests.
+ * @req_wait: signal availability of request from clink
+ * @req_fifo_lock: serialize access to req fifo
+ * @evt_fifo: fifo for clink events
+ * @evt_wait: signal availability of clink event
+ * @evt_fifo_lock: serialize access to event fifo
+ * @list: entry in esoc dev list.
+ * @clink: reference to control link
+ */
+struct esoc_udev {
+	struct device *dev;
+	struct kfifo req_fifo;
+	wait_queue_head_t req_wait;
+	spinlock_t req_fifo_lock;
+	struct kfifo evt_fifo;
+	wait_queue_head_t evt_wait;
+	spinlock_t evt_fifo_lock;
+	struct list_head list;
+	struct esoc_clink *clink;
+};
+
+/**
+ * struct esoc_uhandle: Userspace (per-open) handle of esoc
+ * @esoc_clink: esoc control link.
+ * @eng: esoc engine for commands/requests.
+ * @esoc_udev: user interface device.
+ * @req_eng_reg: indicates if engine is registered as request eng
+ * @cmd_eng_reg: indicates if engine is registered as cmd eng
+ */
+struct esoc_uhandle {
+	struct esoc_clink *esoc_clink;
+	struct esoc_eng eng;
+	struct esoc_udev *esoc_udev;
+	bool req_eng_reg;
+	bool cmd_eng_reg;
+};
+
+#define ESOC_MAX_MINOR 256
+#define ESOC_MAX_REQ 8
+#define ESOC_MAX_EVT 4
+
+static LIST_HEAD(esoc_udev_list);
+static DEFINE_SPINLOCK(esoc_udev_list_lock);
+struct class *esoc_class;
+static int esoc_major;
+
+/*
+ * Allocate the userspace interface (fifos + waitqueues) for @esoc_clink
+ * and add it to the global list.  Returns the new udev or an ERR_PTR.
+ */
+static struct esoc_udev *get_free_esoc_udev(struct esoc_clink *esoc_clink)
+{
+	struct esoc_udev *esoc_udev;
+	int err;
+
+	/* Minors run 0..ESOC_MAX_MINOR-1, so id == ESOC_MAX_MINOR is
+	 * also out of range (the original '>' let it through). */
+	if (esoc_clink->id >= ESOC_MAX_MINOR) {
+		pr_err("too many esoc devices\n");
+		return ERR_PTR(-ENODEV);
+	}
+	esoc_udev = kzalloc(sizeof(*esoc_udev), GFP_KERNEL);
+	if (!esoc_udev)
+		return ERR_PTR(-ENOMEM);
+	err = kfifo_alloc(&esoc_udev->req_fifo, (sizeof(u32)) * ESOC_MAX_REQ,
+								GFP_KERNEL);
+	if (err) {
+		pr_err("unable to allocate request fifo for %s\n",
+							esoc_clink->name);
+		goto req_fifo_fail;
+	}
+	err = kfifo_alloc(&esoc_udev->evt_fifo, (sizeof(u32)) * ESOC_MAX_EVT,
+								GFP_KERNEL);
+	if (err) {
+		pr_err("unable to allocate evt fifo for %s\n",
+							esoc_clink->name);
+		goto evt_fifo_fail;
+	}
+	init_waitqueue_head(&esoc_udev->req_wait);
+	init_waitqueue_head(&esoc_udev->evt_wait);
+	spin_lock_init(&esoc_udev->req_fifo_lock);
+	spin_lock_init(&esoc_udev->evt_fifo_lock);
+	esoc_udev->clink = esoc_clink;
+	spin_lock(&esoc_udev_list_lock);
+	list_add_tail(&esoc_udev->list, &esoc_udev_list);
+	spin_unlock(&esoc_udev_list_lock);
+	return esoc_udev;
+evt_fifo_fail:
+	kfifo_free(&esoc_udev->req_fifo);
+req_fifo_fail:
+	kfree(esoc_udev);
+	/* Propagate the real kfifo_alloc() error, not a blanket -ENODEV. */
+	return ERR_PTR(err);
+}
+
+/* Unlink @esoc_udev from the global list, then free its fifos and memory. */
+static void return_esoc_udev(struct esoc_udev *esoc_udev)
+{
+	spin_lock(&esoc_udev_list_lock);
+	list_del(&esoc_udev->list);
+	spin_unlock(&esoc_udev_list_lock);
+	kfifo_free(&esoc_udev->req_fifo);
+	kfifo_free(&esoc_udev->evt_fifo);
+	kfree(esoc_udev);
+}
+
+/* Find the esoc_udev whose clink id equals @index, or NULL. */
+static struct esoc_udev *esoc_udev_get_by_minor(unsigned int index)
+{
+	struct esoc_udev *udev, *found = NULL;
+
+	spin_lock(&esoc_udev_list_lock);
+	list_for_each_entry(udev, &esoc_udev_list, list) {
+		if (udev->clink->id == index) {
+			found = udev;
+			break;
+		}
+	}
+	spin_unlock(&esoc_udev_list_lock);
+	return found;
+}
+
+/*
+ * Engine callback: queue a clink request on the user fifo and wake any
+ * reader blocked in ESOC_WAIT_FOR_REQ.
+ */
+void esoc_udev_handle_clink_req(enum esoc_req req, struct esoc_eng *eng)
+{
+	struct esoc_clink *esoc_clink = eng->esoc_clink;
+	struct esoc_udev *esoc_udev = esoc_udev_get_by_minor(esoc_clink->id);
+	u32 clink_req = (u32)req;
+	int copied;
+
+	if (!esoc_udev)
+		return;
+	copied = kfifo_in_spinlocked(&esoc_udev->req_fifo, &clink_req,
+					sizeof(clink_req),
+					&esoc_udev->req_fifo_lock);
+	if (copied != sizeof(clink_req)) {
+		pr_err("unable to queue request for %s\n", esoc_clink->name);
+		return;
+	}
+	wake_up_interruptible(&esoc_udev->req_wait);
+}
+
+/*
+ * Engine callback: queue a clink event on the user fifo and wake any
+ * reader blocked in ESOC_WAIT_FOR_CRASH.
+ */
+void esoc_udev_handle_clink_evt(enum esoc_evt evt, struct esoc_eng *eng)
+{
+	struct esoc_clink *esoc_clink = eng->esoc_clink;
+	struct esoc_udev *esoc_udev = esoc_udev_get_by_minor(esoc_clink->id);
+	u32 clink_evt = (u32)evt;
+	int copied;
+
+	if (!esoc_udev)
+		return;
+	copied = kfifo_in_spinlocked(&esoc_udev->evt_fifo, &clink_evt,
+					sizeof(clink_evt),
+					&esoc_udev->evt_fifo_lock);
+	if (copied != sizeof(clink_evt)) {
+		pr_err("unable to queue event for %s\n", esoc_clink->name);
+		return;
+	}
+	wake_up_interruptible(&esoc_udev->evt_wait);
+}
+
+/*
+ * ioctl handler for the esoc char device.
+ *
+ * A handle must have registered as the command/request engine before it
+ * may issue ESOC_CMD_EXE / ESOC_WAIT_FOR_REQ.  All payloads are u32s,
+ * matching the _IOR/_IOW(..., __u32) declarations in esoc_ctrl.h; the
+ * original wrote them back through `unsigned long __user *`, which
+ * overruns the 4-byte user buffer on 64-bit kernels.  get_user/put_user
+ * failures now return -EFAULT, and successful waits return 0 instead of
+ * leaking the kfifo byte count (4) to userspace.
+ */
+static long esoc_dev_ioctl(struct file *file, unsigned int cmd,
+						unsigned long arg)
+{
+	int err;
+	u32 esoc_cmd, status, req, evt;
+	struct esoc_uhandle *uhandle = file->private_data;
+	struct esoc_udev *esoc_udev = uhandle->esoc_udev;
+	struct esoc_clink *esoc_clink = uhandle->esoc_clink;
+	const struct esoc_clink_ops * const clink_ops = esoc_clink->clink_ops;
+	void __user *uarg = (void __user *)arg;
+
+	switch (cmd) {
+	case ESOC_REG_REQ_ENG:
+		err = esoc_clink_register_req_eng(esoc_clink, &uhandle->eng);
+		if (err)
+			return err;
+		uhandle->req_eng_reg = true;
+		break;
+	case ESOC_REG_CMD_ENG:
+		err = esoc_clink_register_cmd_eng(esoc_clink, &uhandle->eng);
+		if (err)
+			return err;
+		uhandle->cmd_eng_reg = true;
+		break;
+	case ESOC_CMD_EXE:
+		if (esoc_clink->cmd_eng != &uhandle->eng)
+			return -EACCES;
+		/* Fail copy-in explicitly instead of using a garbage cmd. */
+		if (get_user(esoc_cmd, (u32 __user *)arg))
+			return -EFAULT;
+		return clink_ops->cmd_exe(esoc_cmd, esoc_clink);
+	case ESOC_WAIT_FOR_REQ:
+		if (esoc_clink->req_eng != &uhandle->eng)
+			return -EACCES;
+		err = wait_event_interruptible(esoc_udev->req_wait,
+				!kfifo_is_empty(&esoc_udev->req_fifo));
+		if (err)
+			return err;
+		err = kfifo_out_spinlocked(&esoc_udev->req_fifo, &req,
+					sizeof(req),
+					&esoc_udev->req_fifo_lock);
+		if (err != sizeof(req)) {
+			pr_err("read from clink %s req q failed\n",
+						esoc_clink->name);
+			return -EIO;
+		}
+		if (put_user(req, (u32 __user *)uarg))
+			return -EFAULT;
+		return 0;
+	case ESOC_NOTIFY:
+		if (get_user(esoc_cmd, (u32 __user *)arg))
+			return -EFAULT;
+		clink_ops->notify(esoc_cmd, esoc_clink);
+		break;
+	case ESOC_GET_STATUS:
+		err = clink_ops->get_status(&status, esoc_clink);
+		if (err)
+			return err;
+		if (put_user(status, (u32 __user *)uarg))
+			return -EFAULT;
+		break;
+	case ESOC_WAIT_FOR_CRASH:
+		err = wait_event_interruptible(esoc_udev->evt_wait,
+				!kfifo_is_empty(&esoc_udev->evt_fifo));
+		if (err)
+			return err;
+		err = kfifo_out_spinlocked(&esoc_udev->evt_fifo, &evt,
+					sizeof(evt),
+					&esoc_udev->evt_fifo_lock);
+		if (err != sizeof(evt)) {
+			pr_err("read from clink %s evt q failed\n",
+						esoc_clink->name);
+			return -EIO;
+		}
+		if (put_user(evt, (u32 __user *)uarg))
+			return -EFAULT;
+		return 0;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*
+ * open(): look up the udev backing this minor, take a reference on its
+ * clink, and allocate a per-open handle whose engine callbacks feed the
+ * udev fifos.  The original dereferenced both lookups without NULL
+ * checks; either can legitimately fail during device teardown.
+ */
+static int esoc_dev_open(struct inode *inode, struct file *file)
+{
+	struct esoc_uhandle *uhandle;
+	struct esoc_udev *esoc_udev;
+	struct esoc_clink *esoc_clink;
+	struct esoc_eng *eng;
+	unsigned int minor = iminor(inode);
+
+	esoc_udev = esoc_udev_get_by_minor(minor);
+	if (!esoc_udev)
+		return -ENODEV;
+	/* Takes a module reference; dropped in esoc_dev_release(). */
+	esoc_clink = get_esoc_clink(esoc_udev->clink->id);
+	if (!esoc_clink)
+		return -ENODEV;
+	uhandle = kzalloc(sizeof(*uhandle), GFP_KERNEL);
+	if (!uhandle) {
+		put_esoc_clink(esoc_clink);
+		return -ENOMEM;
+	}
+	uhandle->esoc_udev = esoc_udev;
+	uhandle->esoc_clink = esoc_clink;
+	eng = &uhandle->eng;
+	eng->handle_clink_req = esoc_udev_handle_clink_req;
+	eng->handle_clink_evt = esoc_udev_handle_clink_evt;
+	file->private_data = uhandle;
+	return 0;
+}
+
+/*
+ * release(): unregister any engines this handle registered, drop the
+ * clink module reference taken at open time, and free the handle.
+ */
+static int esoc_dev_release(struct inode *inode, struct file *file)
+{
+	struct esoc_clink *esoc_clink;
+	struct esoc_uhandle *uhandle = file->private_data;
+
+	esoc_clink = uhandle->esoc_clink;
+	if (uhandle->req_eng_reg)
+		esoc_clink_unregister_req_eng(esoc_clink, &uhandle->eng);
+	if (uhandle->cmd_eng_reg)
+		esoc_clink_unregister_cmd_eng(esoc_clink, &uhandle->eng);
+	uhandle->req_eng_reg = false;
+	uhandle->cmd_eng_reg = false;
+	put_esoc_clink(esoc_clink);
+	kfree(uhandle);
+	return 0;
+}
+/* File operations for the per-clink esoc char device. */
+static const struct file_operations esoc_dev_fops = {
+	.owner = THIS_MODULE,
+	.open = esoc_dev_open,
+	.unlocked_ioctl = esoc_dev_ioctl,
+	.release = esoc_dev_release,
+};
+
+/*
+ * Bus-notifier helper: create the userspace char device for a newly
+ * added esoc clink.  Returns 0 on success or a negative errno (the real
+ * device_create() error is now propagated instead of a blanket -ENODEV).
+ */
+int esoc_clink_add_device(struct device *dev, void *dummy)
+{
+	int err;
+	struct esoc_udev *esoc_udev;
+	struct esoc_clink *esoc_clink = to_esoc_clink(dev);
+
+	esoc_udev = get_free_esoc_udev(esoc_clink);
+	if (IS_ERR(esoc_udev))
+		return PTR_ERR(esoc_udev);
+	esoc_udev->dev = device_create(esoc_class, &esoc_clink->dev,
+					MKDEV(esoc_major, esoc_clink->id),
+					esoc_clink, "esoc-%d", esoc_clink->id);
+	if (IS_ERR(esoc_udev->dev)) {
+		pr_err("failed to create user device\n");
+		/* Capture the error before return_esoc_udev() frees udev. */
+		err = PTR_ERR(esoc_udev->dev);
+		return_esoc_udev(esoc_udev);
+		return err;
+	}
+	return 0;
+}
+
+/*
+ * Bus-notifier helper: tear down the userspace char device for a clink
+ * leaving the bus.
+ *
+ * Fix: the original called return_esoc_udev() TWICE (before and after
+ * device_destroy()), unlinking and kfree()ing the same udev two times --
+ * a double free.  Free it exactly once, after the device is destroyed.
+ */
+int esoc_clink_del_device(struct device *dev, void *dummy)
+{
+	struct esoc_udev *esoc_udev;
+	struct esoc_clink *esoc_clink = to_esoc_clink(dev);
+
+	esoc_udev = esoc_udev_get_by_minor(esoc_clink->id);
+	if (!esoc_udev)
+		return 0;
+	device_destroy(esoc_class, MKDEV(esoc_major, esoc_clink->id));
+	return_esoc_udev(esoc_udev);
+	return 0;
+}
+
+/*
+ * Bus notifier: mirror esoc bus add/remove events onto the char-device
+ * interface.  (Stray `;` after the switch removed.)
+ *
+ * NOTE(review): this returns a raw negative errno on failure, while
+ * notifier chains conventionally return NOTIFY_* values (see
+ * notifier_from_errno()).  Left unchanged pending confirmation of how
+ * the chain's callers interpret the result.
+ */
+static int esoc_dev_notifier_call(struct notifier_block *nb,
+					unsigned long action,
+					void *data)
+{
+	struct device *dev = data;
+
+	switch (action) {
+	case BUS_NOTIFY_ADD_DEVICE:
+		return esoc_clink_add_device(dev, NULL);
+	case BUS_NOTIFY_DEL_DEVICE:
+		return esoc_clink_del_device(dev, NULL);
+	}
+	return 0;
+}
+
+static struct notifier_block esoc_dev_notifier = {
+	.notifier_call = esoc_dev_notifier_call,
+};
+
+/*
+ * Initialise the esoc userspace interface: device class, char major and
+ * bus notifier; then create char devices for clinks already on the bus.
+ *
+ * Fix: the error path used to fall through to `return 0`, reporting
+ * success to the caller even after unwinding a failure.
+ */
+int __init esoc_dev_init(void)
+{
+	int ret;
+
+	esoc_class = class_create(THIS_MODULE, "esoc-dev");
+	if (IS_ERR(esoc_class)) {
+		pr_err("coudn't create class");
+		return PTR_ERR(esoc_class);
+	}
+	esoc_major = register_chrdev(0, "esoc", &esoc_dev_fops);
+	if (esoc_major < 0) {
+		pr_err("failed to allocate char dev\n");
+		ret = esoc_major;
+		goto class_unreg;
+	}
+	ret = bus_register_notifier(&esoc_bus_type, &esoc_dev_notifier);
+	if (ret)
+		goto chrdev_unreg;
+	esoc_for_each_dev(NULL, esoc_clink_add_device);
+	return 0;
+chrdev_unreg:
+	unregister_chrdev(esoc_major, "esoc");
+class_unreg:
+	class_destroy(esoc_class);
+	return ret;
+}
+
+/*
+ * Tear down the esoc userspace interface in reverse order of init.
+ *
+ * Fix: unregister_chrdev() must use the name the region was registered
+ * under in esoc_dev_init() -- "esoc", not "esoc-dev".
+ */
+void __exit esoc_dev_exit(void)
+{
+	bus_unregister_notifier(&esoc_bus_type, &esoc_dev_notifier);
+	unregister_chrdev(esoc_major, "esoc");
+	class_destroy(esoc_class);
+}
+
+MODULE_LICENSE("GPL v2");
+module_init(esoc_dev_init);
+module_exit(esoc_dev_exit);
diff --git a/include/linux/esoc_client.h b/include/linux/esoc_client.h
new file mode 100644
index 0000000..77a8b50
--- /dev/null
+++ b/include/linux/esoc_client.h
@@ -0,0 +1,52 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __ESOC_CLIENT_H_
+#define __ESOC_CLIENT_H_
+
+#include <linux/device.h>
+#include <linux/esoc_ctrl.h>
+#include <linux/notifier.h>
+
+/*
+ * struct esoc_desc: Describes an external soc
+ * @name: external soc name ("esoc<id>")
+ * @link: communication link type (e.g. "HSIC", "PCIe"; see esoc_ctrl.h)
+ * @priv: private data for external soc (the backing esoc_clink)
+ */
+struct esoc_desc {
+	const char *name;
+	const char *link;
+	void *priv;
+};
+
+#ifdef CONFIG_ESOC_CLIENT
+/* Can return probe deferral */
+struct esoc_desc *devm_register_esoc_client(struct device *dev,
+						const char *name);
+void devm_unregister_esoc_client(struct device *dev,
+				struct esoc_desc *esoc_desc);
+int esoc_register_client_notifier(struct notifier_block *nb);
+#else
+/* Stubs used when the esoc client interface is compiled out. */
+static inline struct esoc_desc *devm_register_esoc_client(struct device *dev,
+							const char *name)
+{
+	return NULL;
+}
+static inline void devm_unregister_esoc_client(struct device *dev,
+						struct esoc_desc *esoc_desc)
+{
+}
+static inline int esoc_register_client_notifier(struct notifier_block *nb)
+{
+	return -EIO;
+}
+#endif
+#endif
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 8f09a32..f8e6b3b 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -117,6 +117,7 @@
header-y += elf.h
header-y += errno.h
header-y += errqueue.h
+header-y += esoc_ctrl.h
header-y += ethtool.h
header-y += eventpoll.h
header-y += fadvise.h
diff --git a/include/uapi/linux/esoc_ctrl.h b/include/uapi/linux/esoc_ctrl.h
new file mode 100644
index 0000000..1b17e1c
--- /dev/null
+++ b/include/uapi/linux/esoc_ctrl.h
@@ -0,0 +1,75 @@
+#ifndef _UAPI_ESOC_CTRL_H_
+#define _UAPI_ESOC_CTRL_H_
+
+#include <linux/types.h>
+
+/* ioctl magic number for the esoc char interface */
+#define ESOC_CODE 0xCC
+
+/* Execute a command (enum esoc_cmd) on the external soc. */
+#define ESOC_CMD_EXE _IOW(ESOC_CODE, 1, __u32)
+/* Block until a clink request (enum esoc_req) is available. */
+#define ESOC_WAIT_FOR_REQ _IOR(ESOC_CODE, 2, __u32)
+/* Send a notification (enum esoc_notify) to the esoc driver. */
+#define ESOC_NOTIFY _IOW(ESOC_CODE, 3, __u32)
+/* Read the current status of the external soc. */
+#define ESOC_GET_STATUS _IOR(ESOC_CODE, 4, __u32)
+/* Block until a crash/state event (enum esoc_evt) is available. */
+/* NOTE(review): ioctl nr 5 is skipped here -- presumably reserved. */
+#define ESOC_WAIT_FOR_CRASH _IOR(ESOC_CODE, 6, __u32)
+/* Register the calling fd as the request engine. */
+#define ESOC_REG_REQ_ENG _IO(ESOC_CODE, 7)
+/* Register the calling fd as the command engine. */
+#define ESOC_REG_CMD_ENG _IO(ESOC_CODE, 8)
+
+/*Link types for communication with external SOCs*/
+#define HSIC "HSIC"
+#define HSICPCIe "HSIC+PCIe"
+#define PCIe "PCIe"
+
+/* Events reported by the esoc driver to engines and clients. */
+enum esoc_evt {
+	ESOC_RUN_STATE = 0x1,
+	ESOC_UNEXPECTED_RESET,
+	ESOC_ERR_FATAL,
+	ESOC_IN_DEBUG,
+	ESOC_REQ_ENG_ON,
+	ESOC_REQ_ENG_OFF,
+	ESOC_CMD_ENG_ON,
+	ESOC_CMD_ENG_OFF,
+	ESOC_INVALID_STATE,
+};
+
+/* Commands issued to the external soc via ESOC_CMD_EXE. */
+enum esoc_cmd {
+	ESOC_PWR_ON = 1,
+	ESOC_PWR_OFF,
+	ESOC_FORCE_PWR_OFF,
+	ESOC_RESET,
+	ESOC_PREPARE_DEBUG,
+	ESOC_EXE_DEBUG,
+	ESOC_EXIT_DEBUG,
+};
+
+/* Notifications sent to the esoc driver via ESOC_NOTIFY. */
+enum esoc_notify {
+	ESOC_IMG_XFER_DONE = 1,
+	ESOC_BOOT_DONE,
+	ESOC_BOOT_FAIL,
+	ESOC_IMG_XFER_RETRY,
+	ESOC_IMG_XFER_FAIL,
+	ESOC_UPGRADE_AVAILABLE,
+	ESOC_DEBUG_DONE,
+	ESOC_DEBUG_FAIL,
+	ESOC_PRIMARY_CRASH,
+	ESOC_PRIMARY_REBOOT,
+};
+
+/* Requests delivered to the request engine via ESOC_WAIT_FOR_REQ. */
+enum esoc_req {
+	ESOC_REQ_IMG = 1,
+	ESOC_REQ_DEBUG,
+	ESOC_REQ_SHUTDOWN,
+};
+
+#ifdef __KERNEL__
+/**
+ * struct esoc_handle: Handle for clients of esoc
+ * @name: name of the external soc.
+ * @link: link of external soc.
+ * @id: id of external soc.
+ */
+struct esoc_handle {
+	const char *name;
+	const char *link;
+	unsigned int id;
+};
+#endif
+#endif