Merge "defconfig: Add changes to build cfg80211 as module" into msm-3.0
diff --git a/Documentation/devicetree/bindings/arm/msm/rpm-smd.txt b/Documentation/devicetree/bindings/arm/msm/rpm-smd.txt
new file mode 100644
index 0000000..8ebd3ba
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/rpm-smd.txt
@@ -0,0 +1,30 @@
+Resource Power Manager (RPM)
+
+RPM is a dedicated hardware engine for managing shared SoC resources,
+which includes buses, clocks, power rails, etc. The goal of RPM is
+to achieve the maximum power savings while satisfying the SoC's
+operational and performance requirements. RPM accepts resource
+requests from multiple RPM masters. It arbitrates and aggregates the
+requests, and configures the shared resources. The RPM masters are
+the application processor, the modem processor, as well as hardware
+accelerators. The RPM driver communicates with the hardware engine using
+SMD.
+
+The devicetree representation of the RPM block should be:
+
+Required properties
+
+- compatible: "qcom,rpm-smd"
+- rpm-channel-name: The string corresponding to the channel name of the
+ peripheral subsystem
+- rpm-channel-type: The internal SMD edge for this subsystem found in
+ <mach/msm_smd.h>
+
+Example:
+
+ qcom,rpm-smd {
+		compatible = "qcom,rpm-smd";
+		rpm-channel-name = "rpm_requests";
+		rpm-channel-type = <15>; /* SMD_APPS_RPM */
+	};
diff --git a/arch/arm/boot/dts/msmcopper.dtsi b/arch/arm/boot/dts/msmcopper.dtsi
index aa7adc5..2230e0e 100644
--- a/arch/arm/boot/dts/msmcopper.dtsi
+++ b/arch/arm/boot/dts/msmcopper.dtsi
@@ -278,7 +278,7 @@
reg = <0xfe200000 0x00100>,
<0xfd485100 0x00010>;
- qcom,firmware-name = "lpass";
+ qcom,firmware-name = "adsp";
};
qcom,pronto@fb21b000 {
@@ -294,4 +294,10 @@
qcom,ocmem@fdd00000 {
compatible = "qcom,msm_ocmem";
};
+
+ qcom,rpm-smd {
+ compatible = "qcom,rpm-smd";
+ rpm-channel-name = "rpm_requests";
+ rpm-channel-type = <15>; /* SMD_APPS_RPM */
+ };
};
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index c9d0000..423e71e 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -257,8 +257,8 @@
select MSM_PIL
select MSM_SPM_V2
select MSM_L2_SPM
- select MSM_RPM
select MSM_PM8X60 if PM
+ select MSM_RPM_SMD
config ARCH_FSM9XXX
bool "FSM9XXX"
@@ -387,6 +387,10 @@
bool "Resource Power Manager"
select MSM_MPM
+config MSM_RPM_SMD
+ depends on MSM_SMD
+	bool "Support for using SMD as the transport layer for communications with RPM"
+
config MSM_MPM
bool "Modem Power Manager"
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index 2295679..6a90d6d 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -364,3 +364,5 @@
obj-$(CONFIG_MSM_HSIC_SYSMON) += hsic_sysmon.o
obj-$(CONFIG_MSM_HSIC_SYSMON_TEST) += hsic_sysmon_test.o
+
+obj-$(CONFIG_MSM_RPM_SMD) += rpm-smd.o
diff --git a/arch/arm/mach-msm/board-8064.c b/arch/arm/mach-msm/board-8064.c
index d04e5e4..7ab3894 100644
--- a/arch/arm/mach-msm/board-8064.c
+++ b/arch/arm/mach-msm/board-8064.c
@@ -865,9 +865,11 @@
static void __init apq8064_ehci_host_init(void)
{
- if (machine_is_apq8064_liquid()) {
- msm_ehci_host_pdata3.dock_connect_irq =
- PM8921_MPP_IRQ(PM8921_IRQ_BASE, 9);
+ if (machine_is_apq8064_liquid() || machine_is_mpq8064_cdp() ||
+ machine_is_mpq8064_hrd() || machine_is_mpq8064_dtv()) {
+ if (machine_is_apq8064_liquid())
+ msm_ehci_host_pdata3.dock_connect_irq =
+ PM8921_MPP_IRQ(PM8921_IRQ_BASE, 9);
apq8064_device_ehci_host3.dev.platform_data =
&msm_ehci_host_pdata3;
diff --git a/arch/arm/mach-msm/board-copper.c b/arch/arm/mach-msm/board-copper.c
index c28d172..f5fe3d1 100644
--- a/arch/arm/mach-msm/board-copper.c
+++ b/arch/arm/mach-msm/board-copper.c
@@ -39,6 +39,7 @@
#endif
#include <mach/msm_memtypes.h>
#include <mach/msm_smd.h>
+#include <mach/rpm-smd.h>
#include <mach/qpnp-int.h>
#include <mach/socinfo.h>
#include "clock.h"
@@ -448,6 +449,7 @@
void __init msm_copper_add_drivers(void)
{
msm_smd_init();
+ msm_rpm_driver_init();
msm_spm_device_init();
regulator_stub_init();
}
diff --git a/arch/arm/mach-msm/clock-8960.c b/arch/arm/mach-msm/clock-8960.c
index 93251a4..3ee59b1 100644
--- a/arch/arm/mach-msm/clock-8960.c
+++ b/arch/arm/mach-msm/clock-8960.c
@@ -42,6 +42,7 @@
/* Peripheral clock registers. */
#define ADM0_PBUS_CLK_CTL_REG REG(0x2208)
+#define SFAB_SATA_S_HCLK_CTL_REG REG(0x2480)
#define CE1_HCLK_CTL_REG REG(0x2720)
#define CE1_CORE_CLK_CTL_REG REG(0x2724)
#define PRNG_CLK_NS_REG REG(0x2E80)
@@ -108,10 +109,12 @@
#define TSIF_REF_CLK_MD_REG REG(0x270C)
#define TSIF_REF_CLK_NS_REG REG(0x2710)
#define TSSC_CLK_CTL_REG REG(0x2CA0)
+#define SATA_HCLK_CTL_REG REG(0x2C00)
#define SATA_CLK_SRC_NS_REG REG(0x2C08)
#define SATA_RXOOB_CLK_CTL_REG REG(0x2C0C)
#define SATA_PMALIVE_CLK_CTL_REG REG(0x2C10)
#define SATA_PHY_REF_CLK_CTL_REG REG(0x2C14)
+#define SATA_ACLK_CTL_REG REG(0x2C20)
#define SATA_PHY_CFG_CLK_CTL_REG REG(0x2C40)
#define USB_FSn_HCLK_CTL_REG(n) REG(0x2960+(0x20*((n)-1)))
#define USB_FSn_RESET_REG(n) REG(0x2974+(0x20*((n)-1)))
@@ -1926,6 +1929,69 @@
}
};
+#define F_SATA(f, s, d) \
+ { \
+ .freq_hz = f, \
+ .src_clk = &s##_clk.c, \
+ .ns_val = NS_DIVSRC(6, 3, d, 2, 0, s##_to_bb_mux), \
+ }
+
+static struct clk_freq_tbl clk_tbl_sata[] = {
+ F_SATA( 0, gnd, 1),
+ F_SATA( 48000000, pll8, 8),
+ F_SATA(100000000, pll3, 12),
+ F_END
+};
+
+static struct rcg_clk sata_src_clk = {
+ .b = {
+ .ctl_reg = SATA_CLK_SRC_NS_REG,
+ .halt_check = NOCHECK,
+ },
+ .ns_reg = SATA_CLK_SRC_NS_REG,
+ .root_en_mask = BIT(7),
+ .ns_mask = BM(6, 0),
+ .set_rate = set_rate_nop,
+ .freq_tbl = clk_tbl_sata,
+ .current_freq = &rcg_dummy_freq,
+ .c = {
+ .dbg_name = "sata_src_clk",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP2(LOW, 50000000, NOMINAL, 100000000),
+ CLK_INIT(sata_src_clk.c),
+ },
+};
+
+static struct branch_clk sata_rxoob_clk = {
+ .b = {
+ .ctl_reg = SATA_RXOOB_CLK_CTL_REG,
+ .en_mask = BIT(4),
+ .halt_reg = CLK_HALT_MSS_SMPSS_MISC_STATE_REG,
+ .halt_bit = 26,
+ },
+ .parent = &sata_src_clk.c,
+ .c = {
+ .dbg_name = "sata_rxoob_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(sata_rxoob_clk.c),
+ },
+};
+
+static struct branch_clk sata_pmalive_clk = {
+ .b = {
+ .ctl_reg = SATA_PMALIVE_CLK_CTL_REG,
+ .en_mask = BIT(4),
+ .halt_reg = CLK_HALT_MSS_SMPSS_MISC_STATE_REG,
+ .halt_bit = 25,
+ },
+ .parent = &sata_src_clk.c,
+ .c = {
+ .dbg_name = "sata_pmalive_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(sata_pmalive_clk.c),
+ },
+};
+
static struct branch_clk sata_phy_ref_clk = {
.b = {
.ctl_reg = SATA_PHY_REF_CLK_CTL_REG,
@@ -1941,6 +2007,47 @@
},
};
+static struct branch_clk sata_a_clk = {
+ .b = {
+ .ctl_reg = SATA_ACLK_CTL_REG,
+ .en_mask = BIT(4),
+ .halt_reg = CLK_HALT_AFAB_SFAB_STATEA_REG,
+ .halt_bit = 12,
+ },
+ .c = {
+ .dbg_name = "sata_a_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(sata_a_clk.c),
+ },
+};
+
+static struct branch_clk sata_p_clk = {
+ .b = {
+ .ctl_reg = SATA_HCLK_CTL_REG,
+ .en_mask = BIT(4),
+ .halt_reg = CLK_HALT_MSS_SMPSS_MISC_STATE_REG,
+ .halt_bit = 27,
+ },
+ .c = {
+ .dbg_name = "sata_p_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(sata_p_clk.c),
+ },
+};
+
+static struct branch_clk sfab_sata_s_p_clk = {
+ .b = {
+ .ctl_reg = SFAB_SATA_S_HCLK_CTL_REG,
+ .en_mask = BIT(4),
+ .halt_reg = CLK_HALT_AFAB_SFAB_STATEB_REG,
+ .halt_bit = 14,
+ },
+ .c = {
+ .dbg_name = "sfab_sata_s_p_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(sfab_sata_s_p_clk.c),
+ },
+};
static struct branch_clk pcie_p_clk = {
.b = {
.ctl_reg = PCIE_HCLK_CTL_REG,
@@ -4550,7 +4657,11 @@
{ TEST_PER_LS(0x56), &gsbi7_uart_clk.c },
{ TEST_PER_LS(0x58), &gsbi7_qup_clk.c },
{ TEST_PER_LS(0x59), &gsbi8_p_clk.c },
+ { TEST_PER_LS(0x59), &sfab_sata_s_p_clk.c },
{ TEST_PER_LS(0x5A), &gsbi8_uart_clk.c },
+ { TEST_PER_LS(0x5A), &sata_p_clk.c },
+ { TEST_PER_LS(0x5B), &sata_rxoob_clk.c },
+ { TEST_PER_LS(0x5C), &sata_pmalive_clk.c },
{ TEST_PER_LS(0x5C), &gsbi8_qup_clk.c },
{ TEST_PER_LS(0x5D), &gsbi9_p_clk.c },
{ TEST_PER_LS(0x5E), &gsbi9_uart_clk.c },
@@ -4606,6 +4717,7 @@
{ TEST_PER_HS(0x26), &q6sw_clk },
{ TEST_PER_HS(0x27), &q6fw_clk },
{ TEST_PER_HS(0x2A), &adm0_clk.c },
+ { TEST_PER_HS(0x31), &sata_a_clk.c },
{ TEST_PER_HS(0x2D), &pcie_phy_ref_clk.c },
{ TEST_PER_HS(0x32), &pcie_a_clk.c },
{ TEST_PER_HS(0x34), &ebi1_clk.c },
@@ -4967,6 +5079,12 @@
CLK_LOOKUP("sys_clk", usb_fs1_sys_clk.c, ""),
CLK_LOOKUP("ref_clk", sata_phy_ref_clk.c, ""),
CLK_LOOKUP("cfg_clk", sata_phy_cfg_clk.c, ""),
+ CLK_LOOKUP("src_clk", sata_src_clk.c, ""),
+ CLK_LOOKUP("core_rxoob_clk", sata_rxoob_clk.c, ""),
+ CLK_LOOKUP("core_pmalive_clk", sata_pmalive_clk.c, ""),
+ CLK_LOOKUP("bus_clk", sata_a_clk.c, ""),
+ CLK_LOOKUP("iface_clk", sata_p_clk.c, ""),
+ CLK_LOOKUP("slave_iface_clk", sfab_sata_s_p_clk.c, ""),
CLK_LOOKUP("iface_clk", ce3_p_clk.c, "qce.0"),
CLK_LOOKUP("iface_clk", ce3_p_clk.c, "qcrypto.0"),
CLK_LOOKUP("core_clk", ce3_core_clk.c, "qce.0"),
@@ -5982,9 +6100,14 @@
if (cpu_is_msm8960() || cpu_is_apq8064())
rmwreg(0x2, DSI2_BYTE_NS_REG, 0x7);
- /* Source the sata_phy_ref_clk from PXO */
- if (cpu_is_apq8064())
+ /*
+ * Source the sata_phy_ref_clk from PXO and set predivider of
+ * sata_pmalive_clk to 1.
+ */
+ if (cpu_is_apq8064()) {
rmwreg(0, SATA_PHY_REF_CLK_CTL_REG, 0x1);
+ rmwreg(0, SATA_PMALIVE_CLK_CTL_REG, 0x3);
+ }
/*
* TODO: Programming below PLLs and prng_clk is temporary and
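
Annotation on consuming the new SATA clocks: the CLK_LOOKUP entries above are
registered with an empty device name, so a consumer matches on the connection
id alone. A minimal sketch of how a SATA driver might claim them (the device
pointer is hypothetical, and the rate must be one listed in clk_tbl_sata):

	/* Hypothetical consumer of the SATA clocks registered above. */
	static int example_sata_clk_setup(struct device *dev)
	{
		struct clk *src, *rxoob;
		int rc;

		src = clk_get(dev, "src_clk");
		if (IS_ERR(src))
			return PTR_ERR(src);

		rxoob = clk_get(dev, "core_rxoob_clk");
		if (IS_ERR(rxoob)) {
			clk_put(src);
			return PTR_ERR(rxoob);
		}

		/* 100 MHz is the pll3/12 entry in clk_tbl_sata. */
		rc = clk_set_rate(src, 100000000);
		if (!rc)
			rc = clk_enable(rxoob);

		return rc;
	}
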
diff --git a/arch/arm/mach-msm/include/mach/rpm-smd.h b/arch/arm/mach-msm/include/mach/rpm-smd.h
new file mode 100644
index 0000000..ff58fed
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/rpm-smd.h
@@ -0,0 +1,254 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_RPM_SMD_H
+#define __ARCH_ARM_MACH_MSM_RPM_SMD_H
+
+/**
+ * enum msm_rpm_set - RPM enumerations for sleep/active set
+ * %MSM_RPM_CTX_ACTIVE_SET: Set resource parameters for active mode.
+ * %MSM_RPM_CTX_SLEEP_SET: Set resource parameters for sleep.
+ */
+enum msm_rpm_set {
+ MSM_RPM_CTX_ACTIVE_SET,
+ MSM_RPM_CTX_SLEEP_SET,
+};
+
+struct msm_rpm_request;
+
+struct msm_rpm_kvp {
+ uint32_t key;
+ uint32_t length;
+ uint8_t *data;
+};
+#ifdef CONFIG_MSM_RPM_SMD
+/**
+ * msm_rpm_create_request() - Creates a parent element to identify the
+ * resource on the RPM, that stores the KVPs for different fields modified
+ * for a hardware resource
+ *
+ * @set: the active or sleep set to which the request applies
+ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
+ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type
+ * @num_elements: number of key-value pairs associated with the resource
+ *
+ * returns pointer to a msm_rpm_request on success, NULL on error
+ */
+struct msm_rpm_request *msm_rpm_create_request(
+ enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, int num_elements);
+
+/**
+ * msm_rpm_create_request_noirq() - Creates a parent element to identify the
+ * resource on the RPM, that stores the KVPs for different fields modified
+ * for a hardware resource. This function is similar to msm_rpm_create_request
+ * except that it has to be called with interrupts masked.
+ *
+ * @set: the active or sleep set to which the request applies
+ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
+ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type
+ * @num_elements: number of key-value pairs associated with the resource
+ *
+ * returns pointer to a msm_rpm_request on success, NULL on error
+ */
+struct msm_rpm_request *msm_rpm_create_request_noirq(
+ enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, int num_elements);
+
+/**
+ * msm_rpm_add_kvp_data() - Adds a key-value pair to an existing RPM resource.
+ *
+ * @handle: RPM resource handle to which the data should be appended
+ * @key: unsigned integer identifying the parameter modified
+ * @data: byte array that contains the value corresponding to key.
+ * @size: size of data in bytes.
+ *
+ * returns 0 on success or errno
+ */
+int msm_rpm_add_kvp_data(struct msm_rpm_request *handle,
+ uint32_t key, const uint8_t *data, int size);
+
+/**
+ * msm_rpm_add_kvp_data_noirq() - Adds a key-value pair to an existing RPM
+ * resource. This function is similar to msm_rpm_add_kvp_data except that it
+ * has to be called with interrupts masked.
+ *
+ * @handle: RPM resource handle to which the data should be appended
+ * @key: unsigned integer identifying the parameter modified
+ * @data: byte array that contains the value corresponding to key.
+ * @size: size of data in bytes.
+ *
+ * returns 0 on success or errno
+ */
+int msm_rpm_add_kvp_data_noirq(struct msm_rpm_request *handle,
+ uint32_t key, const uint8_t *data, int size);
+
+/**
+ * msm_rpm_free_request() - Clean up the RPM request handle created with
+ * msm_rpm_create_request()
+ *
+ * @handle: RPM resource handle to be cleared.
+ */
+void msm_rpm_free_request(struct msm_rpm_request *handle);
+
+/**
+ * msm_rpm_send_request() - Send the RPM messages using SMD. The function
+ * assigns a message id before sending the data out to the RPM. RPM hardware
+ * uses the message id to acknowledge the messages.
+ *
+ * @handle: pointer to the msm_rpm_request for the resource being modified.
+ *
+ * returns non-zero message id on success and zero on a failed transaction.
+ * The drivers use message id to wait for ACK from RPM.
+ */
+int msm_rpm_send_request(struct msm_rpm_request *handle);
+
+/**
+ * msm_rpm_send_request_noirq() - Send the RPM messages using SMD. The
+ * function assigns a message id before sending the data out to the RPM.
+ * RPM hardware uses the message id to acknowledge the messages. This function
+ * is similar to msm_rpm_send_request except that it has to be called with
+ * interrupts masked.
+ *
+ * @handle: pointer to the msm_rpm_request for the resource being modified.
+ *
+ * returns non-zero message id on success and zero on a failed transaction.
+ * The drivers use message id to wait for ACK from RPM.
+ */
+int msm_rpm_send_request_noirq(struct msm_rpm_request *handle);
+
+/**
+ * msm_rpm_wait_for_ack() - A blocking call that waits for acknowledgment of
+ * a message from RPM.
+ *
+ * @msg_id: the return from msm_rpm_send_request
+ *
+ * returns 0 on success or errno
+ */
+int msm_rpm_wait_for_ack(uint32_t msg_id);
+
+/**
+ * msm_rpm_wait_for_ack_noirq() - A blocking call that waits for acknowledgment
+ * of a message from RPM. This function is similar to msm_rpm_wait_for_ack
+ * except that it has to be called with interrupts masked.
+ *
+ * @msg_id: the return from msm_rpm_send_request
+ *
+ * returns 0 on success or errno
+ */
+int msm_rpm_wait_for_ack_noirq(uint32_t msg_id);
+
+/**
+ * msm_rpm_send_message() - Wrapper function for clients to send data given an
+ * array of key value pairs.
+ *
+ * @set: the active or sleep set to which the request applies
+ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
+ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type
+ * @kvp: array of KVP data.
+ * @nelems: number of key-value pairs associated with the message.
+ *
+ * returns 0 on success and errno on failure.
+ */
+int msm_rpm_send_message(enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems);
+
+/**
+ * msm_rpm_send_message_noirq() - Wrapper function for clients to send data
+ * given an array of key value pairs. This function is similar to
+ * msm_rpm_send_message() except that it has to be called with interrupts
+ * disabled. Clients should prefer the interrupt-enabled version whenever
+ * possible, for better system performance.
+ *
+ * @set: the active or sleep set to which the request applies
+ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
+ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type
+ * @kvp: array of KVP data.
+ * @nelems: number of key-value pairs associated with the message.
+ *
+ * returns 0 on success and errno on failure.
+ */
+int msm_rpm_send_message_noirq(enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems);
+
+/**
+ * msm_rpm_driver_init() - Initialization function that registers the
+ * RPM SMD platform driver.
+ *
+ * returns 0 on success.
+ */
+int __init msm_rpm_driver_init(void);
+
+#else
+
+static inline struct msm_rpm_request *msm_rpm_create_request(
+ enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, int num_elements)
+{
+ return NULL;
+}
+
+static inline struct msm_rpm_request *msm_rpm_create_request_noirq(
+ enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, int num_elements)
+{
+	return NULL;
+}
+
+static inline int msm_rpm_add_kvp_data(struct msm_rpm_request *handle,
+		uint32_t key, const uint8_t *data, int size)
+{
+	return 0;
+}
+
+static inline int msm_rpm_add_kvp_data_noirq(
+		struct msm_rpm_request *handle, uint32_t key,
+		const uint8_t *data, int size)
+{
+	return 0;
+}
+
+static inline void msm_rpm_free_request(struct msm_rpm_request *handle)
+{
+}
+
+static inline int msm_rpm_send_request(struct msm_rpm_request *handle)
+{
+ return 0;
+}
+
+static inline int msm_rpm_send_request_noirq(struct msm_rpm_request *handle)
+{
+	return 0;
+}
+
+static inline int msm_rpm_wait_for_ack(uint32_t msg_id)
+{
+	return 0;
+}
+static inline int msm_rpm_wait_for_ack_noirq(uint32_t msg_id)
+{
+ return 0;
+}
+
+static inline int __init msm_rpm_driver_init(void)
+{
+ return 0;
+}
+#endif
+#endif /* __ARCH_ARM_MACH_MSM_RPM_SMD_H */
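
Annotation: the kernel-doc above implies the following call sequence for a
client; msm_rpm_send_message() wraps exactly this pattern. A minimal sketch,
where the resource type, resource id, and key are placeholder values rather
than real RPM resources:

	/* Illustrative only: vote a single 4-byte value on a made-up resource. */
	static int example_rpm_vote(void)
	{
		uint32_t value = 1;
		struct msm_rpm_request *req;
		int msg_id, rc;

		req = msm_rpm_create_request(MSM_RPM_CTX_ACTIVE_SET,
				0x12345678, 0, 1);	/* placeholder type/id */
		if (!req)
			return -ENOMEM;

		rc = msm_rpm_add_kvp_data(req, 0x0,	/* placeholder key */
				(uint8_t *)&value, sizeof(value));
		if (rc)
			goto out;

		msg_id = msm_rpm_send_request(req);
		if (!msg_id) {
			rc = -EIO;
			goto out;
		}
		rc = msm_rpm_wait_for_ack(msg_id);
	out:
		msm_rpm_free_request(req);
		return rc;
	}
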
diff --git a/arch/arm/mach-msm/rpm-notifier.h b/arch/arm/mach-msm/rpm-notifier.h
new file mode 100644
index 0000000..df8d9b3
--- /dev/null
+++ b/arch/arm/mach-msm/rpm-notifier.h
@@ -0,0 +1,27 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __ARCH_ARM_MACH_MSM_RPM_NOTIF_H
+#define __ARCH_ARM_MACH_MSM_RPM_NOTIF_H
+
+struct msm_rpm_notifier_data {
+ uint32_t rsc_type;
+ uint32_t rsc_id;
+ uint32_t key;
+ uint32_t size;
+ uint8_t *value;
+};
+
+int msm_rpm_register_notifier(struct notifier_block *nb);
+int msm_rpm_unregister_notifier(struct notifier_block *nb);
+
+#endif /*__ARCH_ARM_MACH_MSM_RPM_NOTIF_H */
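
Annotation: a minimal sketch of a sleep-set listener, assuming standard kernel
notifier semantics; rpm-smd.c invokes this chain with action 0 and a pointer
to struct msm_rpm_notifier_data whenever a sleep-set KVP is written:

	static int example_sleep_cb(struct notifier_block *nb,
			unsigned long action, void *data)
	{
		struct msm_rpm_notifier_data *d = data;

		pr_debug("sleep set write: type=%u id=%u key=%u size=%u\n",
				d->rsc_type, d->rsc_id, d->key, d->size);
		return NOTIFY_OK;
	}

	static struct notifier_block example_sleep_nb = {
		.notifier_call = example_sleep_cb,
	};

	/* ... during init: msm_rpm_register_notifier(&example_sleep_nb); ... */
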
diff --git a/arch/arm/mach-msm/rpm-smd.c b/arch/arm/mach-msm/rpm-smd.c
new file mode 100644
index 0000000..75f4d92
--- /dev/null
+++ b/arch/arm/mach-msm/rpm-smd.c
@@ -0,0 +1,826 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/bug.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/notifier.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <mach/socinfo.h>
+#include <mach/msm_smd.h>
+#include <mach/rpm-smd.h>
+#include "rpm-notifier.h"
+
+struct msm_rpm_driver_data {
+ const char *ch_name;
+ uint32_t ch_type;
+ smd_channel_t *ch_info;
+ struct work_struct work;
+ spinlock_t smd_lock_write;
+ spinlock_t smd_lock_read;
+ struct completion smd_open;
+};
+
+#define DEFAULT_BUFFER_SIZE 256
+#define GFP_FLAG(noirq) (noirq ? GFP_ATOMIC : GFP_KERNEL)
+#define INV_HDR "resource does not exist"
+#define ERR "err\0"
+#define MAX_ERR_BUFFER_SIZE 60
+
+static struct atomic_notifier_head msm_rpm_sleep_notifier;
+static bool standalone;
+
+int msm_rpm_register_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&msm_rpm_sleep_notifier, nb);
+}
+
+int msm_rpm_unregister_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&msm_rpm_sleep_notifier, nb);
+}
+
+static struct workqueue_struct *msm_rpm_smd_wq;
+
+enum {
+ MSM_RPM_MSG_REQUEST_TYPE = 0,
+ MSM_RPM_MSG_TYPE_NR,
+};
+
+static const uint32_t msm_rpm_request_service[MSM_RPM_MSG_TYPE_NR] = {
+ 0x716572, /* 'req\0' */
+};
+
+/* The order of fields matters and reflects the order expected by the RPM. */
+struct rpm_request_header {
+ uint32_t service_type;
+ uint32_t request_len;
+};
+
+struct rpm_message_header {
+ uint32_t msg_id;
+ enum msm_rpm_set set;
+ uint32_t resource_type;
+ uint32_t resource_id;
+ uint32_t data_len;
+};
+
+struct msm_rpm_kvp_data {
+ uint32_t key;
+ uint32_t nbytes; /* number of bytes */
+ uint8_t *value;
+ bool valid;
+};
+
+static atomic_t msm_rpm_msg_id = ATOMIC_INIT(0);
+
+static struct msm_rpm_driver_data msm_rpm_data;
+
+struct msm_rpm_request {
+ struct rpm_request_header req_hdr;
+ struct rpm_message_header msg_hdr;
+ struct msm_rpm_kvp_data *kvp;
+ uint32_t num_elements;
+ uint32_t write_idx;
+ uint8_t *buf;
+ uint32_t numbytes;
+};
+
+/*
+ * Data related to message acknowledgement
+ */
+
+LIST_HEAD(msm_rpm_wait_list);
+
+struct msm_rpm_wait_data {
+ struct list_head list;
+ uint32_t msg_id;
+ bool ack_recd;
+ int errno;
+ struct completion ack;
+};
+DEFINE_SPINLOCK(msm_rpm_list_lock);
+
+struct msm_rpm_ack_msg {
+ uint32_t req;
+ uint32_t req_len;
+ uint32_t rsc_id;
+ uint32_t msg_len;
+ uint32_t id_ack;
+};
+
+static int irq_process;
+
+LIST_HEAD(msm_rpm_ack_list);
+
+static void msm_rpm_notify_sleep_chain(struct rpm_message_header *hdr,
+ struct msm_rpm_kvp_data *kvp)
+{
+ struct msm_rpm_notifier_data notif;
+
+ notif.rsc_type = hdr->resource_type;
+ notif.rsc_id = hdr->resource_id;
+ notif.key = kvp->key;
+ notif.size = kvp->nbytes;
+ notif.value = kvp->value;
+ atomic_notifier_call_chain(&msm_rpm_sleep_notifier, 0, ¬if);
+}
+
+static int msm_rpm_add_kvp_data_common(struct msm_rpm_request *handle,
+ uint32_t key, const uint8_t *data, int size, bool noirq)
+{
+ int i;
+ int data_size, msg_size;
+
+ if (!handle)
+ return -EINVAL;
+
+ data_size = ALIGN(size, SZ_4);
+ msg_size = data_size + sizeof(struct rpm_request_header);
+
+ for (i = 0; i < handle->write_idx; i++) {
+ if (handle->kvp[i].key != key)
+ continue;
+ if (handle->kvp[i].nbytes != data_size) {
+ kfree(handle->kvp[i].value);
+ handle->kvp[i].value = NULL;
+ } else {
+ if (!memcmp(handle->kvp[i].value, data, data_size))
+ return 0;
+ }
+ break;
+ }
+
+ if (i >= handle->num_elements)
+ return -ENOMEM;
+
+ if (i == handle->write_idx)
+ handle->write_idx++;
+
+ if (!handle->kvp[i].value) {
+ handle->kvp[i].value = kzalloc(data_size, GFP_FLAG(noirq));
+
+ if (!handle->kvp[i].value)
+ return -ENOMEM;
+ } else {
+		/*
+		 * A key already exists but its data differs, so zero out
+		 * the old value before copying in the new one.
+		 */
+ memset(handle->kvp[i].value, 0, data_size);
+ }
+
+ if (!handle->kvp[i].valid)
+ handle->msg_hdr.data_len += msg_size;
+ else
+ handle->msg_hdr.data_len += (data_size - handle->kvp[i].nbytes);
+
+ handle->kvp[i].nbytes = data_size;
+ handle->kvp[i].key = key;
+ memcpy(handle->kvp[i].value, data, size);
+ handle->kvp[i].valid = true;
+
+ if (handle->msg_hdr.set == MSM_RPM_CTX_SLEEP_SET)
+ msm_rpm_notify_sleep_chain(&handle->msg_hdr, &handle->kvp[i]);
+
+ return 0;
+}
+
+static struct msm_rpm_request *msm_rpm_create_request_common(
+ enum msm_rpm_set set, uint32_t rsc_type, uint32_t rsc_id,
+ int num_elements, bool noirq)
+{
+ struct msm_rpm_request *cdata;
+
+ cdata = kzalloc(sizeof(struct msm_rpm_request),
+ GFP_FLAG(noirq));
+
+ if (!cdata) {
+		pr_err("%s(): Cannot allocate memory for client data\n",
+				__func__);
+ goto cdata_alloc_fail;
+ }
+
+ cdata->msg_hdr.set = set;
+ cdata->msg_hdr.resource_type = rsc_type;
+ cdata->msg_hdr.resource_id = rsc_id;
+ cdata->msg_hdr.data_len = 0;
+
+ cdata->num_elements = num_elements;
+ cdata->write_idx = 0;
+
+ cdata->kvp = kzalloc(sizeof(struct msm_rpm_kvp_data) * num_elements,
+ GFP_FLAG(noirq));
+
+ if (!cdata->kvp) {
+ pr_warn("%s(): Cannot allocate memory for key value data\n",
+ __func__);
+ goto kvp_alloc_fail;
+ }
+
+ cdata->buf = kzalloc(DEFAULT_BUFFER_SIZE, GFP_FLAG(noirq));
+
+ if (!cdata->buf)
+ goto buf_alloc_fail;
+
+ cdata->numbytes = DEFAULT_BUFFER_SIZE;
+ return cdata;
+
+buf_alloc_fail:
+ kfree(cdata->kvp);
+kvp_alloc_fail:
+ kfree(cdata);
+cdata_alloc_fail:
+ return NULL;
+}
+
+void msm_rpm_free_request(struct msm_rpm_request *handle)
+{
+ int i;
+
+ if (!handle)
+ return;
+ for (i = 0; i < handle->write_idx; i++)
+ kfree(handle->kvp[i].value);
+ kfree(handle->kvp);
+ kfree(handle);
+}
+EXPORT_SYMBOL(msm_rpm_free_request);
+
+struct msm_rpm_request *msm_rpm_create_request(
+ enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, int num_elements)
+{
+ return msm_rpm_create_request_common(set, rsc_type, rsc_id,
+ num_elements, false);
+}
+EXPORT_SYMBOL(msm_rpm_create_request);
+
+struct msm_rpm_request *msm_rpm_create_request_noirq(
+ enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, int num_elements)
+{
+ return msm_rpm_create_request_common(set, rsc_type, rsc_id,
+ num_elements, true);
+}
+EXPORT_SYMBOL(msm_rpm_create_request_noirq);
+
+int msm_rpm_add_kvp_data(struct msm_rpm_request *handle,
+ uint32_t key, const uint8_t *data, int size)
+{
+ return msm_rpm_add_kvp_data_common(handle, key, data, size, false);
+}
+EXPORT_SYMBOL(msm_rpm_add_kvp_data);
+
+int msm_rpm_add_kvp_data_noirq(struct msm_rpm_request *handle,
+ uint32_t key, const uint8_t *data, int size)
+{
+ return msm_rpm_add_kvp_data_common(handle, key, data, size, true);
+}
+EXPORT_SYMBOL(msm_rpm_add_kvp_data_noirq);
+
+/* Runs in interrupt context */
+static void msm_rpm_notify(void *data, unsigned event)
+{
+ struct msm_rpm_driver_data *pdata = (struct msm_rpm_driver_data *)data;
+ BUG_ON(!pdata);
+
+ if (!(pdata->ch_info))
+ return;
+
+ switch (event) {
+ case SMD_EVENT_DATA:
+ queue_work(msm_rpm_smd_wq, &pdata->work);
+ break;
+ case SMD_EVENT_OPEN:
+ complete(&pdata->smd_open);
+ break;
+ case SMD_EVENT_CLOSE:
+ case SMD_EVENT_STATUS:
+ case SMD_EVENT_REOPEN_READY:
+ break;
+	default:
+		pr_info("Unknown SMD event %u\n", event);
+		break;
+	}
+}
+
+static struct msm_rpm_wait_data *msm_rpm_get_entry_from_msg_id(uint32_t msg_id)
+{
+ struct list_head *ptr;
+ struct msm_rpm_wait_data *elem;
+ unsigned long flags;
+
+ spin_lock_irqsave(&msm_rpm_list_lock, flags);
+
+ list_for_each(ptr, &msm_rpm_wait_list) {
+ elem = list_entry(ptr, struct msm_rpm_wait_data, list);
+ if (elem && (elem->msg_id == msg_id))
+ break;
+ elem = NULL;
+ }
+ spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
+ return elem;
+}
+
+static int msm_rpm_get_next_msg_id(void)
+{
+ int id;
+
+ do {
+ id = atomic_inc_return(&msm_rpm_msg_id);
+ } while ((id == 0) || msm_rpm_get_entry_from_msg_id(id));
+
+ return id;
+}
+
+static int msm_rpm_add_wait_list(uint32_t msg_id)
+{
+ unsigned long flags;
+ struct msm_rpm_wait_data *data =
+ kzalloc(sizeof(struct msm_rpm_wait_data), GFP_ATOMIC);
+
+ if (!data)
+ return -ENOMEM;
+
+ init_completion(&data->ack);
+ data->ack_recd = false;
+ data->msg_id = msg_id;
+ spin_lock_irqsave(&msm_rpm_list_lock, flags);
+ list_add(&data->list, &msm_rpm_wait_list);
+ spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
+
+ return 0;
+}
+
+static void msm_rpm_free_list_entry(struct msm_rpm_wait_data *elem)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&msm_rpm_list_lock, flags);
+ list_del(&elem->list);
+ spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
+ kfree(elem);
+}
+
+static void msm_rpm_process_ack(uint32_t msg_id, int errno)
+{
+ struct list_head *ptr;
+ struct msm_rpm_wait_data *elem;
+ unsigned long flags;
+
+ spin_lock_irqsave(&msm_rpm_list_lock, flags);
+
+ list_for_each(ptr, &msm_rpm_wait_list) {
+ elem = list_entry(ptr, struct msm_rpm_wait_data, list);
+ if (elem && (elem->msg_id == msg_id)) {
+ elem->errno = errno;
+ elem->ack_recd = true;
+ complete(&elem->ack);
+ break;
+ }
+ elem = NULL;
+ }
+ WARN_ON(!elem);
+
+ spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
+}
+
+struct msm_rpm_kvp_packet {
+ uint32_t id;
+ uint32_t len;
+ uint32_t val;
+};
+
+static inline uint32_t msm_rpm_get_msg_id_from_ack(uint8_t *buf)
+{
+ return ((struct msm_rpm_ack_msg *)buf)->id_ack;
+}
+
+static inline int msm_rpm_get_error_from_ack(uint8_t *buf)
+{
+ uint8_t *tmp;
+ uint32_t req_len = ((struct msm_rpm_ack_msg *)buf)->req_len;
+
+ int rc = -ENODEV;
+
+ req_len -= sizeof(struct msm_rpm_ack_msg);
+ req_len += 2 * sizeof(uint32_t);
+ if (!req_len)
+ return 0;
+
+ tmp = buf + sizeof(struct msm_rpm_ack_msg);
+
+ BUG_ON(memcmp(tmp, ERR, sizeof(uint32_t)));
+
+ tmp += 2 * sizeof(uint32_t);
+
+ if (!(memcmp(tmp, INV_HDR, min(req_len, sizeof(INV_HDR))-1)))
+ rc = -EINVAL;
+
+ return rc;
+}
+
+static void msm_rpm_read_smd_data(char *buf)
+{
+ int pkt_sz;
+ int bytes_read = 0;
+
+ pkt_sz = smd_cur_packet_size(msm_rpm_data.ch_info);
+
+ BUG_ON(pkt_sz > MAX_ERR_BUFFER_SIZE);
+
+ if (pkt_sz != smd_read_avail(msm_rpm_data.ch_info))
+ return;
+
+ BUG_ON(pkt_sz == 0);
+
+ do {
+ int len;
+
+ len = smd_read(msm_rpm_data.ch_info, buf + bytes_read, pkt_sz);
+ pkt_sz -= len;
+ bytes_read += len;
+
+ } while (pkt_sz > 0);
+
+ BUG_ON(pkt_sz < 0);
+}
+
+static void msm_rpm_smd_work(struct work_struct *work)
+{
+ uint32_t msg_id;
+ int errno;
+ char buf[MAX_ERR_BUFFER_SIZE] = {0};
+ unsigned long flags;
+
+ while (smd_is_pkt_avail(msm_rpm_data.ch_info) && !irq_process) {
+ spin_lock_irqsave(&msm_rpm_data.smd_lock_read, flags);
+ msm_rpm_read_smd_data(buf);
+ spin_unlock_irqrestore(&msm_rpm_data.smd_lock_read, flags);
+ msg_id = msm_rpm_get_msg_id_from_ack(buf);
+ errno = msm_rpm_get_error_from_ack(buf);
+ msm_rpm_process_ack(msg_id, errno);
+ }
+}
+
+static int msm_rpm_send_data(struct msm_rpm_request *cdata,
+ int msg_type, bool noirq)
+{
+ uint8_t *tmpbuff;
+ int i, ret, msg_size;
+ unsigned long flags;
+
+ int req_hdr_sz, msg_hdr_sz;
+
+ if (!cdata->msg_hdr.data_len)
+ return 0;
+ req_hdr_sz = sizeof(cdata->req_hdr);
+ msg_hdr_sz = sizeof(cdata->msg_hdr);
+
+ cdata->req_hdr.service_type = msm_rpm_request_service[msg_type];
+
+ cdata->msg_hdr.msg_id = msm_rpm_get_next_msg_id();
+
+ cdata->req_hdr.request_len = cdata->msg_hdr.data_len + msg_hdr_sz;
+ msg_size = cdata->req_hdr.request_len + req_hdr_sz;
+
+ /* populate data_len */
+ if (msg_size > cdata->numbytes) {
+ kfree(cdata->buf);
+ cdata->numbytes = msg_size;
+ cdata->buf = kzalloc(msg_size, GFP_FLAG(noirq));
+ }
+
+ if (!cdata->buf)
+ return 0;
+
+ tmpbuff = cdata->buf;
+
+ memcpy(tmpbuff, &cdata->req_hdr, req_hdr_sz + msg_hdr_sz);
+
+ tmpbuff += req_hdr_sz + msg_hdr_sz;
+
+ for (i = 0; (i < cdata->write_idx); i++) {
+ /* Sanity check */
+ BUG_ON((tmpbuff - cdata->buf) > cdata->numbytes);
+
+ if (!cdata->kvp[i].valid)
+ continue;
+
+ memcpy(tmpbuff, &cdata->kvp[i].key, sizeof(uint32_t));
+ tmpbuff += sizeof(uint32_t);
+
+ memcpy(tmpbuff, &cdata->kvp[i].nbytes, sizeof(uint32_t));
+ tmpbuff += sizeof(uint32_t);
+
+ memcpy(tmpbuff, cdata->kvp[i].value, cdata->kvp[i].nbytes);
+ tmpbuff += cdata->kvp[i].nbytes;
+ }
+
+ if (standalone) {
+ for (i = 0; (i < cdata->write_idx); i++)
+ cdata->kvp[i].valid = false;
+
+ cdata->msg_hdr.data_len = 0;
+ ret = cdata->msg_hdr.msg_id;
+ return ret;
+ }
+
+ msm_rpm_add_wait_list(cdata->msg_hdr.msg_id);
+
+ spin_lock_irqsave(&msm_rpm_data.smd_lock_write, flags);
+
+ ret = smd_write_avail(msm_rpm_data.ch_info);
+
+ if (ret < 0) {
+ pr_warn("%s(): SMD not initialized\n", __func__);
+ spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write, flags);
+ return 0;
+ }
+
+	while (ret < msg_size) {
+ if (!noirq) {
+ spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write,
+ flags);
+ cpu_relax();
+ spin_lock_irqsave(&msm_rpm_data.smd_lock_write, flags);
+ } else
+ udelay(5);
+ ret = smd_write_avail(msm_rpm_data.ch_info);
+ }
+
+ ret = smd_write(msm_rpm_data.ch_info, &cdata->buf[0], msg_size);
+ spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write, flags);
+
+ if (ret == msg_size) {
+ for (i = 0; (i < cdata->write_idx); i++)
+ cdata->kvp[i].valid = false;
+ cdata->msg_hdr.data_len = 0;
+ ret = cdata->msg_hdr.msg_id;
+	} else if (ret < msg_size) {
+		struct msm_rpm_wait_data *rc;
+
+		pr_info("Failed to write data msg_size:%d ret:%d\n",
+				msg_size, ret);
+		ret = 0;
+ rc = msm_rpm_get_entry_from_msg_id(cdata->msg_hdr.msg_id);
+ if (rc)
+ msm_rpm_free_list_entry(rc);
+ }
+ return ret;
+}
+
+int msm_rpm_send_request(struct msm_rpm_request *handle)
+{
+ return msm_rpm_send_data(handle, MSM_RPM_MSG_REQUEST_TYPE, false);
+}
+EXPORT_SYMBOL(msm_rpm_send_request);
+
+int msm_rpm_send_request_noirq(struct msm_rpm_request *handle)
+{
+ return msm_rpm_send_data(handle, MSM_RPM_MSG_REQUEST_TYPE, true);
+}
+EXPORT_SYMBOL(msm_rpm_send_request_noirq);
+
+int msm_rpm_wait_for_ack(uint32_t msg_id)
+{
+ struct msm_rpm_wait_data *elem;
+ int rc = 0;
+
+ if (!msg_id)
+ return -EINVAL;
+
+ if (standalone)
+ return 0;
+
+ elem = msm_rpm_get_entry_from_msg_id(msg_id);
+ if (!elem)
+ return 0;
+
+ rc = wait_for_completion_timeout(&elem->ack, msecs_to_jiffies(1));
+ if (!rc) {
+ pr_warn("%s(): Timed out after 1 ms\n", __func__);
+ rc = -ETIMEDOUT;
+ } else {
+ rc = elem->errno;
+ msm_rpm_free_list_entry(elem);
+ }
+ return rc;
+}
+EXPORT_SYMBOL(msm_rpm_wait_for_ack);
+
+int msm_rpm_wait_for_ack_noirq(uint32_t msg_id)
+{
+ struct msm_rpm_wait_data *elem;
+ unsigned long flags;
+ int rc = 0;
+ uint32_t id = 0;
+ int count = 0;
+
+ if (!msg_id)
+ return -EINVAL;
+
+ if (standalone)
+ return 0;
+
+ spin_lock_irqsave(&msm_rpm_data.smd_lock_read, flags);
+ irq_process = true;
+
+ elem = msm_rpm_get_entry_from_msg_id(msg_id);
+
+ if (!elem)
+		/*
+		 * Should this be a BUG()? Is it OK for another thread
+		 * to read the message?
+		 */
+ goto wait_ack_cleanup;
+
+ while ((id != msg_id) && (count++ < 10)) {
+ if (smd_is_pkt_avail(msm_rpm_data.ch_info)) {
+ int errno;
+ char buf[MAX_ERR_BUFFER_SIZE] = {};
+
+ msm_rpm_read_smd_data(buf);
+ id = msm_rpm_get_msg_id_from_ack(buf);
+ errno = msm_rpm_get_error_from_ack(buf);
+ msm_rpm_process_ack(id, errno);
+ } else
+ udelay(100);
+ }
+
+	if (id != msg_id) {
+ rc = -ETIMEDOUT;
+ pr_warn("%s(): Timed out after 1ms\n", __func__);
+ } else {
+ rc = elem->errno;
+ msm_rpm_free_list_entry(elem);
+ }
+wait_ack_cleanup:
+ irq_process = false;
+ spin_unlock_irqrestore(&msm_rpm_data.smd_lock_read, flags);
+ return rc;
+}
+EXPORT_SYMBOL(msm_rpm_wait_for_ack_noirq);
+
+int msm_rpm_send_message(enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems)
+{
+ int i, rc;
+ struct msm_rpm_request *req =
+ msm_rpm_create_request(set, rsc_type, rsc_id, nelems);
+ if (!req)
+ return -ENOMEM;
+
+ for (i = 0; i < nelems; i++) {
+ rc = msm_rpm_add_kvp_data(req, kvp[i].key,
+ kvp[i].data, kvp[i].length);
+ if (rc)
+ goto bail;
+ }
+
+ rc = msm_rpm_wait_for_ack(msm_rpm_send_request(req));
+bail:
+ msm_rpm_free_request(req);
+ return rc;
+}
+EXPORT_SYMBOL(msm_rpm_send_message);
+
+int msm_rpm_send_message_noirq(enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems)
+{
+ int i, rc;
+ struct msm_rpm_request *req =
+ msm_rpm_create_request_noirq(set, rsc_type, rsc_id, nelems);
+ if (!req)
+ return -ENOMEM;
+
+ for (i = 0; i < nelems; i++) {
+ rc = msm_rpm_add_kvp_data_noirq(req, kvp[i].key,
+ kvp[i].data, kvp[i].length);
+ if (rc)
+ goto bail;
+ }
+
+ rc = msm_rpm_wait_for_ack_noirq(msm_rpm_send_request_noirq(req));
+bail:
+ msm_rpm_free_request(req);
+ return rc;
+}
+EXPORT_SYMBOL(msm_rpm_send_message_noirq);
+
+static bool msm_rpm_set_standalone(void)
+{
+ if (machine_is_copper()) {
+		pr_warn("%s(): Running in standalone mode, requests will not be sent to RPM\n",
+			__func__);
+ standalone = true;
+ }
+ return standalone;
+}
+
+static int __devinit msm_rpm_dev_probe(struct platform_device *pdev)
+{
+ char *key = NULL;
+ int ret;
+
+ key = "rpm-channel-name";
+ ret = of_property_read_string(pdev->dev.of_node, key,
+ &msm_rpm_data.ch_name);
+ if (ret)
+ goto fail;
+
+ key = "rpm-channel-type";
+ ret = of_property_read_u32(pdev->dev.of_node, key,
+ &msm_rpm_data.ch_type);
+ if (ret)
+ goto fail;
+
+ init_completion(&msm_rpm_data.smd_open);
+ spin_lock_init(&msm_rpm_data.smd_lock_write);
+ spin_lock_init(&msm_rpm_data.smd_lock_read);
+ INIT_WORK(&msm_rpm_data.work, msm_rpm_smd_work);
+
+ if (smd_named_open_on_edge(msm_rpm_data.ch_name, msm_rpm_data.ch_type,
+ &msm_rpm_data.ch_info, &msm_rpm_data,
+ msm_rpm_notify)) {
+ pr_info("Cannot open RPM channel %s %d\n", msm_rpm_data.ch_name,
+ msm_rpm_data.ch_type);
+
+ msm_rpm_set_standalone();
+ BUG_ON(!standalone);
+ complete(&msm_rpm_data.smd_open);
+ }
+
+ ret = wait_for_completion_timeout(&msm_rpm_data.smd_open,
+ msecs_to_jiffies(5));
+
+ BUG_ON(!ret);
+
+ smd_disable_read_intr(msm_rpm_data.ch_info);
+
+ if (!standalone) {
+ msm_rpm_smd_wq = create_singlethread_workqueue("rpm-smd");
+ if (!msm_rpm_smd_wq)
+ return -EINVAL;
+ }
+
+ of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+ return 0;
+fail:
+ pr_err("%s(): Failed to read node: %s, key=%s\n", __func__,
+ pdev->dev.of_node->full_name, key);
+ return -EINVAL;
+}
+
+static struct of_device_id msm_rpm_match_table[] = {
+ {.compatible = "qcom,rpm-smd"},
+ {},
+};
+
+static struct platform_driver msm_rpm_device_driver = {
+ .probe = msm_rpm_dev_probe,
+ .driver = {
+ .name = "rpm-smd",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_rpm_match_table,
+ },
+};
+
+int __init msm_rpm_driver_init(void)
+{
+ static bool registered;
+
+ if (registered)
+ return 0;
+ registered = true;
+
+ return platform_driver_register(&msm_rpm_device_driver);
+}
+EXPORT_SYMBOL(msm_rpm_driver_init);
+late_initcall(msm_rpm_driver_init);
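
Annotation: the request that msm_rpm_send_data() assembles is the request
header, then the message header, then each valid KVP as a key/length/value
triple. For one KVP carrying a 4-byte value, the resulting 40-byte SMD packet
looks like this (a sketch derived from the structures above; all fields are
32-bit little-endian):

	/*
	 * offset  field
	 *  0x00   req_hdr.service_type   0x00716572 ("req\0")
	 *  0x04   req_hdr.request_len    32 (message header + data_len)
	 *  0x08   msg_hdr.msg_id         assigned by msm_rpm_get_next_msg_id()
	 *  0x0c   msg_hdr.set            active (0) or sleep (1)
	 *  0x10   msg_hdr.resource_type
	 *  0x14   msg_hdr.resource_id
	 *  0x18   msg_hdr.data_len       12 (key + length + 4-byte value)
	 *  0x1c   kvp[0].key
	 *  0x20   kvp[0].nbytes          4 (values are padded to a multiple of 4)
	 *  0x24   kvp[0].value
	 */
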
diff --git a/block/blk-core.c b/block/blk-core.c
index 35ae52d..a6a8ccb 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1467,7 +1467,7 @@
goto end_io;
}
- if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
+ if (unlikely(!(bio->bi_rw & (REQ_DISCARD | REQ_SANITIZE)) &&
nr_sectors > queue_max_hw_sectors(q))) {
printk(KERN_ERR "bio too big device %s (%u > %u)\n",
bdevname(bio->bi_bdev, b),
@@ -1521,6 +1521,14 @@
goto end_io;
}
+ if ((bio->bi_rw & REQ_SANITIZE) &&
+ (!blk_queue_sanitize(q))) {
+		pr_info("%s - got a SANITIZE request but the queue doesn't support sanitize requests\n",
+			__func__);
+ err = -EOPNOTSUPP;
+ goto end_io;
+ }
+
if (blk_throtl_bio(q, &bio))
goto end_io;
@@ -1611,7 +1619,8 @@
* If it's a regular read/write or a barrier with data attached,
* go through the normal accounting stuff before submission.
*/
- if (bio_has_data(bio) && !(rw & REQ_DISCARD)) {
+ if (bio_has_data(bio) &&
+ (!(rw & (REQ_DISCARD | REQ_SANITIZE)))) {
if (rw & WRITE) {
count_vm_events(PGPGOUT, count);
} else {
@@ -1657,7 +1666,7 @@
*/
int blk_rq_check_limits(struct request_queue *q, struct request *rq)
{
- if (rq->cmd_flags & REQ_DISCARD)
+ if (rq->cmd_flags & (REQ_DISCARD | REQ_SANITIZE))
return 0;
if (blk_rq_sectors(rq) > queue_max_sectors(q) ||
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 78e627e..39a7f25 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -112,6 +112,57 @@
EXPORT_SYMBOL(blkdev_issue_discard);
/**
+ * blkdev_issue_sanitize - queue a sanitize request
+ * @bdev: blockdev to issue sanitize for
+ * @gfp_mask: memory allocation flags (for bio_alloc)
+ *
+ * Description:
+ * Issue a sanitize request for the specified block device
+ */
+int blkdev_issue_sanitize(struct block_device *bdev, gfp_t gfp_mask)
+{
+ DECLARE_COMPLETION_ONSTACK(wait);
+ struct request_queue *q = bdev_get_queue(bdev);
+ int type = REQ_WRITE | REQ_SANITIZE;
+ struct bio_batch bb;
+ struct bio *bio;
+ int ret = 0;
+
+ if (!q)
+ return -ENXIO;
+
+ if (!blk_queue_sanitize(q)) {
+		pr_err("%s - card doesn't support sanitize\n", __func__);
+ return -EOPNOTSUPP;
+ }
+
+ bio = bio_alloc(gfp_mask, 1);
+ if (!bio)
+ return -ENOMEM;
+
+ atomic_set(&bb.done, 1);
+ bb.flags = 1 << BIO_UPTODATE;
+ bb.wait = &wait;
+
+ bio->bi_end_io = bio_batch_end_io;
+ bio->bi_bdev = bdev;
+ bio->bi_private = &bb;
+
+ atomic_inc(&bb.done);
+ submit_bio(type, bio);
+
+ /* Wait for bios in-flight */
+ if (!atomic_dec_and_test(&bb.done))
+ wait_for_completion(&wait);
+
+ if (!test_bit(BIO_UPTODATE, &bb.flags))
+ ret = -EIO;
+
+ return ret;
+}
+EXPORT_SYMBOL(blkdev_issue_sanitize);
+
+/**
* blkdev_issue_zeroout - generate number of zero filed write bios
* @bdev: blockdev to issue
* @sector: start sector
diff --git a/block/blk-merge.c b/block/blk-merge.c
index cfcc37c..f3ed15b 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -383,6 +383,12 @@
return 0;
/*
+ * Don't merge file system requests and sanitize requests
+ */
+ if ((req->cmd_flags & REQ_SANITIZE) != (next->cmd_flags & REQ_SANITIZE))
+ return 0;
+
+ /*
* not contiguous
*/
if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
diff --git a/block/elevator.c b/block/elevator.c
index b0b38ce..78a14b5 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -89,6 +89,12 @@
return 0;
/*
+ * Don't merge sanitize requests
+ */
+ if ((bio->bi_rw & REQ_SANITIZE) != (rq->bio->bi_rw & REQ_SANITIZE))
+ return 0;
+
+ /*
* different data direction or already started, don't merge
*/
if (bio_data_dir(bio) != rq_data_dir(rq))
@@ -657,7 +663,7 @@
if (rq->cmd_flags & REQ_SOFTBARRIER) {
/* barriers are scheduling boundary, update end_sector */
if (rq->cmd_type == REQ_TYPE_FS ||
- (rq->cmd_flags & REQ_DISCARD)) {
+ (rq->cmd_flags & (REQ_DISCARD | REQ_SANITIZE))) {
q->end_sector = rq_end_sector(rq);
q->boundary_rq = rq;
}
diff --git a/block/ioctl.c b/block/ioctl.c
index 1124cd2..dbc103b 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -131,6 +131,11 @@
return blkdev_issue_discard(bdev, start, len, GFP_KERNEL, flags);
}
+static int blk_ioctl_sanitize(struct block_device *bdev)
+{
+ return blkdev_issue_sanitize(bdev, GFP_KERNEL);
+}
+
static int put_ushort(unsigned long arg, unsigned short val)
{
return put_user(val, (unsigned short __user *)arg);
@@ -215,6 +220,10 @@
set_device_ro(bdev, n);
return 0;
+ case BLKSANITIZE:
+ ret = blk_ioctl_sanitize(bdev);
+ break;
+
case BLKDISCARD:
case BLKSECDISCARD: {
uint64_t range[2];
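
Annotation: from user space, the new BLKSANITIZE ioctl maps straight onto
blkdev_issue_sanitize(). A sketch; the BLKSANITIZE definition lives in the
block uapi headers, which are not part of this excerpt, and the device path
is only an example:

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>	/* BLKSANITIZE */

	int main(void)
	{
		int fd = open("/dev/block/mmcblk0", O_RDWR);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* Fails with EOPNOTSUPP unless the queue has the sanitize flag. */
		if (ioctl(fd, BLKSANITIZE) < 0)
			perror("BLKSANITIZE");
		close(fd);
		return 0;
	}
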
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index bd7cc05..ae5a62c 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -319,6 +319,25 @@
This line discipline provides support for the GSM MUX protocol and
presents the mux as a set of 61 individual tty devices.
+config N_SMUX
+ tristate "SMUX line discipline support"
+ depends on NET && SERIAL_MSM_HS
+ help
+ This line discipline provides support for the Serial MUX protocol
+ and provides a TTY and kernel API for multiple logical channels.
+
+config N_SMUX_LOOPBACK
+ tristate "SMUX line discipline loopback support"
+ depends on N_SMUX
+ help
+ Provides loopback and unit testing support for the Serial MUX Protocol.
+
+config SMUX_CTL
+ tristate "SMUX control driver"
+ depends on N_SMUX
+ help
+ Support for SMUX control driver on top of serial MUX.
+
config TRACE_ROUTER
tristate "Trace data router for MIPI P1149.7 cJTAG standard"
depends on TRACE_SINK
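
Annotation: a line discipline such as N_SMUX is attached to an already-open
serial port with the TIOCSETD ioctl; channel traffic then goes through the
SMUX kernel API. A sketch, assuming the ldisc number is exported to user
space (the real N_SMUX value is defined outside this excerpt and the value
below is hypothetical):

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>

	int main(void)
	{
		int ldisc = 24;	/* hypothetical N_SMUX ldisc number */
		int fd = open("/dev/ttyHS0", O_RDWR | O_NOCTTY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (ioctl(fd, TIOCSETD, &ldisc) < 0)
			perror("TIOCSETD");
		close(fd);
		return 0;
	}
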
diff --git a/drivers/tty/Makefile b/drivers/tty/Makefile
index ea89b0b..3078e8d 100644
--- a/drivers/tty/Makefile
+++ b/drivers/tty/Makefile
@@ -6,6 +6,9 @@
obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o
obj-$(CONFIG_N_HDLC) += n_hdlc.o
obj-$(CONFIG_N_GSM) += n_gsm.o
+obj-$(CONFIG_N_SMUX) += n_smux.o
+obj-$(CONFIG_N_SMUX_LOOPBACK) += smux_test.o smux_loopback.o
+obj-$(CONFIG_SMUX_CTL) += smux_ctl.o
obj-$(CONFIG_TRACE_ROUTER) += n_tracerouter.o
obj-$(CONFIG_TRACE_SINK) += n_tracesink.o
obj-$(CONFIG_R3964) += n_r3964.o
diff --git a/drivers/tty/n_smux.c b/drivers/tty/n_smux.c
new file mode 100644
index 0000000..7ba54fe
--- /dev/null
+++ b/drivers/tty/n_smux.c
@@ -0,0 +1,2938 @@
+/* drivers/tty/n_smux.c
+ *
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/tty_driver.h>
+#include <linux/smux.h>
+#include <linux/list.h>
+#include <linux/kfifo.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <mach/msm_serial_hs.h>
+#include "smux_private.h"
+#include "smux_loopback.h"
+
+#define SMUX_NOTIFY_FIFO_SIZE 128
+#define SMUX_TX_QUEUE_SIZE 256
+#define SMUX_GET_RX_BUFF_MAX_RETRY_CNT 2
+#define SMUX_WM_LOW 2
+#define SMUX_WM_HIGH 4
+#define SMUX_PKT_LOG_SIZE 80
+
+/* Maximum size we can accept in a single RX buffer */
+#define TTY_RECEIVE_ROOM 65536
+#define TTY_BUFFER_FULL_WAIT_MS 50
+
+/* maximum sleep time between wakeup attempts */
+#define SMUX_WAKEUP_DELAY_MAX (1 << 20)
+
+/* minimum delay for scheduling delayed work */
+#define SMUX_WAKEUP_DELAY_MIN (1 << 15)
+
+/* inactivity timeout for no rx/tx activity */
+#define SMUX_INACTIVITY_TIMEOUT_MS 1000
+
+enum {
+ MSM_SMUX_DEBUG = 1U << 0,
+ MSM_SMUX_INFO = 1U << 1,
+ MSM_SMUX_POWER_INFO = 1U << 2,
+ MSM_SMUX_PKT = 1U << 3,
+};
+
+static int smux_debug_mask;
+module_param_named(debug_mask, smux_debug_mask,
+ int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+/* Byte-level loopback used for testing */
+int smux_byte_loopback;
+module_param_named(byte_loopback, smux_byte_loopback,
+		   int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+/* Simulated wakeup delay used for testing */
+int smux_simulate_wakeup_delay = 1;
+module_param_named(simulate_wakeup_delay, smux_simulate_wakeup_delay,
+ int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#define SMUX_DBG(x...) do { \
+ if (smux_debug_mask & MSM_SMUX_DEBUG) \
+ pr_info(x); \
+} while (0)
+
+#define SMUX_LOG_PKT_RX(pkt) do { \
+ if (smux_debug_mask & MSM_SMUX_PKT) \
+ smux_log_pkt(pkt, 1); \
+} while (0)
+
+#define SMUX_LOG_PKT_TX(pkt) do { \
+ if (smux_debug_mask & MSM_SMUX_PKT) \
+ smux_log_pkt(pkt, 0); \
+} while (0)
+
+/**
+ * Return true if channel is fully opened (both
+ * local and remote sides are in the OPENED state).
+ */
+#define IS_FULLY_OPENED(ch) \
+ (ch && (ch)->local_state == SMUX_LCH_LOCAL_OPENED \
+ && (ch)->remote_state == SMUX_LCH_REMOTE_OPENED)
+
+static struct platform_device smux_devs[] = {
+ {.name = "SMUX_CTL", .id = -1},
+ {.name = "SMUX_RMNET", .id = -1},
+ {.name = "SMUX_DUN_DATA_HSUART", .id = 0},
+ {.name = "SMUX_RMNET_DATA_HSUART", .id = 1},
+ {.name = "SMUX_RMNET_CTL_HSUART", .id = 0},
+ {.name = "SMUX_DIAG", .id = -1},
+};
+
+enum {
+ SMUX_CMD_STATUS_RTC = 1 << 0,
+ SMUX_CMD_STATUS_RTR = 1 << 1,
+ SMUX_CMD_STATUS_RI = 1 << 2,
+ SMUX_CMD_STATUS_DCD = 1 << 3,
+ SMUX_CMD_STATUS_FLOW_CNTL = 1 << 4,
+};
+
+/* Channel mode */
+enum {
+ SMUX_LCH_MODE_NORMAL,
+ SMUX_LCH_MODE_LOCAL_LOOPBACK,
+ SMUX_LCH_MODE_REMOTE_LOOPBACK,
+};
+
+enum {
+ SMUX_RX_IDLE,
+ SMUX_RX_MAGIC,
+ SMUX_RX_HDR,
+ SMUX_RX_PAYLOAD,
+ SMUX_RX_FAILURE,
+};
+
+/**
+ * Power states.
+ *
+ * The _FLUSH states are internal transitional states and are not part of the
+ * official state machine.
+ */
+enum {
+ SMUX_PWR_OFF,
+ SMUX_PWR_TURNING_ON,
+ SMUX_PWR_ON,
+ SMUX_PWR_TURNING_OFF_FLUSH,
+ SMUX_PWR_TURNING_OFF,
+ SMUX_PWR_OFF_FLUSH,
+};
+
+/**
+ * Logical Channel Structure. One instance per channel.
+ *
+ * Locking Hierarchy
+ * Each lock has a postfix that describes the locking level. If multiple locks
+ * are required, only increasing lock hierarchy numbers may be locked which
+ * ensures avoiding a deadlock.
+ *
+ * Locking Example
+ * If state_lock_lhb1 is currently held and the TX list needs to be
+ * manipulated, then tx_lock_lhb2 may be locked since its locking hierarchy
+ * is greater. However, if tx_lock_lhb2 is held, then state_lock_lhb1 may
+ * not be acquired since it would result in a deadlock.
+ *
+ * Note that the Line Discipline locks (*_lha) should always be acquired
+ * before the logical channel locks.
+ */
+struct smux_lch_t {
+ /* channel state */
+ spinlock_t state_lock_lhb1;
+ uint8_t lcid;
+ unsigned local_state;
+ unsigned local_mode;
+ uint8_t local_tiocm;
+
+ unsigned remote_state;
+ unsigned remote_mode;
+ uint8_t remote_tiocm;
+
+ int tx_flow_control;
+
+ /* client callbacks and private data */
+ void *priv;
+ void (*notify)(void *priv, int event_type, const void *metadata);
+ int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
+ int size);
+
+ /* TX Info */
+ spinlock_t tx_lock_lhb2;
+ struct list_head tx_queue;
+ struct list_head tx_ready_list;
+ unsigned tx_pending_data_cnt;
+ unsigned notify_lwm;
+};
+
+union notifier_metadata {
+ struct smux_meta_disconnected disconnected;
+ struct smux_meta_read read;
+ struct smux_meta_write write;
+ struct smux_meta_tiocm tiocm;
+};
+
+struct smux_notify_handle {
+ void (*notify)(void *priv, int event_type, const void *metadata);
+ void *priv;
+ int event_type;
+ union notifier_metadata *metadata;
+};
+
+/**
+ * Line discipline and module structure.
+ *
+ * Only one instance exists, since multiple instances of the line
+ * discipline are not allowed.
+ */
+struct smux_ldisc_t {
+ spinlock_t lock_lha0;
+
+ int is_initialized;
+ int in_reset;
+ int ld_open_count;
+ struct tty_struct *tty;
+
+ /* RX State Machine */
+ spinlock_t rx_lock_lha1;
+ unsigned char recv_buf[SMUX_MAX_PKT_SIZE];
+ unsigned int recv_len;
+ unsigned int pkt_remain;
+ unsigned rx_state;
+ unsigned rx_activity_flag;
+
+ /* TX / Power */
+ spinlock_t tx_lock_lha2;
+ struct list_head lch_tx_ready_list;
+ unsigned power_state;
+ unsigned pwr_wakeup_delay_us;
+ unsigned tx_activity_flag;
+ unsigned powerdown_enabled;
+};
+
+
+/* data structures */
+static struct smux_lch_t smux_lch[SMUX_NUM_LOGICAL_CHANNELS];
+static struct smux_ldisc_t smux;
+static const char *tty_error_type[] = {
+ [TTY_NORMAL] = "normal",
+ [TTY_OVERRUN] = "overrun",
+ [TTY_BREAK] = "break",
+ [TTY_PARITY] = "parity",
+ [TTY_FRAME] = "framing",
+};
+
+static const char *smux_cmds[] = {
+ [SMUX_CMD_DATA] = "DATA",
+ [SMUX_CMD_OPEN_LCH] = "OPEN",
+ [SMUX_CMD_CLOSE_LCH] = "CLOSE",
+ [SMUX_CMD_STATUS] = "STATUS",
+ [SMUX_CMD_PWR_CTL] = "PWR",
+ [SMUX_CMD_BYTE] = "Raw Byte",
+};
+
+static void smux_notify_local_fn(struct work_struct *work);
+static DECLARE_WORK(smux_notify_local, smux_notify_local_fn);
+
+static struct workqueue_struct *smux_notify_wq;
+static size_t handle_size;
+static struct kfifo smux_notify_fifo;
+static int queued_fifo_notifications;
+static DEFINE_SPINLOCK(notify_lock_lhc1);
+
+static struct workqueue_struct *smux_tx_wq;
+static void smux_tx_worker(struct work_struct *work);
+static DECLARE_WORK(smux_tx_work, smux_tx_worker);
+
+static void smux_wakeup_worker(struct work_struct *work);
+static DECLARE_WORK(smux_wakeup_work, smux_wakeup_worker);
+static DECLARE_DELAYED_WORK(smux_wakeup_delayed_work, smux_wakeup_worker);
+
+static void smux_inactivity_worker(struct work_struct *work);
+static DECLARE_WORK(smux_inactivity_work, smux_inactivity_worker);
+static DECLARE_DELAYED_WORK(smux_delayed_inactivity_work,
+ smux_inactivity_worker);
+
+static long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch);
+static void list_channel(struct smux_lch_t *ch);
+static int smux_send_status_cmd(struct smux_lch_t *ch);
+static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt);
+
+/**
+ * Convert TTY Error Flags to string for logging purposes.
+ *
+ * @flag TTY_* flag
+ * @returns String description or NULL if unknown
+ */
+static const char *tty_flag_to_str(unsigned flag)
+{
+ if (flag < ARRAY_SIZE(tty_error_type))
+ return tty_error_type[flag];
+ return NULL;
+}
+
+/**
+ * Convert SMUX Command to string for logging purposes.
+ *
+ * @cmd SMUX command
+ * @returns String description or NULL if unknown
+ */
+static const char *cmd_to_str(unsigned cmd)
+{
+ if (cmd < ARRAY_SIZE(smux_cmds))
+ return smux_cmds[cmd];
+ return NULL;
+}
+
+/**
+ * Set the reset state due to an unrecoverable failure.
+ */
+static void smux_enter_reset(void)
+{
+ pr_err("%s: unrecoverable failure, waiting for ssr\n", __func__);
+ smux.in_reset = 1;
+}
+
+static int lch_init(void)
+{
+ unsigned int id;
+ struct smux_lch_t *ch;
+ int i = 0;
+
+ handle_size = sizeof(struct smux_notify_handle *);
+
+ smux_notify_wq = create_singlethread_workqueue("smux_notify_wq");
+ smux_tx_wq = create_singlethread_workqueue("smux_tx_wq");
+
+	if (!smux_notify_wq || !smux_tx_wq) {
+ SMUX_DBG("%s: create_singlethread_workqueue ENOMEM\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ i |= kfifo_alloc(&smux_notify_fifo,
+ SMUX_NOTIFY_FIFO_SIZE * handle_size,
+ GFP_KERNEL);
+ i |= smux_loopback_init();
+
+ if (i) {
+ pr_err("%s: out of memory error\n", __func__);
+ return -ENOMEM;
+ }
+
+	for (id = 0; id < SMUX_NUM_LOGICAL_CHANNELS; id++) {
+ ch = &smux_lch[id];
+
+ spin_lock_init(&ch->state_lock_lhb1);
+ ch->lcid = id;
+ ch->local_state = SMUX_LCH_LOCAL_CLOSED;
+ ch->local_mode = SMUX_LCH_MODE_NORMAL;
+ ch->local_tiocm = 0x0;
+ ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
+ ch->remote_mode = SMUX_LCH_MODE_NORMAL;
+ ch->remote_tiocm = 0x0;
+ ch->tx_flow_control = 0;
+		ch->priv = NULL;
+		ch->notify = NULL;
+		ch->get_rx_buffer = NULL;
+
+ spin_lock_init(&ch->tx_lock_lhb2);
+ INIT_LIST_HEAD(&ch->tx_queue);
+ INIT_LIST_HEAD(&ch->tx_ready_list);
+ ch->tx_pending_data_cnt = 0;
+ ch->notify_lwm = 0;
+ }
+
+ return 0;
+}
+
+int smux_assert_lch_id(uint32_t lcid)
+{
+	if (lcid >= SMUX_NUM_LOGICAL_CHANNELS)
+		return -ENXIO;
+
+	return 0;
+}
+
+/**
+ * Log packet information for debug purposes.
+ *
+ * @pkt Packet to log
+ * @is_recv 1 = RX packet; 0 = TX Packet
+ *
+ * [DIR][LCID] [LOCAL_STATE][LOCAL_MODE]:[REMOTE_STATE][REMOTE_MODE] PKT Info
+ *
+ * PKT Info:
+ * [CMD] flags [flags] len [PAYLOAD_LEN]:[PAD_LEN] [Payload hex bytes]
+ *
+ * Direction: R = Receive, S = Send
+ * Local State: C = Closed; c = closing; o = opening; O = Opened
+ * Local Mode: L = Local loopback; R = Remote loopback; N = Normal
+ * Remote State: C = Closed; O = Opened
+ * Remote Mode: R = Remote loopback; N = Normal
+ */
+static void smux_log_pkt(struct smux_pkt_t *pkt, int is_recv)
+{
+ char logbuf[SMUX_PKT_LOG_SIZE];
+ char cmd_extra[16];
+ int i = 0;
+ int count;
+ int len;
+ char local_state;
+ char local_mode;
+ char remote_state;
+ char remote_mode;
+ struct smux_lch_t *ch;
+ unsigned char *data;
+
+ ch = &smux_lch[pkt->hdr.lcid];
+
+ switch (ch->local_state) {
+ case SMUX_LCH_LOCAL_CLOSED:
+ local_state = 'C';
+ break;
+ case SMUX_LCH_LOCAL_OPENING:
+ local_state = 'o';
+ break;
+ case SMUX_LCH_LOCAL_OPENED:
+ local_state = 'O';
+ break;
+ case SMUX_LCH_LOCAL_CLOSING:
+ local_state = 'c';
+ break;
+ default:
+ local_state = 'U';
+ break;
+ }
+
+ switch (ch->local_mode) {
+ case SMUX_LCH_MODE_LOCAL_LOOPBACK:
+ local_mode = 'L';
+ break;
+ case SMUX_LCH_MODE_REMOTE_LOOPBACK:
+ local_mode = 'R';
+ break;
+ case SMUX_LCH_MODE_NORMAL:
+ local_mode = 'N';
+ break;
+ default:
+ local_mode = 'U';
+ break;
+ }
+
+ switch (ch->remote_state) {
+ case SMUX_LCH_REMOTE_CLOSED:
+ remote_state = 'C';
+ break;
+ case SMUX_LCH_REMOTE_OPENED:
+ remote_state = 'O';
+ break;
+
+ default:
+ remote_state = 'U';
+ break;
+ }
+
+ switch (ch->remote_mode) {
+ case SMUX_LCH_MODE_REMOTE_LOOPBACK:
+ remote_mode = 'R';
+ break;
+ case SMUX_LCH_MODE_NORMAL:
+ remote_mode = 'N';
+ break;
+ default:
+ remote_mode = 'U';
+ break;
+ }
+
+ /* determine command type (ACK, etc) */
+ cmd_extra[0] = '\0';
+ switch (pkt->hdr.cmd) {
+ case SMUX_CMD_OPEN_LCH:
+ if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
+ snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
+ break;
+ case SMUX_CMD_CLOSE_LCH:
+ if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
+ snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
+ break;
+ }
+
+ i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
+ "smux: %c%d %c%c:%c%c %s%s flags %x len %d:%d ",
+ is_recv ? 'R' : 'S', pkt->hdr.lcid,
+ local_state, local_mode,
+ remote_state, remote_mode,
+ cmd_to_str(pkt->hdr.cmd), cmd_extra, pkt->hdr.flags,
+ pkt->hdr.payload_len, pkt->hdr.pad_len);
+
+ len = (pkt->hdr.payload_len > 16) ? 16 : pkt->hdr.payload_len;
+ data = (unsigned char *)pkt->payload;
+ for (count = 0; count < len; count++)
+ i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
+ "%02x ", (unsigned)data[count]);
+
+ pr_info("%s\n", logbuf);
+}
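+
+/*
+ * For illustration only (hypothetical values): an OPEN ACK received on
+ * channel 3 while the local side is opening and both sides are in normal
+ * mode would log roughly as (assuming cmd_to_str() returns "OPEN"):
+ *
+ * smux: R3 oN:CN OPEN ACK flags 1 len 0:0
+ */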
+
+static void smux_notify_local_fn(struct work_struct *work)
+{
+ struct smux_notify_handle *notify_handle = NULL;
+ union notifier_metadata *metadata = NULL;
+ unsigned long flags;
+ int i;
+
+ for (;;) {
+ /* retrieve notification */
+ spin_lock_irqsave(&notify_lock_lhc1, flags);
+ if (kfifo_len(&smux_notify_fifo) >= handle_size) {
+ i = kfifo_out(&smux_notify_fifo,
+ &notify_handle,
+ handle_size);
+ if (i != handle_size) {
+ pr_err("%s: unable to retrieve handle %d expected %d\n",
+ __func__, i, handle_size);
+ spin_unlock_irqrestore(&notify_lock_lhc1, flags);
+ break;
+ }
+ } else {
+ spin_unlock_irqrestore(&notify_lock_lhc1, flags);
+ break;
+ }
+ --queued_fifo_notifications;
+ spin_unlock_irqrestore(&notify_lock_lhc1, flags);
+
+ /* notify client */
+ metadata = notify_handle->metadata;
+ notify_handle->notify(notify_handle->priv,
+ notify_handle->event_type,
+ metadata);
+
+ kfree(metadata);
+ kfree(notify_handle);
+ }
+}
+
+/**
+ * Initialize existing packet.
+ */
+void smux_init_pkt(struct smux_pkt_t *pkt)
+{
+ memset(pkt, 0x0, sizeof(*pkt));
+ pkt->hdr.magic = SMUX_MAGIC;
+ INIT_LIST_HEAD(&pkt->list);
+}
+
+/**
+ * Allocate and initialize packet.
+ *
+ * If a payload is needed, either set it directly and ensure that it's freed,
+ * or use smux_alloc_pkt_payload() to allocate the payload and it will be
+ * freed automatically when smux_free_pkt() is called.
+ */
+struct smux_pkt_t *smux_alloc_pkt(void)
+{
+ struct smux_pkt_t *pkt;
+
+ /* Consider a free list implementation instead of kmalloc */
+ pkt = kmalloc(sizeof(struct smux_pkt_t), GFP_ATOMIC);
+ if (!pkt) {
+ pr_err("%s: out of memory\n", __func__);
+ return NULL;
+ }
+ smux_init_pkt(pkt);
+ pkt->allocated = 1;
+
+ return pkt;
+}
+
+/**
+ * Free packet.
+ *
+ * @pkt Packet to free (may be NULL)
+ *
+ * If payload was allocated using smux_alloc_pkt_payload(), then it is freed as
+ * well. Otherwise, the caller is responsible for freeing the payload.
+ */
+void smux_free_pkt(struct smux_pkt_t *pkt)
+{
+ if (pkt) {
+ if (pkt->free_payload)
+ kfree(pkt->payload);
+ if (pkt->allocated)
+ kfree(pkt);
+ }
+}
+
+/**
+ * Allocate packet payload.
+ *
+ * @pkt Packet to add payload to
+ *
+ * @returns 0 on success, <0 upon error
+ *
+ * A flag is set to signal smux_free_pkt() to free the payload.
+ */
+int smux_alloc_pkt_payload(struct smux_pkt_t *pkt)
+{
+ if (!pkt)
+ return -EINVAL;
+
+ pkt->payload = kmalloc(pkt->hdr.payload_len, GFP_ATOMIC);
+ pkt->free_payload = 1;
+ if (!pkt->payload) {
+ pr_err("%s: unable to malloc %d bytes for payload\n",
+ __func__, pkt->hdr.payload_len);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
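+
+/*
+ * Typical allocation pattern (illustrative sketch, not a call sequence taken
+ * from this driver): size the payload via the header before allocating it,
+ * and let smux_free_pkt() release both packet and payload.
+ *
+ * struct smux_pkt_t *pkt = smux_alloc_pkt();
+ * if (pkt) {
+ * pkt->hdr.payload_len = 16;
+ * if (!smux_alloc_pkt_payload(pkt))
+ * memset(pkt->payload, 0, 16);
+ * smux_free_pkt(pkt);
+ * }
+ */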
+
+static int schedule_notify(uint8_t lcid, int event,
+ const union notifier_metadata *metadata)
+{
+ struct smux_notify_handle *notify_handle = 0;
+ union notifier_metadata *meta_copy = 0;
+ struct smux_lch_t *ch;
+ int i;
+ unsigned long flags;
+ int ret = 0;
+
+ ch = &smux_lch[lcid];
+ notify_handle = kzalloc(sizeof(struct smux_notify_handle),
+ GFP_ATOMIC);
+ if (!notify_handle) {
+ pr_err("%s: out of memory\n", __func__);
+ ret = -ENOMEM;
+ goto free_out;
+ }
+
+ notify_handle->notify = ch->notify;
+ notify_handle->priv = ch->priv;
+ notify_handle->event_type = event;
+ if (metadata) {
+ meta_copy = kzalloc(sizeof(union notifier_metadata),
+ GFP_ATOMIC);
+ if (!meta_copy) {
+ pr_err("%s: out of memory\n", __func__);
+ ret = -ENOMEM;
+ goto free_out;
+ }
+ *meta_copy = *metadata;
+ notify_handle->metadata = meta_copy;
+ } else {
+ notify_handle->metadata = NULL;
+ }
+
+ spin_lock_irqsave(&notify_lock_lhc1, flags);
+ i = kfifo_avail(&smux_notify_fifo);
+ if (i < handle_size) {
+ pr_err("%s: fifo full error %d expected %d\n",
+ __func__, i, handle_size);
+ ret = -ENOMEM;
+ goto unlock_out;
+ }
+
+ i = kfifo_in(&smux_notify_fifo, &notify_handle, handle_size);
+ if (i < 0 || i != handle_size) {
+ pr_err("%s: fifo not available error %d (expected %d)\n",
+ __func__, i, handle_size);
+ ret = -ENOSPC;
+ goto unlock_out;
+ }
+ ++queued_fifo_notifications;
+
+unlock_out:
+ spin_unlock_irqrestore(&notify_lock_lhc1, flags);
+
+free_out:
+ queue_work(smux_notify_wq, &smux_notify_local);
+ if (ret < 0 && notify_handle) {
+ kfree(notify_handle->metadata);
+ kfree(notify_handle);
+ }
+ return ret;
+}
+
+/**
+ * Returns the serialized size of a packet.
+ *
+ * @pkt Packet to serialize
+ *
+ * @returns Serialized length of packet
+ */
+static unsigned int smux_serialize_size(struct smux_pkt_t *pkt)
+{
+ unsigned int size;
+
+ size = sizeof(struct smux_hdr_t);
+ size += pkt->hdr.payload_len;
+ size += pkt->hdr.pad_len;
+
+ return size;
+}
+
+/**
+ * Serialize packet @pkt into output buffer @data.
+ *
+ * @pkt Packet to serialize
+ * @out Destination buffer pointer
+ * @out_len Size of serialized packet
+ *
+ * @returns 0 for success
+ */
+int smux_serialize(struct smux_pkt_t *pkt, char *out,
+ unsigned int *out_len)
+{
+ char *data_start = out;
+
+ if (smux_serialize_size(pkt) > SMUX_MAX_PKT_SIZE) {
+ pr_err("%s: packet size %d too big\n",
+ __func__, smux_serialize_size(pkt));
+ return -E2BIG;
+ }
+
+ memcpy(out, &pkt->hdr, sizeof(struct smux_hdr_t));
+ out += sizeof(struct smux_hdr_t);
+ if (pkt->payload) {
+ memcpy(out, pkt->payload, pkt->hdr.payload_len);
+ out += pkt->hdr.payload_len;
+ }
+ if (pkt->hdr.pad_len) {
+ memset(out, 0x0, pkt->hdr.pad_len);
+ out += pkt->hdr.pad_len;
+ }
+ *out_len = out - data_start;
+ return 0;
+}
+
+/**
+ * Serialize header and provide pointer to the data.
+ *
+ * @pkt Packet
+ * @out[out] Pointer to the serialized header data
+ * @out_len[out] Pointer to the serialized header length
+ */
+static void smux_serialize_hdr(struct smux_pkt_t *pkt, char **out,
+ unsigned int *out_len)
+{
+ *out = (char *)&pkt->hdr;
+ *out_len = sizeof(struct smux_hdr_t);
+}
+
+/**
+ * Serialize payload and provide pointer to the data.
+ *
+ * @pkt Packet
+ * @out[out] Pointer to the serialized payload data
+ * @out_len[out] Pointer to the serialized payload length
+ */
+static void smux_serialize_payload(struct smux_pkt_t *pkt, char **out,
+ unsigned int *out_len)
+{
+ *out = pkt->payload;
+ *out_len = pkt->hdr.payload_len;
+}
+
+/**
+ * Serialize padding and provide pointer to the data.
+ *
+ * @pkt Packet
+ * @out[out] Pointer to the serialized padding (always NULL)
+ * @out_len[out] Pointer to the serialized payload length
+ *
+ * Since the padding field value is undefined, only the size of the padding
+ * (@out_len) is set and the buffer pointer (@out) will always be NULL.
+ */
+static void smux_serialize_padding(struct smux_pkt_t *pkt, char **out,
+ unsigned int *out_len)
+{
+ *out = NULL;
+ *out_len = pkt->hdr.pad_len;
+}
+
+/**
+ * Write data to TTY framework and handle breaking the writes up if needed.
+ *
+ * @data Data to write
+ * @len Length of data
+ *
+ * @returns 0 for success, < 0 for failure
+ */
+static int write_to_tty(char *data, unsigned len)
+{
+ int data_written;
+
+ if (!data)
+ return 0;
+
+ while (len > 0) {
+ data_written = smux.tty->ops->write(smux.tty, data, len);
+ if (data_written >= 0) {
+ len -= data_written;
+ data += data_written;
+ } else {
+ pr_err("%s: TTY write returned error %d\n",
+ __func__, data_written);
+ return data_written;
+ }
+
+ if (len)
+ tty_wait_until_sent(smux.tty,
+ msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
+
+ /* FUTURE - add SSR logic */
+ }
+ return 0;
+}
+
+/**
+ * Write packet to TTY.
+ *
+ * @pkt packet to write
+ *
+ * @returns 0 on success
+ */
+static int smux_tx_tty(struct smux_pkt_t *pkt)
+{
+ char *data;
+ unsigned int len;
+ int ret;
+
+ if (!smux.tty) {
+ pr_err("%s: TTY not initialized", __func__);
+ return -ENOTTY;
+ }
+
+ if (pkt->hdr.cmd == SMUX_CMD_BYTE) {
+ SMUX_DBG("%s: tty send single byte\n", __func__);
+ ret = write_to_tty(&pkt->hdr.flags, 1);
+ return ret;
+ }
+
+ smux_serialize_hdr(pkt, &data, &len);
+ ret = write_to_tty(data, len);
+ if (ret) {
+ pr_err("%s: failed %d to write header %d\n",
+ __func__, ret, len);
+ return ret;
+ }
+
+ smux_serialize_payload(pkt, &data, &len);
+ ret = write_to_tty(data, len);
+ if (ret) {
+ pr_err("%s: failed %d to write payload %d\n",
+ __func__, ret, len);
+ return ret;
+ }
+
+ smux_serialize_padding(pkt, &data, &len);
+ while (len > 0) {
+ char zero = 0x0;
+ ret = write_to_tty(&zero, 1);
+ if (ret) {
+ pr_err("%s: failed %d to write padding %d\n",
+ __func__, ret, len);
+ return ret;
+ }
+ --len;
+ }
+ return 0;
+}
+
+/**
+ * Send a single character.
+ *
+ * @ch Character to send
+ */
+static void smux_send_byte(char ch)
+{
+ struct smux_pkt_t pkt;
+
+ smux_init_pkt(&pkt);
+
+ pkt.hdr.cmd = SMUX_CMD_BYTE;
+ pkt.hdr.flags = ch;
+ pkt.hdr.lcid = 0;
+ SMUX_LOG_PKT_TX(&pkt);
+ if (!smux_byte_loopback)
+ smux_tx_tty(&pkt);
+ else
+ smux_tx_loopback(&pkt);
+}
+
+/**
+ * Receive a single-character packet (used for internal testing).
+ *
+ * @ch Character to receive
+ * @lcid Logical channel ID for packet
+ *
+ * @returns 0 for success
+ *
+ * Called with rx_lock_lha1 locked.
+ */
+static int smux_receive_byte(char ch, int lcid)
+{
+ struct smux_pkt_t pkt;
+
+ smux_init_pkt(&pkt);
+ pkt.hdr.lcid = lcid;
+ pkt.hdr.cmd = SMUX_CMD_BYTE;
+ pkt.hdr.flags = ch;
+
+ return smux_dispatch_rx_pkt(&pkt);
+}
+
+/**
+ * Queue packet for transmit.
+ *
+ * @pkt_ptr Packet to queue
+ * @ch Channel to queue packet on
+ * @queue Queue channel on ready list
+ */
+static void smux_tx_queue(struct smux_pkt_t *pkt_ptr, struct smux_lch_t *ch,
+ int queue)
+{
+ unsigned long flags;
+
+ SMUX_DBG("%s: queuing pkt %p\n", __func__, pkt_ptr);
+
+ spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
+ list_add_tail(&pkt_ptr->list, &ch->tx_queue);
+ spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
+
+ if (queue)
+ list_channel(ch);
+}
+
+/**
+ * Handle receive OPEN ACK command.
+ *
+ * @pkt Received packet
+ *
+ * @returns 0 for success
+ *
+ * Called with rx_lock_lha1 already locked.
+ */
+static int smux_handle_rx_open_ack(struct smux_pkt_t *pkt)
+{
+ uint8_t lcid;
+ int ret;
+ struct smux_lch_t *ch;
+ int enable_powerdown = 0;
+
+ lcid = pkt->hdr.lcid;
+ ch = &smux_lch[lcid];
+
+ spin_lock(&ch->state_lock_lhb1);
+ if (ch->local_state == SMUX_LCH_LOCAL_OPENING) {
+ SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
+ ch->local_state,
+ SMUX_LCH_LOCAL_OPENED);
+
+ if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
+ enable_powerdown = 1;
+
+ ch->local_state = SMUX_LCH_LOCAL_OPENED;
+ if (ch->remote_state == SMUX_LCH_REMOTE_OPENED)
+ schedule_notify(lcid, SMUX_CONNECTED, NULL);
+ ret = 0;
+ } else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
+ SMUX_DBG("Remote loopback OPEN ACK received\n");
+ ret = 0;
+ } else {
+ pr_err("%s: lcid %d state 0x%x open ack invalid\n",
+ __func__, lcid, ch->local_state);
+ ret = -EINVAL;
+ }
+ spin_unlock(&ch->state_lock_lhb1);
+
+ if (enable_powerdown) {
+ spin_lock(&smux.tx_lock_lha2);
+ if (!smux.powerdown_enabled) {
+ smux.powerdown_enabled = 1;
+ SMUX_DBG("%s: enabling power-collapse support\n",
+ __func__);
+ }
+ spin_unlock(&smux.tx_lock_lha2);
+ }
+
+ return ret;
+}
+
+static int smux_handle_close_ack(struct smux_pkt_t *pkt)
+{
+ uint8_t lcid;
+ int ret;
+ struct smux_lch_t *ch;
+ union notifier_metadata meta_disconnected;
+ unsigned long flags;
+
+ lcid = pkt->hdr.lcid;
+ ch = &smux_lch[lcid];
+ meta_disconnected.disconnected.is_ssr = 0;
+
+ spin_lock_irqsave(&ch->state_lock_lhb1, flags);
+
+ if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
+ SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
+ SMUX_LCH_LOCAL_CLOSING,
+ SMUX_LCH_LOCAL_CLOSED);
+ ch->local_state = SMUX_LCH_LOCAL_CLOSED;
+ if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED)
+ schedule_notify(lcid, SMUX_DISCONNECTED,
+ &meta_disconnected);
+ ret = 0;
+ } else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
+ SMUX_DBG("Remote loopback CLOSE ACK received\n");
+ ret = 0;
+ } else {
+ pr_err("%s: lcid %d state 0x%x close ack invalid\n",
+ __func__, lcid, ch->local_state);
+ ret = -EINVAL;
+ }
+ spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
+ return ret;
+}
+
+/**
+ * Handle receive OPEN command.
+ *
+ * @pkt Received packet
+ *
+ * @returns 0 for success
+ *
+ * Called with rx_lock_lha1 already locked.
+ */
+static int smux_handle_rx_open_cmd(struct smux_pkt_t *pkt)
+{
+ uint8_t lcid;
+ int ret;
+ struct smux_lch_t *ch;
+ struct smux_pkt_t *ack_pkt;
+ int tx_ready = 0;
+ int enable_powerdown = 0;
+
+ if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
+ return smux_handle_rx_open_ack(pkt);
+
+ lcid = pkt->hdr.lcid;
+ ch = &smux_lch[lcid];
+
+ spin_lock(&ch->state_lock_lhb1);
+
+ if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED) {
+ SMUX_DBG("lcid %d remote state 0x%x -> 0x%x\n", lcid,
+ SMUX_LCH_REMOTE_CLOSED,
+ SMUX_LCH_REMOTE_OPENED);
+
+ ch->remote_state = SMUX_LCH_REMOTE_OPENED;
+ if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
+ enable_powerdown = 1;
+
+ /* Send Open ACK */
+ ack_pkt = smux_alloc_pkt();
+ if (!ack_pkt) {
+ /* exit out to allow retrying this later */
+ ret = -ENOMEM;
+ goto out;
+ }
+ ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
+ ack_pkt->hdr.flags = SMUX_CMD_OPEN_ACK
+ | SMUX_CMD_OPEN_POWER_COLLAPSE;
+ ack_pkt->hdr.lcid = lcid;
+ ack_pkt->hdr.payload_len = 0;
+ ack_pkt->hdr.pad_len = 0;
+ if (pkt->hdr.flags & SMUX_CMD_OPEN_REMOTE_LOOPBACK) {
+ ch->remote_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
+ ack_pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
+ }
+ smux_tx_queue(ack_pkt, ch, 0);
+ tx_ready = 1;
+
+ if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
+ /*
+ * Send an Open command to the remote side to
+ * simulate our local client doing it.
+ */
+ ack_pkt = smux_alloc_pkt();
+ if (ack_pkt) {
+ ack_pkt->hdr.lcid = lcid;
+ ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
+ ack_pkt->hdr.flags =
+ SMUX_CMD_OPEN_POWER_COLLAPSE;
+ ack_pkt->hdr.payload_len = 0;
+ ack_pkt->hdr.pad_len = 0;
+ smux_tx_queue(ack_pkt, ch, 0);
+ tx_ready = 1;
+ } else {
+ pr_err("%s: Remote loopack allocation failure\n",
+ __func__);
+ }
+ } else if (ch->local_state == SMUX_LCH_LOCAL_OPENED) {
+ schedule_notify(lcid, SMUX_CONNECTED, NULL);
+ }
+ ret = 0;
+ } else {
+ pr_err("%s: lcid %d remote state 0x%x open invalid\n",
+ __func__, lcid, ch->remote_state);
+ ret = -EINVAL;
+ }
+
+out:
+ spin_unlock(&ch->state_lock_lhb1);
+
+ if (enable_powerdown) {
+ spin_lock(&smux.tx_lock_lha2);
+ smux.powerdown_enabled = 1;
+ SMUX_DBG("%s: enabling power-collapse support\n", __func__);
+ spin_unlock(&smux.tx_lock_lha2);
+ }
+
+ if (tx_ready)
+ list_channel(ch);
+
+ return ret;
+}
+
+/**
+ * Handle receive CLOSE command.
+ *
+ * @pkt Received packet
+ *
+ * @returns 0 for success
+ *
+ * Called with rx_lock_lha1 already locked.
+ */
+static int smux_handle_rx_close_cmd(struct smux_pkt_t *pkt)
+{
+ uint8_t lcid;
+ int ret;
+ struct smux_lch_t *ch;
+ struct smux_pkt_t *ack_pkt;
+ union notifier_metadata meta_disconnected;
+ int tx_ready = 0;
+
+ if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
+ return smux_handle_close_ack(pkt);
+
+ lcid = pkt->hdr.lcid;
+ ch = &smux_lch[lcid];
+ meta_disconnected.disconnected.is_ssr = 0;
+
+ spin_lock(&ch->state_lock_lhb1);
+ if (ch->remote_state == SMUX_LCH_REMOTE_OPENED) {
+ SMUX_DBG("lcid %d remote state 0x%x -> 0x%x\n", lcid,
+ SMUX_LCH_REMOTE_OPENED,
+ SMUX_LCH_REMOTE_CLOSED);
+
+ ack_pkt = smux_alloc_pkt();
+ if (!ack_pkt) {
+ /* exit out to allow retrying this later */
+ ret = -ENOMEM;
+ goto out;
+ }
+ ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
+ ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
+ ack_pkt->hdr.flags = SMUX_CMD_CLOSE_ACK;
+ ack_pkt->hdr.lcid = lcid;
+ ack_pkt->hdr.payload_len = 0;
+ ack_pkt->hdr.pad_len = 0;
+ smux_tx_queue(ack_pkt, ch, 0);
+ tx_ready = 1;
+
+ if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
+ /*
+ * Send a Close command to the remote side to simulate
+ * our local client doing it.
+ */
+ ack_pkt = smux_alloc_pkt();
+ if (ack_pkt) {
+ ack_pkt->hdr.lcid = lcid;
+ ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
+ ack_pkt->hdr.flags = 0;
+ ack_pkt->hdr.payload_len = 0;
+ ack_pkt->hdr.pad_len = 0;
+ smux_tx_queue(ack_pkt, ch, 0);
+ tx_ready = 1;
+ } else {
+ pr_err("%s: Remote loopack allocation failure\n",
+ __func__);
+ }
+ }
+
+ if (ch->local_state == SMUX_LCH_LOCAL_CLOSED)
+ schedule_notify(lcid, SMUX_DISCONNECTED,
+ &meta_disconnected);
+ ret = 0;
+ } else {
+ pr_err("%s: lcid %d remote state 0x%x close invalid\n",
+ __func__, lcid, ch->remote_state);
+ ret = -EINVAL;
+ }
+out:
+ spin_unlock(&ch->state_lock_lhb1);
+ if (tx_ready)
+ list_channel(ch);
+
+ return ret;
+}
+
+/**
+ * Handle receive DATA command.
+ *
+ * @pkt Received packet
+ *
+ * @returns 0 for success
+ *
+ * Called with rx_lock_lha1 already locked.
+ */
+static int smux_handle_rx_data_cmd(struct smux_pkt_t *pkt)
+{
+ uint8_t lcid;
+ int ret;
+ int i;
+ int tmp;
+ int rx_len;
+ struct smux_lch_t *ch;
+ union notifier_metadata metadata;
+ int remote_loopback;
+ int tx_ready = 0;
+ struct smux_pkt_t *ack_pkt;
+ unsigned long flags;
+
+ if (!pkt || smux_assert_lch_id(pkt->hdr.lcid))
+ return -ENXIO;
+
+ lcid = pkt->hdr.lcid;
+ ch = &smux_lch[lcid];
+ spin_lock_irqsave(&ch->state_lock_lhb1, flags);
+ remote_loopback = ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK;
+
+ if (ch->local_state != SMUX_LCH_LOCAL_OPENED
+ && !remote_loopback) {
+ pr_err("smux: ch %d error data on local state 0x%x",
+ lcid, ch->local_state);
+ ret = -EIO;
+ goto out;
+ }
+
+ if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
+ pr_err("smux: ch %d error data on remote state 0x%x",
+ lcid, ch->remote_state);
+ ret = -EIO;
+ goto out;
+ }
+
+ rx_len = pkt->hdr.payload_len;
+ if (rx_len == 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ for (i = 0; i < SMUX_GET_RX_BUFF_MAX_RETRY_CNT; ++i) {
+ metadata.read.pkt_priv = 0;
+ metadata.read.buffer = 0;
+
+ if (!remote_loopback) {
+ tmp = ch->get_rx_buffer(ch->priv,
+ (void **)&metadata.read.pkt_priv,
+ (void **)&metadata.read.buffer,
+ rx_len);
+ if (tmp == 0 && metadata.read.buffer) {
+ /* place data into RX buffer */
+ memcpy(metadata.read.buffer, pkt->payload,
+ rx_len);
+ metadata.read.len = rx_len;
+ schedule_notify(lcid, SMUX_READ_DONE,
+ &metadata);
+ ret = 0;
+ break;
+ } else if (tmp == -EAGAIN) {
+ ret = -ENOMEM;
+ } else if (tmp < 0) {
+ schedule_notify(lcid, SMUX_READ_FAIL, NULL);
+ ret = -ENOMEM;
+ break;
+ } else if (!metadata.read.buffer) {
+ pr_err("%s: get_rx_buffer() buffer is NULL\n",
+ __func__);
+ ret = -ENOMEM;
+ }
+ } else {
+ /* Echo the data back to the remote client. */
+ ack_pkt = smux_alloc_pkt();
+ if (ack_pkt) {
+ ack_pkt->hdr.lcid = lcid;
+ ack_pkt->hdr.cmd = SMUX_CMD_DATA;
+ ack_pkt->hdr.flags = 0;
+ ack_pkt->hdr.payload_len = pkt->hdr.payload_len;
+ ack_pkt->payload = pkt->payload;
+ ack_pkt->hdr.pad_len = pkt->hdr.pad_len;
+ smux_tx_queue(ack_pkt, ch, 0);
+ tx_ready = 1;
+ ret = 0;
+ } else {
+ pr_err("%s: Remote loopback allocation failure\n",
+ __func__);
+ ret = -ENOMEM;
+ }
+ break;
+ }
+ }
+
+out:
+ spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
+
+ if (tx_ready)
+ list_channel(ch);
+
+ return ret;
+}
+
+/**
+ * Handle receive byte command for testing purposes.
+ *
+ * @pkt Received packet
+ *
+ * @returns 0 for success
+ */
+static int smux_handle_rx_byte_cmd(struct smux_pkt_t *pkt)
+{
+ uint8_t lcid;
+ int ret;
+ struct smux_lch_t *ch;
+ union notifier_metadata metadata;
+ unsigned long flags;
+
+ if (!pkt || smux_assert_lch_id(pkt->hdr.lcid))
+ return -ENXIO;
+
+ lcid = pkt->hdr.lcid;
+ ch = &smux_lch[lcid];
+ spin_lock_irqsave(&ch->state_lock_lhb1, flags);
+
+ if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
+ pr_err("smux: ch %d error data on local state 0x%x",
+ lcid, ch->local_state);
+ ret = -EIO;
+ goto out;
+ }
+
+ if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
+ pr_err("smux: ch %d error data on remote state 0x%x",
+ lcid, ch->remote_state);
+ ret = -EIO;
+ goto out;
+ }
+
+ metadata.read.pkt_priv = (void *)(int)pkt->hdr.flags;
+ metadata.read.buffer = 0;
+ schedule_notify(lcid, SMUX_READ_DONE, &metadata);
+ ret = 0;
+
+out:
+ spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
+ return ret;
+}
+
+/**
+ * Handle receive status command.
+ *
+ * @pkt Received packet
+ *
+ * @returns 0 for success
+ *
+ * Called with rx_lock_lha1 already locked.
+ */
+static int smux_handle_rx_status_cmd(struct smux_pkt_t *pkt)
+{
+ uint8_t lcid;
+ int ret = 0;
+ struct smux_lch_t *ch;
+ union notifier_metadata meta;
+ unsigned long flags;
+ int tx_ready = 0;
+
+ lcid = pkt->hdr.lcid;
+ ch = &smux_lch[lcid];
+
+ spin_lock_irqsave(&ch->state_lock_lhb1, flags);
+ meta.tiocm.tiocm_old = ch->remote_tiocm;
+ meta.tiocm.tiocm_new = pkt->hdr.flags;
+
+ /* update logical channel flow control */
+ if ((meta.tiocm.tiocm_old & SMUX_CMD_STATUS_FLOW_CNTL) ^
+ (meta.tiocm.tiocm_new & SMUX_CMD_STATUS_FLOW_CNTL)) {
+ /* logical channel flow control changed */
+ if (pkt->hdr.flags & SMUX_CMD_STATUS_FLOW_CNTL) {
+ /* disabled TX */
+ SMUX_DBG("TX Flow control enabled\n");
+ ch->tx_flow_control = 1;
+ } else {
+ /* re-enable channel */
+ SMUX_DBG("TX Flow control disabled\n");
+ ch->tx_flow_control = 0;
+ tx_ready = 1;
+ }
+ }
+ meta.tiocm.tiocm_old = msm_smux_tiocm_get_atomic(ch);
+ ch->remote_tiocm = pkt->hdr.flags;
+ meta.tiocm.tiocm_new = msm_smux_tiocm_get_atomic(ch);
+
+ /* client notification for status change */
+ if (IS_FULLY_OPENED(ch)) {
+ if (meta.tiocm.tiocm_old != meta.tiocm.tiocm_new)
+ schedule_notify(lcid, SMUX_TIOCM_UPDATE, &meta);
+ ret = 0;
+ }
+ spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
+ if (tx_ready)
+ list_channel(ch);
+
+ return ret;
+}
+
+/**
+ * Handle receive power command.
+ *
+ * @pkt Received packet
+ *
+ * @returns 0 for success
+ *
+ * Called with rx_lock_lha1 already locked.
+ */
+static int smux_handle_rx_power_cmd(struct smux_pkt_t *pkt)
+{
+ int tx_ready = 0;
+ struct smux_pkt_t *ack_pkt;
+
+ spin_lock(&smux.tx_lock_lha2);
+ if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK) {
+ /* local sleep request ack */
+ if (smux.power_state == SMUX_PWR_TURNING_OFF) {
+ /* Power-down complete, turn off UART */
+ SMUX_DBG("%s: Power %d->%d\n", __func__,
+ smux.power_state, SMUX_PWR_OFF_FLUSH);
+ smux.power_state = SMUX_PWR_OFF_FLUSH;
+ queue_work(smux_tx_wq, &smux_inactivity_work);
+ } else {
+ pr_err("%s: sleep request ack invalid in state %d\n",
+ __func__, smux.power_state);
+ }
+ } else {
+ /* remote sleep request */
+ if (smux.power_state == SMUX_PWR_ON
+ || smux.power_state == SMUX_PWR_TURNING_OFF) {
+ ack_pkt = smux_alloc_pkt();
+ if (ack_pkt) {
+ SMUX_DBG("%s: Power %d->%d\n", __func__,
+ smux.power_state,
+ SMUX_PWR_TURNING_OFF_FLUSH);
+
+ /* send power-down request */
+ ack_pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
+ ack_pkt->hdr.flags = SMUX_CMD_PWR_CTL_ACK;
+ ack_pkt->hdr.lcid = pkt->hdr.lcid;
+ smux_tx_queue(ack_pkt,
+ &smux_lch[ack_pkt->hdr.lcid], 0);
+ tx_ready = 1;
+ smux.power_state = SMUX_PWR_TURNING_OFF_FLUSH;
+ queue_delayed_work(smux_tx_wq,
+ &smux_delayed_inactivity_work,
+ msecs_to_jiffies(
+ SMUX_INACTIVITY_TIMEOUT_MS));
+ }
+ } else {
+ pr_err("%s: sleep request invalid in state %d\n",
+ __func__, smux.power_state);
+ }
+ }
+ spin_unlock(&smux.tx_lock_lha2);
+
+ if (tx_ready)
+ list_channel(&smux_lch[ack_pkt->hdr.lcid]);
+
+ return 0;
+}
+
+/**
+ * Handle dispatching a completed packet for receive processing.
+ *
+ * @pkt Packet to process
+ *
+ * @returns 0 for success
+ *
+ * Called with rx_lock_lha1 already locked.
+ */
+static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt)
+{
+ int ret;
+
+ SMUX_LOG_PKT_RX(pkt);
+
+ switch (pkt->hdr.cmd) {
+ case SMUX_CMD_OPEN_LCH:
+ ret = smux_handle_rx_open_cmd(pkt);
+ break;
+
+ case SMUX_CMD_DATA:
+ ret = smux_handle_rx_data_cmd(pkt);
+ break;
+
+ case SMUX_CMD_CLOSE_LCH:
+ ret = smux_handle_rx_close_cmd(pkt);
+ break;
+
+ case SMUX_CMD_STATUS:
+ ret = smux_handle_rx_status_cmd(pkt);
+ break;
+
+ case SMUX_CMD_PWR_CTL:
+ ret = smux_handle_rx_power_cmd(pkt);
+ break;
+
+ case SMUX_CMD_BYTE:
+ ret = smux_handle_rx_byte_cmd(pkt);
+ break;
+
+ default:
+ pr_err("%s: command %d unknown\n", __func__, pkt->hdr.cmd);
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+/**
+ * Deserializes a packet and dispatches it to the packet receive logic.
+ *
+ * @data Raw data for one packet
+ * @len Length of the data
+ *
+ * @returns 0 for success
+ *
+ * Called with rx_lock_lha1 already locked.
+ */
+static int smux_deserialize(unsigned char *data, int len)
+{
+ struct smux_pkt_t recv;
+ uint8_t lcid;
+
+ smux_init_pkt(&recv);
+
+ /*
+ * It may be possible to optimize this to not use the
+ * temporary buffer.
+ */
+ memcpy(&recv.hdr, data, sizeof(struct smux_hdr_t));
+
+ if (recv.hdr.magic != SMUX_MAGIC) {
+ pr_err("%s: invalid header magic\n", __func__);
+ return -EINVAL;
+ }
+
+ lcid = recv.hdr.lcid;
+ if (smux_assert_lch_id(lcid)) {
+ pr_err("%s: invalid channel id %d\n", __func__, lcid);
+ return -ENXIO;
+ }
+
+ if (recv.hdr.payload_len)
+ recv.payload = data + sizeof(struct smux_hdr_t);
+
+ return smux_dispatch_rx_pkt(&recv);
+}
+
+/**
+ * Handle wakeup request byte.
+ *
+ * Called with rx_lock_lha1 already locked.
+ */
+static void smux_handle_wakeup_req(void)
+{
+ spin_lock(&smux.tx_lock_lha2);
+ if (smux.power_state == SMUX_PWR_OFF
+ || smux.power_state == SMUX_PWR_TURNING_ON) {
+ /* wakeup system */
+ SMUX_DBG("%s: Power %d->%d\n", __func__,
+ smux.power_state, SMUX_PWR_ON);
+ smux.power_state = SMUX_PWR_ON;
+ queue_work(smux_tx_wq, &smux_wakeup_work);
+ queue_work(smux_tx_wq, &smux_tx_work);
+ queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
+ msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
+ smux_send_byte(SMUX_WAKEUP_ACK);
+ } else {
+ smux_send_byte(SMUX_WAKEUP_ACK);
+ }
+ spin_unlock(&smux.tx_lock_lha2);
+}
+
+/**
+ * Handle wakeup request ack.
+ *
+ * Called with rx_lock_lha1 already locked.
+ */
+static void smux_handle_wakeup_ack(void)
+{
+ spin_lock(&smux.tx_lock_lha2);
+ if (smux.power_state == SMUX_PWR_TURNING_ON) {
+ /* received response to wakeup request */
+ SMUX_DBG("%s: Power %d->%d\n", __func__,
+ smux.power_state, SMUX_PWR_ON);
+ smux.power_state = SMUX_PWR_ON;
+ queue_work(smux_tx_wq, &smux_tx_work);
+ queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
+ msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
+
+ } else if (smux.power_state != SMUX_PWR_ON) {
+ /* invalid message */
+ pr_err("%s: wakeup request ack invalid in state %d\n",
+ __func__, smux.power_state);
+ }
+ spin_unlock(&smux.tx_lock_lha2);
+}
+
+/**
+ * RX State machine - IDLE state processing.
+ *
+ * @data New RX data to process
+ * @len Length of the data
+ * @used Return value of length processed
+ * @flag Error flag - TTY_NORMAL 0 for no failure
+ *
+ * Called with rx_lock_lha1 locked.
+ */
+static void smux_rx_handle_idle(const unsigned char *data,
+ int len, int *used, int flag)
+{
+ int i;
+
+ if (flag) {
+ if (smux_byte_loopback)
+ smux_receive_byte(SMUX_UT_ECHO_ACK_FAIL,
+ smux_byte_loopback);
+ pr_err("%s: TTY error 0x%x - ignoring\n", __func__, flag);
+ ++*used;
+ return;
+ }
+
+ for (i = *used; i < len && smux.rx_state == SMUX_RX_IDLE; i++) {
+ switch (data[i]) {
+ case SMUX_MAGIC_WORD1:
+ smux.rx_state = SMUX_RX_MAGIC;
+ break;
+ case SMUX_WAKEUP_REQ:
+ smux_handle_wakeup_req();
+ break;
+ case SMUX_WAKEUP_ACK:
+ smux_handle_wakeup_ack();
+ break;
+ default:
+ /* unexpected character */
+ if (smux_byte_loopback && data[i] == SMUX_UT_ECHO_REQ)
+ smux_receive_byte(SMUX_UT_ECHO_ACK_OK,
+ smux_byte_loopback);
+ pr_err("%s: parse error 0x%02x - ignoring\n", __func__,
+ (unsigned)data[i]);
+ break;
+ }
+ }
+
+ *used = i;
+}
+
+/**
+ * RX State machine - Header Magic state processing.
+ *
+ * @data New RX data to process
+ * @len Length of the data
+ * @used Return value of length processed
+ * @flag Error flag - TTY_NORMAL 0 for no failure
+ *
+ * Called with rx_lock_lha1 locked.
+ */
+static void smux_rx_handle_magic(const unsigned char *data,
+ int len, int *used, int flag)
+{
+ int i;
+
+ if (flag) {
+ pr_err("%s: TTY RX error %d\n", __func__, flag);
+ smux_enter_reset();
+ smux.rx_state = SMUX_RX_FAILURE;
+ ++*used;
+ return;
+ }
+
+ for (i = *used; i < len && smux.rx_state == SMUX_RX_MAGIC; i++) {
+ /* wait for completion of the magic */
+ if (data[i] == SMUX_MAGIC_WORD2) {
+ smux.recv_len = 0;
+ smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD1;
+ smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD2;
+ smux.rx_state = SMUX_RX_HDR;
+ } else {
+ /* unexpected / trash character */
+ pr_err("%s: rx parse error for char %c; *used=%d, len=%d\n",
+ __func__, data[i], *used, len);
+ smux.rx_state = SMUX_RX_IDLE;
+ }
+ }
+
+ *used = i;
+}
+
+/**
+ * RX State machine - Packet Header state processing.
+ *
+ * @data New RX data to process
+ * @len Length of the data
+ * @used Return value of length processed
+ * @flag Error flag - TTY_NORMAL 0 for no failure
+ *
+ * Called with rx_lock_lha1 locked.
+ */
+static void smux_rx_handle_hdr(const unsigned char *data,
+ int len, int *used, int flag)
+{
+ int i;
+ struct smux_hdr_t *hdr;
+
+ if (flag) {
+ pr_err("%s: TTY RX error %d\n", __func__, flag);
+ smux_enter_reset();
+ smux.rx_state = SMUX_RX_FAILURE;
+ ++*used;
+ return;
+ }
+
+ for (i = *used; i < len && smux.rx_state == SMUX_RX_HDR; i++) {
+ smux.recv_buf[smux.recv_len++] = data[i];
+
+ if (smux.recv_len == sizeof(struct smux_hdr_t)) {
+ /* complete header received */
+ hdr = (struct smux_hdr_t *)smux.recv_buf;
+ smux.pkt_remain = hdr->payload_len + hdr->pad_len;
+ smux.rx_state = SMUX_RX_PAYLOAD;
+ }
+ }
+ *used = i;
+}
+
+/**
+ * RX State machine - Packet Payload state processing.
+ *
+ * @data New RX data to process
+ * @len Length of the data
+ * @used Return value of length processed
+ * @flag Error flag - TTY_NORMAL 0 for no failure
+ *
+ * Called with rx_lock_lha1 locked.
+ */
+static void smux_rx_handle_pkt_payload(const unsigned char *data,
+ int len, int *used, int flag)
+{
+ int remaining;
+
+ if (flag) {
+ pr_err("%s: TTY RX error %d\n", __func__, flag);
+ smux_enter_reset();
+ smux.rx_state = SMUX_RX_FAILURE;
+ ++*used;
+ return;
+ }
+
+ /* copy data into rx buffer */
+ if (smux.pkt_remain < (len - *used))
+ remaining = smux.pkt_remain;
+ else
+ remaining = len - *used;
+
+ memcpy(&smux.recv_buf[smux.recv_len], &data[*used], remaining);
+ smux.recv_len += remaining;
+ smux.pkt_remain -= remaining;
+ *used += remaining;
+
+ if (smux.pkt_remain == 0) {
+ /* complete packet received */
+ smux_deserialize(smux.recv_buf, smux.recv_len);
+ smux.rx_state = SMUX_RX_IDLE;
+ }
+}
+
+/**
+ * Feed data to the receive state machine.
+ *
+ * @data Pointer to data block
+ * @len Length of data
+ * @flag TTY_NORMAL (0) for no error, otherwise TTY Error Flag
+ *
+ * Called with rx_lock_lha1 locked.
+ */
+void smux_rx_state_machine(const unsigned char *data,
+ int len, int flag)
+{
+ unsigned long flags;
+ int used;
+ int initial_rx_state;
+
+ SMUX_DBG("%s: %p, len=%d, flag=%d\n", __func__, data, len, flag);
+ spin_lock_irqsave(&smux.rx_lock_lha1, flags);
+ used = 0;
+ smux.rx_activity_flag = 1;
+ do {
+ SMUX_DBG("%s: state %d; %d of %d\n",
+ __func__, smux.rx_state, used, len);
+ initial_rx_state = smux.rx_state;
+
+ switch (smux.rx_state) {
+ case SMUX_RX_IDLE:
+ smux_rx_handle_idle(data, len, &used, flag);
+ break;
+ case SMUX_RX_MAGIC:
+ smux_rx_handle_magic(data, len, &used, flag);
+ break;
+ case SMUX_RX_HDR:
+ smux_rx_handle_hdr(data, len, &used, flag);
+ break;
+ case SMUX_RX_PAYLOAD:
+ smux_rx_handle_pkt_payload(data, len, &used, flag);
+ break;
+ default:
+ SMUX_DBG("%s: invalid state %d\n",
+ __func__, smux.rx_state);
+ smux.rx_state = SMUX_RX_IDLE;
+ break;
+ }
+ } while (used < len || smux.rx_state != initial_rx_state);
+ spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
+}
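+
+/*
+ * Illustrative walk-through (hypothetical input): feeding the byte stream
+ * { SMUX_WAKEUP_REQ, SMUX_MAGIC_WORD1, SMUX_MAGIC_WORD2, <header bytes>,
+ * <payload/pad bytes> } drives the states IDLE (wakeup handled in place)
+ * -> MAGIC -> HDR -> PAYLOAD -> IDLE, dispatching one complete packet via
+ * smux_deserialize() at the end.
+ */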
+
+/**
+ * Add channel to transmit-ready list and trigger transmit worker.
+ *
+ * @ch Channel to add
+ */
+static void list_channel(struct smux_lch_t *ch)
+{
+ unsigned long flags;
+
+ SMUX_DBG("%s: listing channel %d\n",
+ __func__, ch->lcid);
+
+ spin_lock_irqsave(&smux.tx_lock_lha2, flags);
+ spin_lock(&ch->tx_lock_lhb2);
+ smux.tx_activity_flag = 1;
+ if (list_empty(&ch->tx_ready_list))
+ list_add_tail(&ch->tx_ready_list, &smux.lch_tx_ready_list);
+ spin_unlock(&ch->tx_lock_lhb2);
+ spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
+
+ queue_work(smux_tx_wq, &smux_tx_work);
+}
+
+/**
+ * Transmit packet on correct transport and then perform client
+ * notification.
+ *
+ * @ch Channel to transmit on
+ * @pkt Packet to transmit
+ */
+static void smux_tx_pkt(struct smux_lch_t *ch, struct smux_pkt_t *pkt)
+{
+ union notifier_metadata meta_write;
+ int ret;
+
+ if (ch && pkt) {
+ SMUX_LOG_PKT_TX(pkt);
+ if (ch->local_mode == SMUX_LCH_MODE_LOCAL_LOOPBACK)
+ ret = smux_tx_loopback(pkt);
+ else
+ ret = smux_tx_tty(pkt);
+
+ if (pkt->hdr.cmd == SMUX_CMD_DATA) {
+ /* notify write-done */
+ meta_write.write.pkt_priv = pkt->priv;
+ meta_write.write.buffer = pkt->payload;
+ meta_write.write.len = pkt->hdr.payload_len;
+ if (ret >= 0) {
+ SMUX_DBG("%s: PKT write done", __func__);
+ schedule_notify(ch->lcid, SMUX_WRITE_DONE,
+ &meta_write);
+ } else {
+ pr_err("%s: failed to write pkt %d\n",
+ __func__, ret);
+ schedule_notify(ch->lcid, SMUX_WRITE_FAIL,
+ &meta_write);
+ }
+ }
+ }
+}
+
+/**
+ * Power-up the UART.
+ */
+static void smux_uart_power_on(void)
+{
+ struct uart_state *state;
+
+ if (!smux.tty || !smux.tty->driver_data) {
+ pr_err("%s: unable to find UART port for tty %p\n",
+ __func__, smux.tty);
+ return;
+ }
+ state = smux.tty->driver_data;
+ msm_hs_request_clock_on(state->uart_port);
+}
+
+/**
+ * Power down the UART.
+ */
+static void smux_uart_power_off(void)
+{
+ struct uart_state *state;
+
+ if (!smux.tty || !smux.tty->driver_data) {
+ pr_err("%s: unable to find UART port for tty %p\n",
+ __func__, smux.tty);
+ return;
+ }
+ state = smux.tty->driver_data;
+ msm_hs_request_clock_off(state->uart_port);
+}
+
+/**
+ * TX Wakeup Worker
+ *
+ * @work Not used
+ *
+ * Do an exponential back-off wakeup sequence with a maximum period
+ * of approximately 1 second (1 << 20 microseconds).
+ */
+static void smux_wakeup_worker(struct work_struct *work)
+{
+ unsigned long flags;
+ unsigned wakeup_delay;
+ int complete = 0;
+
+ for (;;) {
+ spin_lock_irqsave(&smux.tx_lock_lha2, flags);
+ if (smux.power_state == SMUX_PWR_ON) {
+ /* wakeup complete */
+ complete = 1;
+ spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
+ break;
+ } else {
+ /* retry */
+ wakeup_delay = smux.pwr_wakeup_delay_us;
+ smux.pwr_wakeup_delay_us <<= 1;
+ if (smux.pwr_wakeup_delay_us > SMUX_WAKEUP_DELAY_MAX)
+ smux.pwr_wakeup_delay_us =
+ SMUX_WAKEUP_DELAY_MAX;
+ }
+ spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
+ SMUX_DBG("%s: triggering wakeup\n", __func__);
+ smux_send_byte(SMUX_WAKEUP_REQ);
+
+ if (wakeup_delay < SMUX_WAKEUP_DELAY_MIN) {
+ SMUX_DBG("%s: sleeping for %u us\n", __func__,
+ wakeup_delay);
+ usleep_range(wakeup_delay, 2*wakeup_delay);
+ } else {
+ /* schedule delayed work */
+ SMUX_DBG("%s: scheduling delayed wakeup in %u ms\n",
+ __func__, wakeup_delay / 1000);
+ queue_delayed_work(smux_tx_wq,
+ &smux_wakeup_delayed_work,
+ msecs_to_jiffies(wakeup_delay / 1000));
+ break;
+ }
+ }
+
+ if (complete) {
+ SMUX_DBG("%s: wakeup complete\n", __func__);
+ /*
+ * Cancel any pending retry. This avoids a race condition with
+ * a new power-up request because:
+ * 1) this worker doesn't modify the state
+ * 2) this worker is processed on the same single-threaded
+ * workqueue as new TX wakeup requests
+ */
+ cancel_delayed_work(&smux_wakeup_delayed_work);
+ }
+}
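+
+/*
+ * Illustrative delay sequence for the back-off above (assuming the delay
+ * starts at 1 us): 1, 2, 4, ... microseconds via usleep_range() while below
+ * SMUX_WAKEUP_DELAY_MIN, then delayed work until the delay saturates at
+ * SMUX_WAKEUP_DELAY_MAX (about 1 second, per the comment above).
+ */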
+
+/**
+ * Inactivity timeout worker. Periodically scheduled when link is active.
+ * When it detects inactivity, it will power-down the UART link.
+ *
+ * @work Work structure (not used)
+ */
+static void smux_inactivity_worker(struct work_struct *work)
+{
+ int tx_ready = 0;
+ struct smux_pkt_t *pkt;
+ unsigned long flags;
+
+ spin_lock_irqsave(&smux.rx_lock_lha1, flags);
+ spin_lock(&smux.tx_lock_lha2);
+
+ if (!smux.tx_activity_flag && !smux.rx_activity_flag) {
+ /* no activity */
+ if (smux.powerdown_enabled) {
+ if (smux.power_state == SMUX_PWR_ON) {
+ /* start power-down sequence */
+ pkt = smux_alloc_pkt();
+ if (pkt) {
+ SMUX_DBG("%s: Power %d->%d\n", __func__,
+ smux.power_state,
+ SMUX_PWR_TURNING_OFF);
+ smux.power_state = SMUX_PWR_TURNING_OFF;
+
+ /* send power-down request */
+ pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
+ pkt->hdr.flags = 0;
+ pkt->hdr.lcid = 0;
+ smux_tx_queue(pkt,
+ &smux_lch[SMUX_TEST_LCID],
+ 0);
+ tx_ready = 1;
+ }
+ }
+ } else {
+ SMUX_DBG("%s: link inactive, but powerdown disabled\n",
+ __func__);
+ }
+ }
+ smux.tx_activity_flag = 0;
+ smux.rx_activity_flag = 0;
+
+ spin_unlock(&smux.tx_lock_lha2);
+ spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
+
+ if (tx_ready)
+ list_channel(&smux_lch[SMUX_TEST_LCID]);
+
+ if ((smux.power_state == SMUX_PWR_OFF_FLUSH) ||
+ (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH)) {
+ /* ready to power-down the UART */
+ SMUX_DBG("%s: Power %d->%d\n", __func__,
+ smux.power_state, SMUX_PWR_OFF);
+ smux_uart_power_off();
+ spin_lock_irqsave(&smux.tx_lock_lha2, flags);
+ smux.power_state = SMUX_PWR_OFF;
+ spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
+ }
+
+ /* reschedule inactivity worker */
+ if (smux.power_state != SMUX_PWR_OFF)
+ queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
+ msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
+}
+
+/**
+ * Transmit worker handles serializing and transmitting packets onto the
+ * underlying transport.
+ *
+ * @work Work structure (not used)
+ */
+static void smux_tx_worker(struct work_struct *work)
+{
+ struct smux_pkt_t *pkt;
+ struct smux_lch_t *ch;
+ unsigned low_wm_notif;
+ unsigned lcid;
+ unsigned long flags;
+
+ /*
+ * Transmit packets in round-robin fashion based upon ready
+ * channels.
+ *
+ * To eliminate the need to hold a lock for the entire
+ * iteration through the channel ready list, the head of the
+ * ready-channel list is always the next channel to be
+ * processed. To send a packet, the first valid packet in
+ * the head channel is removed and the head channel is then
+ * rescheduled at the end of the queue by removing it and
+ * inserting after the tail. The locks can then be released
+ * while the packet is processed.
+ */
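+
+ /*
+ * Example (illustrative): with ready channels [A, B, C], one pass
+ * sends the head packet of A and rotates the list to [B, C, A]; a
+ * channel whose TX queue is empty is dropped from the ready list
+ * instead.
+ */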
+ for (;;) {
+ pkt = NULL;
+ low_wm_notif = 0;
+
+ /* get the next ready channel */
+ spin_lock_irqsave(&smux.tx_lock_lha2, flags);
+ if (list_empty(&smux.lch_tx_ready_list)) {
+ /* no ready channels */
+ SMUX_DBG("%s: no more ready channels, exiting\n",
+ __func__);
+ spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
+ break;
+ }
+ smux.tx_activity_flag = 1;
+
+ if (smux.power_state != SMUX_PWR_ON
+ && smux.power_state != SMUX_PWR_TURNING_OFF
+ && smux.power_state != SMUX_PWR_TURNING_OFF_FLUSH) {
+ /* Link isn't ready to transmit */
+ if (smux.power_state == SMUX_PWR_OFF) {
+ /* link is off, trigger wakeup */
+ smux.pwr_wakeup_delay_us = 1;
+ SMUX_DBG("%s: Power %d->%d\n", __func__,
+ smux.power_state,
+ SMUX_PWR_TURNING_ON);
+ smux.power_state = SMUX_PWR_TURNING_ON;
+ spin_unlock_irqrestore(&smux.tx_lock_lha2,
+ flags);
+ smux_uart_power_on();
+ queue_work(smux_tx_wq, &smux_wakeup_work);
+ } else {
+ SMUX_DBG("%s: can not tx with power state %d\n",
+ __func__,
+ smux.power_state);
+ spin_unlock_irqrestore(&smux.tx_lock_lha2,
+ flags);
+ }
+ break;
+ }
+
+ /* get the next packet to send and rotate channel list */
+ ch = list_first_entry(&smux.lch_tx_ready_list,
+ struct smux_lch_t,
+ tx_ready_list);
+
+ spin_lock(&ch->state_lock_lhb1);
+ spin_lock(&ch->tx_lock_lhb2);
+ if (!list_empty(&ch->tx_queue)) {
+ /*
+ * If remote TX flow control is enabled or
+ * the channel is not fully opened, then only
+ * send command packets.
+ */
+ if (ch->tx_flow_control || !IS_FULLY_OPENED(ch)) {
+ struct smux_pkt_t *curr;
+ list_for_each_entry(curr, &ch->tx_queue, list) {
+ if (curr->hdr.cmd != SMUX_CMD_DATA) {
+ pkt = curr;
+ break;
+ }
+ }
+ } else {
+ /* get next cmd/data packet to send */
+ pkt = list_first_entry(&ch->tx_queue,
+ struct smux_pkt_t, list);
+ }
+ }
+
+ if (pkt) {
+ list_del(&pkt->list);
+
+ /* update packet stats */
+ if (pkt->hdr.cmd == SMUX_CMD_DATA) {
+ --ch->tx_pending_data_cnt;
+ if (ch->notify_lwm &&
+ ch->tx_pending_data_cnt
+ <= SMUX_WM_LOW) {
+ ch->notify_lwm = 0;
+ low_wm_notif = 1;
+ }
+ }
+
+ /* advance to the next ready channel */
+ list_rotate_left(&smux.lch_tx_ready_list);
+ } else {
+ /* no data in channel to send, remove from ready list */
+ list_del(&ch->tx_ready_list);
+ INIT_LIST_HEAD(&ch->tx_ready_list);
+ }
+ lcid = ch->lcid;
+ spin_unlock(&ch->tx_lock_lhb2);
+ spin_unlock(&ch->state_lock_lhb1);
+ spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
+
+ if (low_wm_notif)
+ schedule_notify(lcid, SMUX_LOW_WM_HIT, NULL);
+
+ /* send the packet */
+ smux_tx_pkt(ch, pkt);
+ smux_free_pkt(pkt);
+ }
+}
+
+/**********************************************************************/
+/* Kernel API */
+/**********************************************************************/
+
+/**
+ * Set or clear channel option using the SMUX_CH_OPTION_* channel
+ * flags.
+ *
+ * @lcid Logical channel ID
+ * @set Options to set
+ * @clear Options to clear
+ *
+ * @returns 0 for success, < 0 for failure
+ */
+int msm_smux_set_ch_option(uint8_t lcid, uint32_t set, uint32_t clear)
+{
+ unsigned long flags;
+ struct smux_lch_t *ch;
+ int tx_ready = 0;
+ int ret = 0;
+
+ if (smux_assert_lch_id(lcid))
+ return -ENXIO;
+
+ ch = &smux_lch[lcid];
+ spin_lock_irqsave(&ch->state_lock_lhb1, flags);
+
+ /* Local loopback mode */
+ if (set & SMUX_CH_OPTION_LOCAL_LOOPBACK)
+ ch->local_mode = SMUX_LCH_MODE_LOCAL_LOOPBACK;
+
+ if (clear & SMUX_CH_OPTION_LOCAL_LOOPBACK)
+ ch->local_mode = SMUX_LCH_MODE_NORMAL;
+
+ /* Remote loopback mode */
+ if (set & SMUX_CH_OPTION_REMOTE_LOOPBACK)
+ ch->local_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
+
+ if (clear & SMUX_CH_OPTION_REMOTE_LOOPBACK)
+ ch->local_mode = SMUX_LCH_MODE_NORMAL;
+
+ /* Flow control */
+ if (set & SMUX_CH_OPTION_REMOTE_TX_STOP) {
+ ch->local_tiocm |= SMUX_CMD_STATUS_FLOW_CNTL;
+ ret = smux_send_status_cmd(ch);
+ tx_ready = 1;
+ }
+
+ if (clear & SMUX_CH_OPTION_REMOTE_TX_STOP) {
+ ch->local_tiocm &= ~SMUX_CMD_STATUS_FLOW_CNTL;
+ ret = smux_send_status_cmd(ch);
+ tx_ready = 1;
+ }
+
+ spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
+
+ if (tx_ready)
+ list_channel(ch);
+
+ return ret;
+}
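+
+/*
+ * Example (illustrative): put channel 1 into local loopback for testing,
+ * then restore normal mode:
+ *
+ * msm_smux_set_ch_option(1, SMUX_CH_OPTION_LOCAL_LOOPBACK, 0);
+ * ...
+ * msm_smux_set_ch_option(1, 0, SMUX_CH_OPTION_LOCAL_LOOPBACK);
+ */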
+
+/**
+ * Starts the opening sequence for a logical channel.
+ *
+ * @lcid Logical channel ID
+ * @priv Free for client usage
+ * @notify Event notification function
+ * @get_rx_buffer Function used to provide a receive buffer to SMUX
+ *
+ * @returns 0 for success, <0 otherwise
+ *
+ * A channel must be fully closed (either never opened, or msm_smux_close()
+ * has been called and the SMUX_DISCONNECTED notification has been received).
+ *
+ * Once the remote side is opened, the client will receive a SMUX_CONNECTED
+ * event.
+ */
+int msm_smux_open(uint8_t lcid, void *priv,
+ void (*notify)(void *priv, int event_type, const void *metadata),
+ int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
+ int size))
+{
+ int ret;
+ struct smux_lch_t *ch;
+ struct smux_pkt_t *pkt;
+ int tx_ready = 0;
+ unsigned long flags;
+
+ if (smux_assert_lch_id(lcid))
+ return -ENXIO;
+
+ ch = &smux_lch[lcid];
+ spin_lock_irqsave(&ch->state_lock_lhb1, flags);
+
+ if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ if (ch->local_state != SMUX_LCH_LOCAL_CLOSED) {
+ pr_err("%s: open lcid %d local state %x invalid\n",
+ __func__, lcid, ch->local_state);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
+ ch->local_state,
+ SMUX_LCH_LOCAL_OPENING);
+
+ ch->local_state = SMUX_LCH_LOCAL_OPENING;
+
+ ch->priv = priv;
+ ch->notify = notify;
+ ch->get_rx_buffer = get_rx_buffer;
+ ret = 0;
+
+ /* Send Open Command */
+ pkt = smux_alloc_pkt();
+ if (!pkt) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ pkt->hdr.magic = SMUX_MAGIC;
+ pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
+ pkt->hdr.flags = SMUX_CMD_OPEN_POWER_COLLAPSE;
+ if (ch->local_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK)
+ pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
+ pkt->hdr.lcid = lcid;
+ pkt->hdr.payload_len = 0;
+ pkt->hdr.pad_len = 0;
+ smux_tx_queue(pkt, ch, 0);
+ tx_ready = 1;
+
+out:
+ spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
+ if (tx_ready)
+ list_channel(ch);
+ return ret;
+}
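+
+/*
+ * Example client (illustrative sketch; my_notify and my_get_rx_buffer are
+ * hypothetical client-supplied callbacks):
+ *
+ * static void my_notify(void *priv, int event, const void *metadata)
+ * {
+ * if (event == SMUX_CONNECTED)
+ * ; (channel is now fully open for data)
+ * }
+ *
+ * static int my_get_rx_buffer(void *priv, void **pkt_priv, void **buffer,
+ * int size)
+ * {
+ * *pkt_priv = NULL;
+ * *buffer = kmalloc(size, GFP_ATOMIC);
+ * return *buffer ? 0 : -EAGAIN;
+ * }
+ *
+ * ret = msm_smux_open(1, NULL, my_notify, my_get_rx_buffer);
+ */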
+
+/**
+ * Starts the closing sequence for a logical channel.
+ *
+ * @lcid Logical channel ID
+ *
+ * @returns 0 for success, <0 otherwise
+ *
+ * Once the close event has been acknowledged by the remote side, the client
+ * will receive a SMUX_DISCONNECTED notification.
+ */
+int msm_smux_close(uint8_t lcid)
+{
+ int ret = 0;
+ struct smux_lch_t *ch;
+ struct smux_pkt_t *pkt;
+ int tx_ready = 0;
+ unsigned long flags;
+
+ if (smux_assert_lch_id(lcid))
+ return -ENXIO;
+
+ ch = &smux_lch[lcid];
+ spin_lock_irqsave(&ch->state_lock_lhb1, flags);
+ ch->local_tiocm = 0x0;
+ ch->remote_tiocm = 0x0;
+ ch->tx_pending_data_cnt = 0;
+ ch->notify_lwm = 0;
+
+ /* Purge TX queue */
+ spin_lock(&ch->tx_lock_lhb2);
+ while (!list_empty(&ch->tx_queue)) {
+ pkt = list_first_entry(&ch->tx_queue, struct smux_pkt_t,
+ list);
+ list_del(&pkt->list);
+
+ if (pkt->hdr.cmd == SMUX_CMD_OPEN_LCH) {
+ /* Open was never sent, just force to closed state */
+ union notifier_metadata meta_disconnected;
+
+ ch->local_state = SMUX_LCH_LOCAL_CLOSED;
+ meta_disconnected.disconnected.is_ssr = 0;
+ schedule_notify(lcid, SMUX_DISCONNECTED,
+ &meta_disconnected);
+ } else if (pkt->hdr.cmd == SMUX_CMD_DATA) {
+ /* Notify client of failed write */
+ union notifier_metadata meta_write;
+
+ meta_write.write.pkt_priv = pkt->priv;
+ meta_write.write.buffer = pkt->payload;
+ meta_write.write.len = pkt->hdr.payload_len;
+ schedule_notify(ch->lcid, SMUX_WRITE_FAIL, &meta_write);
+ }
+ smux_free_pkt(pkt);
+ }
+ spin_unlock(&ch->tx_lock_lhb2);
+
+ /* Send Close Command */
+ if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
+ ch->local_state == SMUX_LCH_LOCAL_OPENING) {
+ SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
+ ch->local_state,
+ SMUX_LCH_LOCAL_CLOSING);
+
+ ch->local_state = SMUX_LCH_LOCAL_CLOSING;
+ pkt = smux_alloc_pkt();
+ if (pkt) {
+ pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
+ pkt->hdr.flags = 0;
+ pkt->hdr.lcid = lcid;
+ pkt->hdr.payload_len = 0;
+ pkt->hdr.pad_len = 0;
+ smux_tx_queue(pkt, ch, 0);
+ tx_ready = 1;
+ } else {
+ pr_err("%s: pkt allocation failed\n", __func__);
+ ret = -ENOMEM;
+ }
+ }
+ spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
+
+ if (tx_ready)
+ list_channel(ch);
+
+ return ret;
+}
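+
+/*
+ * Illustrative teardown: msm_smux_close(1) queues the CLOSE command; the
+ * client should wait for its notify callback to deliver SMUX_DISCONNECTED
+ * before reopening or reusing the channel.
+ */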
+
+/**
+ * Write data to a logical channel.
+ *
+ * @lcid Logical channel ID
+ * @pkt_priv Client data that will be returned with the SMUX_WRITE_DONE or
+ * SMUX_WRITE_FAIL notification.
+ * @data Data to write
+ * @len Length of @data
+ *
+ * @returns 0 for success, <0 otherwise
+ *
+ * Data may be written immediately after msm_smux_open() is called,
+ * but the data will wait in the transmit queue until the channel has
+ * been fully opened.
+ *
+ * Once the data has been written, the client will receive either a completion
+ * (SMUX_WRITE_DONE) or a failure notice (SMUX_WRITE_FAIL).
+ */
+int msm_smux_write(uint8_t lcid, void *pkt_priv, const void *data, int len)
+{
+ struct smux_lch_t *ch;
+ struct smux_pkt_t *pkt = NULL;
+ int tx_ready = 0;
+ unsigned long flags;
+ int ret;
+
+ if (smux_assert_lch_id(lcid))
+ return -ENXIO;
+
+ ch = &smux_lch[lcid];
+ spin_lock_irqsave(&ch->state_lock_lhb1, flags);
+
+ if (ch->local_state != SMUX_LCH_LOCAL_OPENED &&
+ ch->local_state != SMUX_LCH_LOCAL_OPENING) {
+ pr_err("%s: hdr.invalid local state %d channel %d\n",
+ __func__, ch->local_state, lcid);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (len > SMUX_MAX_PKT_SIZE - sizeof(struct smux_hdr_t)) {
+ pr_err("%s: payload %d too large\n",
+ __func__, len);
+ ret = -E2BIG;
+ goto out;
+ }
+
+ pkt = smux_alloc_pkt();
+ if (!pkt) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ pkt->hdr.cmd = SMUX_CMD_DATA;
+ pkt->hdr.lcid = lcid;
+ pkt->hdr.flags = 0;
+ pkt->hdr.payload_len = len;
+ pkt->payload = (void *)data;
+ pkt->priv = pkt_priv;
+ pkt->hdr.pad_len = 0;
+
+ spin_lock(&ch->tx_lock_lhb2);
+ /* verify high watermark */
+ SMUX_DBG("%s: pending %d", __func__, ch->tx_pending_data_cnt);
+
+ if (ch->tx_pending_data_cnt >= SMUX_WM_HIGH) {
+ pr_err("%s: ch %d high watermark %d exceeded %d\n",
+ __func__, lcid, SMUX_WM_HIGH,
+ ch->tx_pending_data_cnt);
+ ret = -EAGAIN;
+ goto out_inner;
+ }
+
+ /* queue packet for transmit */
+ if (++ch->tx_pending_data_cnt == SMUX_WM_HIGH) {
+ ch->notify_lwm = 1;
+ pr_err("%s: high watermark hit\n", __func__);
+ schedule_notify(lcid, SMUX_HIGH_WM_HIT, NULL);
+ }
+ list_add_tail(&pkt->list, &ch->tx_queue);
+
+ /* add to ready list */
+ if (IS_FULLY_OPENED(ch))
+ tx_ready = 1;
+
+ ret = 0;
+
+out_inner:
+ spin_unlock(&ch->tx_lock_lhb2);
+
+out:
+ if (ret)
+ smux_free_pkt(pkt);
+ spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
+
+ if (tx_ready)
+ list_channel(ch);
+
+ return ret;
+}
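+
+/*
+ * Example write (illustrative sketch; my_buf is a hypothetical client
+ * buffer that must stay valid until SMUX_WRITE_DONE/SMUX_WRITE_FAIL
+ * returns it in the write metadata):
+ *
+ * ret = msm_smux_write(1, my_buf, my_buf, sizeof(my_buf));
+ * if (ret == -EAGAIN)
+ * ; (high watermark exceeded - retry after SMUX_LOW_WM_HIT)
+ */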
+
+/**
+ * Returns true if the TX queue is currently full (high water mark).
+ *
+ * @lcid Logical channel ID
+ * @returns 0 if channel is not full
+ * 1 if it is full
+ * < 0 for error
+ */
+int msm_smux_is_ch_full(uint8_t lcid)
+{
+ struct smux_lch_t *ch;
+ unsigned long flags;
+ int is_full = 0;
+
+ if (smux_assert_lch_id(lcid))
+ return -ENXIO;
+
+ ch = &smux_lch[lcid];
+
+ spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
+ if (ch->tx_pending_data_cnt >= SMUX_WM_HIGH)
+ is_full = 1;
+ spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
+
+ return is_full;
+}
+
+/**
+ * Returns true if the TX queue has space for more packets (i.e., it is at
+ * or below the low water mark).
+ *
+ * @lcid Logical channel ID
+ * @returns 0 if channel is above low watermark
+ * 1 if it's at or below the low watermark
+ * < 0 for error
+ */
+int msm_smux_is_ch_low(uint8_t lcid)
+{
+ struct smux_lch_t *ch;
+ unsigned long flags;
+ int is_low = 0;
+
+ if (smux_assert_lch_id(lcid))
+ return -ENXIO;
+
+ ch = &smux_lch[lcid];
+
+ spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
+ if (ch->tx_pending_data_cnt <= SMUX_WM_LOW)
+ is_low = 1;
+ spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
+
+ return is_low;
+}
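+
+/*
+ * Illustrative flow-control poll using the two helpers above:
+ *
+ * if (msm_smux_is_ch_full(1) == 1)
+ * ; (hold off new writes until msm_smux_is_ch_low(1) returns 1
+ * or a SMUX_LOW_WM_HIT notification arrives)
+ */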
+
+/**
+ * Send TIOCM status update.
+ *
+ * @ch Channel for update
+ *
+ * @returns 0 for success, <0 for failure
+ *
+ * Channel lock must be held before calling.
+ */
+static int smux_send_status_cmd(struct smux_lch_t *ch)
+{
+ struct smux_pkt_t *pkt;
+
+ if (!ch)
+ return -EINVAL;
+
+ pkt = smux_alloc_pkt();
+ if (!pkt)
+ return -ENOMEM;
+
+ pkt->hdr.lcid = ch->lcid;
+ pkt->hdr.cmd = SMUX_CMD_STATUS;
+ pkt->hdr.flags = ch->local_tiocm;
+ pkt->hdr.payload_len = 0;
+ pkt->hdr.pad_len = 0;
+ smux_tx_queue(pkt, ch, 0);
+
+ return 0;
+}
+
+/**
+ * Internal helper function for getting the TIOCM status with
+ * state_lock_lhb1 already locked.
+ *
+ * @ch Channel pointer
+ *
+ * @returns TIOCM status
+ */
+static long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch)
+{
+ long status = 0x0;
+
+ status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DSR : 0;
+ status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_CTS : 0;
+ status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RI) ? TIOCM_RI : 0;
+ status |= (ch->remote_tiocm & SMUX_CMD_STATUS_DCD) ? TIOCM_CD : 0;
+
+ status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DTR : 0;
+ status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_RTS : 0;
+
+ return status;
+}
+
+/**
+ * Get the TIOCM status bits.
+ *
+ * @lcid Logical channel ID
+ *
+ * @returns >= 0 TIOCM status bits
+ * < 0 Error condition
+ */
+long msm_smux_tiocm_get(uint8_t lcid)
+{
+ struct smux_lch_t *ch;
+ unsigned long flags;
+ long status = 0x0;
+
+ if (smux_assert_lch_id(lcid))
+ return -ENXIO;
+
+ ch = &smux_lch[lcid];
+ spin_lock_irqsave(&ch->state_lock_lhb1, flags);
+ status = msm_smux_tiocm_get_atomic(ch);
+ spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
+
+ return status;
+}
+
+/**
+ * Set/clear the TIOCM status bits.
+ *
+ * @lcid Logical channel ID
+ * @set Bits to set
+ * @clear Bits to clear
+ *
+ * @returns 0 for success; < 0 for failure
+ *
+ * If a bit is specified in both the @set and @clear masks, then the clear bit
+ * definition will dominate and the bit will be cleared.
+ */
+int msm_smux_tiocm_set(uint8_t lcid, uint32_t set, uint32_t clear)
+{
+ struct smux_lch_t *ch;
+ unsigned long flags;
+ uint8_t old_status;
+ uint8_t status_set = 0x0;
+ uint8_t status_clear = 0x0;
+ int tx_ready = 0;
+ int ret = 0;
+
+ if (smux_assert_lch_id(lcid))
+ return -ENXIO;
+
+ ch = &smux_lch[lcid];
+ spin_lock_irqsave(&ch->state_lock_lhb1, flags);
+
+ status_set |= (set & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
+ status_set |= (set & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
+ status_set |= (set & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
+ status_set |= (set & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
+
+ status_clear |= (clear & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
+ status_clear |= (clear & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
+ status_clear |= (clear & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
+ status_clear |= (clear & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
+
+ old_status = ch->local_tiocm;
+ ch->local_tiocm |= status_set;
+ ch->local_tiocm &= ~status_clear;
+
+ if (ch->local_tiocm != old_status) {
+ ret = smux_send_status_cmd(ch);
+ tx_ready = 1;
+ }
+ spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
+
+ if (tx_ready)
+ list_channel(ch);
+
+ return ret;
+}
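+
+/*
+ * Example (illustrative): assert DTR/RTS on channel 1 and read the merged
+ * local/remote status back:
+ *
+ * msm_smux_tiocm_set(1, TIOCM_DTR | TIOCM_RTS, 0);
+ * status = msm_smux_tiocm_get(1);
+ * if (status & TIOCM_CTS)
+ * ; (remote side is ready to receive)
+ */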
+
+/**********************************************************************/
+/* Line Discipline Interface */
+/**********************************************************************/
+static int smuxld_open(struct tty_struct *tty)
+{
+ int i;
+ int tmp;
+ unsigned long flags;
+ int ret = 0;
+
+ if (!smux.is_initialized)
+ return -ENODEV;
+
+ spin_lock_irqsave(&smux.lock_lha0, flags);
+ if (smux.ld_open_count) {
+ pr_err("%s: %p multiple instances not supported\n",
+ __func__, tty);
+ ret = -EEXIST;
+ goto out;
+ }
+
+ if (tty->ops->write == NULL) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ++smux.ld_open_count;
+
+ /* connect to TTY */
+ smux.tty = tty;
+ tty->disc_data = &smux;
+ tty->receive_room = TTY_RECEIVE_ROOM;
+ tty_driver_flush_buffer(tty);
+
+ /* power-down the UART if we are idle */
+ spin_lock(&smux.tx_lock_lha2);
+ if (smux.power_state == SMUX_PWR_OFF) {
+ SMUX_DBG("%s: powering off uart\n", __func__);
+ smux.power_state = SMUX_PWR_OFF_FLUSH;
+ spin_unlock(&smux.tx_lock_lha2);
+ queue_work(smux_tx_wq, &smux_inactivity_work);
+ } else {
+ spin_unlock(&smux.tx_lock_lha2);
+ }
+
+ /* register platform devices */
+ for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
+ tmp = platform_device_register(&smux_devs[i]);
+ if (tmp)
+ pr_err("%s: error %d registering device %s\n",
+ __func__, tmp, smux_devs[i].name);
+ }
+
+out:
+ spin_unlock_irqrestore(&smux.lock_lha0, flags);
+ return ret;
+}
+
+static void smuxld_close(struct tty_struct *tty)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&smux.lock_lha0, flags);
+ if (smux.ld_open_count <= 0) {
+ pr_err("%s: invalid ld count %d\n", __func__,
+ smux.ld_open_count);
+ goto out;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(smux_devs); ++i)
+ platform_device_unregister(&smux_devs[i]);
+
+ --smux.ld_open_count;
+
+out:
+ spin_unlock_irqrestore(&smux.lock_lha0, flags);
+}
+
+/**
+ * Receive data from TTY Line Discipline.
+ *
+ * @tty TTY structure
+ * @cp Character data
+ * @fp Flag data
+ * @count Size of character and flag data
+ */
+void smuxld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count)
+{
+ int i;
+ int last_idx = 0;
+ const char *tty_name = NULL;
+ char *f;
+
+ if (smux_debug_mask & MSM_SMUX_DEBUG)
+ print_hex_dump(KERN_INFO, "smux tty rx: ", DUMP_PREFIX_OFFSET,
+ 16, 1, cp, count, true);
+
+ /* verify error flags */
+ for (i = 0, f = fp; i < count; ++i, ++f) {
+ if (*f != TTY_NORMAL) {
+ if (tty)
+ tty_name = tty->name;
+ pr_err("%s: TTY %s Error %d (%s)\n", __func__,
+ tty_name, *f, tty_flag_to_str(*f));
+
+ /* feed all previous valid data to the parser */
+ smux_rx_state_machine(cp + last_idx, i - last_idx,
+ TTY_NORMAL);
+
+ /* feed bad data to parser */
+ smux_rx_state_machine(cp + i, 1, *f);
+ last_idx = i + 1;
+ }
+ }
+
+ /* feed data to RX state machine */
+ smux_rx_state_machine(cp + last_idx, count - last_idx, TTY_NORMAL);
+}
+
+static void smuxld_flush_buffer(struct tty_struct *tty)
+{
+ pr_err("%s: not supported\n", __func__);
+}
+
+static ssize_t smuxld_chars_in_buffer(struct tty_struct *tty)
+{
+ pr_err("%s: not supported\n", __func__);
+ return -ENODEV;
+}
+
+static ssize_t smuxld_read(struct tty_struct *tty, struct file *file,
+ unsigned char __user *buf, size_t nr)
+{
+ pr_err("%s: not supported\n", __func__);
+ return -ENODEV;
+}
+
+static ssize_t smuxld_write(struct tty_struct *tty, struct file *file,
+ const unsigned char *buf, size_t nr)
+{
+ pr_err("%s: not supported\n", __func__);
+ return -ENODEV;
+}
+
+static int smuxld_ioctl(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ pr_err("%s: not supported\n", __func__);
+ return -ENODEV;
+}
+
+static unsigned int smuxld_poll(struct tty_struct *tty, struct file *file,
+ struct poll_table_struct *tbl)
+{
+ pr_err("%s: not supported\n", __func__);
+ return -ENODEV;
+}
+
+static void smuxld_write_wakeup(struct tty_struct *tty)
+{
+ pr_err("%s: not supported\n", __func__);
+}
+
+static struct tty_ldisc_ops smux_ldisc_ops = {
+ .owner = THIS_MODULE,
+ .magic = TTY_LDISC_MAGIC,
+ .name = "n_smux",
+ .open = smuxld_open,
+ .close = smuxld_close,
+ .flush_buffer = smuxld_flush_buffer,
+ .chars_in_buffer = smuxld_chars_in_buffer,
+ .read = smuxld_read,
+ .write = smuxld_write,
+ .ioctl = smuxld_ioctl,
+ .poll = smuxld_poll,
+ .receive_buf = smuxld_receive_buf,
+ .write_wakeup = smuxld_write_wakeup
+};
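+
+/*
+ * Example (illustrative only, not part of this patch): user space attaches
+ * this line discipline to the underlying serial port with the standard
+ * TIOCSETD ioctl.  The device node name below is hypothetical.
+ *
+ *	int fd = open("/dev/ttyHS0", O_RDWR | O_NOCTTY);
+ *	int ldisc = N_SMUX;
+ *
+ *	if (fd >= 0 && ioctl(fd, TIOCSETD, &ldisc) < 0)
+ *		perror("TIOCSETD N_SMUX");
+ */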
+
+static int __init smux_init(void)
+{
+ int ret;
+
+ spin_lock_init(&smux.lock_lha0);
+
+ spin_lock_init(&smux.rx_lock_lha1);
+ smux.rx_state = SMUX_RX_IDLE;
+ smux.power_state = SMUX_PWR_OFF;
+ smux.pwr_wakeup_delay_us = 1;
+ smux.powerdown_enabled = 0;
+ smux.rx_activity_flag = 0;
+ smux.tx_activity_flag = 0;
+ smux.recv_len = 0;
+ smux.tty = NULL;
+ smux.ld_open_count = 0;
+ smux.in_reset = 0;
+ smux.is_initialized = 1;
+ smux_byte_loopback = 0;
+
+ spin_lock_init(&smux.tx_lock_lha2);
+ INIT_LIST_HEAD(&smux.lch_tx_ready_list);
+
+ ret = tty_register_ldisc(N_SMUX, &smux_ldisc_ops);
+ if (ret != 0) {
+ pr_err("%s: error %d registering line discipline\n",
+ __func__, ret);
+ return ret;
+ }
+
+	ret = lch_init();
+	if (ret != 0) {
+		pr_err("%s: lch_init failed\n", __func__);
+		tty_unregister_ldisc(N_SMUX);
+		return ret;
+	}
+
+ return 0;
+}
+
+static void __exit smux_exit(void)
+{
+ int ret;
+
+ ret = tty_unregister_ldisc(N_SMUX);
+ if (ret != 0) {
+		pr_err("%s: error %d unregistering line discipline\n",
+ __func__, ret);
+ return;
+ }
+}
+
+module_init(smux_init);
+module_exit(smux_exit);
+
+MODULE_DESCRIPTION("Serial Mux TTY Line Discipline");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_LDISC(N_SMUX);
diff --git a/drivers/tty/smux_ctl.c b/drivers/tty/smux_ctl.c
new file mode 100644
index 0000000..26a49a0
--- /dev/null
+++ b/drivers/tty/smux_ctl.c
@@ -0,0 +1,938 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Serial Mux Control Driver -- Provides a binary serial muxed control
+ * port interface.
+ */
+
+#define DEBUG
+
+#include <linux/cdev.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+#include <linux/workqueue.h>
+#include <linux/atomic.h>
+#include <linux/platform_device.h>
+#include <linux/smux.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+
+#include <asm/ioctls.h>
+
+#define MAX_WRITE_RETRY 5
+#define MAGIC_NO_V1 0x33FC
+#define DEVICE_NAME "smuxctl"
+#define SMUX_CTL_MAX_BUF_SIZE 2048
+#define SMUX_CTL_MODULE_NAME "smux_ctl"
+#define DEBUG_LOOPBACK
+
+static int msm_smux_ctl_debug_mask;
+module_param_named(debug_mask, msm_smux_ctl_debug_mask,
+ int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+static uint32_t smux_ctl_ch_id[] = {
+ SMUX_DATA_CTL_0,
+ SMUX_DATA_CTL_1,
+ SMUX_DATA_CTL_2,
+ SMUX_DATA_CTL_3,
+ SMUX_DATA_CTL_4,
+ SMUX_DATA_CTL_5,
+ SMUX_DATA_CTL_6,
+ SMUX_DATA_CTL_7,
+ SMUX_USB_RMNET_CTL_0,
+ SMUX_CSVT_CTL_0
+};
+
+#define SMUX_CTL_NUM_CHANNELS ARRAY_SIZE(smux_ctl_ch_id)
+
+struct smux_ctl_dev {
+ int id;
+ char name[10];
+ struct cdev cdev;
+ struct device *devicep;
+ struct mutex dev_lock;
+ atomic_t ref_count;
+ int state;
+ int is_channel_reset;
+ int is_high_wm;
+ int write_pending;
+
+ struct mutex rx_lock;
+ uint32_t read_avail;
+ struct list_head rx_list;
+
+ wait_queue_head_t read_wait_queue;
+ wait_queue_head_t write_wait_queue;
+
+ struct {
+ uint32_t bytes_tx;
+ uint32_t bytes_rx;
+ uint32_t pkts_tx;
+ uint32_t pkts_rx;
+ uint32_t cnt_ssr;
+ uint32_t cnt_read_fail;
+ uint32_t cnt_write_fail;
+ uint32_t cnt_high_wm_hit;
+ } stats;
+
+} *smux_ctl_devp[SMUX_CTL_NUM_CHANNELS];
+
+struct smux_ctl_pkt {
+ int data_size;
+ void *data;
+};
+
+struct smux_ctl_list_elem {
+ struct list_head list;
+ struct smux_ctl_pkt ctl_pkt;
+};
+
+struct class *smux_ctl_classp;
+static dev_t smux_ctl_number;
+static uint32_t smux_ctl_inited;
+
+enum {
+ MSM_SMUX_CTL_DEBUG = 1U << 0,
+ MSM_SMUX_CTL_DUMP_BUFFER = 1U << 1,
+};
+
+#if defined(DEBUG)
+
+static const char *smux_ctl_event_str[] = {
+ "SMUX_CONNECTED",
+ "SMUX_DISCONNECTED",
+ "SMUX_READ_DONE",
+ "SMUX_READ_FAIL",
+ "SMUX_WRITE_DONE",
+ "SMUX_WRITE_FAIL",
+ "SMUX_TIOCM_UPDATE",
+ "SMUX_LOW_WM_HIT",
+ "SMUX_HIGH_WM_HIT",
+};
+
+#define SMUXCTL_DUMP_BUFFER(prestr, cnt, buf) \
+do { \
+ if (msm_smux_ctl_debug_mask & MSM_SMUX_CTL_DUMP_BUFFER) { \
+ int i; \
+ pr_err("%s", prestr); \
+ for (i = 0; i < cnt; i++) \
+ pr_err("%.2x", buf[i]); \
+ pr_err("\n"); \
+ } \
+} while (0)
+
+#define SMUXCTL_DBG(x...) \
+do { \
+ if (msm_smux_ctl_debug_mask & MSM_SMUX_CTL_DEBUG) \
+ pr_err(x); \
+} while (0)
+
+
+#else
+#define SMUXCTL_DUMP_BUFFER(prestr, cnt, buf) do {} while (0)
+#define SMUXCTL_DBG(x...) do {} while (0)
+#endif
+
+#if defined(DEBUG_LOOPBACK)
+#define SMUXCTL_SET_LOOPBACK(lcid) \
+ msm_smux_set_ch_option(lcid, SMUX_CH_OPTION_LOCAL_LOOPBACK, 0)
+#else
+#define SMUXCTL_SET_LOOPBACK(lcid) do {} while (0)
+#endif
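+
+/*
+ * Example (illustrative only): with this file built as smux_ctl, the debug
+ * mask can be changed at runtime through the module parameter defined
+ * above, e.g. to enable both event logging and buffer dumps:
+ *
+ *	echo 3 > /sys/module/smux_ctl/parameters/debug_mask
+ */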
+
+static int get_ctl_dev_index(int id)
+{
+ int dev_index;
+ for (dev_index = 0; dev_index < SMUX_CTL_NUM_CHANNELS; dev_index++) {
+ if (smux_ctl_ch_id[dev_index] == id)
+ return dev_index;
+ }
+ return -ENODEV;
+}
+
+static int smux_ctl_get_rx_buf_cb(void *priv, void **pkt_priv,
+ void **buffer, int size)
+{
+ void *buf = NULL;
+ int id = ((struct smux_ctl_dev *)(priv))->id;
+ int dev_index;
+
+ if (id < 0 || id > smux_ctl_ch_id[SMUX_CTL_NUM_CHANNELS - 1])
+ return -ENODEV;
+
+ if (!buffer || 0 >= size)
+ return -EINVAL;
+
+ dev_index = get_ctl_dev_index(id);
+ if (dev_index < 0) {
+ pr_err(SMUX_CTL_MODULE_NAME ": %s: Ch%d is not "
+ "exported to user-space\n",
+ __func__, id);
+ return -ENODEV;
+ }
+
+ SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s: Allocating Rx buf size %d "
+ "for ch%d\n",
+ __func__, size, smux_ctl_devp[dev_index]->id);
+
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf) {
+ pr_err(SMUX_CTL_MODULE_NAME ": %s: buffer allocation failed: "
+ "Ch%d, size %d ", __func__, id, size);
+ return -ENOMEM;
+ }
+
+ *buffer = buf;
+ *pkt_priv = NULL;
+ return 0;
+
+}
+
+void smux_ctl_notify_cb(void *priv, int event_type, const void *metadata)
+{
+ int id = ((struct smux_ctl_dev *)(priv))->id;
+ struct smux_ctl_list_elem *list_elem = NULL;
+ int dev_index;
+ void *data;
+ int len;
+
+ if (id < 0 || id > smux_ctl_ch_id[SMUX_CTL_NUM_CHANNELS - 1])
+ return;
+
+ dev_index = get_ctl_dev_index(id);
+ if (dev_index < 0) {
+ pr_err(SMUX_CTL_MODULE_NAME ": %s: Ch%d is not exported "
+ "to user-space\n", __func__, id);
+ return;
+ }
+
+ SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s: Ch%d, Event %d (%s)\n",
+ __func__, smux_ctl_devp[dev_index]->id,
+ event_type, smux_ctl_event_str[event_type]);
+
+
+ switch (event_type) {
+ case SMUX_CONNECTED:
+ mutex_lock(&smux_ctl_devp[dev_index]->dev_lock);
+ smux_ctl_devp[dev_index]->state = SMUX_CONNECTED;
+ smux_ctl_devp[dev_index]->is_high_wm = 0;
+ smux_ctl_devp[dev_index]->is_channel_reset = 0;
+ smux_ctl_devp[dev_index]->read_avail = 0;
+ mutex_unlock(&smux_ctl_devp[dev_index]->dev_lock);
+ wake_up(&smux_ctl_devp[dev_index]->write_wait_queue);
+ break;
+
+ case SMUX_DISCONNECTED:
+ mutex_lock(&smux_ctl_devp[dev_index]->dev_lock);
+ smux_ctl_devp[dev_index]->state = SMUX_DISCONNECTED;
+ smux_ctl_devp[dev_index]->is_channel_reset =
+ ((struct smux_meta_disconnected *)metadata)->is_ssr;
+ if (smux_ctl_devp[dev_index]->is_channel_reset)
+ smux_ctl_devp[dev_index]->stats.cnt_ssr++;
+ mutex_unlock(&smux_ctl_devp[dev_index]->dev_lock);
+ wake_up(&smux_ctl_devp[dev_index]->write_wait_queue);
+ wake_up(&smux_ctl_devp[dev_index]->read_wait_queue);
+ break;
+
+ case SMUX_READ_FAIL:
+ data = ((struct smux_meta_read *)metadata)->buffer;
+ kfree(data);
+ mutex_lock(&smux_ctl_devp[dev_index]->dev_lock);
+ smux_ctl_devp[dev_index]->stats.cnt_read_fail++;
+ mutex_unlock(&smux_ctl_devp[dev_index]->dev_lock);
+ wake_up(&smux_ctl_devp[dev_index]->read_wait_queue);
+ break;
+
+ case SMUX_READ_DONE:
+ data = ((struct smux_meta_read *)metadata)->buffer;
+ len = ((struct smux_meta_read *)metadata)->len;
+
+ if (data && len > 0) {
+ list_elem = kmalloc(sizeof(struct smux_ctl_list_elem),
+ GFP_KERNEL);
+ if (list_elem) {
+ list_elem->ctl_pkt.data = data;
+ list_elem->ctl_pkt.data_size = len;
+
+ mutex_lock(&smux_ctl_devp[dev_index]->rx_lock);
+ list_add_tail(&list_elem->list,
+ &smux_ctl_devp[dev_index]->rx_list);
+ smux_ctl_devp[dev_index]->read_avail += len;
+ mutex_unlock(
+ &smux_ctl_devp[dev_index]->rx_lock);
+ } else {
+ kfree(data);
+ }
+ }
+
+ wake_up(&smux_ctl_devp[dev_index]->read_wait_queue);
+ break;
+
+ case SMUX_WRITE_DONE:
+ mutex_lock(&smux_ctl_devp[dev_index]->dev_lock);
+ smux_ctl_devp[dev_index]->write_pending = 0;
+ mutex_unlock(&smux_ctl_devp[dev_index]->dev_lock);
+ data = ((struct smux_meta_write *)metadata)->buffer;
+ kfree(data);
+ wake_up(&smux_ctl_devp[dev_index]->write_wait_queue);
+ break;
+
+ case SMUX_WRITE_FAIL:
+ data = ((struct smux_meta_write *)metadata)->buffer;
+ kfree(data);
+ mutex_lock(&smux_ctl_devp[dev_index]->dev_lock);
+ smux_ctl_devp[dev_index]->stats.cnt_write_fail++;
+ mutex_unlock(&smux_ctl_devp[dev_index]->dev_lock);
+ wake_up(&smux_ctl_devp[dev_index]->write_wait_queue);
+ break;
+
+ case SMUX_LOW_WM_HIT:
+ mutex_lock(&smux_ctl_devp[dev_index]->dev_lock);
+ smux_ctl_devp[dev_index]->is_high_wm = 0;
+ mutex_unlock(&smux_ctl_devp[dev_index]->dev_lock);
+ wake_up(&smux_ctl_devp[dev_index]->write_wait_queue);
+ break;
+
+ case SMUX_HIGH_WM_HIT:
+ mutex_lock(&smux_ctl_devp[dev_index]->dev_lock);
+ smux_ctl_devp[dev_index]->is_high_wm = 1;
+ smux_ctl_devp[dev_index]->stats.cnt_high_wm_hit++;
+ mutex_unlock(&smux_ctl_devp[dev_index]->dev_lock);
+ break;
+
+ case SMUX_TIOCM_UPDATE:
+ default:
+ pr_err(SMUX_CTL_MODULE_NAME ": %s: Event %d not supported\n",
+ __func__, event_type);
+ break;
+
+ }
+
+}
+
+int smux_ctl_open(struct inode *inode, struct file *file)
+{
+ int r = 0;
+ struct smux_ctl_dev *devp;
+
+ if (!smux_ctl_inited)
+ return -EIO;
+
+ devp = container_of(inode->i_cdev, struct smux_ctl_dev, cdev);
+ if (!devp)
+ return -ENODEV;
+
+ SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s called on smuxctl%d device\n",
+ __func__, devp->id);
+
+ if (1 == atomic_add_return(1, &devp->ref_count)) {
+
+ SMUXCTL_SET_LOOPBACK(devp->id);
+ r = msm_smux_open(devp->id,
+ devp,
+ smux_ctl_notify_cb,
+ smux_ctl_get_rx_buf_cb);
+ if (r < 0) {
+ pr_err(SMUX_CTL_MODULE_NAME ": %s: smux_open failed "
+ "for smuxctl%d with rc %d\n",
+ __func__, devp->id, r);
+ atomic_dec(&devp->ref_count);
+ return r;
+ }
+
+ r = wait_event_interruptible_timeout(
+ devp->write_wait_queue,
+ (devp->state == SMUX_CONNECTED),
+ (5 * HZ));
+ if (r == 0)
+ r = -ETIMEDOUT;
+
+ if (r < 0) {
+ pr_err(SMUX_CTL_MODULE_NAME ": %s: "
+ "SMUX open timed out: %d, LCID %d\n",
+ __func__, r, devp->id);
+ atomic_dec(&devp->ref_count);
+ msm_smux_close(devp->id);
+ return r;
+
+ } else if (devp->state != SMUX_CONNECTED) {
+ pr_err(SMUX_CTL_MODULE_NAME ": %s: "
+ "Invalid open notification\n", __func__);
+ r = -ENODEV;
+ atomic_dec(&devp->ref_count);
+ msm_smux_close(devp->id);
+ return r;
+ }
+ }
+
+ file->private_data = devp;
+ return 0;
+}
+
+int smux_ctl_release(struct inode *inode, struct file *file)
+{
+ struct smux_ctl_dev *devp;
+ struct smux_ctl_list_elem *list_elem = NULL;
+
+ devp = file->private_data;
+ if (!devp)
+ return -EINVAL;
+
+ SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s called on smuxctl%d device\n",
+ __func__, devp->id);
+
+ mutex_lock(&devp->dev_lock);
+ if (atomic_dec_and_test(&devp->ref_count)) {
+ mutex_lock(&devp->rx_lock);
+ while (!list_empty(&devp->rx_list)) {
+ list_elem = list_first_entry(
+ &devp->rx_list,
+ struct smux_ctl_list_elem,
+ list);
+ list_del(&list_elem->list);
+ kfree(list_elem->ctl_pkt.data);
+ kfree(list_elem);
+ }
+ devp->read_avail = 0;
+ mutex_unlock(&devp->rx_lock);
+ msm_smux_close(devp->id);
+ }
+ mutex_unlock(&devp->dev_lock);
+ file->private_data = NULL;
+
+ return 0;
+}
+
+static int smux_ctl_readable(int id)
+{
+ int r;
+ int dev_index;
+
+ if (id < 0 || id > smux_ctl_ch_id[SMUX_CTL_NUM_CHANNELS - 1])
+ return -ENODEV;
+
+ dev_index = get_ctl_dev_index(id);
+ if (dev_index < 0) {
+ pr_err(SMUX_CTL_MODULE_NAME ": %s: Ch%d "
+ "is not exported to user-space\n",
+ __func__, id);
+ return -ENODEV;
+ }
+
+ mutex_lock(&smux_ctl_devp[dev_index]->dev_lock);
+
+	if (signal_pending(current))
+		r = -ERESTARTSYS;
+	else if (smux_ctl_devp[dev_index]->state == SMUX_DISCONNECTED &&
+			smux_ctl_devp[dev_index]->is_channel_reset != 0)
+		r = -ENETRESET;
+	else if (smux_ctl_devp[dev_index]->state != SMUX_CONNECTED)
+		r = -ENODEV;
+	else
+		r = smux_ctl_devp[dev_index]->read_avail;
+
+ mutex_unlock(&smux_ctl_devp[dev_index]->dev_lock);
+
+ return r;
+
+}
+
+ssize_t smux_ctl_read(struct file *file,
+ char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ int r = 0, id, bytes_to_read, read_err;
+ struct smux_ctl_dev *devp;
+ struct smux_ctl_list_elem *list_elem = NULL;
+
+ devp = file->private_data;
+
+ if (!devp)
+ return -ENODEV;
+
+ SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s: read from ch%d\n",
+ __func__, devp->id);
+
+ id = devp->id;
+ mutex_lock(&devp->rx_lock);
+ while (devp->read_avail <= 0) {
+ mutex_unlock(&devp->rx_lock);
+ r = wait_event_interruptible(devp->read_wait_queue,
+ 0 != (read_err = smux_ctl_readable(id)));
+
+ if (r < 0) {
+ pr_err(SMUX_CTL_MODULE_NAME ": %s:"
+ "wait_event_interruptible "
+ "ret %i\n", __func__, r);
+ return r;
+ }
+
+ if (read_err < 0) {
+ pr_err(SMUX_CTL_MODULE_NAME ": %s:"
+ " Read block failed for Ch%d, err %d\n",
+ __func__, devp->id, read_err);
+ return read_err;
+ }
+
+ mutex_lock(&devp->rx_lock);
+ }
+
+ if (list_empty(&devp->rx_list)) {
+ mutex_unlock(&devp->rx_lock);
+ SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s: "
+ "Nothing in ch%d's rx_list\n", __func__,
+ devp->id);
+ return -EAGAIN;
+ }
+
+ list_elem = list_first_entry(&devp->rx_list,
+ struct smux_ctl_list_elem, list);
+ bytes_to_read = (uint32_t)(list_elem->ctl_pkt.data_size);
+ if (bytes_to_read > count) {
+ mutex_unlock(&devp->rx_lock);
+ pr_err(SMUX_CTL_MODULE_NAME ": %s: "
+			"Packet size %d > buf size %zu\n", __func__,
+			bytes_to_read, count);
+ return -ENOMEM;
+ }
+
+ if (copy_to_user(buf, list_elem->ctl_pkt.data, bytes_to_read)) {
+ mutex_unlock(&devp->rx_lock);
+ pr_err(SMUX_CTL_MODULE_NAME ": %s: "
+ "copy_to_user failed for ch%d\n", __func__,
+ devp->id);
+ return -EFAULT;
+ }
+
+ devp->read_avail -= bytes_to_read;
+ list_del(&list_elem->list);
+ kfree(list_elem->ctl_pkt.data);
+ kfree(list_elem);
+ devp->stats.pkts_rx++;
+ devp->stats.bytes_rx += bytes_to_read;
+ mutex_unlock(&devp->rx_lock);
+
+ SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s: "
+ "Returning %d bytes to ch%d\n", __func__,
+ bytes_to_read, devp->id);
+ return bytes_to_read;
+}
+
+static int smux_ctl_writeable(int id)
+{
+ int r;
+ int dev_index;
+
+ if (id < 0 || id > smux_ctl_ch_id[SMUX_CTL_NUM_CHANNELS - 1])
+ return -ENODEV;
+
+ dev_index = get_ctl_dev_index(id);
+ if (dev_index < 0) {
+ pr_err(SMUX_CTL_MODULE_NAME ": %s: "
+ "Ch%d is not exported to user-space\n",
+ __func__, id);
+ return -ENODEV;
+ }
+
+ mutex_lock(&smux_ctl_devp[dev_index]->dev_lock);
+
+ if (signal_pending(current))
+ r = -ERESTARTSYS;
+ else if (smux_ctl_devp[dev_index]->state == SMUX_DISCONNECTED &&
+ smux_ctl_devp[dev_index]->is_channel_reset != 0)
+ r = -ENETRESET;
+ else if (smux_ctl_devp[dev_index]->state != SMUX_CONNECTED)
+ r = -ENODEV;
+ else if (smux_ctl_devp[dev_index]->is_high_wm ||
+ smux_ctl_devp[dev_index]->write_pending)
+ r = 0;
+ else
+ r = SMUX_CTL_MAX_BUF_SIZE;
+
+ mutex_unlock(&smux_ctl_devp[dev_index]->dev_lock);
+
+ return r;
+
+}
+
+ssize_t smux_ctl_write(struct file *file,
+ const char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ int r = 0, id, write_err;
+ char *temp_buf;
+ struct smux_ctl_dev *devp;
+
+ if (count <= 0)
+ return -EINVAL;
+
+ devp = file->private_data;
+ if (!devp)
+ return -ENODEV;
+
+	SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s: writing %zu bytes on ch%d\n",
+			__func__, count, devp->id);
+
+ id = devp->id;
+ r = wait_event_interruptible(devp->write_wait_queue,
+ 0 != (write_err = smux_ctl_writeable(id)));
+
+ if (r < 0) {
+ pr_err(SMUX_CTL_MODULE_NAME
+ ": %s: wait_event_interruptible "
+ "ret %i\n", __func__, r);
+ return r;
+ }
+
+ if (write_err < 0) {
+ pr_err(SMUX_CTL_MODULE_NAME ": %s:"
+ "Write block failed for Ch%d, err %d\n",
+ __func__, devp->id, write_err);
+ return write_err;
+ }
+
+ temp_buf = kmalloc(count, GFP_KERNEL);
+ if (!temp_buf) {
+ pr_err(SMUX_CTL_MODULE_NAME
+ ": %s: temp_buf alloc failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(temp_buf, buf, count)) {
+ pr_err(SMUX_CTL_MODULE_NAME
+ ": %s: copy_from_user failed\n", __func__);
+ kfree(temp_buf);
+ return -EFAULT;
+ }
+
+ mutex_lock(&devp->dev_lock);
+ devp->write_pending = 1;
+ mutex_unlock(&devp->dev_lock);
+
+ r = msm_smux_write(id, NULL, (void *)temp_buf, count);
+ if (r < 0) {
+ pr_err(SMUX_CTL_MODULE_NAME
+			": %s: smux_write on Ch%d failed, err %d\n",
+ __func__, id, r);
+ mutex_lock(&devp->dev_lock);
+ devp->write_pending = 0;
+ mutex_unlock(&devp->dev_lock);
+ return r;
+ }
+
+ r = wait_event_interruptible(devp->write_wait_queue,
+ 0 != (write_err = smux_ctl_writeable(id)));
+ if (r < 0) {
+ pr_err(SMUX_CTL_MODULE_NAME " :%s: wait_event_interruptible "
+ "ret %i\n", __func__, r);
+ mutex_lock(&devp->dev_lock);
+ devp->write_pending = 0;
+ mutex_unlock(&devp->dev_lock);
+ return r;
+ }
+
+ mutex_lock(&devp->dev_lock);
+ devp->write_pending = 0;
+ devp->stats.pkts_tx++;
+ devp->stats.bytes_tx += count;
+ mutex_unlock(&devp->dev_lock);
+ return count;
+}
+
+static long smux_ctl_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+ struct smux_ctl_dev *devp;
+
+ devp = file->private_data;
+ if (!devp)
+ return -ENODEV;
+
+ SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s called on smuxctl%d device\n",
+ __func__, devp->id);
+
+ switch (cmd) {
+ case TIOCMGET:
+ ret = msm_smux_tiocm_get(devp->id);
+ break;
+ case TIOCMSET:
+ ret = msm_smux_tiocm_set(devp->id, arg, ~arg);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static const struct file_operations smux_ctl_fops = {
+ .owner = THIS_MODULE,
+ .open = smux_ctl_open,
+ .release = smux_ctl_release,
+ .read = smux_ctl_read,
+ .write = smux_ctl_write,
+ .unlocked_ioctl = smux_ctl_ioctl,
+};
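+
+/*
+ * Example (illustrative only): user space exchanges one control packet per
+ * read()/write() call through the /dev/smuxctl<id> nodes created in
+ * smux_ctl_probe(), where <id> is the SMUX logical channel id.  Note that
+ * this driver's TIOCMSET takes the signal mask directly in the ioctl
+ * argument.  The node name below is hypothetical.
+ *
+ *	int fd = open("/dev/smuxctl9", O_RDWR);
+ *	write(fd, req, req_len);		// queue one packet
+ *	len = read(fd, rsp, sizeof(rsp));	// dequeue one packet
+ *	ioctl(fd, TIOCMSET, TIOCM_DTR);		// assert DTR on the channel
+ */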
+
+static int smux_ctl_probe(struct platform_device *pdev)
+{
+ int i;
+ int r;
+
+ SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s Begins\n", __func__);
+
+ for (i = 0; i < SMUX_CTL_NUM_CHANNELS; ++i) {
+ smux_ctl_devp[i] = kzalloc(sizeof(struct smux_ctl_dev),
+ GFP_KERNEL);
+		if (!smux_ctl_devp[i]) {
+			pr_err(SMUX_CTL_MODULE_NAME
+				": %s kzalloc() ENOMEM\n", __func__);
+ r = -ENOMEM;
+ goto error0;
+ }
+
+ smux_ctl_devp[i]->id = smux_ctl_ch_id[i];
+ atomic_set(&smux_ctl_devp[i]->ref_count, 0);
+ smux_ctl_devp[i]->is_high_wm = 0;
+ smux_ctl_devp[i]->write_pending = 0;
+ smux_ctl_devp[i]->is_channel_reset = 0;
+ smux_ctl_devp[i]->state = SMUX_DISCONNECTED;
+ smux_ctl_devp[i]->read_avail = 0;
+
+ smux_ctl_devp[i]->stats.bytes_tx = 0;
+ smux_ctl_devp[i]->stats.bytes_rx = 0;
+ smux_ctl_devp[i]->stats.pkts_tx = 0;
+ smux_ctl_devp[i]->stats.pkts_rx = 0;
+ smux_ctl_devp[i]->stats.cnt_ssr = 0;
+ smux_ctl_devp[i]->stats.cnt_read_fail = 0;
+ smux_ctl_devp[i]->stats.cnt_write_fail = 0;
+ smux_ctl_devp[i]->stats.cnt_high_wm_hit = 0;
+
+ mutex_init(&smux_ctl_devp[i]->dev_lock);
+ init_waitqueue_head(&smux_ctl_devp[i]->read_wait_queue);
+ init_waitqueue_head(&smux_ctl_devp[i]->write_wait_queue);
+ mutex_init(&smux_ctl_devp[i]->rx_lock);
+ INIT_LIST_HEAD(&smux_ctl_devp[i]->rx_list);
+ }
+
+ r = alloc_chrdev_region(&smux_ctl_number, 0, SMUX_CTL_NUM_CHANNELS,
+ DEVICE_NAME);
+ if (IS_ERR_VALUE(r)) {
+ pr_err(SMUX_CTL_MODULE_NAME ": %s: "
+ "alloc_chrdev_region() ret %i.\n",
+ __func__, r);
+ goto error0;
+ }
+
+ smux_ctl_classp = class_create(THIS_MODULE, DEVICE_NAME);
+ if (IS_ERR(smux_ctl_classp)) {
+ pr_err(SMUX_CTL_MODULE_NAME ": %s: "
+ "class_create() ENOMEM\n", __func__);
+ r = -ENOMEM;
+ goto error1;
+ }
+
+ for (i = 0; i < SMUX_CTL_NUM_CHANNELS; ++i) {
+ cdev_init(&smux_ctl_devp[i]->cdev, &smux_ctl_fops);
+ smux_ctl_devp[i]->cdev.owner = THIS_MODULE;
+
+ r = cdev_add(&smux_ctl_devp[i]->cdev, (smux_ctl_number + i), 1);
+
+ if (IS_ERR_VALUE(r)) {
+ pr_err(SMUX_CTL_MODULE_NAME ": %s: "
+ "cdev_add() ret %i\n", __func__, r);
+			kfree(smux_ctl_devp[i]);
+			smux_ctl_devp[i] = NULL;
+ goto error2;
+ }
+
+ smux_ctl_devp[i]->devicep =
+ device_create(smux_ctl_classp, NULL,
+ (smux_ctl_number + i), NULL,
+ DEVICE_NAME "%d", smux_ctl_ch_id[i]);
+
+ if (IS_ERR(smux_ctl_devp[i]->devicep)) {
+ pr_err(SMUX_CTL_MODULE_NAME ": %s: "
+ "device_create() ENOMEM\n", __func__);
+ r = -ENOMEM;
+ cdev_del(&smux_ctl_devp[i]->cdev);
+			kfree(smux_ctl_devp[i]);
+			smux_ctl_devp[i] = NULL;
+ goto error2;
+ }
+ }
+
+ smux_ctl_inited = 1;
+ SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s: "
+ "SMUX Control Port Driver Initialized.\n", __func__);
+ return 0;
+
+error2:
+ while (--i >= 0) {
+ cdev_del(&smux_ctl_devp[i]->cdev);
+ device_destroy(smux_ctl_classp,
+ MKDEV(MAJOR(smux_ctl_number), i));
+ }
+
+ class_destroy(smux_ctl_classp);
+ i = SMUX_CTL_NUM_CHANNELS;
+
+error1:
+ unregister_chrdev_region(MAJOR(smux_ctl_number),
+ SMUX_CTL_NUM_CHANNELS);
+
+error0:
+ while (--i >= 0)
+ kfree(smux_ctl_devp[i]);
+
+ return r;
+}
+
+static int smux_ctl_remove(struct platform_device *pdev)
+{
+ int i;
+
+ SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s Begins\n", __func__);
+
+ for (i = 0; i < SMUX_CTL_NUM_CHANNELS; ++i) {
+ cdev_del(&smux_ctl_devp[i]->cdev);
+ kfree(smux_ctl_devp[i]);
+ device_destroy(smux_ctl_classp,
+ MKDEV(MAJOR(smux_ctl_number), i));
+ }
+ class_destroy(smux_ctl_classp);
+ unregister_chrdev_region(MAJOR(smux_ctl_number),
+ SMUX_CTL_NUM_CHANNELS);
+
+ return 0;
+}
+
+static struct platform_driver smux_ctl_driver = {
+ .probe = smux_ctl_probe,
+ .remove = smux_ctl_remove,
+ .driver = {
+ .name = "SMUX_CTL",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init smux_ctl_init(void)
+{
+ msm_smux_ctl_debug_mask = MSM_SMUX_CTL_DEBUG | MSM_SMUX_CTL_DUMP_BUFFER;
+
+ SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s Begins\n", __func__);
+ return platform_driver_register(&smux_ctl_driver);
+}
+
+
+#if defined(CONFIG_DEBUG_FS)
+
+#define DEBUG_BUFMAX 4096
+static char debug_buffer[DEBUG_BUFMAX];
+
+static ssize_t debug_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int bsize = 0;
+ int i;
+ if (!smux_ctl_inited) {
+ pr_err(SMUX_CTL_MODULE_NAME ": %s: SMUX_CTL not yet inited\n",
+ __func__);
+ return -EIO;
+ }
+
+ bsize += scnprintf(debug_buffer + bsize, DEBUG_BUFMAX - bsize,
+ "SMUX_CTL Channel States:\n");
+
+ for (i = 0; i < SMUX_CTL_NUM_CHANNELS; ++i) {
+ bsize += scnprintf(debug_buffer + bsize, DEBUG_BUFMAX - bsize,
+ "Ch%02d %s RefCnt=%01d State=%02d "
+ "SSR=%02d HighWM=%02d ReadAvail=%04d WritePending=%02d\n",
+ smux_ctl_devp[i]->id,
+ smux_ctl_devp[i]->name,
+ atomic_read(&smux_ctl_devp[i]->ref_count),
+ smux_ctl_devp[i]->state,
+ smux_ctl_devp[i]->is_channel_reset,
+ smux_ctl_devp[i]->is_high_wm,
+ smux_ctl_devp[i]->read_avail,
+ smux_ctl_devp[i]->write_pending);
+ }
+
+ bsize += scnprintf(debug_buffer + bsize, DEBUG_BUFMAX - bsize,
+ "\nSMUX_CTL Channel Statistics:\n");
+ for (i = 0; i < SMUX_CTL_NUM_CHANNELS; ++i) {
+ bsize += scnprintf(debug_buffer + bsize, DEBUG_BUFMAX - bsize,
+			"Ch%02d %s BytesTx=%08d "
+			"BytesRx=%08d PktsTx=%04d PktsRx=%04d "
+			"CntSSR=%02d CntHighWM=%02d "
+			"CntReadFail=%02d CntWriteFail=%02d\n",
+ smux_ctl_devp[i]->id,
+ smux_ctl_devp[i]->name,
+ smux_ctl_devp[i]->stats.bytes_tx,
+ smux_ctl_devp[i]->stats.bytes_rx,
+ smux_ctl_devp[i]->stats.pkts_tx,
+ smux_ctl_devp[i]->stats.pkts_rx,
+ smux_ctl_devp[i]->stats.cnt_ssr,
+ smux_ctl_devp[i]->stats.cnt_high_wm_hit,
+ smux_ctl_devp[i]->stats.cnt_read_fail,
+ smux_ctl_devp[i]->stats.cnt_write_fail);
+ }
+
+ return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static const struct file_operations debug_ops = {
+ .read = debug_read,
+ .open = debug_open,
+};
+
+static int __init smux_debugfs_init(void)
+{
+ struct dentry *dent;
+
+ dent = debugfs_create_dir("smux_ctl", 0);
+ if (!IS_ERR(dent))
+ debugfs_create_file("smux_ctl_state", 0444, dent,
+ NULL, &debug_ops);
+
+ return 0;
+}
+
+late_initcall(smux_debugfs_init);
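+
+/*
+ * Example (illustrative only): once debugfs is mounted, channel state and
+ * statistics can be inspected from the shell:
+ *
+ *	mount -t debugfs none /sys/kernel/debug
+ *	cat /sys/kernel/debug/smux_ctl/smux_ctl_state
+ */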
+#endif
+
+module_init(smux_ctl_init);
+MODULE_DESCRIPTION("MSM SMUX Control Port");
+MODULE_LICENSE("GPL v2");
+
+
diff --git a/drivers/tty/smux_loopback.c b/drivers/tty/smux_loopback.c
new file mode 100644
index 0000000..52ce17f
--- /dev/null
+++ b/drivers/tty/smux_loopback.c
@@ -0,0 +1,289 @@
+/* drivers/tty/smux_loopback.c
+ *
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/workqueue.h>
+#include <linux/kfifo.h>
+#include <linux/slab.h>
+#include <linux/smux.h>
+#include "smux_private.h"
+
+#define SMUX_LOOP_FIFO_SIZE 128
+
+static void smux_loopback_rx_worker(struct work_struct *work);
+static struct workqueue_struct *smux_loopback_wq;
+static DECLARE_WORK(smux_loopback_work, smux_loopback_rx_worker);
+static struct kfifo smux_loop_pkt_fifo;
+static DEFINE_SPINLOCK(hw_fn_lock);
+
+/**
+ * Initialize loopback framework (called by n_smux.c).
+ */
+int smux_loopback_init(void)
+{
+ int ret = 0;
+
+ spin_lock_init(&hw_fn_lock);
+ smux_loopback_wq = create_singlethread_workqueue("smux_loopback_wq");
+	if (!smux_loopback_wq) {
+ pr_err("%s: failed to create workqueue\n", __func__);
+ return -ENOMEM;
+ }
+
+ ret |= kfifo_alloc(&smux_loop_pkt_fifo,
+ SMUX_LOOP_FIFO_SIZE * sizeof(struct smux_pkt_t *),
+ GFP_KERNEL);
+
+ return ret;
+}
+
+/**
+ * Simulate a write to the TTY hardware by duplicating
+ * the TX packet and putting it into the RX queue.
+ *
+ * @pkt_ptr Packet to write
+ *
+ * @returns 0 on success
+ */
+int smux_tx_loopback(struct smux_pkt_t *pkt_ptr)
+{
+ struct smux_pkt_t *send_pkt;
+ unsigned long flags;
+ int i;
+ int ret;
+
+ /* duplicate packet */
+ send_pkt = smux_alloc_pkt();
+ send_pkt->hdr = pkt_ptr->hdr;
+ if (pkt_ptr->hdr.payload_len) {
+ ret = smux_alloc_pkt_payload(send_pkt);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ memcpy(send_pkt->payload, pkt_ptr->payload,
+ pkt_ptr->hdr.payload_len);
+ }
+
+ /* queue duplicate as pseudo-RX data */
+ spin_lock_irqsave(&hw_fn_lock, flags);
+ i = kfifo_avail(&smux_loop_pkt_fifo);
+ if (i < sizeof(struct smux_pkt_t *)) {
+ pr_err("%s: no space in fifo\n", __func__);
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ i = kfifo_in(&smux_loop_pkt_fifo,
+ &send_pkt,
+ sizeof(struct smux_pkt_t *));
+	if (i != sizeof(struct smux_pkt_t *)) {
+ pr_err("%s: fifo error\n", __func__);
+ ret = -ENOMEM;
+ goto unlock;
+ }
+ queue_work(smux_loopback_wq, &smux_loopback_work);
+ ret = 0;
+
+unlock:
+ spin_unlock_irqrestore(&hw_fn_lock, flags);
+out:
+ return ret;
+}
+
+/**
+ * Receive loopback byte processor.
+ *
+ * @pkt Incoming packet
+ */
+static void smux_loopback_rx_byte(struct smux_pkt_t *pkt)
+{
+ static int simulated_retry_cnt;
+ const char ack = SMUX_WAKEUP_ACK;
+
+ switch (pkt->hdr.flags) {
+ case SMUX_WAKEUP_REQ:
+ /* reply with ACK after appropriate delays */
+ ++simulated_retry_cnt;
+ if (simulated_retry_cnt >= smux_simulate_wakeup_delay) {
+ pr_err("%s: completed %d of %d\n",
+ __func__, simulated_retry_cnt,
+ smux_simulate_wakeup_delay);
+ pr_err("%s: simulated wakeup\n", __func__);
+ simulated_retry_cnt = 0;
+ smux_rx_state_machine(&ack, 1, 0);
+ } else {
+ /* force retry */
+ pr_err("%s: dropping wakeup request %d of %d\n",
+ __func__, simulated_retry_cnt,
+ smux_simulate_wakeup_delay);
+ }
+ break;
+ case SMUX_WAKEUP_ACK:
+ /* this shouldn't happen since we don't send requests */
+ pr_err("%s: wakeup ACK unexpected\n", __func__);
+ break;
+
+ default:
+ /* invalid character */
+ pr_err("%s: invalid character 0x%x\n",
+ __func__, (unsigned)pkt->hdr.flags);
+ break;
+ }
+}
+
+/**
+ * Simulated remote hardware used for local loopback testing.
+ *
+ * @work Not used
+ */
+static void smux_loopback_rx_worker(struct work_struct *work)
+{
+ struct smux_pkt_t *pkt;
+ struct smux_pkt_t reply_pkt;
+ char *data;
+	unsigned int len;
+ int lcid;
+ int i;
+ unsigned long flags;
+
+	data = kzalloc(SMUX_MAX_PKT_SIZE, GFP_ATOMIC);
+	if (!data) {
+		pr_err("%s: unable to allocate rx buffer\n", __func__);
+		return;
+	}
+
+ spin_lock_irqsave(&hw_fn_lock, flags);
+ while (kfifo_len(&smux_loop_pkt_fifo) >= sizeof(struct smux_pkt_t *)) {
+ i = kfifo_out(&smux_loop_pkt_fifo, &pkt,
+ sizeof(struct smux_pkt_t *));
+ spin_unlock_irqrestore(&hw_fn_lock, flags);
+
+		if (pkt->hdr.magic != SMUX_MAGIC) {
+			pr_err("%s: invalid magic %x\n", __func__,
+					pkt->hdr.magic);
+			smux_free_pkt(pkt);
+			kfree(data);
+			return;
+		}
+
+		lcid = pkt->hdr.lcid;
+		if (smux_assert_lch_id(lcid)) {
+			pr_err("%s: invalid channel id %d\n", __func__, lcid);
+			smux_free_pkt(pkt);
+			kfree(data);
+			return;
+		}
+
+ switch (pkt->hdr.cmd) {
+ case SMUX_CMD_OPEN_LCH:
+ if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
+ break;
+
+ /* Reply with Open ACK */
+ smux_init_pkt(&reply_pkt);
+ reply_pkt.hdr.lcid = lcid;
+ reply_pkt.hdr.cmd = SMUX_CMD_OPEN_LCH;
+ reply_pkt.hdr.flags = SMUX_CMD_OPEN_ACK
+ | SMUX_CMD_OPEN_POWER_COLLAPSE;
+ reply_pkt.hdr.payload_len = 0;
+ reply_pkt.hdr.pad_len = 0;
+ smux_serialize(&reply_pkt, data, &len);
+ smux_rx_state_machine(data, len, 0);
+
+ /* Send Remote Open */
+ smux_init_pkt(&reply_pkt);
+ reply_pkt.hdr.lcid = lcid;
+ reply_pkt.hdr.cmd = SMUX_CMD_OPEN_LCH;
+ reply_pkt.hdr.flags = SMUX_CMD_OPEN_POWER_COLLAPSE;
+ reply_pkt.hdr.payload_len = 0;
+ reply_pkt.hdr.pad_len = 0;
+ smux_serialize(&reply_pkt, data, &len);
+ smux_rx_state_machine(data, len, 0);
+ break;
+
+ case SMUX_CMD_CLOSE_LCH:
+ if (pkt->hdr.flags == SMUX_CMD_CLOSE_ACK)
+ break;
+
+ /* Reply with Close ACK */
+ smux_init_pkt(&reply_pkt);
+ reply_pkt.hdr.lcid = lcid;
+ reply_pkt.hdr.cmd = SMUX_CMD_CLOSE_LCH;
+ reply_pkt.hdr.flags = SMUX_CMD_CLOSE_ACK;
+ reply_pkt.hdr.payload_len = 0;
+ reply_pkt.hdr.pad_len = 0;
+ smux_serialize(&reply_pkt, data, &len);
+ smux_rx_state_machine(data, len, 0);
+
+ /* Send Remote Close */
+ smux_init_pkt(&reply_pkt);
+ reply_pkt.hdr.lcid = lcid;
+ reply_pkt.hdr.cmd = SMUX_CMD_CLOSE_LCH;
+ reply_pkt.hdr.flags = 0;
+ reply_pkt.hdr.payload_len = 0;
+ reply_pkt.hdr.pad_len = 0;
+ smux_serialize(&reply_pkt, data, &len);
+ smux_rx_state_machine(data, len, 0);
+ break;
+
+ case SMUX_CMD_DATA:
+ /* Echo back received data */
+ smux_init_pkt(&reply_pkt);
+ reply_pkt.hdr.lcid = lcid;
+ reply_pkt.hdr.cmd = SMUX_CMD_DATA;
+ reply_pkt.hdr.flags = 0;
+ reply_pkt.hdr.payload_len = pkt->hdr.payload_len;
+ reply_pkt.payload = pkt->payload;
+ reply_pkt.hdr.pad_len = pkt->hdr.pad_len;
+ smux_serialize(&reply_pkt, data, &len);
+ smux_rx_state_machine(data, len, 0);
+ break;
+
+ case SMUX_CMD_STATUS:
+ /* Echo back received status */
+ smux_init_pkt(&reply_pkt);
+ reply_pkt.hdr.lcid = lcid;
+ reply_pkt.hdr.cmd = SMUX_CMD_STATUS;
+ reply_pkt.hdr.flags = pkt->hdr.flags;
+ reply_pkt.hdr.payload_len = 0;
+ reply_pkt.payload = NULL;
+ reply_pkt.hdr.pad_len = pkt->hdr.pad_len;
+ smux_serialize(&reply_pkt, data, &len);
+ smux_rx_state_machine(data, len, 0);
+ break;
+
+ case SMUX_CMD_PWR_CTL:
+ /* reply with ack */
+ smux_init_pkt(&reply_pkt);
+ reply_pkt.hdr.lcid = lcid;
+ reply_pkt.hdr.cmd = SMUX_CMD_PWR_CTL;
+ reply_pkt.hdr.flags = SMUX_CMD_PWR_CTL_SLEEP_REQ
+ | SMUX_CMD_PWR_CTL_ACK;
+ reply_pkt.hdr.payload_len = 0;
+ reply_pkt.payload = NULL;
+ reply_pkt.hdr.pad_len = pkt->hdr.pad_len;
+ smux_serialize(&reply_pkt, data, &len);
+ smux_rx_state_machine(data, len, 0);
+ break;
+
+ case SMUX_CMD_BYTE:
+ smux_loopback_rx_byte(pkt);
+ break;
+
+ default:
+ pr_err("%s: unknown command %d\n",
+ __func__, pkt->hdr.cmd);
+ break;
+		}
+
+ smux_free_pkt(pkt);
+ spin_lock_irqsave(&hw_fn_lock, flags);
+ }
+ spin_unlock_irqrestore(&hw_fn_lock, flags);
+ kfree(data);
+}
diff --git a/drivers/tty/smux_loopback.h b/drivers/tty/smux_loopback.h
new file mode 100644
index 0000000..85c6c23
--- /dev/null
+++ b/drivers/tty/smux_loopback.h
@@ -0,0 +1,39 @@
+/* drivers/tty/smux_loopback.h
+ *
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef SMUX_LOOPBACK_H
+#define SMUX_LOOPBACK_H
+
+#include "smux_private.h"
+
+#ifdef CONFIG_N_SMUX_LOOPBACK
+
+int smux_loopback_init(void);
+int smux_tx_loopback(struct smux_pkt_t *pkt_ptr);
+
+#else
+static inline int smux_loopback_init(void)
+{
+ return 0;
+}
+
+static inline int smux_tx_loopback(struct smux_pkt_t *pkt_ptr)
+{
+ return -ENODEV;
+}
+
+
+#endif /* CONFIG_N_SMUX_LOOPBACK */
+#endif /* SMUX_LOOPBACK_H */
+
diff --git a/drivers/tty/smux_private.h b/drivers/tty/smux_private.h
new file mode 100644
index 0000000..5ce8fb8
--- /dev/null
+++ b/drivers/tty/smux_private.h
@@ -0,0 +1,115 @@
+/* drivers/tty/smux_private.h
+ *
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef SMUX_PRIVATE_H
+#define SMUX_PRIVATE_H
+
+#define SMUX_MAX_PKT_SIZE 8192
+
+/* SMUX Protocol Characters */
+#define SMUX_MAGIC 0x33FC
+#define SMUX_MAGIC_WORD1 0xFC
+#define SMUX_MAGIC_WORD2 0x33
+#define SMUX_WAKEUP_REQ 0xFD
+#define SMUX_WAKEUP_ACK 0xFE
+
+/* Unit testing characters */
+#define SMUX_UT_ECHO_REQ 0xF0
+#define SMUX_UT_ECHO_ACK_OK 0xF1
+#define SMUX_UT_ECHO_ACK_FAIL 0xF2
+
+struct tty_struct;
+
+/* Packet header. */
+struct smux_hdr_t {
+ uint16_t magic;
+ uint8_t flags;
+ uint8_t cmd;
+ uint8_t pad_len;
+ uint8_t lcid;
+ uint16_t payload_len;
+};
+
+/* Internal packet structure. */
+struct smux_pkt_t {
+ struct smux_hdr_t hdr;
+ int allocated;
+ unsigned char *payload;
+ int free_payload;
+ struct list_head list;
+ void *priv;
+};
+
+/* SMUX Packet Commands */
+enum {
+ SMUX_CMD_DATA = 0x0,
+ SMUX_CMD_OPEN_LCH = 0x1,
+ SMUX_CMD_CLOSE_LCH = 0x2,
+ SMUX_CMD_STATUS = 0x3,
+ SMUX_CMD_PWR_CTL = 0x4,
+
+ SMUX_CMD_BYTE, /* for internal usage */
+ SMUX_NUM_COMMANDS
+};
+
+/* Open command flags */
+enum {
+ SMUX_CMD_OPEN_ACK = 1 << 0,
+ SMUX_CMD_OPEN_POWER_COLLAPSE = 1 << 1,
+ SMUX_CMD_OPEN_REMOTE_LOOPBACK = 1 << 2,
+};
+
+/* Close command flags */
+enum {
+ SMUX_CMD_CLOSE_ACK = 1 << 0,
+};
+
+/* Power command flags */
+enum {
+ SMUX_CMD_PWR_CTL_ACK = 1 << 0,
+ SMUX_CMD_PWR_CTL_SLEEP_REQ = 1 << 1,
+};
+
+/* Local logical channel states */
+enum {
+ SMUX_LCH_LOCAL_CLOSED,
+ SMUX_LCH_LOCAL_OPENING,
+ SMUX_LCH_LOCAL_OPENED,
+ SMUX_LCH_LOCAL_CLOSING,
+};
+
+/* Remote logical channel states */
+enum {
+ SMUX_LCH_REMOTE_CLOSED,
+ SMUX_LCH_REMOTE_OPENED,
+};
+
+
+int smux_assert_lch_id(uint32_t lcid);
+void smux_init_pkt(struct smux_pkt_t *pkt);
+struct smux_pkt_t *smux_alloc_pkt(void);
+int smux_alloc_pkt_payload(struct smux_pkt_t *pkt);
+void smux_free_pkt(struct smux_pkt_t *pkt);
+int smux_serialize(struct smux_pkt_t *pkt, char *out,
+ unsigned int *out_len);
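+
+/*
+ * Example (illustrative only): building and serializing a minimal OPEN
+ * command with the helpers above, mirroring what smux_loopback.c does:
+ *
+ *	struct smux_pkt_t pkt;
+ *	char out[SMUX_MAX_PKT_SIZE];
+ *	unsigned int out_len;
+ *
+ *	smux_init_pkt(&pkt);
+ *	pkt.hdr.lcid = lcid;
+ *	pkt.hdr.cmd = SMUX_CMD_OPEN_LCH;
+ *	pkt.hdr.flags = SMUX_CMD_OPEN_POWER_COLLAPSE;
+ *	pkt.hdr.payload_len = 0;
+ *	pkt.hdr.pad_len = 0;
+ *	smux_serialize(&pkt, out, &out_len);
+ */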
+
+void smux_rx_state_machine(const unsigned char *data, int len, int flag);
+void smuxld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count);
+
+/* testing parameters */
+extern int smux_byte_loopback;
+extern int smux_simulate_wakeup_delay;
+
+#endif /* SMUX_PRIVATE_H */
diff --git a/drivers/tty/smux_test.c b/drivers/tty/smux_test.c
new file mode 100644
index 0000000..242c66e
--- /dev/null
+++ b/drivers/tty/smux_test.c
@@ -0,0 +1,1222 @@
+/* drivers/tty/smux_test.c
+ *
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/termios.h>
+#include <linux/smux.h>
+#include "smux_private.h"
+
+#define DEBUG_BUFMAX 4096
+
+/**
+ * Unit test assertion for logging test cases.
+ *
+ * @a lval
+ * @b rval
+ * @cmp comparison operator
+ *
+ * If the assertion (@a cmp @b) does not hold, the failure is logged with
+ * the function and line number where it occurred along with the values
+ * of @a and @b, and the enclosing loop is exited via "break".
+ *
+ * Assumes that the following local variables exist:
+ * @buf - buffer to write failure message to
+ * @i - number of bytes written to buffer
+ * @max - maximum size of the buffer
+ * @failed - set to true if test fails
+ */
+#define UT_ASSERT_INT(a, cmp, b) \
+ if (!((a)cmp(b))) { \
+ i += scnprintf(buf + i, max - i, \
+ "%s:%d Fail: " #a "(%d) " #cmp " " #b "(%d)\n", \
+ __func__, __LINE__, \
+ a, b); \
+ failed = 1; \
+ break; \
+ } \
+ do {} while (0)
+
+#define UT_ASSERT_PTR(a, cmp, b) \
+ if (!((a)cmp(b))) { \
+ i += scnprintf(buf + i, max - i, \
+ "%s:%d Fail: " #a "(%p) " #cmp " " #b "(%p)\n", \
+ __func__, __LINE__, \
+ a, b); \
+ failed = 1; \
+ break; \
+ } \
+ do {} while (0)
+
+#define UT_ASSERT_UINT(a, cmp, b) \
+ if (!((a)cmp(b))) { \
+ i += scnprintf(buf + i, max - i, \
+ "%s:%d Fail: " #a "(%u) " #cmp " " #b "(%u)\n", \
+ __func__, __LINE__, \
+ a, b); \
+ failed = 1; \
+ break; \
+ } \
+ do {} while (0)
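+
+/*
+ * Example (illustrative only): the assert macros abort a test case via
+ * "break", so each test wraps its body in a run-once loop and declares
+ * the locals the macros expect (buf, i, max, failed):
+ *
+ *	while (!failed) {
+ *		ret = msm_smux_open(SMUX_TEST_LCID, priv, cb, get_rx_buffer);
+ *		UT_ASSERT_INT(ret, ==, 0);
+ *		...
+ *		break;
+ *	}
+ */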
+
+static unsigned char test_array[] = {1, 1, 2, 3, 5, 8, 13, 21, 34, 55,
+ 89, 144, 233};
+
+/* Used for mapping local to remote TIOCM signals */
+struct tiocm_test_vector {
+ uint32_t input;
+ uint32_t set_old;
+ uint32_t set_new;
+ uint32_t clr_old;
+};
+
+/**
+ * Allocates a new buffer for SMUX for every call.
+ */
+int get_rx_buffer(void *priv, void **pkt_priv, void **buffer, int size)
+{
+ void *rx_buf;
+
+	rx_buf = kmalloc(size, GFP_ATOMIC);
+	if (!rx_buf)
+		return -ENOMEM;
+
+	*pkt_priv = (void *)0x1234;
+	*buffer = rx_buf;
+
+ return 0;
+}
+
+/* Test vector for packet tests. */
+struct test_vector {
+ const char *data;
+ const unsigned len;
+};
+
+/* Mock object metadata for SMUX_READ_DONE event */
+struct mock_read_event {
+ struct list_head list;
+ struct smux_meta_read meta;
+};
+
+/* Mock object metadata for SMUX_WRITE_DONE event */
+struct mock_write_event {
+ struct list_head list;
+ struct smux_meta_write meta;
+};
+
+/* Mock object for all SMUX callback events */
+struct smux_mock_callback {
+ int cb_count;
+ struct completion cb_completion;
+ spinlock_t lock;
+
+ /* status changes */
+ int event_connected;
+ int event_disconnected;
+ int event_disconnected_ssr;
+ int event_low_wm;
+ int event_high_wm;
+
+ /* TIOCM changes */
+ int event_tiocm;
+ struct smux_meta_tiocm tiocm_meta;
+
+ /* read event data */
+ int event_read_done;
+ int event_read_failed;
+ struct list_head read_events;
+
+ /* write event data */
+ int event_write_done;
+ int event_write_failed;
+ struct list_head write_events;
+};
+
+/**
+ * Initialize mock callback data. Only call once.
+ *
+ * @cb Mock callback data
+ */
+void mock_cb_data_init(struct smux_mock_callback *cb)
+{
+ init_completion(&cb->cb_completion);
+ spin_lock_init(&cb->lock);
+ INIT_LIST_HEAD(&cb->read_events);
+ INIT_LIST_HEAD(&cb->write_events);
+}
+
+/**
+ * Reset mock callback data to default values.
+ *
+ * @cb Mock callback data
+ *
+ * All packets are freed and counters reset to zero.
+ */
+void mock_cb_data_reset(struct smux_mock_callback *cb)
+{
+ cb->cb_count = 0;
+ INIT_COMPLETION(cb->cb_completion);
+ cb->event_connected = 0;
+ cb->event_disconnected = 0;
+ cb->event_disconnected_ssr = 0;
+ cb->event_low_wm = 0;
+ cb->event_high_wm = 0;
+ cb->event_tiocm = 0;
+ cb->tiocm_meta.tiocm_old = 0;
+ cb->tiocm_meta.tiocm_new = 0;
+
+ cb->event_read_done = 0;
+ cb->event_read_failed = 0;
+ while (!list_empty(&cb->read_events)) {
+ struct mock_read_event *meta;
+ meta = list_first_entry(&cb->read_events,
+ struct mock_read_event,
+ list);
+ kfree(meta->meta.buffer);
+ list_del(&meta->list);
+ kfree(meta);
+ }
+
+ cb->event_write_done = 0;
+ cb->event_write_failed = 0;
+ while (!list_empty(&cb->write_events)) {
+ struct mock_write_event *meta;
+ meta = list_first_entry(&cb->write_events,
+ struct mock_write_event,
+ list);
+ list_del(&meta->list);
+ kfree(meta);
+ }
+}
+
+/**
+ * Dump the values of the mock callback data for debug purposes.
+ *
+ * @cb Mock callback data
+ * @buf Print buffer
+ * @max Maximum number of characters to print
+ *
+ * @returns Number of characters added to buffer
+ */
+static int mock_cb_data_print(const struct smux_mock_callback *cb,
+ char *buf, int max)
+{
+ int i = 0;
+
+ i += scnprintf(buf + i, max - i,
+ "\tcb_count=%d\n"
+ "\tcb_completion.done=%d\n"
+ "\tevent_connected=%d\n"
+ "\tevent_disconnected=%d\n"
+ "\tevent_disconnected_ssr=%d\n"
+ "\tevent_low_wm=%d\n"
+ "\tevent_high_wm=%d\n"
+ "\tevent_tiocm=%d\n"
+ "\tevent_read_done=%d\n"
+ "\tevent_read_failed=%d\n"
+ "\tread_events=%d\n"
+ "\tevent_write_done=%d\n"
+ "\tevent_write_failed=%d\n"
+ "\twrite_events=%d\n",
+ cb->cb_count,
+ cb->cb_completion.done,
+ cb->event_connected,
+ cb->event_disconnected,
+ cb->event_disconnected_ssr,
+ cb->event_low_wm,
+ cb->event_high_wm,
+ cb->event_tiocm,
+ cb->event_read_done,
+ cb->event_read_failed,
+ !list_empty(&cb->read_events),
+ cb->event_write_done,
+ cb->event_write_failed,
+		!list_empty(&cb->write_events)
+ );
+
+ return i;
+}
+
+/**
+ * Mock object event callback.  Logs events for analysis in the unit
+ * tests.
+ */
+void smux_mock_cb(void *priv, int event, const void *metadata)
+{
+ struct smux_mock_callback *cb_data_ptr;
+ struct mock_write_event *write_event_meta;
+ struct mock_read_event *read_event_meta;
+ unsigned long flags;
+
+ cb_data_ptr = (struct smux_mock_callback *)priv;
+ if (cb_data_ptr == NULL) {
+ pr_err("%s: invalid private data\n", __func__);
+ return;
+ }
+
+ spin_lock_irqsave(&cb_data_ptr->lock, flags);
+ switch (event) {
+ case SMUX_CONNECTED:
+ ++cb_data_ptr->event_connected;
+ break;
+
+ case SMUX_DISCONNECTED:
+ ++cb_data_ptr->event_disconnected;
+ cb_data_ptr->event_disconnected_ssr =
+ ((struct smux_meta_disconnected *)metadata)->is_ssr;
+ break;
+
+ case SMUX_READ_DONE:
+ ++cb_data_ptr->event_read_done;
+ read_event_meta = kmalloc(sizeof(struct mock_read_event),
+ GFP_ATOMIC);
+ if (read_event_meta) {
+ read_event_meta->meta =
+ *(struct smux_meta_read *)metadata;
+ list_add_tail(&read_event_meta->list,
+ &cb_data_ptr->read_events);
+ }
+ break;
+
+ case SMUX_READ_FAIL:
+ ++cb_data_ptr->event_read_failed;
+ read_event_meta = kmalloc(sizeof(struct mock_read_event),
+ GFP_ATOMIC);
+ if (read_event_meta) {
+ read_event_meta->meta =
+ *(struct smux_meta_read *)metadata;
+ list_add_tail(&read_event_meta->list,
+ &cb_data_ptr->read_events);
+ }
+ break;
+
+ case SMUX_WRITE_DONE:
+ ++cb_data_ptr->event_write_done;
+ write_event_meta = kmalloc(sizeof(struct mock_write_event),
+ GFP_ATOMIC);
+ if (write_event_meta) {
+ write_event_meta->meta =
+ *(struct smux_meta_write *)metadata;
+ list_add_tail(&write_event_meta->list,
+ &cb_data_ptr->write_events);
+ }
+ break;
+
+ case SMUX_WRITE_FAIL:
+ ++cb_data_ptr->event_write_failed;
+ write_event_meta = kmalloc(sizeof(struct mock_write_event),
+ GFP_ATOMIC);
+ if (write_event_meta) {
+ write_event_meta->meta =
+ *(struct smux_meta_write *)metadata;
+ list_add_tail(&write_event_meta->list,
+ &cb_data_ptr->write_events);
+ }
+ break;
+
+ case SMUX_LOW_WM_HIT:
+ ++cb_data_ptr->event_low_wm;
+ break;
+
+ case SMUX_HIGH_WM_HIT:
+ ++cb_data_ptr->event_high_wm;
+ break;
+
+ case SMUX_TIOCM_UPDATE:
+ ++cb_data_ptr->event_tiocm;
+ cb_data_ptr->tiocm_meta = *(struct smux_meta_tiocm *)metadata;
+ break;
+
+ default:
+ pr_err("%s: unknown event %d\n", __func__, event);
+	}
+
+ ++cb_data_ptr->cb_count;
+ complete(&cb_data_ptr->cb_completion);
+ spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
+}
+
+/**
+ * Test Read/write usage.
+ *
+ * @buf Output buffer for failure/status messages
+ * @max Size of @buf
+ * @vectors Test vector data (must end with NULL item)
+ * @name Name of the test case for failure messages
+ *
+ * Perform a sanity test consisting of opening a port, writing test packet(s),
+ * reading the response(s), and closing the port.
+ *
+ * The port should already be configured to use either local or remote
+ * loopback.
+ */
+static int smux_ut_basic_core(char *buf, int max,
+ const struct test_vector *vectors,
+ const char *name)
+{
+ int i = 0;
+ int failed = 0;
+ static struct smux_mock_callback cb_data;
+ static int cb_initialized;
+ int ret;
+
+ if (!cb_initialized)
+ mock_cb_data_init(&cb_data);
+
+ mock_cb_data_reset(&cb_data);
+ while (!failed) {
+ struct mock_write_event *write_event;
+ struct mock_read_event *read_event;
+
+ /* open port */
+ ret = msm_smux_open(SMUX_TEST_LCID, &cb_data, smux_mock_cb,
+ get_rx_buffer);
+ UT_ASSERT_INT(ret, ==, 0);
+ UT_ASSERT_INT(
+ (int)wait_for_completion_timeout(
+ &cb_data.cb_completion, HZ), >, 0);
+ UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+ UT_ASSERT_INT(cb_data.event_connected, ==, 1);
+ mock_cb_data_reset(&cb_data);
+
+ /* write, read, and verify the test vector data */
+ for (; vectors->data != NULL; ++vectors) {
+ const char *test_data = vectors->data;
+ const unsigned test_len = vectors->len;
+
+ i += scnprintf(buf + i, max - i,
+ "Writing vector %p len %d\n",
+ test_data, test_len);
+
+ /* write data */
+			ret = msm_smux_write(SMUX_TEST_LCID, (void *)0xCAFEFACE,
+ test_data, test_len);
+ UT_ASSERT_INT(ret, ==, 0);
+ UT_ASSERT_INT(
+ (int)wait_for_completion_timeout(
+ &cb_data.cb_completion, HZ), >, 0);
+
+ /* wait for write and echo'd read to complete */
+ INIT_COMPLETION(cb_data.cb_completion);
+ if (cb_data.cb_count < 2)
+ UT_ASSERT_INT(
+ (int)wait_for_completion_timeout(
+ &cb_data.cb_completion, HZ),
+ >, 0);
+
+ UT_ASSERT_INT(cb_data.cb_count, >=, 1);
+ UT_ASSERT_INT(cb_data.event_write_done, ==, 1);
+ UT_ASSERT_INT(list_empty(&cb_data.write_events), ==, 0);
+
+ write_event = list_first_entry(&cb_data.write_events,
+ struct mock_write_event, list);
+ UT_ASSERT_PTR(write_event->meta.pkt_priv, ==,
+ (void *)0xCAFEFACE);
+ UT_ASSERT_PTR(write_event->meta.buffer, ==,
+ (void *)test_data);
+ UT_ASSERT_INT(write_event->meta.len, ==, test_len);
+
+ /* verify read event */
+ UT_ASSERT_INT(cb_data.event_read_done, ==, 1);
+ UT_ASSERT_INT(list_empty(&cb_data.read_events), ==, 0);
+ read_event = list_first_entry(&cb_data.read_events,
+ struct mock_read_event, list);
+ UT_ASSERT_PTR(read_event->meta.pkt_priv, ==,
+ (void *)0x1234);
+ UT_ASSERT_PTR(read_event->meta.buffer, !=, NULL);
+
+ if (read_event->meta.len != test_len ||
+ memcmp(read_event->meta.buffer,
+ test_data, test_len)) {
+ /* data mismatch */
+ char linebuff[80];
+
+ hex_dump_to_buffer(test_data, test_len,
+ 16, 1, linebuff, sizeof(linebuff), 1);
+ i += scnprintf(buf + i, max - i,
+ "Expected:\n%s\n\n", linebuff);
+
+ hex_dump_to_buffer(read_event->meta.buffer,
+ read_event->meta.len,
+ 16, 1, linebuff, sizeof(linebuff), 1);
+ i += scnprintf(buf + i, max - i,
+ "Actual:\n%s\n", linebuff);
+ failed = 1;
+ break;
+ }
+ mock_cb_data_reset(&cb_data);
+ }
+
+ /* close port */
+ ret = msm_smux_close(SMUX_TEST_LCID);
+ UT_ASSERT_INT(ret, ==, 0);
+ UT_ASSERT_INT(
+ (int)wait_for_completion_timeout(
+ &cb_data.cb_completion, HZ),
+ >, 0);
+ UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+ UT_ASSERT_INT(cb_data.event_disconnected, ==, 1);
+ UT_ASSERT_INT(cb_data.event_disconnected_ssr, ==, 0);
+ break;
+ }
+
+ if (!failed) {
+ i += scnprintf(buf + i, max - i, "\tOK\n");
+ } else {
+ pr_err("%s: Failed\n", name);
+ i += scnprintf(buf + i, max - i, "\tFailed\n");
+ i += mock_cb_data_print(&cb_data, buf + i, max - i);
+ msm_smux_close(SMUX_TEST_LCID);
+ }
+
+ mock_cb_data_reset(&cb_data);
+ return i;
+}
+
+/**
+ * Verify Basic Local Loopback Support
+ *
+ * Perform a sanity test consisting of opening a port in local loopback
+ * mode, writing a packet, and reading the echoed packet back.
+ */
+static int smux_ut_basic(char *buf, int max)
+{
+ const struct test_vector test_data[] = {
+ {"hello\0world\n", sizeof("hello\0world\n")},
+ {0, 0},
+ };
+ int i = 0;
+ int failed = 0;
+ int ret;
+
+ i += scnprintf(buf + i, max - i, "Running %s\n", __func__);
+ while (!failed) {
+ /* enable loopback mode */
+ ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
+ SMUX_CH_OPTION_LOCAL_LOOPBACK, 0);
+ UT_ASSERT_INT(ret, ==, 0);
+
+ i += smux_ut_basic_core(buf + i, max - i, test_data, __func__);
+ break;
+ }
+
+ if (failed) {
+ pr_err("%s: Failed\n", __func__);
+ i += scnprintf(buf + i, max - i, "\tFailed\n");
+ }
+ return i;
+}
+
+/**
+ * Verify Basic Remote Loopback Support
+ *
+ * Perform a sanity test consisting of opening a port in remote loopback
+ * mode, writing a packet, and reading the echoed packet back.
+ */
+static int smux_ut_remote_basic(char *buf, int max)
+{
+ const struct test_vector test_data[] = {
+ {"hello\0world\n", sizeof("hello\0world\n")},
+ {0, 0},
+ };
+ int i = 0;
+ int failed = 0;
+ int ret;
+
+ i += scnprintf(buf + i, max - i, "Running %s\n", __func__);
+ while (!failed) {
+ /* enable remote mode */
+ ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
+ SMUX_CH_OPTION_REMOTE_LOOPBACK, 0);
+ UT_ASSERT_INT(ret, ==, 0);
+
+ i += smux_ut_basic_core(buf + i, max - i, test_data, __func__);
+ break;
+ }
+
+ if (failed) {
+ pr_err("%s: Failed\n", __func__);
+ i += scnprintf(buf + i, max - i, "\tFailed\n");
+ }
+ return i;
+}
+
+/**
+ * Fill a test pattern into the provided buffer, with an optional
+ * 16-byte redzone before and after the data region.
+ *
+ * buf ---------
+ * redzone
+ * --------- <- returned pointer
+ * data
+ * --------- <- returned pointer + len
+ * redzone
+ * ---------
+ *
+ * @buf Pointer to the buffer of size len or len+32 (redzone)
+ * @len Length of the *data* buffer (excluding 32-byte redzone)
+ * @redzone If true, adds redzone data
+ *
+ * @returns pointer to buffer (buf + 16 if redzone enabled)
+ */
+uint8_t *test_pattern_fill(char *buf, int len, int redzone)
+{
+	char *ret;
+	uint8_t ch;
+
+	ret = buf;
+	if (redzone) {
+		memset(buf, 0xAB, 16);
+		memset(buf + 16 + len, 0xBA, 16);
+		buf += 16;
+		ret += 16;
+	}
+
+	/* fill the data region with the test pattern */
+	for (ch = 0; len > 0; --len, ++ch)
+		*buf++ = (char)ch;
+
+	return (uint8_t *)ret;
+}
+
+/**
+ * Verify test pattern generated by test_pattern_fill.
+ *
+ * @buf_ptr Pointer to buffer pointer
+ * @len Length of the *data* buffer (excluding 32-byte redzone)
+ * @redzone If true, verifies redzone and adjusts *buf_ptr
+ * @errmsg Buffer for error message
+ * @errmsg_max Size of error message buffer
+ *
+ * @returns 0 for success; length of error message otherwise
+ */
+unsigned test_pattern_verify(char **buf_ptr, int len, int redzone,
+ char *errmsg, int errmsg_max)
+{
+ int n;
+ int i = 0;
+ char linebuff[80];
+
+ if (redzone) {
+ *buf_ptr -= 16;
+
+ /* verify prefix redzone */
+		for (n = 0; n < 16; ++n) {
+			if ((uint8_t)(*buf_ptr)[n] != 0xAB) {
+				hex_dump_to_buffer(*buf_ptr, 16,
+					16, 1, linebuff, sizeof(linebuff), 1);
+				i += scnprintf(errmsg + i, errmsg_max - i,
+					"Redzone violation: %s\n", linebuff);
+				break;
+			}
+		}
+
+		/* verify postfix redzone */
+		for (n = 0; n < 16; ++n) {
+			if ((uint8_t)(*buf_ptr)[16 + len + n] != 0xBA) {
+				hex_dump_to_buffer(&(*buf_ptr)[16 + len], 16,
+					16, 1, linebuff, sizeof(linebuff), 1);
+				i += scnprintf(errmsg + i, errmsg_max - i,
+					"Redzone violation: %s\n", linebuff);
+				break;
+			}
+		}
+ }
+ return i;
+}
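+
+/*
+ * Example (illustrative only): pairing the two helpers for a 64-byte
+ * redzone-protected test buffer:
+ *
+ *	char *raw = kmalloc(64 + 32, GFP_KERNEL);
+ *	char *data = (char *)test_pattern_fill(raw, 64, 1);
+ *
+ *	... hand "data" to SMUX and receive it back ...
+ *
+ *	i += test_pattern_verify(&data, 64, 1, buf + i, max - i);
+ *	kfree(data);	// verify rewound "data" back to "raw"
+ */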
+
+/**
+ * Write multiple packets of ascending size and verify that each packet
+ * is received correctly.
+ *
+ * @buf Buffer for status message
+ * @max Size of buffer
+ * @name Name of the test for error reporting
+ *
+ * @returns Number of bytes written to @buf
+ *
+ * Requires that the port already be opened and loopback mode is
+ * configured correctly (if required).
+ */
+static int smux_ut_loopback_big_pkt(char *buf, int max, const char *name)
+{
+ struct test_vector test_data[] = {
+ {0, 64},
+ {0, 128},
+ {0, 256},
+ {0, 512},
+ {0, 1024},
+ {0, 2048},
+ {0, 4096},
+ {0, 0},
+ };
+ int i = 0;
+ int failed = 0;
+ struct test_vector *tv;
+
+ /* generate test data */
+ for (tv = test_data; tv->len > 0; ++tv) {
+ tv->data = kmalloc(tv->len + 32, GFP_KERNEL);
+ pr_err("%s: allocating %p len %d\n",
+ __func__, tv->data, tv->len);
+ if (!tv->data) {
+ i += scnprintf(buf + i, max - i,
+ "%s: Unable to allocate %d bytes\n",
+ __func__, tv->len);
+ failed = 1;
+ goto out;
+ }
+		tv->data = (char *)test_pattern_fill((char *)tv->data,
+							tv->len, 1);
+ }
+
+ /* run test */
+ i += scnprintf(buf + i, max - i, "Running %s\n", name);
+ while (!failed) {
+ i += smux_ut_basic_core(buf + i, max - i, test_data, name);
+ break;
+ }
+
+out:
+ if (failed) {
+ pr_err("%s: Failed\n", name);
+ i += scnprintf(buf + i, max - i, "\tFailed\n");
+ }
+
+ for (tv = test_data; tv->len > 0; ++tv) {
+		if (tv->data) {
+ i += test_pattern_verify((char **)&tv->data,
+ tv->len, 1, buf + i, max - i);
+ pr_err("%s: freeing %p len %d\n", __func__,
+ tv->data, tv->len);
+ kfree(tv->data);
+ }
+ }
+
+ return i;
+}
+
+/**
+ * Verify Large-packet Local Loopback Support.
+ *
+ * @buf Buffer for status message
+ * @max Size of buffer
+ *
+ * @returns Number of bytes written to @buf
+ *
+ * Open the port in local loopback mode, write multiple packets of
+ * ascending size, and verify that each packet is received correctly.
+ */
+static int smux_ut_local_big_pkt(char *buf, int max)
+{
+ int i = 0;
+ int ret;
+
+ ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
+ SMUX_CH_OPTION_LOCAL_LOOPBACK, 0);
+
+ if (ret == 0) {
+ smux_byte_loopback = SMUX_TEST_LCID;
+ i += smux_ut_loopback_big_pkt(buf, max, __func__);
+ smux_byte_loopback = 0;
+ } else {
+ i += scnprintf(buf + i, max - i,
+ "%s: Unable to set loopback mode\n",
+ __func__);
+ }
+
+ return i;
+}
+
+/**
+ * Verify Large-packet Remote Loopback Support.
+ *
+ * @buf Buffer for status message
+ * @max Size of buffer
+ *
+ * @returns Number of bytes written to @buf
+ *
+ * Open the port in remote loopback mode, write multiple packets of
+ * ascending size, and verify that each packet is received correctly.
+ */
+static int smux_ut_remote_big_pkt(char *buf, int max)
+{
+ int i = 0;
+ int ret;
+
+ ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
+ SMUX_CH_OPTION_REMOTE_LOOPBACK, 0);
+ if (ret == 0) {
+ i += smux_ut_loopback_big_pkt(buf, max, __func__);
+ } else {
+ i += scnprintf(buf + i, max - i,
+ "%s: Unable to set loopback mode\n",
+ __func__);
+ }
+
+ return i;
+}
+
+/**
+ * Verify set and get operations for each TIOCM bit.
+ *
+ * @buf Buffer for status message
+ * @max Size of buffer
+ * @name Name of the test for error reporting
+ *
+ * @returns Number of bytes written to @buf
+ */
+static int smux_ut_tiocm(char *buf, int max, const char *name)
+{
+ static struct smux_mock_callback cb_data;
+ static int cb_initialized;
+ static const struct tiocm_test_vector tiocm_vectors[] = {
+ /* bit to set, set old, set new, clear old */
+ {TIOCM_DTR, TIOCM_DTR, TIOCM_DTR | TIOCM_DSR, TIOCM_DSR},
+ {TIOCM_RTS, TIOCM_RTS, TIOCM_RTS | TIOCM_CTS, TIOCM_CTS},
+ {TIOCM_RI, 0x0, TIOCM_RI, TIOCM_RI},
+ {TIOCM_CD, 0x0, TIOCM_CD, TIOCM_CD},
+ };
+ int i = 0;
+ int failed = 0;
+ int n;
+ int ret;
+
+ i += scnprintf(buf + i, max - i, "Running %s\n", name);
+
+ if (!cb_initialized)
+ mock_cb_data_init(&cb_data);
+
+ mock_cb_data_reset(&cb_data);
+ while (!failed) {
+ /* open port */
+ ret = msm_smux_open(SMUX_TEST_LCID, &cb_data, smux_mock_cb,
+ get_rx_buffer);
+ UT_ASSERT_INT(ret, ==, 0);
+ UT_ASSERT_INT(
+ (int)wait_for_completion_timeout(
+ &cb_data.cb_completion, HZ), >, 0);
+ UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+ UT_ASSERT_INT(cb_data.event_connected, ==, 1);
+ mock_cb_data_reset(&cb_data);
+
+ /* set and clear each TIOCM bit */
+ for (n = 0; n < ARRAY_SIZE(tiocm_vectors) && !failed; ++n) {
+ /* set signal and verify */
+ ret = msm_smux_tiocm_set(SMUX_TEST_LCID,
+ tiocm_vectors[n].input, 0x0);
+ UT_ASSERT_INT(ret, ==, 0);
+ UT_ASSERT_INT(
+ (int)wait_for_completion_timeout(
+ &cb_data.cb_completion, HZ), >, 0);
+ UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+ UT_ASSERT_INT(cb_data.event_tiocm, ==, 1);
+ UT_ASSERT_INT(cb_data.tiocm_meta.tiocm_old, ==,
+ tiocm_vectors[n].set_old);
+ UT_ASSERT_INT(cb_data.tiocm_meta.tiocm_new, ==,
+ tiocm_vectors[n].set_new);
+ mock_cb_data_reset(&cb_data);
+
+ /* clear signal and verify */
+ ret = msm_smux_tiocm_set(SMUX_TEST_LCID, 0x0,
+ tiocm_vectors[n].input);
+ UT_ASSERT_INT(ret, ==, 0);
+ UT_ASSERT_INT(
+ (int)wait_for_completion_timeout(
+ &cb_data.cb_completion, HZ),
+ >, 0);
+ UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+ UT_ASSERT_INT(cb_data.event_tiocm, ==, 1);
+ UT_ASSERT_INT(cb_data.tiocm_meta.tiocm_old, ==,
+ tiocm_vectors[n].clr_old);
+ UT_ASSERT_INT(cb_data.tiocm_meta.tiocm_new, ==, 0x0);
+ mock_cb_data_reset(&cb_data);
+ }
+ if (failed)
+ break;
+
+ /* close port */
+ ret = msm_smux_close(SMUX_TEST_LCID);
+ UT_ASSERT_INT(ret, ==, 0);
+ UT_ASSERT_INT(
+ (int)wait_for_completion_timeout(
+ &cb_data.cb_completion, HZ),
+ >, 0);
+ UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+ UT_ASSERT_INT(cb_data.event_disconnected, ==, 1);
+ UT_ASSERT_INT(cb_data.event_disconnected_ssr, ==, 0);
+ break;
+ }
+
+ if (!failed) {
+ i += scnprintf(buf + i, max - i, "\tOK\n");
+ } else {
+ pr_err("%s: Failed\n", name);
+ i += scnprintf(buf + i, max - i, "\tFailed\n");
+ i += mock_cb_data_print(&cb_data, buf + i, max - i);
+ msm_smux_close(SMUX_TEST_LCID);
+ }
+
+ mock_cb_data_reset(&cb_data);
+ return i;
+}
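
(Editor's note: UT_ASSERT_INT is defined earlier in smux_test.c, outside the hunks shown here. Inferred from its usage above — it must set the caller's local `failed` flag and break out of the enclosing test loop — a minimal sketch would be:)

	/* Sketch only: the real macro lives earlier in this file. It must
	 * expand to a bare if (not do/while(0)) so that `break' exits the
	 * caller's while (!failed) test loop.
	 */
	#define UT_ASSERT_INT(a, op, b) \
		if (!((a) op (b))) { \
			pr_err("%s:%d assert fail: %d " #op " %d\n", \
					__func__, __LINE__, (int)(a), (int)(b)); \
			failed = 1; \
			break; \
		}
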
+
+/**
+ * Verify TIOCM Status Bits for local loopback.
+ *
+ * @buf Buffer for status message
+ * @max Size of buffer
+ *
+ * @returns Number of bytes written to @buf
+ */
+static int smux_ut_local_tiocm(char *buf, int max)
+{
+ int i = 0;
+ int ret;
+
+ ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
+ SMUX_CH_OPTION_LOCAL_LOOPBACK, 0);
+
+ if (ret == 0) {
+ smux_byte_loopback = SMUX_TEST_LCID;
+ i += smux_ut_tiocm(buf, max, __func__);
+ smux_byte_loopback = 0;
+ } else {
+ i += scnprintf(buf + i, max - i,
+ "%s: Unable to set loopback mode\n",
+ __func__);
+ }
+
+ return i;
+}
+
+/**
+ * Verify TIOCM Status Bits for remote loopback.
+ *
+ * @buf Buffer for status message
+ * @max Size of buffer
+ *
+ * @returns Number of bytes written to @buf
+ */
+static int smux_ut_remote_tiocm(char *buf, int max)
+{
+ int i = 0;
+ int ret;
+
+ ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
+ SMUX_CH_OPTION_REMOTE_LOOPBACK, 0);
+ if (ret == 0) {
+ i += smux_ut_tiocm(buf, max, __func__);
+ } else {
+ i += scnprintf(buf + i, max - i,
+ "%s: Unable to set loopback mode\n",
+ __func__);
+ }
+
+ return i;
+}
+
+/**
+ * Verify High/Low Watermark notifications.
+ *
+ * @buf Buffer for status message
+ * @max Size of buffer
+ *
+ * @returns Number of bytes written to @buf
+ */
+static int smux_ut_local_wm(char *buf, int max)
+{
+ static struct smux_mock_callback cb_data;
+ static int cb_initialized;
+ int i = 0;
+ int failed = 0;
+ int ret;
+
+ i += scnprintf(buf + i, max - i, "Running %s\n", __func__);
+ pr_err("%s", buf);
+
+ if (!cb_initialized)
+ mock_cb_data_init(&cb_data);
+
+ mock_cb_data_reset(&cb_data);
+ smux_byte_loopback = SMUX_TEST_LCID;
+ while (!failed) {
+ /* open port for loopback with TX disabled */
+ ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
+ SMUX_CH_OPTION_LOCAL_LOOPBACK
+ | SMUX_CH_OPTION_REMOTE_TX_STOP,
+ 0);
+ UT_ASSERT_INT(ret, ==, 0);
+
+ ret = msm_smux_open(SMUX_TEST_LCID, &cb_data, smux_mock_cb,
+ get_rx_buffer);
+ UT_ASSERT_INT(ret, ==, 0);
+ UT_ASSERT_INT(
+ (int)wait_for_completion_timeout(
+ &cb_data.cb_completion, HZ), >, 0);
+ UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+ UT_ASSERT_INT(cb_data.event_connected, ==, 1);
+ mock_cb_data_reset(&cb_data);
+
+ /* transmit 4 packets and verify high-watermark notification */
+ ret = 0;
+ ret |= msm_smux_write(SMUX_TEST_LCID, (void *)1,
+ test_array, sizeof(test_array));
+ ret |= msm_smux_write(SMUX_TEST_LCID, (void *)2,
+ test_array, sizeof(test_array));
+ ret |= msm_smux_write(SMUX_TEST_LCID, (void *)3,
+ test_array, sizeof(test_array));
+ UT_ASSERT_INT(ret, ==, 0);
+ UT_ASSERT_INT(cb_data.cb_count, ==, 0);
+ UT_ASSERT_INT(cb_data.event_high_wm, ==, 0);
+
+ ret = msm_smux_write(SMUX_TEST_LCID, (void *)4,
+ test_array, sizeof(test_array));
+ UT_ASSERT_INT(ret, ==, 0);
+ UT_ASSERT_INT(
+ (int)wait_for_completion_timeout(
+ &cb_data.cb_completion, HZ),
+ >, 0);
+ UT_ASSERT_INT(cb_data.event_high_wm, ==, 1);
+ UT_ASSERT_INT(cb_data.event_low_wm, ==, 0);
+ mock_cb_data_reset(&cb_data);
+
+ /* exceed watermark and verify failure return value */
+ ret = msm_smux_write(SMUX_TEST_LCID, (void *)5,
+ test_array, sizeof(test_array));
+ UT_ASSERT_INT(ret, ==, -EAGAIN);
+
+ /* re-enable TX and verify low-watermark notification */
+ ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
+ 0, SMUX_CH_OPTION_REMOTE_TX_STOP);
+ UT_ASSERT_INT(ret, ==, 0);
+ while (cb_data.cb_count < 9) {
+ UT_ASSERT_INT(
+ (int)wait_for_completion_timeout(
+ &cb_data.cb_completion, HZ),
+ >, 0);
+ INIT_COMPLETION(cb_data.cb_completion);
+ }
+ if (failed)
+ break;
+
+ UT_ASSERT_INT(cb_data.event_high_wm, ==, 0);
+ UT_ASSERT_INT(cb_data.event_low_wm, ==, 1);
+ UT_ASSERT_INT(cb_data.event_write_done, ==, 4);
+ mock_cb_data_reset(&cb_data);
+
+ /* close port */
+ ret = msm_smux_close(SMUX_TEST_LCID);
+ UT_ASSERT_INT(ret, ==, 0);
+ UT_ASSERT_INT(
+ (int)wait_for_completion_timeout(
+ &cb_data.cb_completion, HZ),
+ >, 0);
+ UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+ UT_ASSERT_INT(cb_data.event_disconnected, ==, 1);
+ UT_ASSERT_INT(cb_data.event_disconnected_ssr, ==, 0);
+ break;
+ }
+
+ if (!failed) {
+ i += scnprintf(buf + i, max - i, "\tOK\n");
+ } else {
+ pr_err("%s: Failed\n", __func__);
+ i += scnprintf(buf + i, max - i, "\tFailed\n");
+ i += mock_cb_data_print(&cb_data, buf + i, max - i);
+ msm_smux_close(SMUX_TEST_LCID);
+ }
+ smux_byte_loopback = 0;
+ mock_cb_data_reset(&cb_data);
+ return i;
+}
+
+/**
+ * Verify smuxld_receive_buf regular and error processing.
+ *
+ * @buf Buffer for status message
+ * @max Size of buffer
+ *
+ * @returns Number of bytes written to @buf
+ */
+static int smux_ut_local_smuxld_receive_buf(char *buf, int max)
+{
+ static struct smux_mock_callback cb_data;
+ static int cb_initialized;
+ struct mock_read_event *meta;
+ int i = 0;
+ int failed = 0;
+ int ret;
+ char data[] = {SMUX_UT_ECHO_REQ,
+ SMUX_UT_ECHO_REQ, SMUX_UT_ECHO_REQ,
+ };
+ char flags[] = {0x0, 0x1, 0x0,};
+
+ i += scnprintf(buf + i, max - i, "Running %s\n", __func__);
+
+ if (!cb_initialized)
+ mock_cb_data_init(&cb_data);
+
+ mock_cb_data_reset(&cb_data);
+ smux_byte_loopback = SMUX_TEST_LCID;
+ while (!failed) {
+ /* open port for loopback */
+ ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
+ SMUX_CH_OPTION_LOCAL_LOOPBACK, 0);
+ UT_ASSERT_INT(ret, ==, 0);
+
+ ret = msm_smux_open(SMUX_TEST_LCID, &cb_data, smux_mock_cb,
+ get_rx_buffer);
+ UT_ASSERT_INT(ret, ==, 0);
+ UT_ASSERT_INT(
+ (int)wait_for_completion_timeout(
+ &cb_data.cb_completion, HZ), >, 0);
+ UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+ UT_ASSERT_INT(cb_data.event_connected, ==, 1);
+ mock_cb_data_reset(&cb_data);
+
+ /*
+ * Verify RX error processing by sending 3 echo requests:
+ * one OK, one fail, and a final OK
+ *
+ * The parsing framework should process the requests
+ * and send us three BYTE command packets carrying
+ * ECHO ACK OK, ECHO ACK FAIL, and ECHO ACK OK characters.
+ */
+ smuxld_receive_buf(0, data, flags, sizeof(data));
+
+ /* verify response characters */
+ do {
+ UT_ASSERT_INT(
+ (int)wait_for_completion_timeout(
+ &cb_data.cb_completion, HZ), >, 0);
+ INIT_COMPLETION(cb_data.cb_completion);
+ } while (cb_data.cb_count < 3);
+ UT_ASSERT_INT(cb_data.cb_count, ==, 3);
+ UT_ASSERT_INT(cb_data.event_read_done, ==, 3);
+
+ meta = list_first_entry(&cb_data.read_events,
+ struct mock_read_event, list);
+ UT_ASSERT_INT((int)meta->meta.pkt_priv, ==,
+ SMUX_UT_ECHO_ACK_OK);
+ list_del(&meta->list);
+
+ meta = list_first_entry(&cb_data.read_events,
+ struct mock_read_event, list);
+ UT_ASSERT_INT((int)meta->meta.pkt_priv, ==,
+ SMUX_UT_ECHO_ACK_FAIL);
+ list_del(&meta->list);
+
+ meta = list_first_entry(&cb_data.read_events,
+ struct mock_read_event, list);
+ UT_ASSERT_INT((int)meta->meta.pkt_priv, ==,
+ SMUX_UT_ECHO_ACK_OK);
+ list_del(&meta->list);
+ mock_cb_data_reset(&cb_data);
+
+ /* close port */
+ ret = msm_smux_close(SMUX_TEST_LCID);
+ UT_ASSERT_INT(ret, ==, 0);
+ UT_ASSERT_INT(
+ (int)wait_for_completion_timeout(
+ &cb_data.cb_completion, HZ),
+ >, 0);
+ UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+ UT_ASSERT_INT(cb_data.event_disconnected, ==, 1);
+ UT_ASSERT_INT(cb_data.event_disconnected_ssr, ==, 0);
+ break;
+ }
+
+ if (!failed) {
+ i += scnprintf(buf + i, max - i, "\tOK\n");
+ } else {
+ pr_err("%s: Failed\n", __func__);
+ i += scnprintf(buf + i, max - i, "\tFailed\n");
+ i += mock_cb_data_print(&cb_data, buf + i, max - i);
+ msm_smux_close(SMUX_TEST_LCID);
+ }
+ smux_byte_loopback = 0;
+ mock_cb_data_reset(&cb_data);
+ return i;
+}
+
+static char debug_buffer[DEBUG_BUFMAX];
+
+static ssize_t debug_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int (*fill)(char *buf, int max) = file->private_data;
+ int bsize;
+
+ if (*ppos != 0)
+ return 0;
+
+ bsize = fill(debug_buffer, DEBUG_BUFMAX);
+ return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static const struct file_operations debug_ops = {
+ .read = debug_read,
+ .open = debug_open,
+};
+
+static void debug_create(const char *name, mode_t mode,
+ struct dentry *dent,
+ int (*fill)(char *buf, int max))
+{
+ debugfs_create_file(name, mode, dent, fill, &debug_ops);
+}
+
+static int __init smux_debugfs_init(void)
+{
+ struct dentry *dent;
+
+ dent = debugfs_create_dir("n_smux", 0);
+ if (IS_ERR(dent))
+ return PTR_ERR(dent);
+
+ /*
+ * Add Unit Test entries.
+ *
+ * The idea with unit tests is that you can run all of them
+ * from ADB shell by doing:
+ * adb shell
+ * cat ut*
+ *
+ * If particular tests fail, you can then re-run the failing tests
+ * individually as you debug and fix them.
+ */
+ debug_create("ut_local_basic", 0444, dent, smux_ut_basic);
+ debug_create("ut_remote_basic", 0444, dent, smux_ut_remote_basic);
+ debug_create("ut_local_big_pkt", 0444, dent, smux_ut_local_big_pkt);
+ debug_create("ut_remote_big_pkt", 0444, dent, smux_ut_remote_big_pkt);
+ debug_create("ut_local_tiocm", 0444, dent, smux_ut_local_tiocm);
+ debug_create("ut_remote_tiocm", 0444, dent, smux_ut_remote_tiocm);
+ debug_create("ut_local_wm", 0444, dent, smux_ut_local_wm);
+ debug_create("ut_local_smuxld_receive_buf", 0444, dent,
+ smux_ut_local_smuxld_receive_buf);
+
+ return 0;
+}
+
+late_initcall(smux_debugfs_init);
+
diff --git a/drivers/video/msm/mdp4_overlay.c b/drivers/video/msm/mdp4_overlay.c
index 311599d..034d6b6 100644
--- a/drivers/video/msm/mdp4_overlay.c
+++ b/drivers/video/msm/mdp4_overlay.c
@@ -1699,7 +1699,8 @@
/*
* If solid fill is enabled, flip and scale
* have to be disabled. otherwise, h/w
- * underruns.
+ * underruns. Also flush the pipe in order
+ * for solid fill to take effect.
*/
op_mode = inpdw(rgb_base + 0x0058);
op_mode &= ~(MDP4_OP_FLIP_LR + MDP4_OP_SCALEX_EN);
@@ -1707,6 +1708,7 @@
outpdw(rgb_base + 0x0058, op_mode);
outpdw(rgb_base + 0x50, rgb_src_format);
outpdw(rgb_base + 0x1008, constant_color);
+ mdp4_overlay_reg_flush(bg_pipe, 0);
}
} else if (fg_alpha) {
blend_op = (MDP4_BLEND_BG_ALPHA_FG_PIXEL |
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 6395692..47b1fe3 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -151,6 +151,7 @@
__REQ_IO_STAT, /* account I/O stat */
__REQ_MIXED_MERGE, /* merge of different types, fail separately */
__REQ_SECURE, /* secure discard (used with __REQ_DISCARD) */
+ __REQ_SANITIZE, /* sanitize */
__REQ_NR_BITS, /* stops here */
};
@@ -161,13 +162,14 @@
#define REQ_SYNC (1 << __REQ_SYNC)
#define REQ_META (1 << __REQ_META)
#define REQ_DISCARD (1 << __REQ_DISCARD)
+#define REQ_SANITIZE (1 << __REQ_SANITIZE)
#define REQ_NOIDLE (1 << __REQ_NOIDLE)
#define REQ_FAILFAST_MASK \
(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
#define REQ_COMMON_MASK \
(REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_DISCARD | \
- REQ_NOIDLE | REQ_FLUSH | REQ_FUA | REQ_SECURE)
+ REQ_NOIDLE | REQ_FLUSH | REQ_FUA | REQ_SECURE | REQ_SANITIZE)
#define REQ_CLONE_MASK REQ_COMMON_MASK
#define REQ_RAHEAD (1 << __REQ_RAHEAD)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 1b13021..4dc4b3e 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -403,6 +403,7 @@
#define QUEUE_FLAG_NOXMERGES 15 /* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM 16 /* Contributes to random pool */
#define QUEUE_FLAG_SECDISCARD 17 /* supports SECDISCARD */
+#define QUEUE_FLAG_SANITIZE 19 /* supports SANITIZE */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
@@ -485,6 +486,7 @@
#define blk_queue_stackable(q) \
test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
+#define blk_queue_sanitize(q) test_bit(QUEUE_FLAG_SANITIZE, &(q)->queue_flags)
#define blk_queue_secdiscard(q) (blk_queue_discard(q) && \
test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))
@@ -922,6 +924,7 @@
extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
+extern int blkdev_issue_sanitize(struct block_device *bdev, gfp_t gfp_mask);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask);
static inline int sb_issue_discard(struct super_block *sb, sector_t block,
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 35e4edf..1c91125 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -317,6 +317,7 @@
#define BLKPBSZGET _IO(0x12,123)
#define BLKDISCARDZEROES _IO(0x12,124)
#define BLKSECDISCARD _IO(0x12,125)
+#define BLKSANITIZE _IO(0x12, 126)
#define BMAP_IOCTL 1 /* obsolete - kept for compatibility */
#define FIBMAP _IO(0x00,1) /* bmap access */
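
(Editor's note: the block/ioctl.c plumbing that dispatches BLKSANITIZE is not part of the hunks shown here. The helper below is a hedged sketch of how such a handler could call the new blkdev_issue_sanitize(), modeled on the existing BLKDISCARD handling; the helper name is hypothetical.)

	/* Hypothetical helper, e.g. called from a BLKSANITIZE case in
	 * blkdev_ioctl(); not part of this patch. */
	static int blk_ioctl_sanitize(struct block_device *bdev)
	{
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
		if (!blk_queue_sanitize(bdev_get_queue(bdev)))
			return -EOPNOTSUPP;
		return blkdev_issue_sanitize(bdev, GFP_KERNEL);
	}
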
diff --git a/include/linux/smux.h b/include/linux/smux.h
new file mode 100644
index 0000000..64d0ed6
--- /dev/null
+++ b/include/linux/smux.h
@@ -0,0 +1,295 @@
+/* include/linux/smux.h
+ *
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef SMUX_H
+#define SMUX_H
+
+/**
+ * Logical Channel IDs
+ *
+ * This must be identical between local and remote clients.
+ */
+enum {
+ /* Data Ports */
+ SMUX_DATA_0,
+ SMUX_DATA_1,
+ SMUX_DATA_2,
+ SMUX_DATA_3,
+ SMUX_DATA_4,
+ SMUX_DATA_5,
+ SMUX_DATA_6,
+ SMUX_DATA_7,
+ SMUX_DATA_8,
+ SMUX_DATA_9,
+ SMUX_USB_RMNET_DATA_0,
+ SMUX_USB_DUN_0,
+ SMUX_USB_DIAG_0,
+ SMUX_SYS_MONITOR_0,
+ SMUX_CSVT_0,
+ /* add new data ports here */
+
+ /* Control Ports */
+ SMUX_DATA_CTL_0 = 32,
+ SMUX_DATA_CTL_1,
+ SMUX_DATA_CTL_2,
+ SMUX_DATA_CTL_3,
+ SMUX_DATA_CTL_4,
+ SMUX_DATA_CTL_5,
+ SMUX_DATA_CTL_6,
+ SMUX_DATA_CTL_7,
+ SMUX_DATA_CTL_8,
+ SMUX_DATA_CTL_9,
+ SMUX_USB_RMNET_CTL_0,
+ SMUX_USB_DUN_CTL_0_UNUSED,
+ SMUX_USB_DIAG_CTL_0,
+ SMUX_SYS_MONITOR_CTL_0,
+ SMUX_CSVT_CTL_0,
+ /* add new control ports here */
+
+ SMUX_TEST_LCID,
+ SMUX_NUM_LOGICAL_CHANNELS,
+};
+
+/**
+ * Notification events that are passed to the notify() function.
+ *
+ * If the @metadata argument in the notifier is non-null, then it will
+ * point to the associated struct smux_meta_* structure.
+ */
+enum {
+ SMUX_CONNECTED, /* @metadata is NULL */
+ SMUX_DISCONNECTED,
+ SMUX_READ_DONE,
+ SMUX_READ_FAIL,
+ SMUX_WRITE_DONE,
+ SMUX_WRITE_FAIL,
+ SMUX_TIOCM_UPDATE,
+ SMUX_LOW_WM_HIT, /* @metadata is NULL */
+ SMUX_HIGH_WM_HIT, /* @metadata is NULL */
+};
+
+/**
+ * Channel options used to modify channel behavior.
+ */
+enum {
+ SMUX_CH_OPTION_LOCAL_LOOPBACK = 1 << 0,
+ SMUX_CH_OPTION_REMOTE_LOOPBACK = 1 << 1,
+ SMUX_CH_OPTION_REMOTE_TX_STOP = 1 << 2,
+};
+
+/**
+ * Metadata for SMUX_DISCONNECTED notification
+ *
+ * @is_ssr: Disconnect caused by subsystem restart
+ */
+struct smux_meta_disconnected {
+ int is_ssr;
+};
+
+/**
+ * Metadata for SMUX_READ_DONE/SMUX_READ_FAIL notification
+ *
+ * @pkt_priv: Packet-specific private data
+ * @buffer: Buffer pointer returned by get_rx_buffer()
+ * @len: Length of the data received into @buffer
+ */
+struct smux_meta_read {
+ void *pkt_priv;
+ void *buffer;
+ int len;
+};
+
+/**
+ * Metadata for SMUX_WRITE_DONE/SMUX_WRITE_FAIL notification
+ *
+ * @pkt_priv: Packet-specific private data
+ * @buffer: Buffer pointer passed into msm_smux_write()
+ * @len: Buffer length passed into msm_smux_write()
+ */
+struct smux_meta_write {
+ void *pkt_priv;
+ void *buffer;
+ int len;
+};
+
+/**
+ * Metadata for SMUX_TIOCM_UPDATE notification
+ *
+ * @tiocm_old: Previous TIOCM state
+ * @tiocm_new: Current TIOCM state
+ */
+struct smux_meta_tiocm {
+ uint32_t tiocm_old;
+ uint32_t tiocm_new;
+};
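
(Editor's note: a minimal sketch of a client notify() handler, showing how @metadata is cast to the matching struct per event type. The handler name and the kfree() ownership policy for receive buffers are assumptions, not part of this patch.)

	static void client_notify(void *priv, int event_type,
				  const void *metadata)
	{
		switch (event_type) {
		case SMUX_READ_DONE: {
			const struct smux_meta_read *meta = metadata;

			/* assumption: the client owns the buffer it handed
			 * out in get_rx_buffer() and frees it here */
			pr_info("smux: read %d bytes\n", meta->len);
			kfree(meta->buffer);
			break;
		}
		case SMUX_TIOCM_UPDATE: {
			const struct smux_meta_tiocm *meta = metadata;

			pr_info("smux: tiocm 0x%x -> 0x%x\n",
					meta->tiocm_old, meta->tiocm_new);
			break;
		}
		case SMUX_DISCONNECTED: {
			const struct smux_meta_disconnected *meta = metadata;

			pr_info("smux: disconnected%s\n",
					meta->is_ssr ? " (SSR)" : "");
			break;
		}
		default:
			break;
		}
	}
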
+
+#ifdef CONFIG_N_SMUX
+/**
+ * Starts the opening sequence for a logical channel.
+ *
+ * @lcid Logical channel ID
+ * @priv Free for client usage
+ * @notify Event notification function
+ * @get_rx_buffer Function used to provide a receive buffer to SMUX
+ *
+ * @returns 0 for success, <0 otherwise
+ *
+ * A channel must be fully closed (either never opened, or msm_smux_close()
+ * has been called and the SMUX_DISCONNECTED notification has been received)
+ * before it can be opened again.
+ *
+ * Once the remote side is opened, the client will receive a SMUX_CONNECTED
+ * event.
+ */
+int msm_smux_open(uint8_t lcid, void *priv,
+ void (*notify)(void *priv, int event_type, const void *metadata),
+ int (*get_rx_buffer)(void *priv, void **pkt_priv,
+ void **buffer, int size));
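
(Editor's note: a hedged sketch of the get_rx_buffer() callback. The 0/-ENOMEM return convention is inferred from the other APIs in this header; GFP_ATOMIC is an assumption about the calling context, and the function name is illustrative.)

	static int client_get_rx_buffer(void *priv, void **pkt_priv,
					void **buffer, int size)
	{
		void *buf = kmalloc(size, GFP_ATOMIC);

		if (!buf)
			return -ENOMEM;

		*pkt_priv = NULL;	/* echoed back in struct smux_meta_read */
		*buffer = buf;
		return 0;
	}
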
+
+/**
+ * Starts the closing sequence for a logical channel.
+ *
+ * @lcid Logical channel ID
+ * @returns 0 for success, <0 otherwise
+ *
+ * Once the close event has been acknowledged by the remote side, the client
+ * will receive a SMUX_DISCONNECTED notification.
+ */
+int msm_smux_close(uint8_t lcid);
+
+/**
+ * Write data to a logical channel.
+ *
+ * @lcid Logical channel ID
+ * @pkt_priv Client data that will be returned with the SMUX_WRITE_DONE or
+ * SMUX_WRITE_FAIL notification.
+ * @data Data to write
+ * @len Length of @data
+ *
+ * @returns 0 for success, <0 otherwise
+ *
+ * Data may be written immediately after msm_smux_open() is called, but
+ * the data will wait in the transmit queue until the channel has been
+ * fully opened.
+ *
+ * Once the data has been written, the client will receive either a completion
+ * (SMUX_WRITE_DONE) or a failure notice (SMUX_WRITE_FAIL).
+ */
+int msm_smux_write(uint8_t lcid, void *pkt_priv, const void *data, int len);
+
+/**
+ * Returns true if the TX queue is currently full (high water mark).
+ *
+ * @lcid Logical channel ID
+ *
+ * @returns 0 if channel is not full; 1 if it is full; < 0 for error
+ */
+int msm_smux_is_ch_full(uint8_t lcid);
+
+/**
+ * Returns true if the TX queue has space for more packets (it is at or
+ * below the low water mark).
+ *
+ * @lcid Logical channel ID
+ *
+ * @returns 0 if channel is above low watermark
+ * 1 if it's at or below the low watermark
+ * < 0 for error
+ */
+int msm_smux_is_ch_low(uint8_t lcid);
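
(Editor's note: a hedged sketch of how a client might combine the watermark query with the SMUX_LOW_WM_HIT notification to throttle transmission, matching the -EAGAIN behavior exercised by the watermark unit test above; the helper name is illustrative.)

	/* Hypothetical TX throttle: refuse to queue when the channel is
	 * full and resume from notify() on SMUX_LOW_WM_HIT. */
	static int client_try_write(uint8_t lcid, const void *data, int len)
	{
		if (msm_smux_is_ch_full(lcid) > 0)
			return -EAGAIN;	/* wait for SMUX_LOW_WM_HIT */
		return msm_smux_write(lcid, NULL, data, len);
	}
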
+
+/**
+ * Get the TIOCM status bits.
+ *
+ * @lcid Logical channel ID
+ *
+ * @returns >= 0 TIOCM status bits
+ * < 0 Error condition
+ */
+long msm_smux_tiocm_get(uint8_t lcid);
+
+/**
+ * Set/clear the TIOCM status bits.
+ *
+ * @lcid Logical channel ID
+ * @set Bits to set
+ * @clear Bits to clear
+ *
+ * @returns 0 for success; < 0 for failure
+ *
+ * If a bit is specified in both the @set and @clear masks, then the clear bit
+ * definition will dominate and the bit will be cleared.
+ */
+int msm_smux_tiocm_set(uint8_t lcid, uint32_t set, uint32_t clear);
+
+/**
+ * Set or clear channel option using the SMUX_CH_OPTION_* channel
+ * flags.
+ *
+ * @lcid Logical channel ID
+ * @set Options to set
+ * @clear Options to clear
+ *
+ * @returns 0 for success, < 0 for failure
+ */
+int msm_smux_set_ch_option(uint8_t lcid, uint32_t set, uint32_t clear);
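
(Editor's note: pulling the calls together, a hedged end-to-end sketch. client_notify and client_get_rx_buffer are the hypothetical callbacks sketched above; the channel choice and payload are illustrative.)

	static int client_start(void)
	{
		int ret;

		/* optional: echo data back locally, as the unit tests do */
		ret = msm_smux_set_ch_option(SMUX_DATA_0,
				SMUX_CH_OPTION_LOCAL_LOOPBACK, 0);
		if (ret < 0)
			return ret;

		ret = msm_smux_open(SMUX_DATA_0, NULL, client_notify,
				client_get_rx_buffer);
		if (ret < 0)
			return ret;

		/* queued immediately; sent once SMUX_CONNECTED is delivered */
		return msm_smux_write(SMUX_DATA_0, NULL, "ping", 4);
	}
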
+
+#else
+static inline int msm_smux_open(uint8_t lcid, void *priv,
+ void (*notify)(void *priv, int event_type, const void *metadata),
+ int (*get_rx_buffer)(void *priv, void **pkt_priv,
+ void **buffer, int size))
+{
+ return -ENODEV;
+}
+
+static inline int msm_smux_close(uint8_t lcid)
+{
+ return -ENODEV;
+}
+
+static inline int msm_smux_write(uint8_t lcid, void *pkt_priv,
+ const void *data, int len)
+{
+ return -ENODEV;
+}
+
+static inline int msm_smux_is_ch_full(uint8_t lcid)
+{
+ return -ENODEV;
+}
+
+static inline int msm_smux_is_ch_low(uint8_t lcid)
+{
+ return -ENODEV;
+}
+
+static inline long msm_smux_tiocm_get(uint8_t lcid)
+{
+ return 0;
+}
+
+static inline int msm_smux_tiocm_set(uint8_t lcid, uint32_t set,
+ uint32_t clear)
+{
+ return -ENODEV;
+}
+
+static inline int msm_smux_set_ch_option(uint8_t lcid, uint32_t set,
+ uint32_t clear)
+{
+ return -ENODEV;
+}
+
+#endif /* CONFIG_N_SMUX */
+
+#endif /* SMUX_H */
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 1ff6b62..818d189 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -52,6 +52,7 @@
#define N_TI_WL 22 /* for TI's WL BT, FM, GPS combo chips */
#define N_TRACESINK 23 /* Trace data routing for MIPI P1149.7 */
#define N_TRACEROUTER 24 /* Trace data routing for MIPI P1149.7 */
+#define N_SMUX 25 /* Serial MUX */
/*
* This character is the same as _POSIX_VDISABLE: it cannot be used as
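
(Editor's note: for context, a hedged userspace sketch of attaching the new N_SMUX line discipline to a serial port with the standard TIOCSETD ioctl. The device path is an assumption.)

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	#define N_SMUX 25	/* must match the kernel definition above */

	int main(void)
	{
		int ldisc = N_SMUX;
		int fd = open("/dev/ttyHS0", O_RDWR | O_NOCTTY); /* assumed path */

		if (fd < 0 || ioctl(fd, TIOCSETD, &ldisc) < 0) {
			perror("attach N_SMUX");
			return 1;
		}
		pause();	/* hold fd open; closing it detaches the ldisc */
		return 0;
	}
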
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 6957aa2..fa1d639 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -1792,6 +1792,8 @@
rwbs[i++] = 'W';
else if (rw & REQ_DISCARD)
rwbs[i++] = 'D';
+ else if (rw & REQ_SANITIZE)
+ rwbs[i++] = 'Z';
else if (bytes)
rwbs[i++] = 'R';
else