Merge "USB: HSIC SMSC HUB: Add check before hub_vbus regulator disable"
diff --git a/Documentation/devicetree/bindings/bt-fm/fm.txt b/Documentation/devicetree/bindings/bt-fm/fm.txt
new file mode 100644
index 0000000..6bb3599
--- /dev/null
+++ b/Documentation/devicetree/bindings/bt-fm/fm.txt
@@ -0,0 +1,29 @@
+Qualcomm radio iris device
+
+-FM RX playback with no RDS
+
+ FM samples are filtered by an external RF chip at baseband, then sent to the Riva-FM core through a serial link.
+ The FM signal is demodulated and the audio L/R samples are stored in memory.
+ The received FM RX sample data is then routed to an external audio codec.
+
+-Audio playback to FM TX
+
+ Used to play an audio source through FM TX.
+ The FM TX module reads the audio samples from memory; the modulated samples are then sent through the serial interface to an external RF chip.
+
+-FM RX playback with RDS
+
+ FM RX receives audio data along with RDS.
+
+-FM TX with RDS
+
+ Used to send RDS messages to an external FM receiver.
+
+Required Properties:
+- compatible: "qcom,iris_fm"
+
+Example:
+ qcom,iris-fm {
+ compatible = "qcom,iris_fm";
+ };
+
diff --git a/Documentation/devicetree/bindings/platform/msm/ipa.txt b/Documentation/devicetree/bindings/platform/msm/ipa.txt
index 5e311be..3cd29e4 100644
--- a/Documentation/devicetree/bindings/platform/msm/ipa.txt
+++ b/Documentation/devicetree/bindings/platform/msm/ipa.txt
@@ -13,9 +13,11 @@
registers.
- reg-names: "ipa-base" - string to identify the IPA CORE base registers.
"bam-base" - string to identify the IPA BAM base registers.
+ "a2-bam-base" - string to identify the A2 BAM base registers.
- interrupts: Specifies the interrupt associated with IPA.
- interrupt-names: "ipa-irq" - string to identify the IPA core interrupt.
"bam-irq" - string to identify the IPA BAM interrupt.
+ "a2-bam-irq" - string to identify the A2 BAM interrupt.
- qcom,ipa-hw-ver: Specifies the IPA hardware version.
IPA pipe sub nodes (A2 static pipes configurations):
@@ -49,10 +51,12 @@
compatible = "qcom,ipa";
reg = <0xfd4c0000 0x26000>,
-	      <0xfd4c4000 0x14818>;
-	reg-names = "ipa-base", "bam-base";
+	      <0xfd4c4000 0x14818>,
+	      <0xfc834000 0x7000>;
+	reg-names = "ipa-base", "bam-base", "a2-bam-base";
	interrupts = <0 252 0>,
-		<0 253 0>;
-	interrupt-names = "ipa-irq", "bam-irq";
+		<0 253 0>,
+		<0 29 1>;
+	interrupt-names = "ipa-irq", "bam-irq", "a2-bam-irq";
qcom,ipa-hw-ver = <1>;
qcom,pipe1 {
diff --git a/Documentation/printk-formats.txt b/Documentation/printk-formats.txt
index 5df176e..6f53742 100644
--- a/Documentation/printk-formats.txt
+++ b/Documentation/printk-formats.txt
@@ -53,6 +53,14 @@
For printing struct resources. The 'R' and 'r' specifiers result in a
printed resource with ('R') or without ('r') a decoded flags member.
+Physical addresses:
+
+ %pa 0x01234567 or 0x0123456789abcdef
+
+ For printing a phys_addr_t type (and its derivatives, such as
+ resource_size_t) which can vary based on build options, regardless of
+ the width of the CPU data path. Passed by reference.
+
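+	For example, with a phys_addr_t variable (illustrative name
+	"base"), note the address-of operator, since the value is
+	passed by reference:
+
+	printk("region base: %pa\n", &base);
+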
MAC/FDDI addresses:
%pM 00:01:02:03:04:05
@@ -134,9 +142,9 @@
printk("%lld", (long long)s64_var);
If <type> is dependent on a config option for its size (e.g., sector_t,
-blkcnt_t, phys_addr_t, resource_size_t) or is architecture-dependent
-for its size (e.g., tcflag_t), use a format specifier of its largest
-possible type and explicitly cast to it. Example:
+blkcnt_t) or is architecture-dependent for its size (e.g., tcflag_t), use a
+format specifier of its largest possible type and explicitly cast to it.
+Example:
printk("test: sector number/total blocks: %llu/%llu\n",
(unsigned long long)sector, (unsigned long long)blockcount);
diff --git a/arch/arm/boot/dts/msm8226-mtp.dts b/arch/arm/boot/dts/msm8226-mtp.dts
index dddb44b..ef0fdc0 100644
--- a/arch/arm/boot/dts/msm8226-mtp.dts
+++ b/arch/arm/boot/dts/msm8226-mtp.dts
@@ -16,9 +16,9 @@
/ {
model = "Qualcomm MSM 8226 MTP";
compatible = "qcom,msm8226-mtp", "qcom,msm8226";
- qcom,msm-id = <145 7 0>;
+ qcom,msm-id = <145 8 0>;
serial@f991f000 {
- status = "disabled";
+ status = "ok";
};
-};
\ No newline at end of file
+};
diff --git a/arch/arm/boot/dts/msm8226-qrd.dts b/arch/arm/boot/dts/msm8226-qrd.dts
index be46a73..7909435 100644
--- a/arch/arm/boot/dts/msm8226-qrd.dts
+++ b/arch/arm/boot/dts/msm8226-qrd.dts
@@ -16,9 +16,9 @@
/ {
model = "Qualcomm MSM 8226 QRD";
compatible = "qcom,msm8226-qrd", "qcom,msm8226";
- qcom,msm-id = <145 99 0>;
+ qcom,msm-id = <145 11 0>;
serial@f991f000 {
- status = "disabled";
+ status = "ok";
};
};
diff --git a/arch/arm/boot/dts/msm8226-regulator.dtsi b/arch/arm/boot/dts/msm8226-regulator.dtsi
index 3c0dd1e..8168826 100644
--- a/arch/arm/boot/dts/msm8226-regulator.dtsi
+++ b/arch/arm/boot/dts/msm8226-regulator.dtsi
@@ -45,7 +45,7 @@
qcom,enable-time = <500>;
qcom,system-load = <100000>;
regulator-always-on;
- regulator-min-microvolt = <1150000>;
+ regulator-min-microvolt = <1050000>;
regulator-max-microvolt = <1150000>;
};
diff --git a/arch/arm/boot/dts/msm8974.dtsi b/arch/arm/boot/dts/msm8974.dtsi
index d1bc9ab..b0b7677 100644
--- a/arch/arm/boot/dts/msm8974.dtsi
+++ b/arch/arm/boot/dts/msm8974.dtsi
@@ -859,6 +859,10 @@
qcom,firmware-name = "wcnss";
};
+ qcom,iris-fm {
+ compatible = "qcom,iris_fm";
+ };
+
qcom,wcnss-wlan@fb000000 {
compatible = "qcom,wcnss_wlan";
reg = <0xfb000000 0x280000>;
diff --git a/arch/arm/boot/dts/msm9625-v1.dtsi b/arch/arm/boot/dts/msm9625-v1.dtsi
index 1074116..54fe443 100644
--- a/arch/arm/boot/dts/msm9625-v1.dtsi
+++ b/arch/arm/boot/dts/msm9625-v1.dtsi
@@ -29,6 +29,12 @@
reg = <0xfc42a8c8 0xc8>;
qcom,android-usb-swfi-latency = <100>;
};
+
+ qcom,bam_dmux@fc834000 {
+ compatible = "qcom,bam_dmux";
+ reg = <0xfc834000 0x7000>;
+ interrupts = <0 29 1>;
+ };
};
&ipa_hw {
diff --git a/arch/arm/boot/dts/msm9625.dtsi b/arch/arm/boot/dts/msm9625.dtsi
index e9ca053..f22fc28 100644
--- a/arch/arm/boot/dts/msm9625.dtsi
+++ b/arch/arm/boot/dts/msm9625.dtsi
@@ -317,20 +317,16 @@
qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50";
};
- qcom,bam_dmux@fc834000 {
- compatible = "qcom,bam_dmux";
- reg = <0xfc834000 0x7000>;
- interrupts = <0 29 1>;
- };
-
ipa_hw: qcom,ipa@fd4c0000 {
compatible = "qcom,ipa";
reg = <0xfd4c0000 0x26000>,
- <0xfd4c4000 0x14818>;
- reg-names = "ipa-base", "bam-base";
+ <0xfd4c4000 0x14818>,
+ <0xfc834000 0x7000>;
+ reg-names = "ipa-base", "bam-base", "a2-bam-base";
interrupts = <0 252 0>,
- <0 253 0>;
- interrupt-names = "ipa-irq", "bam-irq";
+ <0 253 0>,
+ <0 29 1>;
+ interrupt-names = "ipa-irq", "bam-irq", "a2-bam-irq";
qcom,pipe1 {
label = "a2-to-ipa";
diff --git a/arch/arm/configs/msm8610_defconfig b/arch/arm/configs/msm8610_defconfig
index bc0b939..2e4f84d 100644
--- a/arch/arm/configs/msm8610_defconfig
+++ b/arch/arm/configs/msm8610_defconfig
@@ -61,6 +61,7 @@
CONFIG_WCNSS_CORE_PRONTO=y
CONFIG_MSM_DIRECT_SCLK_ACCESS=y
CONFIG_MSM_WATCHDOG_V2=y
+CONFIG_MSM_DLOAD_MODE=y
CONFIG_MSM_ADSP_LOADER=m
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index ea35b46..42ed059 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -2313,7 +2313,6 @@
config MSM_DLOAD_MODE
bool "Enable download mode on crashes"
- depends on ARCH_MSM8X60 || ARCH_MSM8960 || ARCH_MSM9615 || ARCH_MSM8974 || ARCH_MSM9625
default n
help
This makes the SoC enter download mode when it resets
diff --git a/arch/arm/mach-msm/board-8974.c b/arch/arm/mach-msm/board-8974.c
index 1de83a7..f864583 100644
--- a/arch/arm/mach-msm/board-8974.c
+++ b/arch/arm/mach-msm/board-8974.c
@@ -85,21 +85,6 @@
of_scan_flat_dt(dt_scan_for_memory_hole, msm8974_reserve_table);
}
-static struct platform_device msm_fm_platform_init = {
- .name = "iris_fm",
- .id = -1,
-};
-
-static struct platform_device *msm_bus_8974_devices[] = {
- &msm_fm_platform_init,
-};
-
-static void __init msm8974_init_buses(void)
-{
- platform_add_devices(msm_bus_8974_devices,
- ARRAY_SIZE(msm_bus_8974_devices));
-};
-
/*
* Used to satisfy dependencies for devices that need to be
* run early or in a particular order. Most likely your device doesn't fall
@@ -119,7 +104,6 @@
msm_clock_init(&msm8974_rumi_clock_init_data);
else
msm_clock_init(&msm8974_clock_init_data);
- msm8974_init_buses();
msm_thermal_device_init();
}
diff --git a/arch/arm/mach-msm/include/mach/ipa.h b/arch/arm/mach-msm/include/mach/ipa.h
index 564c523..5ccdf82 100644
--- a/arch/arm/mach-msm/include/mach/ipa.h
+++ b/arch/arm/mach-msm/include/mach/ipa.h
@@ -438,9 +438,50 @@
int (*release_resource)(void);
};
+enum a2_mux_event_type {
+ A2_MUX_RECEIVE,
+ A2_MUX_WRITE_DONE
+};
+
+enum a2_mux_logical_channel_id {
+ A2_MUX_WWAN_0,
+ A2_MUX_WWAN_1,
+ A2_MUX_WWAN_2,
+ A2_MUX_WWAN_3,
+ A2_MUX_WWAN_4,
+ A2_MUX_WWAN_5,
+ A2_MUX_WWAN_6,
+ A2_MUX_WWAN_7,
+ A2_MUX_TETHERED_0,
+ A2_MUX_NUM_CHANNELS
+};
+
+typedef void (*a2_mux_notify_cb)(void *user_data,
+ enum a2_mux_event_type event,
+ unsigned long data);
+
#ifdef CONFIG_IPA
/*
+ * a2 service
+ */
+int a2_mux_open_channel(enum a2_mux_logical_channel_id lcid,
+ void *user_data,
+ a2_mux_notify_cb notify_cb);
+
+int a2_mux_close_channel(enum a2_mux_logical_channel_id lcid);
+
+int a2_mux_write(enum a2_mux_logical_channel_id lcid, struct sk_buff *skb);
+
+int a2_mux_is_ch_low(enum a2_mux_logical_channel_id lcid);
+
+int a2_mux_is_ch_full(enum a2_mux_logical_channel_id lcid);
+
+int a2_mux_get_tethered_client_handles(enum a2_mux_logical_channel_id lcid,
+ unsigned int *clnt_cons_handle,
+ unsigned int *clnt_prod_handle);
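+
+/*
+ * Usage sketch (hypothetical client code; "priv" and "skb" are
+ * illustrative names). A2_MUX_WRITE_DONE hands the skb back to the
+ * client via the callback:
+ *
+ *	static void my_notify(void *priv, enum a2_mux_event_type event,
+ *			      unsigned long data)
+ *	{
+ *		if (event == A2_MUX_WRITE_DONE)
+ *			dev_kfree_skb_any((struct sk_buff *)data);
+ *	}
+ *
+ *	rc = a2_mux_open_channel(A2_MUX_WWAN_0, priv, my_notify);
+ *	if (!rc)
+ *		rc = a2_mux_write(A2_MUX_WWAN_0, skb);
+ */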
+
+/*
* Connect / Disconnect
*/
int ipa_connect(const struct ipa_connect_params *in, struct ipa_sps_params *sps,
@@ -610,6 +651,41 @@
#else /* CONFIG_IPA */
+static inline int a2_mux_open_channel(enum a2_mux_logical_channel_id lcid,
+ void *user_data, a2_mux_notify_cb notify_cb)
+{
+ return -EPERM;
+}
+
+static inline int a2_mux_close_channel(enum a2_mux_logical_channel_id lcid)
+{
+ return -EPERM;
+}
+
+static inline int a2_mux_write(enum a2_mux_logical_channel_id lcid,
+ struct sk_buff *skb)
+{
+ return -EPERM;
+}
+
+static inline int a2_mux_is_ch_low(enum a2_mux_logical_channel_id lcid)
+{
+ return -EPERM;
+}
+
+static inline int a2_mux_is_ch_full(enum a2_mux_logical_channel_id lcid)
+{
+ return -EPERM;
+}
+
+static inline int a2_mux_get_tethered_client_handles(
+ enum a2_mux_logical_channel_id lcid, unsigned int *clnt_cons_handle,
+ unsigned int *clnt_prod_handle)
+{
+ return -EPERM;
+}
+
/*
* Connect / Disconnect
*/
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap.h b/arch/arm/mach-msm/include/mach/msm_iomap.h
index 4f475fe..ebb096e 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap.h
@@ -81,7 +81,7 @@
#define MSM_LPASS_CLK_CTL_BASE IOMEM(0xFA015000) /* 4K */
#define MSM_HFPLL_BASE IOMEM(0xFA016000) /* 4K */
#define MSM_TLMM_BASE IOMEM(0xFA017000) /* 16K */
-#define MSM_SHARED_RAM_BASE IOMEM(0xFA300000) /* 2M */
+#define MSM_SHARED_RAM_BASE IOMEM(0xFA400000) /* 2M */
#define MSM_SIC_NON_SECURE_BASE IOMEM(0xFA600000) /* 64K */
#define MSM_HDMI_BASE IOMEM(0xFA800000) /* 4K */
#define MSM_RPM_BASE IOMEM(0xFA801000) /* 4K */
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index 78a666d..a76b689 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -18,3 +18,6 @@
bool
depends on CPU_IDLE && NO_HZ
default y
+
+config ARCH_NEEDS_CPU_IDLE_COUPLED
+ def_bool n
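+
+# Platforms that need coupled idle opt in from their own Kconfig, e.g.
+# with "select ARCH_NEEDS_CPU_IDLE_COUPLED".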
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index 5634f88..38c8f69 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -3,3 +3,4 @@
#
obj-y += cpuidle.o driver.o governor.o sysfs.o governors/
+obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
new file mode 100644
index 0000000..c24dda0
--- /dev/null
+++ b/drivers/cpuidle/coupled.c
@@ -0,0 +1,727 @@
+/*
+ * coupled.c - helper functions to enter the same idle state on multiple cpus
+ *
+ * Copyright (c) 2011 Google, Inc.
+ *
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/cpu.h>
+#include <linux/cpuidle.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "cpuidle.h"
+
+/**
+ * DOC: Coupled cpuidle states
+ *
+ * On some ARM SMP SoCs (OMAP4460, Tegra 2, and probably more), the
+ * cpus cannot be independently powered down, either due to
+ * sequencing restrictions (on Tegra 2, cpu 0 must be the last to
+ * power down), or due to HW bugs (on OMAP4460, a cpu powering up
+ * will corrupt the gic state unless the other cpu runs a work
+ * around). Each cpu has a power state that it can enter without
+ * coordinating with the other cpu (usually Wait For Interrupt, or
+ * WFI), and one or more "coupled" power states that affect blocks
+ * shared between the cpus (L2 cache, interrupt controller, and
+ * sometimes the whole SoC). Entering a coupled power state must
+ * be tightly controlled on both cpus.
+ *
+ * This file implements a solution, where each cpu will wait in the
+ * WFI state until all cpus are ready to enter a coupled state, at
+ * which point the coupled state function will be called on all
+ * cpus at approximately the same time.
+ *
+ * Once all cpus are ready to enter idle, they are woken by an smp
+ * cross call. At this point, there is a chance that one of the
+ * cpus will find work to do, and choose not to enter idle. A
+ * final pass is needed to guarantee that all cpus will call the
+ * power state enter function at the same time. During this pass,
+ * each cpu will increment the ready counter, and continue once the
+ * ready counter matches the number of online coupled cpus. If any
+ * cpu exits idle, the other cpus will decrement their counter and
+ * retry.
+ *
+ * requested_state stores the deepest coupled idle state each cpu
+ * is ready for. It is assumed that the states are indexed from
+ * shallowest (highest power, lowest exit latency) to deepest
+ * (lowest power, highest exit latency). The requested_state
+ * variable is not locked. It is only written from the cpu that
+ * it stores (or by the on/offlining cpu if that cpu is offline),
+ * and only read after all the cpus that are ready for the coupled
+ * idle state are no longer updating it.
+ *
+ * Three atomic counters are used. alive_count tracks the number
+ * of cpus in the coupled set that are currently or soon will be
+ * online. waiting_count tracks the number of cpus that are in
+ * the waiting loop, in the ready loop, or in the coupled idle state.
+ * ready_count tracks the number of cpus that are in the ready loop
+ * or in the coupled idle state.
+ *
+ * To use coupled cpuidle states, a cpuidle driver must:
+ *
+ * Set struct cpuidle_device.coupled_cpus to the mask of all
+ * coupled cpus, usually the same as cpu_possible_mask if all cpus
+ * are part of the same cluster. The coupled_cpus mask must be
+ * set in the struct cpuidle_device for each cpu.
+ *
+ * Set struct cpuidle_device.safe_state to a state that is not a
+ * coupled state. This is usually WFI.
+ *
+ * Set CPUIDLE_FLAG_COUPLED in struct cpuidle_state.flags for each
+ * state that affects multiple cpus.
+ *
+ * Provide a struct cpuidle_state.enter function for each state
+ * that affects multiple cpus. This function is guaranteed to be
+ * called on all cpus at approximately the same time. The driver
+ * should ensure that the cpus all abort together if any cpu tries
+ * to abort once the function is called. The function should return
+ * with interrupts still disabled.
+ */
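+
+/*
+ * Illustrative setup, as a sketch only (my_coupled_enter is a
+ * hypothetical enter function that follows the rules above, and
+ * state 0 is assumed to be the WFI safe state):
+ *
+ *	drv->states[1].flags |= CPUIDLE_FLAG_COUPLED;
+ *	drv->states[1].enter = my_coupled_enter;
+ *	for_each_possible_cpu(cpu) {
+ *		dev = per_cpu(cpuidle_devices, cpu);
+ *		cpumask_copy(&dev->coupled_cpus, cpu_possible_mask);
+ *		dev->safe_state_index = 0;
+ *	}
+ */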
+
+/**
+ * struct cpuidle_coupled - data for set of cpus that share a coupled idle state
+ * @coupled_cpus: mask of cpus that are part of the coupled set
+ * @requested_state: array of requested states for cpus in the coupled set
+ * @ready_waiting_counts: combined count of cpus in ready or waiting loops
+ * @online_count: count of cpus that are online
+ * @refcnt: reference count of cpuidle devices that are using this struct
+ * @prevent: flag to prevent coupled idle while a cpu is hotplugging
+ */
+struct cpuidle_coupled {
+ cpumask_t coupled_cpus;
+ int requested_state[NR_CPUS];
+ atomic_t ready_waiting_counts;
+ int online_count;
+ int refcnt;
+ int prevent;
+};
+
+#define WAITING_BITS 16
+#define MAX_WAITING_CPUS (1 << WAITING_BITS)
+#define WAITING_MASK (MAX_WAITING_CPUS - 1)
+#define READY_MASK (~WAITING_MASK)
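+
+/*
+ * ready_waiting_counts packs both counters into one atomic_t: the low
+ * WAITING_BITS bits hold the waiting count, and the bits above them
+ * hold the ready count (stepped by MAX_WAITING_CPUS).
+ */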
+
+#define CPUIDLE_COUPLED_NOT_IDLE (-1)
+
+static DEFINE_MUTEX(cpuidle_coupled_lock);
+static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb);
+
+/*
+ * The cpuidle_coupled_poked_mask mask is used to avoid calling
+ * __smp_call_function_single with the per cpu call_single_data struct already
+ * in use. This prevents a deadlock where two cpus are waiting for each
+ * other's call_single_data struct to be available.
+ */
+static cpumask_t cpuidle_coupled_poked_mask;
+
+/**
+ * cpuidle_coupled_parallel_barrier - synchronize all online coupled cpus
+ * @dev: cpuidle_device of the calling cpu
+ * @a: atomic variable to hold the barrier
+ *
+ * No caller to this function will return from this function until all online
+ * cpus in the same coupled group have called this function. Once any caller
+ * has returned from this function, the barrier is immediately available for
+ * reuse.
+ *
+ * The atomic variable a must be initialized to 0 before any cpu calls
+ * this function, and will be reset to 0 before any cpu returns from this
+ * function.
+ *
+ * Must only be called from within a coupled idle state handler
+ * (state.enter when state.flags has CPUIDLE_FLAG_COUPLED set).
+ *
+ * Provides full smp barrier semantics before and after calling.
+ */
+void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a)
+{
+ int n = dev->coupled->online_count;
+
+ smp_mb__before_atomic_inc();
+ atomic_inc(a);
+
+ while (atomic_read(a) < n)
+ cpu_relax();
+
+ if (atomic_inc_return(a) == n * 2) {
+ atomic_set(a, 0);
+ return;
+ }
+
+ while (atomic_read(a) > n)
+ cpu_relax();
+}
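+
+/*
+ * Typical use, inside a coupled state enter function (a sketch;
+ * "my_barrier" is a hypothetical driver-owned atomic_t, statically
+ * initialized to 0):
+ *
+ *	static atomic_t my_barrier = ATOMIC_INIT(0);
+ *	...
+ *	cpuidle_coupled_parallel_barrier(dev, &my_barrier);
+ *	... all online coupled cpus have now reached this point ...
+ */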
+
+/**
+ * cpuidle_state_is_coupled - check if a state is part of a coupled set
+ * @dev: struct cpuidle_device for the current cpu
+ * @drv: struct cpuidle_driver for the platform
+ * @state: index of the target state in drv->states
+ *
+ * Returns true if the target state is coupled with cpus besides this one
+ */
+bool cpuidle_state_is_coupled(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int state)
+{
+ return drv->states[state].flags & CPUIDLE_FLAG_COUPLED;
+}
+
+/**
+ * cpuidle_coupled_set_ready - mark a cpu as ready
+ * @coupled: the struct coupled that contains the current cpu
+ */
+static inline void cpuidle_coupled_set_ready(struct cpuidle_coupled *coupled)
+{
+ atomic_add(MAX_WAITING_CPUS, &coupled->ready_waiting_counts);
+}
+
+/**
+ * cpuidle_coupled_set_not_ready - mark a cpu as not ready
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Decrements the ready counter, unless the ready (and thus the waiting) counter
+ * is equal to the number of online cpus. Prevents a race where one cpu
+ * decrements the waiting counter and then re-increments it just before another
+ * cpu has decremented its ready counter, leading to the ready counter going
+ * down from the number of online cpus without going through the coupled idle
+ * state.
+ *
+ * Returns 0 if the counter was decremented successfully, -EINVAL if the ready
+ * counter was equal to the number of online cpus.
+ */
+static
+inline int cpuidle_coupled_set_not_ready(struct cpuidle_coupled *coupled)
+{
+ int all;
+ int ret;
+
+	all = coupled->online_count | (coupled->online_count << WAITING_BITS);
+ ret = atomic_add_unless(&coupled->ready_waiting_counts,
+ -MAX_WAITING_CPUS, all);
+
+ return ret ? 0 : -EINVAL;
+}
+
+/**
+ * cpuidle_coupled_no_cpus_ready - check if no cpus in a coupled set are ready
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Returns true if all of the cpus in a coupled set are out of the ready loop.
+ */
+static inline int cpuidle_coupled_no_cpus_ready(struct cpuidle_coupled *coupled)
+{
+ int r = atomic_read(&coupled->ready_waiting_counts) >> WAITING_BITS;
+ return r == 0;
+}
+
+/**
+ * cpuidle_coupled_cpus_ready - check if all cpus in a coupled set are ready
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Returns true if all cpus coupled to this target state are in the ready loop
+ */
+static inline bool cpuidle_coupled_cpus_ready(struct cpuidle_coupled *coupled)
+{
+ int r = atomic_read(&coupled->ready_waiting_counts) >> WAITING_BITS;
+ return r == coupled->online_count;
+}
+
+/**
+ * cpuidle_coupled_cpus_waiting - check if all cpus in a coupled set are waiting
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Returns true if all cpus coupled to this target state are in the wait loop
+ */
+static inline bool cpuidle_coupled_cpus_waiting(struct cpuidle_coupled *coupled)
+{
+ int w = atomic_read(&coupled->ready_waiting_counts) & WAITING_MASK;
+ return w == coupled->online_count;
+}
+
+/**
+ * cpuidle_coupled_no_cpus_waiting - check if no cpus in coupled set are waiting
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Returns true if all of the cpus in a coupled set are out of the waiting loop.
+ */
+static inline int cpuidle_coupled_no_cpus_waiting(struct cpuidle_coupled *coupled)
+{
+ int w = atomic_read(&coupled->ready_waiting_counts) & WAITING_MASK;
+ return w == 0;
+}
+
+/**
+ * cpuidle_coupled_get_state - determine the deepest idle state
+ * @dev: struct cpuidle_device for this cpu
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Returns the deepest idle state that all coupled cpus can enter
+ */
+static inline int cpuidle_coupled_get_state(struct cpuidle_device *dev,
+ struct cpuidle_coupled *coupled)
+{
+ int i;
+ int state = INT_MAX;
+
+ /*
+ * Read barrier ensures that read of requested_state is ordered after
+	 * reads of ready_count. Matches the write barriers in
+	 * cpuidle_set_state_waiting.
+ */
+ smp_rmb();
+
+ for_each_cpu_mask(i, coupled->coupled_cpus)
+ if (cpu_online(i) && coupled->requested_state[i] < state)
+ state = coupled->requested_state[i];
+
+ return state;
+}
+
+static void cpuidle_coupled_poked(void *info)
+{
+ int cpu = (unsigned long)info;
+ cpumask_clear_cpu(cpu, &cpuidle_coupled_poked_mask);
+}
+
+/**
+ * cpuidle_coupled_poke - wake up a cpu that may be waiting
+ * @cpu: target cpu
+ *
+ * Ensures that the target cpu exits its waiting idle state (if it is in it)
+ * and will see updates to waiting_count before it re-enters its waiting idle
+ * state.
+ *
+ * If cpuidle_coupled_poked_mask is already set for the target cpu, that cpu
+ * either has or will soon have a pending IPI that will wake it out of idle,
+ * or it is currently processing the IPI and is not in idle.
+ */
+static void cpuidle_coupled_poke(int cpu)
+{
+ struct call_single_data *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);
+
+ if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poked_mask))
+ __smp_call_function_single(cpu, csd, 0);
+}
+
+/**
+ * cpuidle_coupled_poke_others - wake up all other cpus that may be waiting
+ * @this_cpu: the current cpu
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Calls cpuidle_coupled_poke on all other online cpus.
+ */
+static void cpuidle_coupled_poke_others(int this_cpu,
+ struct cpuidle_coupled *coupled)
+{
+ int cpu;
+
+ for_each_cpu_mask(cpu, coupled->coupled_cpus)
+ if (cpu != this_cpu && cpu_online(cpu))
+ cpuidle_coupled_poke(cpu);
+}
+
+/**
+ * cpuidle_coupled_set_waiting - mark this cpu as in the wait loop
+ * @cpu: the current cpu
+ * @coupled: the struct coupled that contains the current cpu
+ * @next_state: the index in drv->states of the requested state for this cpu
+ *
+ * Updates the requested idle state for the specified cpuidle device,
+ * poking all coupled cpus out of idle if necessary to let them see the new
+ * state.
+ */
+static void cpuidle_coupled_set_waiting(int cpu,
+ struct cpuidle_coupled *coupled, int next_state)
+{
+ int w;
+
+ coupled->requested_state[cpu] = next_state;
+
+ /*
+ * If this is the last cpu to enter the waiting state, poke
+ * all the other cpus out of their waiting state so they can
+ * enter a deeper state. This can race with one of the cpus
+ * exiting the waiting state due to an interrupt and
+ * decrementing waiting_count, see comment below.
+ *
+ * The atomic_inc_return provides a write barrier to order the write
+ * to requested_state with the later write that increments ready_count.
+ */
+ w = atomic_inc_return(&coupled->ready_waiting_counts) & WAITING_MASK;
+ if (w == coupled->online_count)
+ cpuidle_coupled_poke_others(cpu, coupled);
+}
+
+/**
+ * cpuidle_coupled_set_not_waiting - mark this cpu as leaving the wait loop
+ * @cpu: the current cpu
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Removes the requested idle state for the specified cpuidle device.
+ */
+static void cpuidle_coupled_set_not_waiting(int cpu,
+ struct cpuidle_coupled *coupled)
+{
+ /*
+ * Decrementing waiting count can race with incrementing it in
+ * cpuidle_coupled_set_waiting, but that's OK. Worst case, some
+ * cpus will increment ready_count and then spin until they
+	 * notice that this cpu has cleared its requested_state.
+ */
+ atomic_dec(&coupled->ready_waiting_counts);
+
+ coupled->requested_state[cpu] = CPUIDLE_COUPLED_NOT_IDLE;
+}
+
+/**
+ * cpuidle_coupled_set_done - mark this cpu as leaving the ready loop
+ * @cpu: the current cpu
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Marks this cpu as no longer in the ready and waiting loops. Decrements
+ * the waiting count first to prevent another cpu looping back in and seeing
+ * this cpu as waiting just before it exits idle.
+ */
+static void cpuidle_coupled_set_done(int cpu, struct cpuidle_coupled *coupled)
+{
+ cpuidle_coupled_set_not_waiting(cpu, coupled);
+ atomic_sub(MAX_WAITING_CPUS, &coupled->ready_waiting_counts);
+}
+
+/**
+ * cpuidle_coupled_clear_pokes - spin until the poke interrupt is processed
+ * @cpu: this cpu
+ *
+ * Turns on interrupts and spins until any outstanding poke interrupts have
+ * been processed and the poke bit has been cleared.
+ *
+ * Other interrupts may also be processed while interrupts are enabled, so
+ * need_resched() must be tested after turning interrupts off again to make sure
+ * the interrupt didn't schedule work that should take the cpu out of idle.
+ *
+ * Returns 0 if need_resched was false, -EINTR if need_resched was true.
+ */
+static int cpuidle_coupled_clear_pokes(int cpu)
+{
+ local_irq_enable();
+ while (cpumask_test_cpu(cpu, &cpuidle_coupled_poked_mask))
+ cpu_relax();
+ local_irq_disable();
+
+ return need_resched() ? -EINTR : 0;
+}
+
+/**
+ * cpuidle_enter_state_coupled - attempt to enter a state with coupled cpus
+ * @dev: struct cpuidle_device for the current cpu
+ * @drv: struct cpuidle_driver for the platform
+ * @next_state: index of the requested state in drv->states
+ *
+ * Coordinate with coupled cpus to enter the target state. This is a two
+ * stage process. In the first stage, the cpus are operating independently,
+ * and may call into cpuidle_enter_state_coupled at completely different times.
+ * To save as much power as possible, the first cpus to call this function will
+ * go to an intermediate state (the cpuidle_device's safe state), and wait for
+ * all the other cpus to call this function. Once all coupled cpus are idle,
+ * the second stage will start. Each coupled cpu will spin until all cpus have
+ * guaranteed that they will call the target_state.
+ *
+ * This function must be called with interrupts disabled. It may enable
+ * interrupts while preparing for idle, and it will always return with
+ * interrupts enabled.
+ */
+int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int next_state)
+{
+ int entered_state = -1;
+ struct cpuidle_coupled *coupled = dev->coupled;
+
+ if (!coupled)
+ return -EINVAL;
+
+ while (coupled->prevent) {
+ if (cpuidle_coupled_clear_pokes(dev->cpu)) {
+ local_irq_enable();
+ return entered_state;
+ }
+ entered_state = cpuidle_enter_state(dev, drv,
+ dev->safe_state_index);
+ }
+
+ /* Read barrier ensures online_count is read after prevent is cleared */
+ smp_rmb();
+
+ cpuidle_coupled_set_waiting(dev->cpu, coupled, next_state);
+
+retry:
+ /*
+ * Wait for all coupled cpus to be idle, using the deepest state
+ * allowed for a single cpu.
+ */
+ while (!cpuidle_coupled_cpus_waiting(coupled)) {
+ if (cpuidle_coupled_clear_pokes(dev->cpu)) {
+ cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
+ goto out;
+ }
+
+ if (coupled->prevent) {
+ cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
+ goto out;
+ }
+
+ entered_state = cpuidle_enter_state(dev, drv,
+ dev->safe_state_index);
+ }
+
+ if (cpuidle_coupled_clear_pokes(dev->cpu)) {
+ cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
+ goto out;
+ }
+
+ /*
+ * All coupled cpus are probably idle. There is a small chance that
+ * one of the other cpus just became active. Increment the ready count,
+ * and spin until all coupled cpus have incremented the counter. Once a
+ * cpu has incremented the ready counter, it cannot abort idle and must
+ * spin until either all cpus have incremented the ready counter, or
+ * another cpu leaves idle and decrements the waiting counter.
+ */
+
+ cpuidle_coupled_set_ready(coupled);
+ while (!cpuidle_coupled_cpus_ready(coupled)) {
+ /* Check if any other cpus bailed out of idle. */
+ if (!cpuidle_coupled_cpus_waiting(coupled))
+ if (!cpuidle_coupled_set_not_ready(coupled))
+ goto retry;
+
+ cpu_relax();
+ }
+
+ /* all cpus have acked the coupled state */
+ next_state = cpuidle_coupled_get_state(dev, coupled);
+
+ entered_state = cpuidle_enter_state(dev, drv, next_state);
+
+ cpuidle_coupled_set_done(dev->cpu, coupled);
+
+out:
+ /*
+ * Normal cpuidle states are expected to return with irqs enabled.
+ * That leads to an inefficiency where a cpu receiving an interrupt
+ * that brings it out of idle will process that interrupt before
+ * exiting the idle enter function and decrementing ready_count. All
+ * other cpus will need to spin waiting for the cpu that is processing
+ * the interrupt. If the driver returns with interrupts disabled,
+ * all other cpus will loop back into the safe idle state instead of
+ * spinning, saving power.
+ *
+ * Calling local_irq_enable here allows coupled states to return with
+ * interrupts disabled, but won't cause problems for drivers that
+ * exit with interrupts enabled.
+ */
+ local_irq_enable();
+
+ /*
+ * Wait until all coupled cpus have exited idle. There is no risk that
+ * a cpu exits and re-enters the ready state because this cpu has
+ * already decremented its waiting_count.
+ */
+ while (!cpuidle_coupled_no_cpus_ready(coupled))
+ cpu_relax();
+
+ return entered_state;
+}
+
+static void cpuidle_coupled_update_online_cpus(struct cpuidle_coupled *coupled)
+{
+ cpumask_t cpus;
+ cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus);
+ coupled->online_count = cpumask_weight(&cpus);
+}
+
+/**
+ * cpuidle_coupled_register_device - register a coupled cpuidle device
+ * @dev: struct cpuidle_device for the current cpu
+ *
+ * Called from cpuidle_register_device to handle coupled idle init. Finds the
+ * cpuidle_coupled struct for this set of coupled cpus, or creates one if none
+ * exists yet.
+ */
+int cpuidle_coupled_register_device(struct cpuidle_device *dev)
+{
+ int cpu;
+ struct cpuidle_device *other_dev;
+ struct call_single_data *csd;
+ struct cpuidle_coupled *coupled;
+
+ if (cpumask_empty(&dev->coupled_cpus))
+ return 0;
+
+ for_each_cpu_mask(cpu, dev->coupled_cpus) {
+ other_dev = per_cpu(cpuidle_devices, cpu);
+ if (other_dev && other_dev->coupled) {
+ coupled = other_dev->coupled;
+ goto have_coupled;
+ }
+ }
+
+ /* No existing coupled info found, create a new one */
+ coupled = kzalloc(sizeof(struct cpuidle_coupled), GFP_KERNEL);
+ if (!coupled)
+ return -ENOMEM;
+
+ coupled->coupled_cpus = dev->coupled_cpus;
+
+have_coupled:
+ dev->coupled = coupled;
+ if (WARN_ON(!cpumask_equal(&dev->coupled_cpus, &coupled->coupled_cpus)))
+ coupled->prevent++;
+
+ cpuidle_coupled_update_online_cpus(coupled);
+
+ coupled->refcnt++;
+
+ csd = &per_cpu(cpuidle_coupled_poke_cb, dev->cpu);
+ csd->func = cpuidle_coupled_poked;
+ csd->info = (void *)(unsigned long)dev->cpu;
+
+ return 0;
+}
+
+/**
+ * cpuidle_coupled_unregister_device - unregister a coupled cpuidle device
+ * @dev: struct cpuidle_device for the current cpu
+ *
+ * Called from cpuidle_unregister_device to tear down coupled idle. Removes the
+ * cpu from the coupled idle set, and frees the cpuidle_coupled_info struct if
+ * this was the last cpu in the set.
+ */
+void cpuidle_coupled_unregister_device(struct cpuidle_device *dev)
+{
+ struct cpuidle_coupled *coupled = dev->coupled;
+
+ if (cpumask_empty(&dev->coupled_cpus))
+ return;
+
+	if (!--coupled->refcnt)
+ kfree(coupled);
+ dev->coupled = NULL;
+}
+
+/**
+ * cpuidle_coupled_prevent_idle - prevent cpus from entering a coupled state
+ * @coupled: the struct coupled that contains the cpu that is changing state
+ *
+ * Disables coupled cpuidle on a coupled set of cpus. Used to ensure that
+ * cpu_online_mask doesn't change while cpus are coordinating coupled idle.
+ */
+static void cpuidle_coupled_prevent_idle(struct cpuidle_coupled *coupled)
+{
+ int cpu = get_cpu();
+
+ /* Force all cpus out of the waiting loop. */
+ coupled->prevent++;
+ cpuidle_coupled_poke_others(cpu, coupled);
+ put_cpu();
+ while (!cpuidle_coupled_no_cpus_waiting(coupled))
+ cpu_relax();
+}
+
+/**
+ * cpuidle_coupled_allow_idle - allows cpus to enter a coupled state
+ * @coupled: the struct coupled that contains the cpu that is changing state
+ *
+ * Enables coupled cpuidle on a coupled set of cpus. Used to ensure that
+ * cpu_online_mask doesn't change while cpus are coordinating coupled idle.
+ */
+static void cpuidle_coupled_allow_idle(struct cpuidle_coupled *coupled)
+{
+ int cpu = get_cpu();
+
+ /*
+ * Write barrier ensures readers see the new online_count when they
+ * see prevent == 0.
+ */
+ smp_wmb();
+ coupled->prevent--;
+ /* Force cpus out of the prevent loop. */
+ cpuidle_coupled_poke_others(cpu, coupled);
+ put_cpu();
+}
+
+/**
+ * cpuidle_coupled_cpu_notify - notifier called during hotplug transitions
+ * @nb: notifier block
+ * @action: hotplug transition
+ * @hcpu: target cpu number
+ *
+ * Called when a cpu is brought on or offline using hotplug. Updates the
+ * coupled cpu set appropriately
+ */
+static int cpuidle_coupled_cpu_notify(struct notifier_block *nb,
+ unsigned long action, void *hcpu)
+{
+ int cpu = (unsigned long)hcpu;
+ struct cpuidle_device *dev;
+
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_UP_PREPARE:
+ case CPU_DOWN_PREPARE:
+ case CPU_ONLINE:
+ case CPU_DEAD:
+ case CPU_UP_CANCELED:
+ case CPU_DOWN_FAILED:
+ break;
+ default:
+ return NOTIFY_OK;
+ }
+
+ mutex_lock(&cpuidle_lock);
+
+ dev = per_cpu(cpuidle_devices, cpu);
+ if (!dev->coupled)
+ goto out;
+
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_UP_PREPARE:
+ case CPU_DOWN_PREPARE:
+ cpuidle_coupled_prevent_idle(dev->coupled);
+ break;
+ case CPU_ONLINE:
+ case CPU_DEAD:
+ cpuidle_coupled_update_online_cpus(dev->coupled);
+ /* Fall through */
+ case CPU_UP_CANCELED:
+ case CPU_DOWN_FAILED:
+ cpuidle_coupled_allow_idle(dev->coupled);
+ break;
+ }
+
+out:
+ mutex_unlock(&cpuidle_lock);
+ return NOTIFY_OK;
+}
+
+static struct notifier_block cpuidle_coupled_cpu_notifier = {
+ .notifier_call = cpuidle_coupled_cpu_notify,
+};
+
+static int __init cpuidle_coupled_init(void)
+{
+ return register_cpu_notifier(&cpuidle_coupled_cpu_notifier);
+}
+core_initcall(cpuidle_coupled_init);
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 3e3e3e4..e81cfda 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -171,7 +171,11 @@
trace_power_start_rcuidle(POWER_CSTATE, next_state, dev->cpu);
trace_cpu_idle_rcuidle(next_state, dev->cpu);
- entered_state = cpuidle_enter_state(dev, drv, next_state);
+ if (cpuidle_state_is_coupled(dev, drv, next_state))
+ entered_state = cpuidle_enter_state_coupled(dev, drv,
+ next_state);
+ else
+ entered_state = cpuidle_enter_state(dev, drv, next_state);
trace_power_end_rcuidle(dev->cpu);
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
@@ -403,13 +407,25 @@
per_cpu(cpuidle_devices, dev->cpu) = dev;
list_add(&dev->device_list, &cpuidle_detected_devices);
- if ((ret = cpuidle_add_sysfs(cpu_dev))) {
- module_put(cpuidle_driver->owner);
- return ret;
- }
+ ret = cpuidle_add_sysfs(cpu_dev);
+ if (ret)
+ goto err_sysfs;
+
+ ret = cpuidle_coupled_register_device(dev);
+ if (ret)
+ goto err_coupled;
dev->registered = 1;
return 0;
+
+err_coupled:
+ cpuidle_remove_sysfs(cpu_dev);
+ wait_for_completion(&dev->kobj_unregister);
+err_sysfs:
+ list_del(&dev->device_list);
+ per_cpu(cpuidle_devices, dev->cpu) = NULL;
+ module_put(cpuidle_driver->owner);
+ return ret;
}
/**
@@ -459,6 +475,8 @@
wait_for_completion(&dev->kobj_unregister);
per_cpu(cpuidle_devices, dev->cpu) = NULL;
+ cpuidle_coupled_unregister_device(dev);
+
cpuidle_resume_and_unlock();
module_put(cpuidle_driver->owner);
diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h
index d8a3ccc..76e7f69 100644
--- a/drivers/cpuidle/cpuidle.h
+++ b/drivers/cpuidle/cpuidle.h
@@ -32,4 +32,34 @@
extern int cpuidle_add_sysfs(struct device *dev);
extern void cpuidle_remove_sysfs(struct device *dev);
+#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
+bool cpuidle_state_is_coupled(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int state);
+int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int next_state);
+int cpuidle_coupled_register_device(struct cpuidle_device *dev);
+void cpuidle_coupled_unregister_device(struct cpuidle_device *dev);
+#else
+static inline bool cpuidle_state_is_coupled(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int state)
+{
+ return false;
+}
+
+static inline int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int next_state)
+{
+ return -1;
+}
+
+static inline int cpuidle_coupled_register_device(struct cpuidle_device *dev)
+{
+ return 0;
+}
+
+static inline void cpuidle_coupled_unregister_device(struct cpuidle_device *dev)
+{
+}
+#endif
+
#endif /* __DRIVER_CPUIDLE_H */
diff --git a/drivers/gpu/ion/msm/ion_cp_common.c b/drivers/gpu/ion/msm/ion_cp_common.c
index fa4bad5..8c9b95d 100644
--- a/drivers/gpu/ion/msm/ion_cp_common.c
+++ b/drivers/gpu/ion/msm/ion_cp_common.c
@@ -176,9 +176,9 @@
buffer, ret_value);
atomic_dec(&buf->secure_cnt);
} else {
- pr_debug("Protected buffer %p from %x-%x\n",
- buffer, buf->buffer,
- buf->buffer + buffer->size);
+ pr_debug("Protected buffer %p from %pa (size %x)\n",
+ buffer, &buf->buffer,
+ buffer->size);
buf->want_delayed_unsecure |=
flags & ION_UNSECURE_DELAYED ? 1 : 0;
buf->data = data;
diff --git a/drivers/iommu/msm_iommu_dev-v0.c b/drivers/iommu/msm_iommu_dev-v0.c
index b740bc9..549800f 100644
--- a/drivers/iommu/msm_iommu_dev-v0.c
+++ b/drivers/iommu/msm_iommu_dev-v0.c
@@ -144,8 +144,7 @@
}
drvdata->base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
if (!drvdata->base) {
- pr_err("%s: Unable to ioremap address %x size %x\n", __func__,
- r->start, resource_size(r));
+ pr_err("%s: Unable to ioremap %pr\n", __func__, r);
return -ENOMEM;
}
drvdata->glb_base = drvdata->base;
@@ -355,8 +354,7 @@
r2 = request_mem_region(r->start, len, r->name);
if (!r2) {
- pr_err("Could not request memory region: start=%p, len=%d\n",
- (void *) r->start, len);
+ pr_err("Could not request memory region: %pr\n", r);
ret = -EBUSY;
goto fail;
}
@@ -364,8 +362,7 @@
drvdata->base = devm_ioremap(&pdev->dev, r2->start, len);
if (!drvdata->base) {
- pr_err("Could not ioremap: start=%p, len=%d\n",
- (void *) r2->start, len);
+ pr_err("Could not ioremap: %pr\n", r);
ret = -EBUSY;
goto fail;
}
diff --git a/drivers/iommu/msm_iommu_dev-v1.c b/drivers/iommu/msm_iommu_dev-v1.c
index f37e619..3f9f1c4 100644
--- a/drivers/iommu/msm_iommu_dev-v1.c
+++ b/drivers/iommu/msm_iommu_dev-v1.c
@@ -118,8 +118,8 @@
drvdata->clk_reg_virt = devm_ioremap(&pdev->dev, r->start,
resource_size(r));
if (!drvdata->clk_reg_virt) {
- pr_err("Failed to map 0x%x for iommu clk\n",
- r->start);
+ pr_err("Failed to map resource for iommu clk: %pr\n",
+ r);
ret = -ENOMEM;
goto fail;
}
diff --git a/drivers/media/platform/msm/vcap/vcap_vp.c b/drivers/media/platform/msm/vcap/vcap_vp.c
index abc4e7e..aba7095 100644
--- a/drivers/media/platform/msm/vcap/vcap_vp.c
+++ b/drivers/media/platform/msm/vcap/vcap_vp.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -472,7 +472,7 @@
int rc;
struct vcap_dev *dev = c_data->dev;
struct ion_handle *handle = NULL;
- unsigned long paddr, len, ionflag = 0;
+ unsigned long paddr, len;
void *vaddr;
size_t size = ((c_data->vp_out_fmt.width + 63) >> 6) *
((c_data->vp_out_fmt.height + 7) >> 3) * 16;
@@ -489,13 +489,6 @@
return -ENOMEM;
}
- rc = ion_handle_get_flags(dev->ion_client, handle, &ionflag);
- if (rc) {
- pr_err("%s: get flags ion handle failed\n", __func__);
- ion_free(dev->ion_client, handle);
- return rc;
- }
-
vaddr = ion_map_kernel(dev->ion_client, handle);
if (IS_ERR(vaddr)) {
pr_err("%s: Map motion buffer failed\n", __func__);
diff --git a/drivers/media/platform/msm/wfd/enc-venus-subdev.c b/drivers/media/platform/msm/wfd/enc-venus-subdev.c
index d37576d..b41ece6 100644
--- a/drivers/media/platform/msm/wfd/enc-venus-subdev.c
+++ b/drivers/media/platform/msm/wfd/enc-venus-subdev.c
@@ -641,7 +641,7 @@
struct mem_region *mregion)
{
int rc = 0;
- unsigned long flags = 0, size = 0, align_req = 0;
+ unsigned long size = 0, align_req = 0;
if (!mregion) {
rc = -EINVAL;
goto venc_map_fail;
@@ -663,12 +663,6 @@
goto venc_map_fail;
}
- rc = ion_handle_get_flags(venc_ion_client, mregion->ion_handle, &flags);
- if (rc) {
- WFD_MSG_ERR("Failed to get ion flags %d\n", rc);
- goto venc_map_fail;
- }
-
if (!inst->secure) {
mregion->kvaddr = ion_map_kernel(venc_ion_client,
mregion->ion_handle);
@@ -693,7 +687,8 @@
rc = ion_map_iommu(venc_ion_client, mregion->ion_handle,
inst->domain, 0, align_req, 0,
- (unsigned long *)&mregion->paddr, &size, flags, 0);
+ (unsigned long *)&mregion->paddr, &size, 0, 0);
+
if (rc) {
WFD_MSG_ERR("Failed to map into iommu\n");
goto venc_map_iommu_map_fail;
diff --git a/drivers/media/platform/msm/wfd/mdp-5-subdev.c b/drivers/media/platform/msm/wfd/mdp-5-subdev.c
index be705df..4089a99 100644
--- a/drivers/media/platform/msm/wfd/mdp-5-subdev.c
+++ b/drivers/media/platform/msm/wfd/mdp-5-subdev.c
@@ -202,7 +202,7 @@
int rc = 0, align = 0;
struct mem_region_map *mmap = arg;
struct mem_region *mregion;
- bool domain = -1;
+ int domain = -1;
struct mdp_instance *inst = NULL;
if (!mmap || !mmap->mregion || !mmap->cookie) {
@@ -241,6 +241,8 @@
!inst->secure ? "non" : "", rc);
goto iommu_fail;
}
+
+ return 0;
iommu_fail:
if (inst->secure)
msm_ion_unsecure_buffer(mmap->ion_client, mregion->ion_handle);
@@ -252,7 +254,7 @@
{
struct mem_region_map *mmap = arg;
struct mem_region *mregion;
- bool domain = -1;
+ int domain = -1;
struct mdp_instance *inst = NULL;
if (!mmap || !mmap->mregion || !mmap->cookie) {
diff --git a/drivers/media/radio/radio-iris.c b/drivers/media/radio/radio-iris.c
index afb40be..11a8f4d 100644
--- a/drivers/media/radio/radio-iris.c
+++ b/drivers/media/radio/radio-iris.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -3956,10 +3956,16 @@
return 0;
}
+static const struct of_device_id iris_fm_match[] = {
+ {.compatible = "qcom,iris_fm"},
+ {}
+};
+
static struct platform_driver iris_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "iris_fm",
+ .of_match_table = iris_fm_match,
},
.remove = __devexit_p(iris_remove),
};
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 8aa4758..9e22ffb 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -663,6 +663,9 @@
&resp, sizeof(resp));
if (ret) {
pr_err("scm_call to load app failed\n");
+ if (!IS_ERR_OR_NULL(ihandle))
+ ion_free(qseecom.ion_clnt, ihandle);
+ qsee_disable_clock_vote(data, CLK_SFPB);
return -EINVAL;
}
@@ -1524,8 +1527,12 @@
app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
memcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE);
ret = __qseecom_check_app_exists(app_ireq);
- if (ret < 0)
+ if (ret < 0) {
+ kzfree(data);
+ kfree(*handle);
+ *handle = NULL;
return -EINVAL;
+ }
if (ret > 0) {
pr_warn("App id %d for [%s] app exists\n", ret,
@@ -1554,6 +1561,7 @@
if (ret < 0) {
kfree(*handle);
+ kfree(data);
*handle = NULL;
return ret;
}
@@ -1563,6 +1571,9 @@
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry) {
pr_err("kmalloc failed\n");
+ kfree(data);
+ kfree(*handle);
+ *handle = NULL;
return -ENOMEM;
}
entry->app_id = ret;
diff --git a/drivers/net/wireless/wcnss/wcnss_wlan.c b/drivers/net/wireless/wcnss/wcnss_wlan.c
index 91ecad7..ed4e246 100644
--- a/drivers/net/wireless/wcnss/wcnss_wlan.c
+++ b/drivers/net/wireless/wcnss/wcnss_wlan.c
@@ -976,12 +976,11 @@
if (has_pronto_hw) {
has_48mhz_xo = of_property_read_bool(pdev->dev.of_node,
"qcom,has_48mhz_xo");
- penv->wcnss_hw_type = WCNSS_PRONTO_HW;
} else {
- penv->wcnss_hw_type = WCNSS_RIVA_HW;
has_48mhz_xo = pdata->has_48mhz_xo;
}
}
+ penv->wcnss_hw_type = (has_pronto_hw) ? WCNSS_PRONTO_HW : WCNSS_RIVA_HW;
penv->wlan_config.use_48mhz_xo = has_48mhz_xo;
penv->thermal_mitigation = 0;
diff --git a/drivers/platform/msm/ipa/a2_service.c b/drivers/platform/msm/ipa/a2_service.c
index 0ae2552..2c5245c 100644
--- a/drivers/platform/msm/ipa/a2_service.c
+++ b/drivers/platform/msm/ipa/a2_service.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,267 +10,1369 @@
* GNU General Public License for more details.
*/
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <mach/bam_dmux.h>
-#include <mach/ipa.h>
+/*
+ * A2 service component
+ */
+
+#include <net/ip.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/clk.h>
+#include <linux/wakelock.h>
#include <mach/sps.h>
+#include <mach/msm_smsm.h>
+#include <mach/socinfo.h>
+#include <mach/ipa.h>
#include "ipa_i.h"
-static struct a2_service_cb_type {
- void *tx_complete_cb;
- void *rx_cb;
- u32 producer_handle;
- u32 consumer_handle;
-} a2_service_cb;
+#define A2_NUM_PIPES 6
+#define A2_SUMMING_THRESHOLD 4096
+#define BUFFER_SIZE 2048
+#define NUM_BUFFERS 32
+#define BAM_CH_LOCAL_OPEN 0x1
+#define BAM_CH_REMOTE_OPEN 0x2
+#define BAM_CH_IN_RESET 0x4
+#define BAM_MUX_HDR_MAGIC_NO 0x33fc
+#define BAM_MUX_HDR_CMD_DATA 0
+#define BAM_MUX_HDR_CMD_OPEN 1
+#define BAM_MUX_HDR_CMD_CLOSE 2
+#define BAM_MUX_HDR_CMD_STATUS 3
+#define BAM_MUX_HDR_CMD_OPEN_NO_A2_PC 4
+#define LOW_WATERMARK 2
+#define HIGH_WATERMARK 4
+#define A2_MUX_COMPLETION_TIMEOUT (60*HZ)
+#define ENABLE_DISCONNECT_ACK 0x1
+#define A2_MUX_PADDING_LENGTH(len) (4 - ((len) & 0x3))
-static struct sps_mem_buffer data_mem_buf[2];
-static struct sps_mem_buffer desc_mem_buf[2];
+struct bam_ch_info {
+ u32 status;
+ a2_mux_notify_cb notify_cb;
+ void *user_data;
+ spinlock_t lock;
+ int num_tx_pkts;
+ int use_wm;
+};
+struct tx_pkt_info {
+ struct sk_buff *skb;
+ char is_cmd;
+ u32 len;
+ struct list_head list_node;
+ unsigned ts_sec;
+ unsigned long ts_nsec;
+};
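+
+/*
+ * Header prepended to each packet carried over the embedded (muxed)
+ * channels; magic_num must be BAM_MUX_HDR_MAGIC_NO and cmd is one of
+ * the BAM_MUX_HDR_CMD_* values. Tethered traffic is not muxed and
+ * carries no such header.
+ */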
+struct bam_mux_hdr {
+ u16 magic_num;
+ u8 reserved;
+ u8 cmd;
+ u8 pad_len;
+ u8 ch_id;
+ u16 pkt_len;
+};
+struct a2_mux_context_type {
+ u32 tethered_prod;
+ u32 tethered_cons;
+ u32 embedded_prod;
+ u32 embedded_cons;
+ int a2_mux_apps_pc_enabled;
+ struct work_struct kickoff_ul_wakeup;
+ struct work_struct kickoff_ul_power_down;
+ struct work_struct kickoff_ul_request_resource;
+ struct bam_ch_info bam_ch[A2_MUX_NUM_CHANNELS];
+ struct list_head bam_tx_pool;
+ spinlock_t bam_tx_pool_spinlock;
+ struct workqueue_struct *a2_mux_tx_workqueue;
+ int a2_mux_initialized;
+ bool bam_is_connected;
+ int a2_mux_send_power_vote_on_init_once;
+ int a2_mux_sw_bridge_is_connected;
+ u32 a2_device_handle;
+ struct mutex wakeup_lock;
+ struct completion ul_wakeup_ack_completion;
+ struct completion bam_connection_completion;
+ struct completion request_resource_completion;
+ rwlock_t ul_wakeup_lock;
+ int wait_for_ack;
+ struct wake_lock bam_wakelock;
+ int a2_pc_disabled;
+ spinlock_t wakelock_reference_lock;
+ int wakelock_reference_count;
+ int a2_pc_disabled_wakelock_skipped;
+ int disconnect_ack;
+ struct mutex smsm_cb_lock;
+ int bam_dmux_uplink_vote;
+};
+static struct a2_mux_context_type *a2_mux_ctx;
-static int connect_pipe_ipa(enum a2_mux_pipe_direction pipe_dir,
- u8 *usb_pipe_idx,
- u32 *clnt_hdl,
- struct sps_pipe *pipe);
+static void handle_bam_mux_cmd(struct sk_buff *rx_skb);
-static int a2_ipa_connect_pipe(struct ipa_connect_params *in_params,
- struct ipa_sps_params *out_params, u32 *clnt_hdl);
+static bool bam_ch_is_open(int index)
+{
+ return a2_mux_ctx->bam_ch[index].status ==
+ (BAM_CH_LOCAL_OPEN | BAM_CH_REMOTE_OPEN);
+}
+
+static bool bam_ch_is_local_open(int index)
+{
+ return a2_mux_ctx->bam_ch[index].status &
+ BAM_CH_LOCAL_OPEN;
+}
+
+static bool bam_ch_is_remote_open(int index)
+{
+ return a2_mux_ctx->bam_ch[index].status &
+ BAM_CH_REMOTE_OPEN;
+}
+
+static bool bam_ch_is_in_reset(int index)
+{
+ return a2_mux_ctx->bam_ch[index].status &
+ BAM_CH_IN_RESET;
+}
+
+static void set_tx_timestamp(struct tx_pkt_info *pkt)
+{
+ unsigned long long t_now;
+
+ t_now = sched_clock();
+ pkt->ts_nsec = do_div(t_now, 1000000000U);
+ pkt->ts_sec = (unsigned)t_now;
+}
+
+static void verify_tx_queue_is_empty(const char *func)
+{
+ unsigned long flags;
+ struct tx_pkt_info *info;
+ int reported = 0;
+
+ spin_lock_irqsave(&a2_mux_ctx->bam_tx_pool_spinlock, flags);
+ list_for_each_entry(info, &a2_mux_ctx->bam_tx_pool, list_node) {
+ if (!reported) {
+ IPADBG("%s: tx pool not empty\n", func);
+ reported = 1;
+ }
+ IPADBG("%s: node=%p ts=%u.%09lu\n", __func__,
+ &info->list_node, info->ts_sec, info->ts_nsec);
+ }
+ spin_unlock_irqrestore(&a2_mux_ctx->bam_tx_pool_spinlock, flags);
+}
+
+static void grab_wakelock(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&a2_mux_ctx->wakelock_reference_lock, flags);
+ IPADBG("%s: ref count = %d\n",
+ __func__,
+ a2_mux_ctx->wakelock_reference_count);
+ if (a2_mux_ctx->wakelock_reference_count == 0)
+ wake_lock(&a2_mux_ctx->bam_wakelock);
+ ++a2_mux_ctx->wakelock_reference_count;
+ spin_unlock_irqrestore(&a2_mux_ctx->wakelock_reference_lock, flags);
+}
+
+static void release_wakelock(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&a2_mux_ctx->wakelock_reference_lock, flags);
+ if (a2_mux_ctx->wakelock_reference_count == 0) {
+ IPAERR("%s: bam_dmux wakelock not locked\n", __func__);
+ dump_stack();
+ spin_unlock_irqrestore(&a2_mux_ctx->wakelock_reference_lock,
+ flags);
+ return;
+ }
+ IPADBG("%s: ref count = %d\n",
+ __func__,
+ a2_mux_ctx->wakelock_reference_count);
+ --a2_mux_ctx->wakelock_reference_count;
+ if (a2_mux_ctx->wakelock_reference_count == 0)
+ wake_unlock(&a2_mux_ctx->bam_wakelock);
+ spin_unlock_irqrestore(&a2_mux_ctx->wakelock_reference_lock, flags);
+}
+
+static void toggle_apps_ack(void)
+{
+ static unsigned int clear_bit; /* 0 = set the bit, else clear bit */
+
+ IPADBG("%s: apps ack %d->%d\n", __func__,
+ clear_bit & 0x1, ~clear_bit & 0x1);
+ smsm_change_state(SMSM_APPS_STATE,
+ clear_bit & SMSM_A2_POWER_CONTROL_ACK,
+ ~clear_bit & SMSM_A2_POWER_CONTROL_ACK);
+ clear_bit = ~clear_bit;
+}
+
+static void power_vote(int vote)
+{
+ IPADBG("%s: curr=%d, vote=%d\n",
+ __func__,
+ a2_mux_ctx->bam_dmux_uplink_vote, vote);
+ if (a2_mux_ctx->bam_dmux_uplink_vote == vote)
+ IPADBG("%s: warning - duplicate power vote\n", __func__);
+ a2_mux_ctx->bam_dmux_uplink_vote = vote;
+ if (vote)
+ smsm_change_state(SMSM_APPS_STATE, 0, SMSM_A2_POWER_CONTROL);
+ else
+ smsm_change_state(SMSM_APPS_STATE, SMSM_A2_POWER_CONTROL, 0);
+}
+
+static inline void ul_powerdown(void)
+{
+ IPADBG("%s: powerdown\n", __func__);
+ verify_tx_queue_is_empty(__func__);
+ if (a2_mux_ctx->a2_pc_disabled)
+ release_wakelock();
+ else {
+ a2_mux_ctx->wait_for_ack = 1;
+ INIT_COMPLETION(a2_mux_ctx->ul_wakeup_ack_completion);
+ power_vote(0);
+ }
+ a2_mux_ctx->bam_is_connected = false;
+}
+
+static void ul_wakeup(void)
+{
+ int ret;
+
+ mutex_lock(&a2_mux_ctx->wakeup_lock);
+ if (a2_mux_ctx->bam_is_connected) {
+ IPADBG("%s Already awake\n", __func__);
+ mutex_unlock(&a2_mux_ctx->wakeup_lock);
+ return;
+ }
+ if (a2_mux_ctx->a2_pc_disabled) {
+ /*
+ * don't grab the wakelock the first time because it is
+ * already grabbed when a2 powers on
+ */
+ if (likely(a2_mux_ctx->a2_pc_disabled_wakelock_skipped))
+ grab_wakelock();
+ else
+ a2_mux_ctx->a2_pc_disabled_wakelock_skipped = 1;
+ a2_mux_ctx->bam_is_connected = true;
+ mutex_unlock(&a2_mux_ctx->wakeup_lock);
+ return;
+ }
+ /*
+ * must wait for the previous power down request to have been acked
+ * chances are it already came in and this will just fall through
+ * instead of waiting
+ */
+ if (a2_mux_ctx->wait_for_ack) {
+ IPADBG("%s waiting for previous ack\n", __func__);
+ ret = wait_for_completion_timeout(
+ &a2_mux_ctx->ul_wakeup_ack_completion,
+ A2_MUX_COMPLETION_TIMEOUT);
+ a2_mux_ctx->wait_for_ack = 0;
+ if (unlikely(ret == 0)) {
+ IPADBG("%s timeout previous ack\n", __func__);
+ goto bail;
+ }
+ }
+ INIT_COMPLETION(a2_mux_ctx->ul_wakeup_ack_completion);
+ power_vote(1);
+ IPADBG("%s waiting for wakeup ack\n", __func__);
+ ret = wait_for_completion_timeout(&a2_mux_ctx->ul_wakeup_ack_completion,
+ A2_MUX_COMPLETION_TIMEOUT);
+ if (unlikely(ret == 0)) {
+ IPADBG("%s timeout wakeup ack\n", __func__);
+ goto bail;
+ }
+ INIT_COMPLETION(a2_mux_ctx->bam_connection_completion);
+ if (!a2_mux_ctx->a2_mux_sw_bridge_is_connected) {
+ ret = wait_for_completion_timeout(
+ &a2_mux_ctx->bam_connection_completion,
+ A2_MUX_COMPLETION_TIMEOUT);
+ if (unlikely(ret == 0)) {
+ IPADBG("%s timeout power on\n", __func__);
+ goto bail;
+ }
+ }
+ a2_mux_ctx->bam_is_connected = true;
+ IPADBG("%s complete\n", __func__);
+ mutex_unlock(&a2_mux_ctx->wakeup_lock);
+ return;
+bail:
+ mutex_unlock(&a2_mux_ctx->wakeup_lock);
+ BUG();
+ return;
+}
+
+static void bam_mux_write_done(bool is_tethered, struct sk_buff *skb)
+{
+ struct tx_pkt_info *info;
+ enum a2_mux_logical_channel_id lcid;
+ unsigned long event_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&a2_mux_ctx->bam_tx_pool_spinlock, flags);
+ info = list_first_entry(&a2_mux_ctx->bam_tx_pool,
+ struct tx_pkt_info, list_node);
+ if (unlikely(info->skb != skb)) {
+ struct tx_pkt_info *errant_pkt;
+
+ IPAERR("tx_pool mismatch next=%p list_node=%p, ts=%u.%09lu\n",
+ a2_mux_ctx->bam_tx_pool.next,
+ &info->list_node,
+ info->ts_sec, info->ts_nsec
+ );
+
+ list_for_each_entry(errant_pkt,
+ &a2_mux_ctx->bam_tx_pool, list_node) {
+ IPAERR("%s: node=%p ts=%u.%09lu\n", __func__,
+ &errant_pkt->list_node, errant_pkt->ts_sec,
+ errant_pkt->ts_nsec);
+ if (errant_pkt->skb == skb)
+ info = errant_pkt;
+
+ }
+ spin_unlock_irqrestore(&a2_mux_ctx->bam_tx_pool_spinlock,
+ flags);
+ BUG();
+ }
+ list_del(&info->list_node);
+ spin_unlock_irqrestore(&a2_mux_ctx->bam_tx_pool_spinlock, flags);
+ if (info->is_cmd) {
+ dev_kfree_skb_any(info->skb);
+ kfree(info);
+ return;
+ }
+ skb = info->skb;
+ kfree(info);
+ event_data = (unsigned long)(skb);
+ if (is_tethered)
+ lcid = A2_MUX_TETHERED_0;
+ else {
+ struct bam_mux_hdr *hdr = (struct bam_mux_hdr *)skb->data;
+ lcid = (enum a2_mux_logical_channel_id) hdr->ch_id;
+ }
+ spin_lock_irqsave(&a2_mux_ctx->bam_ch[lcid].lock, flags);
+ a2_mux_ctx->bam_ch[lcid].num_tx_pkts--;
+ spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[lcid].lock, flags);
+ if (a2_mux_ctx->bam_ch[lcid].notify_cb)
+ a2_mux_ctx->bam_ch[lcid].notify_cb(
+ a2_mux_ctx->bam_ch[lcid].user_data, A2_MUX_WRITE_DONE,
+ event_data);
+ else
+ dev_kfree_skb_any(skb);
+}
+
+static void kickoff_ul_power_down_func(struct work_struct *work)
+{
+ unsigned long flags;
+
+ write_lock_irqsave(&a2_mux_ctx->ul_wakeup_lock, flags);
+ if (a2_mux_ctx->bam_is_connected) {
+ IPADBG("%s: UL active - forcing powerdown\n", __func__);
+ ul_powerdown();
+ }
+ write_unlock_irqrestore(&a2_mux_ctx->ul_wakeup_lock, flags);
+ ipa_rm_notify_completion(IPA_RM_RESOURCE_RELEASED,
+ IPA_RM_RESOURCE_A2_CONS);
+}
+
+static void kickoff_ul_wakeup_func(struct work_struct *work)
+{
+ if (!a2_mux_ctx->bam_is_connected)
+ ul_wakeup();
+ ipa_rm_notify_completion(IPA_RM_RESOURCE_GRANTED,
+ IPA_RM_RESOURCE_A2_CONS);
+}
+
+static void kickoff_ul_request_resource_func(struct work_struct *work)
+{
+ int ret;
+
+ INIT_COMPLETION(a2_mux_ctx->request_resource_completion);
+ ret = ipa_rm_request_resource(IPA_RM_RESOURCE_A2_PROD);
+ if (ret < 0 && ret != -EINPROGRESS) {
+ IPAERR("%s: ipa_rm_request_resource failed %d\n", __func__,
+ ret);
+ return;
+ }
+ if (ret == -EINPROGRESS) {
+ ret = wait_for_completion_timeout(
+ &a2_mux_ctx->request_resource_completion,
+ A2_MUX_COMPLETION_TIMEOUT);
+ if (unlikely(ret == 0)) {
+ IPADBG("%s timeout request A2 PROD resource\n",
+ __func__);
+ BUG();
+ return;
+ }
+ }
+ toggle_apps_ack();
+}
+
+static bool msm_bam_dmux_kickoff_ul_wakeup(void)
+{
+ bool is_connected;
+
+ read_lock(&a2_mux_ctx->ul_wakeup_lock);
+ is_connected = a2_mux_ctx->bam_is_connected;
+ read_unlock(&a2_mux_ctx->ul_wakeup_lock);
+ if (!is_connected)
+ queue_work(a2_mux_ctx->a2_mux_tx_workqueue,
+ &a2_mux_ctx->kickoff_ul_wakeup);
+ return is_connected;
+}
+
+static bool msm_bam_dmux_kickoff_ul_power_down(void)
+{
+ bool is_connected;
+
+ read_lock(&a2_mux_ctx->ul_wakeup_lock);
+ is_connected = a2_mux_ctx->bam_is_connected;
+ read_unlock(&a2_mux_ctx->ul_wakeup_lock);
+ if (is_connected)
+ queue_work(a2_mux_ctx->a2_mux_tx_workqueue,
+ &a2_mux_ctx->kickoff_ul_power_down);
+ return is_connected;
+}
+
+static void ipa_embedded_notify(void *priv,
+ enum ipa_dp_evt_type evt,
+ unsigned long data)
+{
+ switch (evt) {
+ case IPA_RECEIVE:
+ handle_bam_mux_cmd((struct sk_buff *)data);
+ break;
+ case IPA_WRITE_DONE:
+ bam_mux_write_done(false, (struct sk_buff *)data);
+ break;
+ default:
+ IPAERR("%s: Unknown event %d\n", __func__, evt);
+ break;
+ }
+}
+
+static void ipa_tethered_notify(void *priv,
+ enum ipa_dp_evt_type evt,
+ unsigned long data)
+{
+ IPADBG("%s: event = %d\n", __func__, evt);
+ switch (evt) {
+ case IPA_RECEIVE:
+ if (a2_mux_ctx->bam_ch[A2_MUX_TETHERED_0].notify_cb)
+ a2_mux_ctx->bam_ch[A2_MUX_TETHERED_0].notify_cb(
+ a2_mux_ctx->bam_ch[A2_MUX_TETHERED_0].user_data,
+ A2_MUX_RECEIVE,
+ data);
+ break;
+ case IPA_WRITE_DONE:
+ bam_mux_write_done(true, (struct sk_buff *)data);
+ break;
+ default:
+ IPAERR("%s: Unknown event %d\n", __func__, evt);
+ break;
+ }
+}
+
+static int connect_to_bam(void)
+{
+ int ret;
+ struct ipa_sys_connect_params connect_params;
+
+ IPAERR("%s:\n", __func__);
+ if (a2_mux_ctx->a2_mux_sw_bridge_is_connected) {
+ IPAERR("%s: SW bridge is already UP\n",
+ __func__);
+ return -EFAULT;
+ }
+ ret = sps_device_reset(a2_mux_ctx->a2_device_handle);
+ if (ret)
+ IPAERR("%s: device reset failed ret = %d\n",
+ __func__, ret);
+ memset(&connect_params, 0, sizeof(struct ipa_sys_connect_params));
+ connect_params.client = IPA_CLIENT_A2_TETHERED_CONS;
+ connect_params.notify = ipa_tethered_notify;
+ connect_params.desc_fifo_sz = 0x800;
+ ret = ipa_bridge_setup(IPA_BRIDGE_DIR_UL, IPA_BRIDGE_TYPE_TETHERED,
+ &connect_params,
+ &a2_mux_ctx->tethered_prod);
+ if (ret) {
+ IPAERR("%s: IPA bridge tethered UL failed to connect: %d\n",
+ __func__, ret);
+ goto bridge_tethered_ul_failed;
+ }
+ memset(&connect_params, 0, sizeof(struct ipa_sys_connect_params));
+ connect_params.ipa_ep_cfg.mode.mode = IPA_DMA;
+ connect_params.ipa_ep_cfg.mode.dst = IPA_CLIENT_USB_CONS;
+ connect_params.client = IPA_CLIENT_A2_TETHERED_PROD;
+ connect_params.notify = ipa_tethered_notify;
+ connect_params.desc_fifo_sz = 0x800;
+ ret = ipa_bridge_setup(IPA_BRIDGE_DIR_DL, IPA_BRIDGE_TYPE_TETHERED,
+ &connect_params,
+ &a2_mux_ctx->tethered_cons);
+ if (ret) {
+ IPAERR("%s: IPA bridge tethered DL failed to connect: %d\n",
+ __func__, ret);
+ goto bridge_tethered_dl_failed;
+ }
+ memset(&connect_params, 0, sizeof(struct ipa_sys_connect_params));
+ connect_params.client = IPA_CLIENT_A2_EMBEDDED_CONS;
+ connect_params.notify = ipa_embedded_notify;
+ connect_params.desc_fifo_sz = 0x800;
+ ret = ipa_bridge_setup(IPA_BRIDGE_DIR_UL, IPA_BRIDGE_TYPE_EMBEDDED,
+ &connect_params,
+ &a2_mux_ctx->embedded_prod);
+ if (ret) {
+ IPAERR("%s: IPA bridge embedded UL failed to connect: %d\n",
+ __func__, ret);
+ goto bridge_embedded_ul_failed;
+ }
+ memset(&connect_params, 0, sizeof(struct ipa_sys_connect_params));
+ connect_params.client = IPA_CLIENT_A2_EMBEDDED_PROD;
+ connect_params.notify = ipa_embedded_notify;
+ connect_params.desc_fifo_sz = 0x800;
+ ret = ipa_bridge_setup(IPA_BRIDGE_DIR_DL, IPA_BRIDGE_TYPE_EMBEDDED,
+ &connect_params,
+ &a2_mux_ctx->embedded_cons);
+ if (ret) {
+ IPAERR("%s: IPA bridge embedded DL failed to connect: %d\n",
+ __func__, ret);
+ goto bridge_embedded_dl_failed;
+ }
+ a2_mux_ctx->a2_mux_sw_bridge_is_connected = 1;
+ complete_all(&a2_mux_ctx->bam_connection_completion);
+ return 0;
+
+bridge_embedded_dl_failed:
+ ipa_bridge_teardown(IPA_BRIDGE_DIR_UL, IPA_BRIDGE_TYPE_EMBEDDED,
+ a2_mux_ctx->embedded_prod);
+bridge_embedded_ul_failed:
+ ipa_bridge_teardown(IPA_BRIDGE_DIR_DL, IPA_BRIDGE_TYPE_TETHERED,
+ a2_mux_ctx->tethered_cons);
+bridge_tethered_dl_failed:
+ ipa_bridge_teardown(IPA_BRIDGE_DIR_UL, IPA_BRIDGE_TYPE_TETHERED,
+ a2_mux_ctx->tethered_prod);
+bridge_tethered_ul_failed:
+ return ret;
+}
+
+static int disconnect_to_bam(void)
+{
+ int ret;
+
+ IPAERR("%s\n", __func__);
+ if (!a2_mux_ctx->a2_mux_sw_bridge_is_connected) {
+ IPAERR("%s: SW bridge is already DOWN\n",
+ __func__);
+ return -EFAULT;
+ }
+ ret = ipa_bridge_teardown(IPA_BRIDGE_DIR_UL, IPA_BRIDGE_TYPE_TETHERED,
+ a2_mux_ctx->tethered_prod);
+ if (ret) {
+ IPAERR("%s: IPA bridge tethered UL failed to disconnect: %d\n",
+ __func__, ret);
+ return ret;
+ }
+ ret = ipa_bridge_teardown(IPA_BRIDGE_DIR_DL, IPA_BRIDGE_TYPE_TETHERED,
+ a2_mux_ctx->tethered_cons);
+ if (ret) {
+ IPAERR("%s: IPA bridge tethered DL failed to disconnect: %d\n",
+ __func__, ret);
+ return ret;
+ }
+ ret = ipa_bridge_teardown(IPA_BRIDGE_DIR_UL, IPA_BRIDGE_TYPE_EMBEDDED,
+ a2_mux_ctx->embedded_prod);
+ if (ret) {
+ IPAERR("%s: IPA bridge embedded UL failed to disconnect: %d\n",
+ __func__, ret);
+ return ret;
+ }
+ ret = ipa_bridge_teardown(IPA_BRIDGE_DIR_DL, IPA_BRIDGE_TYPE_EMBEDDED,
+ a2_mux_ctx->embedded_cons);
+ if (ret) {
+ IPAERR("%s: IPA bridge embedded DL failed to disconnect: %d\n",
+ __func__, ret);
+ return ret;
+ }
+ ret = sps_device_reset(a2_mux_ctx->a2_device_handle);
+ if (ret) {
+ IPAERR("%s: device reset failed ret = %d\n",
+ __func__, ret);
+ return ret;
+ }
+ verify_tx_queue_is_empty(__func__);
+ (void) ipa_rm_release_resource(IPA_RM_RESOURCE_A2_PROD);
+ if (a2_mux_ctx->disconnect_ack)
+ toggle_apps_ack();
+ a2_mux_ctx->a2_mux_sw_bridge_is_connected = 0;
+ complete_all(&a2_mux_ctx->bam_connection_completion);
+ return 0;
+}
+
+static void bam_dmux_smsm_cb(void *priv,
+ u32 old_state,
+ u32 new_state)
+{
+ static int last_processed_state;
+
+ mutex_lock(&a2_mux_ctx->smsm_cb_lock);
+ IPADBG("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
+ new_state);
+ if (last_processed_state == (new_state & SMSM_A2_POWER_CONTROL)) {
+ IPADBG("%s: already processed this state\n", __func__);
+ mutex_unlock(&a2_mux_ctx->smsm_cb_lock);
+ return;
+ }
+ last_processed_state = new_state & SMSM_A2_POWER_CONTROL;
+ if (new_state & SMSM_A2_POWER_CONTROL) {
+ IPADBG("%s: MODEM PWR CTRL 1\n", __func__);
+ grab_wakelock();
+ (void) connect_to_bam();
+ queue_work(a2_mux_ctx->a2_mux_tx_workqueue,
+ &a2_mux_ctx->kickoff_ul_request_resource);
+ } else if (!(new_state & SMSM_A2_POWER_CONTROL)) {
+ IPADBG("%s: MODEM PWR CTRL 0\n", __func__);
+ (void) disconnect_to_bam();
+ release_wakelock();
+ } else {
+ IPAERR("%s: unsupported state change\n", __func__);
+ }
+ mutex_unlock(&a2_mux_ctx->smsm_cb_lock);
+}
+
+static void bam_dmux_smsm_ack_cb(void *priv, u32 old_state,
+ u32 new_state)
+{
+ IPADBG("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
+ new_state);
+ complete_all(&a2_mux_ctx->ul_wakeup_ack_completion);
+}
+
+static int a2_mux_pm_rm_request_resource(void)
+{
+ int result = 0;
+ bool is_connected;
+
+ is_connected = msm_bam_dmux_kickoff_ul_wakeup();
+ if (!is_connected)
+ result = -EINPROGRESS;
+ return result;
+}
+
+static int a2_mux_pm_rm_release_resource(void)
+{
+ int result = 0;
+ bool is_connected;
+
+ is_connected = msm_bam_dmux_kickoff_ul_power_down();
+ if (is_connected)
+ result = -EINPROGRESS;
+ return result;
+}
+
+static void a2_mux_pm_rm_notify_cb(void *user_data,
+ enum ipa_rm_event event,
+ unsigned long data)
+{
+ switch (event) {
+ case IPA_RM_RESOURCE_GRANTED:
+ IPADBG("%s: PROD GRANTED CB\n", __func__);
+ complete_all(&a2_mux_ctx->request_resource_completion);
+ break;
+ case IPA_RM_RESOURCE_RELEASED:
+ IPADBG("%s: PROD RELEASED CB\n", __func__);
+ break;
+ default:
+ return;
+ }
+}
+
+static int a2_mux_pm_initialize_rm(void)
+{
+ struct ipa_rm_create_params create_params;
+ int result;
+
+ memset(&create_params, 0, sizeof(create_params));
+ create_params.name = IPA_RM_RESOURCE_A2_PROD;
+ create_params.reg_params.notify_cb = &a2_mux_pm_rm_notify_cb;
+ result = ipa_rm_create_resource(&create_params);
+ if (result)
+ goto bail;
+ memset(&create_params, 0, sizeof(create_params));
+ create_params.name = IPA_RM_RESOURCE_A2_CONS;
+ create_params.release_resource = &a2_mux_pm_rm_release_resource;
+ create_params.request_resource = &a2_mux_pm_rm_request_resource;
+ result = ipa_rm_create_resource(&create_params);
+bail:
+ return result;
+}
+
+static void bam_mux_process_data(struct sk_buff *rx_skb)
+{
+ unsigned long flags;
+ struct bam_mux_hdr *rx_hdr;
+ unsigned long event_data;
+
+ rx_hdr = (struct bam_mux_hdr *)rx_skb->data;
+ rx_skb->data = (unsigned char *)(rx_hdr + 1);
+ rx_skb->tail = rx_skb->data + rx_hdr->pkt_len;
+ rx_skb->len = rx_hdr->pkt_len;
+ rx_skb->truesize = rx_hdr->pkt_len + sizeof(struct sk_buff);
+ event_data = (unsigned long)(rx_skb);
+ spin_lock_irqsave(&a2_mux_ctx->bam_ch[rx_hdr->ch_id].lock, flags);
+ if (a2_mux_ctx->bam_ch[rx_hdr->ch_id].notify_cb)
+ a2_mux_ctx->bam_ch[rx_hdr->ch_id].notify_cb(
+ a2_mux_ctx->bam_ch[rx_hdr->ch_id].user_data,
+ A2_MUX_RECEIVE,
+ event_data);
+ else
+ dev_kfree_skb_any(rx_skb);
+ spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[rx_hdr->ch_id].lock,
+ flags);
+}
+
+static void handle_bam_mux_cmd_open(struct bam_mux_hdr *rx_hdr)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&a2_mux_ctx->bam_ch[rx_hdr->ch_id].lock, flags);
+ a2_mux_ctx->bam_ch[rx_hdr->ch_id].status |= BAM_CH_REMOTE_OPEN;
+ a2_mux_ctx->bam_ch[rx_hdr->ch_id].num_tx_pkts = 0;
+ spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[rx_hdr->ch_id].lock,
+ flags);
+}
+
+static void handle_bam_mux_cmd(struct sk_buff *rx_skb)
+{
+ unsigned long flags;
+ struct bam_mux_hdr *rx_hdr;
+
+ rx_hdr = (struct bam_mux_hdr *)rx_skb->data;
+ IPADBG("%s: magic %x reserved %d cmd %d pad %d ch %d len %d\n",
+ __func__,
+ rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
+ rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
+ rx_hdr->magic_num = ntohs(rx_hdr->magic_num);
+ rx_hdr->pkt_len = ntohs(rx_hdr->pkt_len);
+ IPADBG("%s: converted to host order magic_num=%d, pkt_len=%d\n",
+ __func__, rx_hdr->magic_num, rx_hdr->pkt_len);
+ if (rx_hdr->magic_num != BAM_MUX_HDR_MAGIC_NO) {
+ IPAERR("bad hdr magic %x rvd %d cmd %d pad %d ch %d len %d\n",
+ rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
+ rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
+ dev_kfree_skb_any(rx_skb);
+ return;
+ }
+ if (rx_hdr->ch_id >= A2_MUX_NUM_CHANNELS) {
+ IPAERR("bad LCID %d rsvd %d cmd %d pad %d ch %d len %d\n",
+ rx_hdr->ch_id, rx_hdr->reserved, rx_hdr->cmd,
+ rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
+ dev_kfree_skb_any(rx_skb);
+ return;
+ }
+ switch (rx_hdr->cmd) {
+ case BAM_MUX_HDR_CMD_DATA:
+ bam_mux_process_data(rx_skb);
+ break;
+ case BAM_MUX_HDR_CMD_OPEN:
+ IPADBG("%s: opening cid %d PC enabled\n", __func__,
+ rx_hdr->ch_id);
+ handle_bam_mux_cmd_open(rx_hdr);
+ if (!(rx_hdr->reserved & ENABLE_DISCONNECT_ACK)) {
+ IPADBG("%s: deactivating disconnect ack\n",
+ __func__);
+ a2_mux_ctx->disconnect_ack = 0;
+ }
+ dev_kfree_skb_any(rx_skb);
+ if (a2_mux_ctx->a2_mux_send_power_vote_on_init_once) {
+ kickoff_ul_wakeup_func(NULL);
+ a2_mux_ctx->a2_mux_send_power_vote_on_init_once = 0;
+ }
+ break;
+ case BAM_MUX_HDR_CMD_OPEN_NO_A2_PC:
+ IPADBG("%s: opening cid %d PC disabled\n", __func__,
+ rx_hdr->ch_id);
+ if (!a2_mux_ctx->a2_pc_disabled) {
+ a2_mux_ctx->a2_pc_disabled = 1;
+ ul_wakeup();
+ }
+ handle_bam_mux_cmd_open(rx_hdr);
+ dev_kfree_skb_any(rx_skb);
+ break;
+ case BAM_MUX_HDR_CMD_CLOSE:
+ /* probably should drop pending write */
+ IPADBG("%s: closing cid %d\n", __func__,
+ rx_hdr->ch_id);
+ spin_lock_irqsave(&a2_mux_ctx->bam_ch[rx_hdr->ch_id].lock,
+ flags);
+ a2_mux_ctx->bam_ch[rx_hdr->ch_id].status &=
+ ~BAM_CH_REMOTE_OPEN;
+ spin_unlock_irqrestore(
+ &a2_mux_ctx->bam_ch[rx_hdr->ch_id].lock, flags);
+ dev_kfree_skb_any(rx_skb);
+ break;
+ default:
+ IPAERR("bad hdr.magic %x rvd %d cmd %d pad %d ch %d len %d\n",
+ rx_hdr->magic_num, rx_hdr->reserved,
+ rx_hdr->cmd, rx_hdr->pad_len, rx_hdr->ch_id,
+ rx_hdr->pkt_len);
+ dev_kfree_skb_any(rx_skb);
+ return;
+ }
+}
+
+static int bam_mux_write_cmd(void *data, u32 len)
+{
+ int rc;
+ struct tx_pkt_info *pkt;
+ unsigned long flags;
+
+ pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
+ if (pkt == NULL) {
+ IPAERR("%s: mem alloc for tx_pkt_info failed\n", __func__);
+ return -ENOMEM;
+ }
+ pkt->skb = __dev_alloc_skb(len, GFP_NOWAIT | __GFP_NOWARN);
+ if (pkt->skb == NULL) {
+ IPAERR("%s: unable to alloc skb\n\n", __func__);
+ kfree(pkt);
+ return -ENOMEM;
+ }
+ memcpy(skb_put(pkt->skb, len), data, len);
+ kfree(data);
+ pkt->len = len;
+ pkt->is_cmd = 1;
+ set_tx_timestamp(pkt);
+ spin_lock_irqsave(&a2_mux_ctx->bam_tx_pool_spinlock, flags);
+ list_add_tail(&pkt->list_node, &a2_mux_ctx->bam_tx_pool);
+ rc = ipa_tx_dp(IPA_CLIENT_A2_EMBEDDED_CONS, pkt->skb, NULL);
+ if (rc) {
+ IPAERR("%s ipa_tx_dp failed rc=%d\n",
+ __func__, rc);
+ list_del(&pkt->list_node);
+ spin_unlock_irqrestore(&a2_mux_ctx->bam_tx_pool_spinlock,
+ flags);
+ dev_kfree_skb_any(pkt->skb);
+ kfree(pkt);
+ } else {
+ spin_unlock_irqrestore(&a2_mux_ctx->bam_tx_pool_spinlock,
+ flags);
+ }
+ return rc;
+}
/**
- * a2_mux_initialize() - initialize A2 MUX module
+ * a2_mux_get_tethered_client_handles() - provide the tethered
+ *		pipe handles for post-setup configuration
+ * @lcid: logical channel ID
+ * @clnt_cons_handle: [out] consumer pipe handle
+ * @clnt_prod_handle: [out] producer pipe handle
*
- * Return codes:
- * 0: success
+ * Returns: 0 on success, negative on failure
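+ *
+ * Example (illustrative sketch; assumes a2_mux_init() has succeeded
+ * and the SW bridge is up):
+ *	unsigned int cons, prod;
+ *	if (!a2_mux_get_tethered_client_handles(A2_MUX_TETHERED_0,
+ *						&cons, &prod))
+ *		configure_tethered_pipes(cons, prod);
+ * where configure_tethered_pipes() is a hypothetical caller routine.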
*/
-int a2_mux_initialize(void)
+int a2_mux_get_tethered_client_handles(enum a2_mux_logical_channel_id lcid,
+ unsigned int *clnt_cons_handle,
+ unsigned int *clnt_prod_handle)
{
- (void) msm_bam_dmux_ul_power_vote();
-
+ if (!a2_mux_ctx->a2_mux_initialized || lcid != A2_MUX_TETHERED_0)
+ return -ENODEV;
+ if (!clnt_cons_handle || !clnt_prod_handle)
+ return -EINVAL;
+ *clnt_prod_handle = a2_mux_ctx->tethered_prod;
+ *clnt_cons_handle = a2_mux_ctx->tethered_cons;
return 0;
}
/**
- * a2_mux_close() - close A2 MUX module
+ * a2_mux_write() - send a packet to A2,
+ *		adding a MUX header according to the lcid provided
+ * @id: logical channel ID
+ * @skb: SKB to write
*
- * Return codes:
- * 0: success
- * -EINVAL: invalid parameters
+ * Returns: 0 on success, negative on failure
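+ *
+ * Example (illustrative sketch; assumes the channel was opened via
+ * a2_mux_open_channel()):
+ *	rc = a2_mux_write(A2_MUX_TETHERED_0, skb);
+ *	if (rc == -EAGAIN)
+ *		tx_flow_stop();
+ * where tx_flow_stop() is a hypothetical flow-control hook for the
+ * HIGH_WATERMARK case.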
*/
-int a2_mux_close(void)
+int a2_mux_write(enum a2_mux_logical_channel_id id, struct sk_buff *skb)
{
- int ret = 0;
+ int rc = 0;
+ struct bam_mux_hdr *hdr;
+ unsigned long flags;
+ struct sk_buff *new_skb = NULL;
+ struct tx_pkt_info *pkt;
+ bool is_connected;
- (void) msm_bam_dmux_ul_power_unvote();
+ if (id >= A2_MUX_NUM_CHANNELS)
+ return -EINVAL;
+ if (!skb)
+ return -EINVAL;
+ if (!a2_mux_ctx->a2_mux_initialized)
+ return -ENODEV;
+ spin_lock_irqsave(&a2_mux_ctx->bam_ch[id].lock, flags);
+ if (!bam_ch_is_open(id)) {
+ spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[id].lock, flags);
+ IPAERR("%s: port not open: %d\n",
+ __func__,
+ a2_mux_ctx->bam_ch[id].status);
+ return -ENODEV;
+ }
+ if (a2_mux_ctx->bam_ch[id].use_wm &&
+ (a2_mux_ctx->bam_ch[id].num_tx_pkts >= HIGH_WATERMARK)) {
+ spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[id].lock, flags);
+ IPAERR("%s: watermark exceeded: %d\n", __func__, id);
+ return -EAGAIN;
+ }
+ spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[id].lock, flags);
+ read_lock(&a2_mux_ctx->ul_wakeup_lock);
+ is_connected = a2_mux_ctx->bam_is_connected;
+ read_unlock(&a2_mux_ctx->ul_wakeup_lock);
+ if (!is_connected)
+ return -ENODEV;
+ if (id != A2_MUX_TETHERED_0) {
+ /*
+		 * if the skb does not have enough tailroom for padding,
+		 * copy it into a new, expanded skb
+ */
+ if ((skb->len & 0x3) &&
+ (skb_tailroom(skb) < A2_MUX_PADDING_LENGTH(skb->len))) {
+ new_skb = skb_copy_expand(skb, skb_headroom(skb),
+ A2_MUX_PADDING_LENGTH(skb->len),
+ GFP_ATOMIC);
+ if (new_skb == NULL) {
+ IPAERR("%s: cannot allocate skb\n", __func__);
+ rc = -ENOMEM;
+ goto write_fail;
+ }
+ dev_kfree_skb_any(skb);
+ skb = new_skb;
+ }
+ hdr = (struct bam_mux_hdr *)skb_push(
+ skb, sizeof(struct bam_mux_hdr));
+ /*
+		 * the caller should allocate room for the hdr and the
+		 * padding; the hdr is straightforward, the padding is tricky
+ */
+ hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
+ hdr->cmd = BAM_MUX_HDR_CMD_DATA;
+ hdr->reserved = 0;
+ hdr->ch_id = id;
+ hdr->pkt_len = skb->len - sizeof(struct bam_mux_hdr);
+ if (skb->len & 0x3)
+ skb_put(skb, A2_MUX_PADDING_LENGTH(skb->len));
+ hdr->pad_len = skb->len - (sizeof(struct bam_mux_hdr) +
+ hdr->pkt_len);
+ IPADBG("data %p, tail %p skb len %d pkt len %d pad len %d\n",
+ skb->data, skb->tail, skb->len,
+ hdr->pkt_len, hdr->pad_len);
+ hdr->magic_num = htons(hdr->magic_num);
+ hdr->pkt_len = htons(hdr->pkt_len);
+ IPADBG("convert to network order magic_num=%d, pkt_len=%d\n",
+ hdr->magic_num, hdr->pkt_len);
+ }
+ pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
+ if (pkt == NULL) {
+ IPAERR("%s: mem alloc for tx_pkt_info failed\n", __func__);
+ rc = -ENOMEM;
+ goto write_fail2;
+ }
+ pkt->skb = skb;
+ pkt->is_cmd = 0;
+ set_tx_timestamp(pkt);
+ spin_lock_irqsave(&a2_mux_ctx->bam_tx_pool_spinlock, flags);
+ list_add_tail(&pkt->list_node, &a2_mux_ctx->bam_tx_pool);
+ if (id == A2_MUX_TETHERED_0)
+ rc = ipa_tx_dp(IPA_CLIENT_A2_TETHERED_CONS, skb, NULL);
+ else
+ rc = ipa_tx_dp(IPA_CLIENT_A2_EMBEDDED_CONS, skb, NULL);
+ if (rc) {
+ IPAERR("%s ipa_tx_dp failed rc=%d\n",
+ __func__, rc);
+ list_del(&pkt->list_node);
+ spin_unlock_irqrestore(&a2_mux_ctx->bam_tx_pool_spinlock,
+ flags);
+ goto write_fail3;
+ } else {
+ spin_unlock_irqrestore(&a2_mux_ctx->bam_tx_pool_spinlock,
+ flags);
+ spin_lock_irqsave(&a2_mux_ctx->bam_ch[id].lock, flags);
+ a2_mux_ctx->bam_ch[id].num_tx_pkts++;
+ spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[id].lock, flags);
+ }
+ return 0;
- ret = ipa_disconnect(a2_service_cb.consumer_handle);
- if (0 != ret) {
- pr_err("%s: ipa_disconnect failure\n", __func__);
- goto bail;
+write_fail3:
+ kfree(pkt);
+write_fail2:
+ if (new_skb)
+ dev_kfree_skb_any(new_skb);
+write_fail:
+ return rc;
+}
+
+/**
+ * a2_mux_open_channel() - opens logical channel
+ * to A2
+ * @lcid: logical channel ID
+ * @user_data: user-provided data passed to @notify_cb
+ * @notify_cb: user-provided notification CB
+ *
+ * Returns: 0 on success, negative on failure
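+ *
+ * Example (illustrative sketch; my_notify_cb and my_ctx are
+ * caller-defined):
+ *	rc = a2_mux_open_channel(A2_MUX_TETHERED_0, my_ctx, my_notify_cb);
+ *	if (rc)
+ *		IPAERR("open failed %d\n", rc);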
+ */
+int a2_mux_open_channel(enum a2_mux_logical_channel_id lcid,
+ void *user_data,
+ a2_mux_notify_cb notify_cb)
+{
+ struct bam_mux_hdr *hdr;
+ unsigned long flags;
+ int rc = 0;
+ bool is_connected;
+
+ IPADBG("%s: opening ch %d\n", __func__, lcid);
+ if (!a2_mux_ctx->a2_mux_initialized) {
+ IPAERR("%s: not inititialized\n", __func__);
+ return -ENODEV;
+ }
+ if (lcid >= A2_MUX_NUM_CHANNELS || lcid < 0) {
+ IPAERR("%s: invalid channel id %d\n", __func__, lcid);
+ return -EINVAL;
+ }
+ if (notify_cb == NULL) {
+ IPAERR("%s: notify function is NULL\n", __func__);
+ return -EINVAL;
+ }
+ spin_lock_irqsave(&a2_mux_ctx->bam_ch[lcid].lock, flags);
+ if (bam_ch_is_open(lcid)) {
+ IPAERR("%s: Already opened %d\n", __func__, lcid);
+ spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[lcid].lock, flags);
+ goto open_done;
+ }
+ if (!bam_ch_is_remote_open(lcid)) {
+ IPAERR("%s: Remote not open; ch: %d\n", __func__, lcid);
+ spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[lcid].lock, flags);
+ return -ENODEV;
+ }
+ a2_mux_ctx->bam_ch[lcid].notify_cb = notify_cb;
+ a2_mux_ctx->bam_ch[lcid].user_data = user_data;
+ a2_mux_ctx->bam_ch[lcid].status |= BAM_CH_LOCAL_OPEN;
+ a2_mux_ctx->bam_ch[lcid].num_tx_pkts = 0;
+ a2_mux_ctx->bam_ch[lcid].use_wm = 0;
+ spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[lcid].lock, flags);
+ read_lock(&a2_mux_ctx->ul_wakeup_lock);
+ is_connected = a2_mux_ctx->bam_is_connected;
+ read_unlock(&a2_mux_ctx->ul_wakeup_lock);
+ if (!is_connected)
+ return -ENODEV;
+ if (lcid != A2_MUX_TETHERED_0) {
+ hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_KERNEL);
+ if (hdr == NULL) {
+ IPAERR("%s: hdr kmalloc failed. ch: %d\n",
+ __func__, lcid);
+ return -ENOMEM;
+ }
+ hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
+ if (a2_mux_ctx->a2_mux_apps_pc_enabled) {
+ hdr->cmd = BAM_MUX_HDR_CMD_OPEN;
+ } else {
+ IPAERR("%s: PC DISABLED BY A5 SW BY INTENTION\n",
+ __func__);
+ a2_mux_ctx->a2_pc_disabled = 1;
+ hdr->cmd = BAM_MUX_HDR_CMD_OPEN_NO_A2_PC;
+ }
+ hdr->reserved = 0;
+ hdr->ch_id = lcid;
+ hdr->pkt_len = 0;
+ hdr->pad_len = 0;
+ hdr->magic_num = htons(hdr->magic_num);
+ hdr->pkt_len = htons(hdr->pkt_len);
+ IPADBG("convert to network order magic_num=%d, pkt_len=%d\n",
+ hdr->magic_num, hdr->pkt_len);
+ rc = bam_mux_write_cmd((void *)hdr,
+ sizeof(struct bam_mux_hdr));
+ if (rc) {
+ IPAERR("%s: bam_mux_write_cmd failed %d; ch: %d\n",
+ __func__, rc, lcid);
+ kfree(hdr);
+ return rc;
+ }
}
- ret = ipa_disconnect(a2_service_cb.producer_handle);
- if (0 != ret) {
- pr_err("%s: ipa_disconnect failure\n", __func__);
- goto bail;
+open_done:
+ IPADBG("%s: opened ch %d\n", __func__, lcid);
+ return rc;
+}
+
+/**
+ * a2_mux_close_channel() - closes logical channel
+ * to A2
+ * @lcid: logical channel ID
+ *
+ * Returns: 0 on success, negative on failure
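+ *
+ * Example (illustrative sketch, closing the channel opened above):
+ *	rc = a2_mux_close_channel(A2_MUX_TETHERED_0);
+ *	if (rc)
+ *		IPAERR("close failed %d\n", rc);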
+ */
+int a2_mux_close_channel(enum a2_mux_logical_channel_id lcid)
+{
+ struct bam_mux_hdr *hdr;
+ unsigned long flags;
+ int rc = 0;
+ bool is_connected;
+
+ if (lcid >= A2_MUX_NUM_CHANNELS || lcid < 0)
+ return -EINVAL;
+ IPADBG("%s: closing ch %d\n", __func__, lcid);
+ if (!a2_mux_ctx->a2_mux_initialized)
+ return -ENODEV;
+ read_lock(&a2_mux_ctx->ul_wakeup_lock);
+ is_connected = a2_mux_ctx->bam_is_connected;
+ read_unlock(&a2_mux_ctx->ul_wakeup_lock);
+ if (!is_connected && !bam_ch_is_in_reset(lcid))
+ return -ENODEV;
+ spin_lock_irqsave(&a2_mux_ctx->bam_ch[lcid].lock, flags);
+ a2_mux_ctx->bam_ch[lcid].notify_cb = NULL;
+ a2_mux_ctx->bam_ch[lcid].user_data = NULL;
+ a2_mux_ctx->bam_ch[lcid].status &= ~BAM_CH_LOCAL_OPEN;
+ spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[lcid].lock, flags);
+ if (bam_ch_is_in_reset(lcid)) {
+ a2_mux_ctx->bam_ch[lcid].status &= ~BAM_CH_IN_RESET;
+ return 0;
}
+ if (lcid != A2_MUX_TETHERED_0) {
+ hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_ATOMIC);
+ if (hdr == NULL) {
+ IPAERR("%s: hdr kmalloc failed. ch: %d\n",
+ __func__, lcid);
+ return -ENOMEM;
+ }
+ hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
+ hdr->cmd = BAM_MUX_HDR_CMD_CLOSE;
+ hdr->reserved = 0;
+ hdr->ch_id = lcid;
+ hdr->pkt_len = 0;
+ hdr->pad_len = 0;
+ hdr->magic_num = htons(hdr->magic_num);
+ hdr->pkt_len = htons(hdr->pkt_len);
+ IPADBG("convert to network order magic_num=%d, pkt_len=%d\n",
+ hdr->magic_num, hdr->pkt_len);
+ rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));
+ if (rc) {
+ IPAERR("%s: bam_mux_write_cmd failed %d; ch: %d\n",
+ __func__, rc, lcid);
+ kfree(hdr);
+ return rc;
+ }
+ }
+ IPADBG("%s: closed ch %d\n", __func__, lcid);
+ return 0;
+}
- ret = 0;
+/**
+ * a2_mux_is_ch_full() - checks if channel is above predefined WM,
+ * used for flow control implementation
+ * @lcid: logical channel ID
+ *
+ * Returns: true if the channel is above the predefined watermark,
+ * false otherwise, negative on failure
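+ *
+ * Example (illustrative sketch for a netdev-based caller, where dev is
+ * assumed to be the caller's net_device):
+ *	if (a2_mux_is_ch_full(lcid) > 0)
+ *		netif_stop_queue(dev);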
+ */
+int a2_mux_is_ch_full(enum a2_mux_logical_channel_id lcid)
+{
+ unsigned long flags;
+ int ret;
-bail:
-
+ if (lcid >= A2_MUX_NUM_CHANNELS ||
+ lcid < 0)
+ return -EINVAL;
+ if (!a2_mux_ctx->a2_mux_initialized)
+ return -ENODEV;
+ spin_lock_irqsave(&a2_mux_ctx->bam_ch[lcid].lock, flags);
+ a2_mux_ctx->bam_ch[lcid].use_wm = 1;
+ ret = a2_mux_ctx->bam_ch[lcid].num_tx_pkts >= HIGH_WATERMARK;
+ IPADBG("%s: ch %d num tx pkts=%d, HWM=%d\n", __func__,
+ lcid, a2_mux_ctx->bam_ch[lcid].num_tx_pkts, ret);
+ if (!bam_ch_is_local_open(lcid)) {
+ ret = -ENODEV;
+ IPAERR("%s: port not open: %d\n", __func__,
+ a2_mux_ctx->bam_ch[lcid].status);
+ }
+ spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[lcid].lock, flags);
return ret;
}
/**
- * a2_mux_open_port() - open connection to A2
- * @wwan_logical_channel_id: WWAN logical channel ID
- * @rx_cb: Rx callback
- * @tx_complete_cb: Tx completed callback
+ * a2_mux_is_ch_low() - checks if channel is below predefined WM,
+ * used for flow control implementation
+ * @lcid: logical channel ID
*
- * Return codes:
- * 0: success
- * -EINVAL: invalid parameters
+ * Returns: true if the channel is below the predefined watermark,
+ * false otherwise, negative on failure
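+ *
+ * Example (illustrative counterpart of the high-watermark check, same
+ * assumptions):
+ *	if (a2_mux_is_ch_low(lcid) > 0)
+ *		netif_wake_queue(dev);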
*/
-int a2_mux_open_port(int wwan_logical_channel_id, void *rx_cb,
- void *tx_complete_cb)
+int a2_mux_is_ch_low(enum a2_mux_logical_channel_id lcid)
{
- int ret = 0;
- u8 src_pipe = 0;
- u8 dst_pipe = 0;
- struct sps_pipe *a2_to_ipa_pipe = NULL;
- struct sps_pipe *ipa_to_a2_pipe = NULL;
-
- (void) wwan_logical_channel_id;
-
- a2_service_cb.rx_cb = rx_cb;
- a2_service_cb.tx_complete_cb = tx_complete_cb;
-
- ret = connect_pipe_ipa(A2_TO_IPA,
- &src_pipe,
- &(a2_service_cb.consumer_handle),
- a2_to_ipa_pipe);
- if (ret) {
- pr_err("%s: A2 to IPA pipe connection failure\n", __func__);
- goto bail;
- }
-
- ret = connect_pipe_ipa(IPA_TO_A2,
- &dst_pipe,
- &(a2_service_cb.producer_handle),
- ipa_to_a2_pipe);
- if (ret) {
- pr_err("%s: IPA to A2 pipe connection failure\n", __func__);
- sps_disconnect(a2_to_ipa_pipe);
- sps_free_endpoint(a2_to_ipa_pipe);
- (void) ipa_disconnect(a2_service_cb.consumer_handle);
- goto bail;
- }
-
- ret = 0;
-
-bail:
-
- return ret;
-}
-
-static int connect_pipe_ipa(enum a2_mux_pipe_direction pipe_dir,
- u8 *usb_pipe_idx,
- u32 *clnt_hdl,
- struct sps_pipe *pipe)
-{
+ unsigned long flags;
int ret;
- struct sps_connect connection = {0, };
- u32 a2_handle = 0;
- u32 a2_phy_addr = 0;
- struct a2_mux_pipe_connection pipe_connection = { 0, };
- struct ipa_connect_params ipa_in_params;
- struct ipa_sps_params sps_out_params;
- memset(&ipa_in_params, 0, sizeof(ipa_in_params));
- memset(&sps_out_params, 0, sizeof(sps_out_params));
-
- if (!usb_pipe_idx || !clnt_hdl) {
- pr_err("connect_pipe_ipa :: null arguments\n");
- ret = -EINVAL;
- goto bail;
+ if (lcid >= A2_MUX_NUM_CHANNELS ||
+ lcid < 0)
+ return -EINVAL;
+ if (!a2_mux_ctx->a2_mux_initialized)
+ return -ENODEV;
+ spin_lock_irqsave(&a2_mux_ctx->bam_ch[lcid].lock, flags);
+ a2_mux_ctx->bam_ch[lcid].use_wm = 1;
+ ret = a2_mux_ctx->bam_ch[lcid].num_tx_pkts <= LOW_WATERMARK;
+ IPADBG("%s: ch %d num tx pkts=%d, LWM=%d\n", __func__,
+ lcid, a2_mux_ctx->bam_ch[lcid].num_tx_pkts, ret);
+ if (!bam_ch_is_local_open(lcid)) {
+ ret = -ENODEV;
+ IPAERR("%s: port not open: %d\n", __func__,
+ a2_mux_ctx->bam_ch[lcid].status);
}
-
- ret = ipa_get_a2_mux_pipe_info(pipe_dir, &pipe_connection);
- if (ret) {
- pr_err("ipa_get_a2_mux_pipe_info failed\n");
- goto bail;
- }
-
- if (pipe_dir == A2_TO_IPA) {
- a2_phy_addr = pipe_connection.src_phy_addr;
- ipa_in_params.client = IPA_CLIENT_A2_TETHERED_PROD;
- ipa_in_params.ipa_ep_cfg.mode.mode = IPA_DMA;
- ipa_in_params.ipa_ep_cfg.mode.dst = IPA_CLIENT_USB_CONS;
- pr_err("-*&- pipe_connection->src_pipe_index = %d\n",
- pipe_connection.src_pipe_index);
- ipa_in_params.client_ep_idx = pipe_connection.src_pipe_index;
- } else {
- a2_phy_addr = pipe_connection.dst_phy_addr;
- ipa_in_params.client = IPA_CLIENT_A2_TETHERED_CONS;
- ipa_in_params.client_ep_idx = pipe_connection.dst_pipe_index;
- }
-
- ret = sps_phy2h(a2_phy_addr, &a2_handle);
- if (ret) {
- pr_err("%s: sps_phy2h failed (A2 BAM) %d\n", __func__, ret);
- goto bail;
- }
-
- ipa_in_params.client_bam_hdl = a2_handle;
- ipa_in_params.desc_fifo_sz = pipe_connection.desc_fifo_size;
- ipa_in_params.data_fifo_sz = pipe_connection.data_fifo_size;
-
- if (pipe_connection.mem_type == IPA_SPS_PIPE_MEM) {
- pr_debug("%s: A2 BAM using SPS pipe memory\n", __func__);
- ret = sps_setup_bam2bam_fifo(&data_mem_buf[pipe_dir],
- pipe_connection.data_fifo_base_offset,
- pipe_connection.data_fifo_size, 1);
- if (ret) {
- pr_err("%s: data fifo setup failure %d\n",
- __func__, ret);
- goto bail;
- }
-
- ret = sps_setup_bam2bam_fifo(&desc_mem_buf[pipe_dir],
- pipe_connection.desc_fifo_base_offset,
- pipe_connection.desc_fifo_size, 1);
- if (ret) {
- pr_err("%s: desc. fifo setup failure %d\n",
- __func__, ret);
- goto bail;
- }
-
- ipa_in_params.data = data_mem_buf[pipe_dir];
- ipa_in_params.desc = desc_mem_buf[pipe_dir];
- }
-
- ret = a2_ipa_connect_pipe(&ipa_in_params,
- &sps_out_params,
- clnt_hdl);
- if (ret) {
- pr_err("-**- USB-IPA info: ipa_connect failed\n");
- pr_err("%s: usb_ipa_connect_pipe failed\n", __func__);
- goto bail;
- }
-
- pipe = sps_alloc_endpoint();
- if (pipe == NULL) {
- pr_err("%s: sps_alloc_endpoint failed\n", __func__);
- ret = -ENOMEM;
- goto a2_ipa_connect_pipe_failed;
- }
-
- ret = sps_get_config(pipe, &connection);
- if (ret) {
- pr_err("%s: tx get config failed %d\n", __func__, ret);
- goto get_config_failed;
- }
-
- if (pipe_dir == A2_TO_IPA) {
- connection.mode = SPS_MODE_SRC;
- *usb_pipe_idx = connection.src_pipe_index;
- connection.source = a2_handle;
- connection.destination = sps_out_params.ipa_bam_hdl;
- connection.src_pipe_index = pipe_connection.src_pipe_index;
- connection.dest_pipe_index = sps_out_params.ipa_ep_idx;
- } else {
- connection.mode = SPS_MODE_DEST;
- *usb_pipe_idx = connection.dest_pipe_index;
- connection.source = sps_out_params.ipa_bam_hdl;
- connection.destination = a2_handle;
- connection.src_pipe_index = sps_out_params.ipa_ep_idx;
- connection.dest_pipe_index = pipe_connection.dst_pipe_index;
- }
-
- connection.event_thresh = 16;
- connection.data = sps_out_params.data;
- connection.desc = sps_out_params.desc;
-
- ret = sps_connect(pipe, &connection);
- if (ret < 0) {
- pr_err("%s: tx connect error %d\n", __func__, ret);
- goto error;
- }
-
- ret = 0;
- goto bail;
-error:
- sps_disconnect(pipe);
-get_config_failed:
- sps_free_endpoint(pipe);
-a2_ipa_connect_pipe_failed:
- (void) ipa_disconnect(*clnt_hdl);
-bail:
+ spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[lcid].lock, flags);
return ret;
}
-static int a2_ipa_connect_pipe(struct ipa_connect_params *in_params,
- struct ipa_sps_params *out_params, u32 *clnt_hdl)
+static int a2_mux_initialize_context(int handle)
{
- return ipa_connect(in_params, out_params, clnt_hdl);
+ int i;
+
+ a2_mux_ctx->a2_mux_apps_pc_enabled = 1;
+ a2_mux_ctx->a2_device_handle = handle;
+ INIT_WORK(&a2_mux_ctx->kickoff_ul_wakeup, kickoff_ul_wakeup_func);
+ INIT_WORK(&a2_mux_ctx->kickoff_ul_power_down,
+ kickoff_ul_power_down_func);
+ INIT_WORK(&a2_mux_ctx->kickoff_ul_request_resource,
+ kickoff_ul_request_resource_func);
+ INIT_LIST_HEAD(&a2_mux_ctx->bam_tx_pool);
+ spin_lock_init(&a2_mux_ctx->bam_tx_pool_spinlock);
+ mutex_init(&a2_mux_ctx->wakeup_lock);
+ rwlock_init(&a2_mux_ctx->ul_wakeup_lock);
+ spin_lock_init(&a2_mux_ctx->wakelock_reference_lock);
+ a2_mux_ctx->disconnect_ack = 1;
+ mutex_init(&a2_mux_ctx->smsm_cb_lock);
+ for (i = 0; i < A2_MUX_NUM_CHANNELS; ++i)
+ spin_lock_init(&a2_mux_ctx->bam_ch[i].lock);
+ init_completion(&a2_mux_ctx->ul_wakeup_ack_completion);
+ init_completion(&a2_mux_ctx->bam_connection_completion);
+ init_completion(&a2_mux_ctx->request_resource_completion);
+ wake_lock_init(&a2_mux_ctx->bam_wakelock,
+ WAKE_LOCK_SUSPEND, "a2_mux_wakelock");
+ a2_mux_ctx->a2_mux_initialized = 1;
+ a2_mux_ctx->a2_mux_send_power_vote_on_init_once = 1;
+ a2_mux_ctx->a2_mux_tx_workqueue =
+ create_singlethread_workqueue("a2_mux_tx");
+ if (!a2_mux_ctx->a2_mux_tx_workqueue) {
+ IPAERR("%s: a2_mux_tx_workqueue alloc failed\n",
+ __func__);
+ return -ENOMEM;
+ }
+ return 0;
}
+
+/**
+ * a2_mux_init() - initialize A2 MUX component
+ *
+ * Returns: 0 on success, negative otherwise
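+ *
+ * Example (illustrative sketch mirroring the call added to the IPA
+ * driver init path in this patch):
+ *	rc = a2_mux_init();
+ *	if (rc)
+ *		IPAERR("a2_mux_init failed %d\n", rc);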
+ */
+int a2_mux_init(void)
+{
+ int rc;
+ u32 h;
+ void *a2_virt_addr;
+ u32 a2_bam_mem_base;
+ u32 a2_bam_mem_size;
+ u32 a2_bam_irq;
+ struct sps_bam_props a2_props;
+
+ IPADBG("%s A2 MUX\n", __func__);
+ rc = ipa_get_a2_mux_bam_info(&a2_bam_mem_base,
+ &a2_bam_mem_size,
+ &a2_bam_irq);
+ if (rc) {
+ IPAERR("%s: ipa_get_a2_mux_bam_info failed\n", __func__);
+ rc = -EFAULT;
+ goto bail;
+ }
+ a2_virt_addr = ioremap_nocache((unsigned long)(a2_bam_mem_base),
+ a2_bam_mem_size);
+ if (!a2_virt_addr) {
+ IPAERR("%s: ioremap failed\n", __func__);
+ rc = -ENOMEM;
+ goto bail;
+ }
+ memset(&a2_props, 0, sizeof(a2_props));
+ a2_props.phys_addr = a2_bam_mem_base;
+ a2_props.virt_addr = a2_virt_addr;
+ a2_props.virt_size = a2_bam_mem_size;
+ a2_props.irq = a2_bam_irq;
+ a2_props.options = SPS_BAM_OPT_IRQ_WAKEUP;
+ a2_props.num_pipes = A2_NUM_PIPES;
+ a2_props.summing_threshold = A2_SUMMING_THRESHOLD;
+ /* need to free on tear down */
+ rc = sps_register_bam_device(&a2_props, &h);
+ if (rc < 0) {
+ IPAERR("%s: register bam error %d\n", __func__, rc);
+ goto register_bam_failed;
+ }
+ a2_mux_ctx = kzalloc(sizeof(*a2_mux_ctx), GFP_KERNEL);
+ if (!a2_mux_ctx) {
+ IPAERR("%s: a2_mux_ctx alloc failed, rc: %d\n", __func__, rc);
+ rc = -ENOMEM;
+ goto register_bam_failed;
+ }
+ rc = a2_mux_initialize_context(h);
+ if (rc) {
+ IPAERR("%s: a2_mux_initialize_context failed, rc: %d\n",
+ __func__, rc);
+ goto ctx_alloc_failed;
+ }
+ rc = a2_mux_pm_initialize_rm();
+ if (rc) {
+ IPAERR("%s: a2_mux_pm_initialize_rm failed, rc: %d\n",
+ __func__, rc);
+ goto ctx_alloc_failed;
+ }
+ rc = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_A2_POWER_CONTROL,
+ bam_dmux_smsm_cb, NULL);
+ if (rc) {
+ IPAERR("%s: smsm cb register failed, rc: %d\n", __func__, rc);
+ rc = -ENOMEM;
+ goto ctx_alloc_failed;
+ }
+ rc = smsm_state_cb_register(SMSM_MODEM_STATE,
+ SMSM_A2_POWER_CONTROL_ACK,
+ bam_dmux_smsm_ack_cb, NULL);
+ if (rc) {
+ IPAERR("%s: smsm ack cb register failed, rc: %d\n",
+ __func__, rc);
+ rc = -ENOMEM;
+ goto smsm_ack_cb_reg_failed;
+ }
+ if (smsm_get_state(SMSM_MODEM_STATE) & SMSM_A2_POWER_CONTROL)
+ bam_dmux_smsm_cb(NULL, 0, smsm_get_state(SMSM_MODEM_STATE));
+ rc = 0;
+ goto bail;
+
+smsm_ack_cb_reg_failed:
+ smsm_state_cb_deregister(SMSM_MODEM_STATE,
+ SMSM_A2_POWER_CONTROL,
+ bam_dmux_smsm_cb, NULL);
+ctx_alloc_failed:
+ kfree(a2_mux_ctx);
+register_bam_failed:
+ iounmap(a2_virt_addr);
+bail:
+ return rc;
+}
+
+/**
+ * a2_mux_exit() - destroy A2 MUX component
+ *
+ * Returns: 0 on success, negative otherwise
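+ *
+ * Example (illustrative teardown from a driver exit path):
+ *	(void) a2_mux_exit();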
+ */
+int a2_mux_exit(void)
+{
+ smsm_state_cb_deregister(SMSM_MODEM_STATE,
+ SMSM_A2_POWER_CONTROL_ACK,
+ bam_dmux_smsm_ack_cb,
+ NULL);
+ smsm_state_cb_deregister(SMSM_MODEM_STATE,
+ SMSM_A2_POWER_CONTROL,
+ bam_dmux_smsm_cb,
+ NULL);
+ if (a2_mux_ctx->a2_mux_tx_workqueue)
+ destroy_workqueue(a2_mux_ctx->a2_mux_tx_workqueue);
+ return 0;
+}
diff --git a/drivers/platform/msm/ipa/a2_service.h b/drivers/platform/msm/ipa/a2_service.h
deleted file mode 100644
index 80885da..0000000
--- a/drivers/platform/msm/ipa/a2_service.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _A2_SERVICE_H_
-#define _A2_SERVICE_H_
-
-int a2_mux_initialize(void);
-
-int a2_mux_close(void);
-
-int a2_mux_open_port(int wwan_logical_channel_id, void *rx_cb,
- void *tx_complete_cb);
-
-#endif /* _A2_SERVICE_H_ */
-
diff --git a/drivers/platform/msm/ipa/ipa.c b/drivers/platform/msm/ipa/ipa.c
index 870a6bc..b07c653 100644
--- a/drivers/platform/msm/ipa/ipa.c
+++ b/drivers/platform/msm/ipa/ipa.c
@@ -1069,6 +1069,33 @@
return 0;
}
+/**
+* ipa_get_a2_mux_bam_info() - Exposes A2 parameters fetched from
+* DTS
+*
+* @a2_bam_mem_base: A2 BAM Memory base
+* @a2_bam_mem_size: A2 BAM Memory size
+* @a2_bam_irq: A2 BAM IRQ
+*
+* Return codes:
+* 0: success
+* -EFAULT: invalid parameters
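+*
+* Example (illustrative sketch matching the usage in a2_mux_init()):
+*	u32 base, size, irq;
+*	if (!ipa_get_a2_mux_bam_info(&base, &size, &irq))
+*		map_a2_bam(base, size, irq);
+* where map_a2_bam() is a hypothetical consumer.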
+*/
+int ipa_get_a2_mux_bam_info(u32 *a2_bam_mem_base, u32 *a2_bam_mem_size,
+ u32 *a2_bam_irq)
+{
+ if (!a2_bam_mem_base || !a2_bam_mem_size || !a2_bam_irq) {
+ IPAERR("ipa_get_a2_mux_bam_info null args\n");
+ return -EFAULT;
+ }
+
+ *a2_bam_mem_base = ipa_res.a2_bam_mem_base;
+ *a2_bam_mem_size = ipa_res.a2_bam_mem_size;
+ *a2_bam_irq = ipa_res.a2_bam_irq;
+
+ return 0;
+}
+
static void ipa_set_aggregation_params(void)
{
struct ipa_ep_cfg_aggr agg_params;
@@ -1881,6 +1908,8 @@
goto fail_ipa_rm_init;
}
+ a2_mux_init();
+
IPADBG(":IPA driver init OK.\n");
return 0;
@@ -1994,6 +2023,18 @@
ipa_res.bam_mem_size = resource_size(resource_p);
}
+ /* Get IPA A2 BAM address */
+ resource_p = platform_get_resource_byname(pdev_p, IORESOURCE_MEM,
+ "a2-bam-base");
+
+ if (!resource_p) {
+ IPAERR(":get resource failed for a2-bam-base!\n");
+ return -ENODEV;
+ } else {
+ ipa_res.a2_bam_mem_base = resource_p->start;
+ ipa_res.a2_bam_mem_size = resource_size(resource_p);
+ }
+
/* Get IPA pipe mem start ofst */
resource_p = platform_get_resource_byname(pdev_p, IORESOURCE_MEM,
"ipa-pipe-mem");
@@ -2027,6 +2068,17 @@
ipa_res.bam_irq = resource_p->start;
}
+ /* Get IPA A2 BAM IRQ number */
+ resource_p = platform_get_resource_byname(pdev_p, IORESOURCE_IRQ,
+ "a2-bam-irq");
+
+ if (!resource_p) {
+ IPAERR(":get resource failed for a2-bam-irq!\n");
+ return -ENODEV;
+ } else {
+ ipa_res.a2_bam_irq = resource_p->start;
+ }
+
/* Get IPA HW Version */
result = of_property_read_u32(pdev_p->dev.of_node, "qcom,ipa-hw-ver",
&ipa_res.ipa_hw_type);
diff --git a/drivers/platform/msm/ipa/ipa_i.h b/drivers/platform/msm/ipa/ipa_i.h
index 1b5b339..14195d7 100644
--- a/drivers/platform/msm/ipa/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_i.h
@@ -719,8 +719,11 @@
u32 ipa_mem_size;
u32 bam_mem_base;
u32 bam_mem_size;
+ u32 a2_bam_mem_base;
+ u32 a2_bam_mem_size;
u32 ipa_irq;
u32 bam_irq;
+ u32 a2_bam_irq;
u32 ipa_pipe_mem_start_ofst;
u32 ipa_pipe_mem_size;
enum ipa_hw_type ipa_hw_type;
@@ -733,6 +736,8 @@
int ipa_get_a2_mux_pipe_info(enum a2_mux_pipe_direction pipe_dir,
struct a2_mux_pipe_connection *pipe_connect);
+int ipa_get_a2_mux_bam_info(u32 *a2_bam_mem_base, u32 *a2_bam_mem_size,
+ u32 *a2_bam_irq);
void rmnet_bridge_get_client_handles(u32 *producer_handle,
u32 *consumer_handle);
int ipa_send_one(struct ipa_sys_context *sys, struct ipa_desc *desc,
@@ -815,4 +820,7 @@
int ipa_query_intf_tx_props(struct ipa_ioc_query_intf_tx_props *tx);
int ipa_query_intf_rx_props(struct ipa_ioc_query_intf_rx_props *rx);
+int a2_mux_init(void);
+int a2_mux_exit(void);
+
#endif /* _IPA_I_H_ */
diff --git a/drivers/platform/msm/ipa/rmnet_bridge.c b/drivers/platform/msm/ipa/rmnet_bridge.c
index e5c7ec2..696b363 100644
--- a/drivers/platform/msm/ipa/rmnet_bridge.c
+++ b/drivers/platform/msm/ipa/rmnet_bridge.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -16,7 +16,6 @@
#include <mach/bam_dmux.h>
#include <mach/ipa.h>
#include <mach/sps.h>
-#include "a2_service.h"
static struct rmnet_bridge_cb_type {
u32 producer_handle;
diff --git a/drivers/platform/msm/sps/bam.c b/drivers/platform/msm/sps/bam.c
index 1064086..0f81285 100644
--- a/drivers/platform/msm/sps/bam.c
+++ b/drivers/platform/msm/sps/bam.c
@@ -992,7 +992,11 @@
{
SPS_DBG2("sps:%s:bam=0x%x(va).pipe=%d.", __func__, (u32) base, pipe);
- bam_write_reg_field(base, P_CTRL(pipe), P_EN, 1);
+ if (bam_read_reg_field(base, P_CTRL(pipe), P_EN))
+ SPS_DBG2("sps:bam=0x%x(va).pipe=%d is already enabled.\n",
+ (u32) base, pipe);
+ else
+ bam_write_reg_field(base, P_CTRL(pipe), P_EN, 1);
}
/**
diff --git a/drivers/usb/gadget/android.c b/drivers/usb/gadget/android.c
index 8e25780..9dd9c40 100644
--- a/drivers/usb/gadget/android.c
+++ b/drivers/usb/gadget/android.c
@@ -76,10 +76,10 @@
#define USB_ETH_RNDIS y
#include "f_rndis.c"
#include "rndis.c"
+#include "f_qc_ecm.c"
#include "u_bam_data.c"
#include "f_mbim.c"
#include "f_ecm.c"
-#include "f_qc_ecm.c"
#include "f_qc_rndis.c"
#include "u_ether.c"
#include "u_qc_ether.c"
@@ -655,6 +655,9 @@
.attributes = rmnet_function_attributes,
};
+/* ecm transport string */
+static char ecm_transports[MAX_XPORT_STR_LEN];
+
struct ecm_function_config {
u8 ethaddr[ETH_ALEN];
};
@@ -678,6 +681,7 @@
struct usb_configuration *c)
{
int ret;
+ char *trans;
struct ecm_function_config *ecm = f->config;
if (!ecm) {
@@ -689,19 +693,28 @@
ecm->ethaddr[0], ecm->ethaddr[1], ecm->ethaddr[2],
ecm->ethaddr[3], ecm->ethaddr[4], ecm->ethaddr[5]);
- ret = gether_qc_setup_name(c->cdev->gadget, ecm->ethaddr, "ecm");
- if (ret) {
- pr_err("%s: gether_setup failed\n", __func__);
- return ret;
+ pr_debug("%s: ecm_transport is %s", __func__, ecm_transports);
+
+ trans = strim(ecm_transports);
+ if (strcmp("BAM2BAM_IPA", trans)) {
+ ret = gether_qc_setup_name(c->cdev->gadget,
+ ecm->ethaddr, "ecm");
+ if (ret) {
+ pr_err("%s: gether_setup failed\n", __func__);
+ return ret;
+ }
}
- return ecm_qc_bind_config(c, ecm->ethaddr);
+ return ecm_qc_bind_config(c, ecm->ethaddr, trans);
}
static void ecm_qc_function_unbind_config(struct android_usb_function *f,
struct usb_configuration *c)
{
- gether_qc_cleanup_name("ecm0");
+ char *trans = strim(ecm_transports);
+
+ if (strcmp("BAM2BAM_IPA", trans))
+ gether_qc_cleanup_name("ecm0");
}
static ssize_t ecm_ethaddr_show(struct device *dev,
@@ -731,7 +744,24 @@
static DEVICE_ATTR(ecm_ethaddr, S_IRUGO | S_IWUSR, ecm_ethaddr_show,
ecm_ethaddr_store);
+static ssize_t ecm_transports_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n", ecm_transports);
+}
+
+static ssize_t ecm_transports_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ strlcpy(ecm_transports, buf, sizeof(ecm_transports));
+ return size;
+}
+
+static DEVICE_ATTR(ecm_transports, S_IRUGO | S_IWUSR, ecm_transports_show,
+ ecm_transports_store);
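+
+/*
+ * Illustrative usage: userspace selects the ECM data transport by
+ * writing the transport name to this attribute before the function
+ * is enabled, e.g.:
+ *	echo BAM2BAM_IPA > ecm_transports
+ * (the full sysfs path depends on the android gadget device and
+ * function names)
+ */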
+
static struct device_attribute *ecm_function_attributes[] = {
+ &dev_attr_ecm_transports,
&dev_attr_ecm_ethaddr,
NULL
};
diff --git a/drivers/usb/gadget/f_mbim.c b/drivers/usb/gadget/f_mbim.c
index ff0bdaf..a32dd15 100644
--- a/drivers/usb/gadget/f_mbim.c
+++ b/drivers/usb/gadget/f_mbim.c
@@ -665,7 +665,8 @@
pr_info("dev:%p portno:%d\n", dev, dev->port_num);
- ret = bam_data_connect(&dev->bam_port, dev->port_num, dev->port_num);
+ ret = bam_data_connect(&dev->bam_port, dev->port_num,
+ USB_GADGET_XPORT_BAM2BAM, dev->port_num, USB_FUNC_MBIM);
if (ret) {
pr_err("bam_data_setup failed: err:%d\n",
ret);
diff --git a/drivers/usb/gadget/f_qc_ecm.c b/drivers/usb/gadget/f_qc_ecm.c
index 88d19f5..559fd04 100644
--- a/drivers/usb/gadget/f_qc_ecm.c
+++ b/drivers/usb/gadget/f_qc_ecm.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2003-2005,2008 David Brownell
* Copyright (C) 2008 Nokia Corporation
- * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -21,6 +21,11 @@
/* #define VERBOSE_DEBUG */
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/device.h>
@@ -29,6 +34,9 @@
#include "u_ether.h"
#include "u_qc_ether.h"
+#include "u_bam_data.h"
+#include <mach/ecm_ipa.h>
+
/*
* This function is a "CDC Ethernet Networking Control Model" (CDC ECM)
@@ -58,9 +66,9 @@
};
struct f_ecm_qc {
- struct qc_gether port;
+ struct qc_gether port;
u8 ctrl_id, data_id;
-
+ enum transport_type xport;
char ethaddr[14];
struct usb_ep *notify;
@@ -69,6 +77,16 @@
bool is_open;
};
+struct f_ecm_qc_ipa_params {
+ u8 dev_mac[ETH_ALEN];
+ u8 host_mac[ETH_ALEN];
+ ecm_ipa_callback ipa_rx_cb;
+ ecm_ipa_callback ipa_tx_cb;
+ void *ipa_priv;
+};
+
+static struct f_ecm_qc_ipa_params ipa_params;
+
static inline struct f_ecm_qc *func_to_ecm_qc(struct usb_function *f)
{
return container_of(f, struct f_ecm_qc, port.func);
@@ -288,51 +306,6 @@
static struct data_port ecm_qc_bam_port;
-static int ecm_qc_bam_setup(void)
-{
- int ret;
-
- ret = bam_data_setup(ECM_QC_NO_PORTS);
- if (ret) {
- pr_err("bam_data_setup failed err: %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-static int ecm_qc_bam_connect(struct f_ecm_qc *dev)
-{
- int ret;
-
- ecm_qc_bam_port.cdev = dev->port.func.config->cdev;
- ecm_qc_bam_port.in = dev->port.in_ep;
- ecm_qc_bam_port.out = dev->port.out_ep;
-
- /* currently we use the first connection */
- ret = bam_data_connect(&ecm_qc_bam_port, 0, 0);
- if (ret) {
- pr_err("bam_data_connect failed: err:%d\n",
- ret);
- return ret;
- } else {
- pr_info("ecm bam connected\n");
- }
-
- return 0;
-}
-
-static int ecm_qc_bam_disconnect(struct f_ecm_qc *dev)
-{
- pr_debug("dev:%p. %s Disconnect BAM.\n", dev, __func__);
-
- bam_data_disconnect(&ecm_qc_bam_port, 0);
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-
static void ecm_qc_do_notify(struct f_ecm_qc *ecm)
{
struct usb_request *req = ecm->notify_req;
@@ -401,6 +374,73 @@
ecm_qc_do_notify(ecm);
}
+static int ecm_qc_bam_setup(void)
+{
+ int ret;
+
+ ret = bam_data_setup(ECM_QC_NO_PORTS);
+ if (ret) {
+ pr_err("bam_data_setup failed err: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ecm_qc_bam_connect(struct f_ecm_qc *dev)
+{
+ int ret;
+
+ ecm_qc_bam_port.cdev = dev->port.func.config->cdev;
+ ecm_qc_bam_port.in = dev->port.in_ep;
+ ecm_qc_bam_port.out = dev->port.out_ep;
+
+ /* currently we use the first connection */
+ ret = bam_data_connect(&ecm_qc_bam_port, 0, dev->xport,
+ 0, USB_FUNC_ECM);
+ if (ret) {
+ pr_err("bam_data_connect failed: err:%d\n", ret);
+ return ret;
+ } else {
+ pr_debug("ecm bam connected\n");
+ }
+
+ dev->is_open = true;
+ ecm_qc_notify(dev);
+
+ return 0;
+}
+
+static int ecm_qc_bam_disconnect(struct f_ecm_qc *dev)
+{
+ pr_debug("dev:%p. Disconnect BAM.\n", dev);
+
+ bam_data_disconnect(&ecm_qc_bam_port, 0);
+
+ ecm_ipa_cleanup(ipa_params.ipa_priv);
+
+ return 0;
+}
+
+void *ecm_qc_get_ipa_rx_cb(void)
+{
+ return ipa_params.ipa_rx_cb;
+}
+
+void *ecm_qc_get_ipa_tx_cb(void)
+{
+ return ipa_params.ipa_tx_cb;
+}
+
+void *ecm_qc_get_ipa_priv(void)
+{
+ return ipa_params.ipa_priv;
+}
+
+/*-------------------------------------------------------------------------*/
+
+
+
static void ecm_qc_notify_complete(struct usb_ep *ep, struct usb_request *req)
{
struct f_ecm_qc *ecm = req->context;
@@ -524,7 +564,8 @@
* we can disconnect the port from the network layer.
*/
ecm_qc_bam_disconnect(ecm);
- gether_qc_disconnect_name(&ecm->port, "ecm0");
+ if (ecm->xport != USB_GADGET_XPORT_BAM2BAM_IPA)
+ gether_qc_disconnect_name(&ecm->port, "ecm0");
}
if (!ecm->port.in_ep->desc ||
@@ -553,9 +594,12 @@
);
ecm->port.cdc_filter = DEFAULT_FILTER;
DBG(cdev, "activate ecm\n");
- net = gether_qc_connect_name(&ecm->port, "ecm0");
- if (IS_ERR(net))
- return PTR_ERR(net);
+ if (ecm->xport != USB_GADGET_XPORT_BAM2BAM_IPA) {
+ net = gether_qc_connect_name(&ecm->port,
+ "ecm0");
+ if (IS_ERR(net))
+ return PTR_ERR(net);
+ }
if (ecm_qc_bam_connect(ecm))
goto fail;
@@ -597,7 +641,8 @@
if (ecm->port.in_ep->driver_data) {
ecm_qc_bam_disconnect(ecm);
- gether_qc_disconnect_name(&ecm->port, "ecm0");
+ if (ecm->xport != USB_GADGET_XPORT_BAM2BAM_IPA)
+ gether_qc_disconnect_name(&ecm->port, "ecm0");
}
if (ecm->notify->driver_data) {
@@ -662,6 +707,7 @@
status = usb_interface_id(c, f);
if (status < 0)
goto fail;
+
ecm->ctrl_id = status;
ecm_qc_control_intf.bInterfaceNumber = status;
@@ -670,6 +716,7 @@
status = usb_interface_id(c, f);
if (status < 0)
goto fail;
+
ecm->data_id = status;
ecm_qc_data_nop_intf.bInterfaceNumber = status;
@@ -797,6 +844,7 @@
* @c: the configuration to support the network link
* @ethaddr: a buffer in which the ethernet address of the host side
* side of the link was recorded
+ * @xport_name: data path transport type name ("BAM2BAM" or "BAM2BAM_IPA")
* Context: single threaded during gadget setup
*
* Returns zero on success, else negative errno.
@@ -805,7 +853,8 @@
* for calling @gether_cleanup() before module unload.
*/
int
-ecm_qc_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN])
+ecm_qc_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
+ char *xport_name)
{
struct f_ecm_qc *ecm;
int status;
@@ -819,6 +868,8 @@
return status;
}
+ pr_debug("data transport type is %s", xport_name);
+
/* maybe allocate device-global string IDs */
if (ecm_qc_string_defs[0].id == 0) {
@@ -849,11 +900,23 @@
if (!ecm)
return -ENOMEM;
+ ecm->xport = str_to_xport(xport_name);
+ pr_debug("set xport = %d", ecm->xport);
+
/* export host's Ethernet address in CDC format */
- snprintf(ecm->ethaddr, sizeof ecm->ethaddr,
+ if (ecm->xport == USB_GADGET_XPORT_BAM2BAM_IPA) {
+ gether_qc_get_macs(ipa_params.dev_mac, ipa_params.host_mac);
+ snprintf(ecm->ethaddr, sizeof ecm->ethaddr,
+ "%02X%02X%02X%02X%02X%02X",
+ ipa_params.host_mac[0], ipa_params.host_mac[1],
+ ipa_params.host_mac[2], ipa_params.host_mac[3],
+ ipa_params.host_mac[4], ipa_params.host_mac[5]);
+	} else {
+ snprintf(ecm->ethaddr, sizeof ecm->ethaddr,
"%02X%02X%02X%02X%02X%02X",
ethaddr[0], ethaddr[1], ethaddr[2],
ethaddr[3], ethaddr[4], ethaddr[5]);
+	}
+
ecm_qc_string_defs[1].s = ecm->ethaddr;
ecm->port.cdc_filter = DEFAULT_FILTER;
@@ -870,8 +933,31 @@
status = usb_add_function(c, &ecm->port.func);
if (status) {
+ pr_err("failed to add function");
+ ecm_qc_string_defs[1].s = NULL;
+ kfree(ecm);
+ return status;
+ }
+
+ if (ecm->xport != USB_GADGET_XPORT_BAM2BAM_IPA)
+ return status;
+
+ status = ecm_ipa_init(&ipa_params.ipa_rx_cb, &ipa_params.ipa_tx_cb,
+ &ipa_params.ipa_priv);
+ if (status) {
+ pr_err("failed to initialize ECM IPA Driver");
+ ecm_qc_string_defs[1].s = NULL;
+ kfree(ecm);
+ return status;
+ }
+
+ status = ecm_ipa_configure(ipa_params.host_mac, ipa_params.dev_mac,
+ ipa_params.ipa_priv);
+ if (status) {
+ pr_err("failed to configure ECM IPA Driver");
ecm_qc_string_defs[1].s = NULL;
kfree(ecm);
}
+
return status;
}
diff --git a/drivers/usb/gadget/f_qc_rndis.c b/drivers/usb/gadget/f_qc_rndis.c
index 128b6d1..51d7bc1 100644
--- a/drivers/usb/gadget/f_qc_rndis.c
+++ b/drivers/usb/gadget/f_qc_rndis.c
@@ -6,7 +6,7 @@
* Copyright (C) 2008 Nokia Corporation
* Copyright (C) 2009 Samsung Electronics
* Author: Michal Nazarewicz (mina86@mina86.com)
- * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
@@ -427,7 +427,8 @@
dev->bam_port.out = dev->port.out_ep;
/* currently we use the first connection */
- ret = bam_data_connect(&dev->bam_port, 0, 0);
+ ret = bam_data_connect(&dev->bam_port, 0, USB_GADGET_XPORT_BAM2BAM,
+ 0, USB_FUNC_RNDIS);
if (ret) {
pr_err("bam_data_connect failed: err:%d\n",
ret);
diff --git a/drivers/usb/gadget/u_bam_data.c b/drivers/usb/gadget/u_bam_data.c
index 70c71d4..8df06a4 100644
--- a/drivers/usb/gadget/u_bam_data.c
+++ b/drivers/usb/gadget/u_bam_data.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -22,9 +22,10 @@
#include <linux/usb/gadget.h>
#include <mach/bam_dmux.h>
-#include <mach/usb_gadget_xport.h>
#include <mach/usb_bam.h>
+#include "u_bam_data.h"
+
#define BAM2BAM_DATA_N_PORTS 1
static struct workqueue_struct *bam_data_wq;
@@ -34,12 +35,6 @@
#define SPS_PARAMS_TBE BIT(6)
#define MSM_VENDOR_ID BIT(16)
-struct data_port {
- struct usb_composite_dev *cdev;
- struct usb_ep *in;
- struct usb_ep *out;
-};
-
struct bam_data_ch_info {
unsigned long flags;
unsigned id;
@@ -53,6 +48,10 @@
u32 src_pipe_idx;
u32 dst_pipe_idx;
u8 connection_idx;
+
+ enum function_type func_type;
+ enum transport_type trans;
+ struct usb_bam_connect_ipa_params ipa_params;
};
struct bam_data_port {
@@ -175,6 +174,22 @@
return 0;
}
+static void bam2bam_data_disconnect_work(struct work_struct *w)
+{
+ struct bam_data_port *port =
+ container_of(w, struct bam_data_port, disconnect_w);
+ struct bam_data_ch_info *d = &port->data_ch;
+ int ret;
+
+ if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
+ if (d->func_type == USB_FUNC_ECM)
+ ecm_ipa_disconnect(d->ipa_params.priv);
+ ret = usb_bam_disconnect_ipa(d->connection_idx, &d->ipa_params);
+ if (ret)
+ pr_err("usb_bam_disconnect_ipa failed: err:%d\n", ret);
+ }
+}
+
static void bam2bam_data_connect_work(struct work_struct *w)
{
struct bam_data_port *port = container_of(w, struct bam_data_port,
@@ -185,14 +200,49 @@
pr_debug("%s: Connect workqueue started", __func__);
- ret = usb_bam_connect(d->connection_idx, &d->src_pipe_idx,
- &d->dst_pipe_idx);
- d->src_pipe_idx = 11;
- d->dst_pipe_idx = 10;
+ if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
+ d->ipa_params.client = IPA_CLIENT_USB_CONS;
+ d->ipa_params.dir = PEER_PERIPHERAL_TO_USB;
+ if (d->func_type == USB_FUNC_ECM) {
+ d->ipa_params.notify = ecm_qc_get_ipa_tx_cb();
+ d->ipa_params.priv = ecm_qc_get_ipa_priv();
+ }
+ ret = usb_bam_connect_ipa(&d->ipa_params);
+ if (ret) {
+ pr_err("%s: usb_bam_connect_ipa failed: err:%d\n",
+ __func__, ret);
+ return;
+ }
- if (ret) {
- pr_err("usb_bam_connect failed: err:%d\n", ret);
- return;
+ d->ipa_params.client = IPA_CLIENT_USB_PROD;
+ d->ipa_params.dir = USB_TO_PEER_PERIPHERAL;
+ if (d->func_type == USB_FUNC_ECM) {
+ d->ipa_params.notify = ecm_qc_get_ipa_rx_cb();
+ d->ipa_params.priv = ecm_qc_get_ipa_priv();
+ }
+ ret = usb_bam_connect_ipa(&d->ipa_params);
+ if (ret) {
+ pr_err("%s: usb_bam_connect_ipa failed: err:%d\n",
+ __func__, ret);
+ return;
+ }
+ if (d->func_type == USB_FUNC_ECM) {
+ ret = ecm_ipa_connect(d->ipa_params.cons_clnt_hdl,
+ d->ipa_params.prod_clnt_hdl,
+ d->ipa_params.priv);
+ if (ret) {
+ pr_err("%s: failed to connect IPA: err:%d\n",
+ __func__, ret);
+ return;
+ }
+ }
+ } else { /* transport type is USB_GADGET_XPORT_BAM2BAM */
+ ret = usb_bam_connect(d->connection_idx, &d->src_pipe_idx,
+ &d->dst_pipe_idx);
+ if (ret) {
+ pr_err("usb_bam_connect failed: err:%d\n", ret);
+ return;
+ }
}
if (!port->port_usb) {
@@ -230,15 +280,17 @@
bam_data_start_endless_rx(port);
bam_data_start_endless_tx(port);
- /* Register for peer reset callback */
- usb_bam_register_peer_reset_cb(d->connection_idx,
+ /* Register for peer reset callback if USB_GADGET_XPORT_BAM2BAM */
+ if (d->trans != USB_GADGET_XPORT_BAM2BAM_IPA) {
+ usb_bam_register_peer_reset_cb(d->connection_idx,
bam_data_peer_reset_cb, port);
- ret = usb_bam_client_ready(true);
- if (ret) {
- pr_err("%s: usb_bam_client_ready failed: err:%d\n",
+ ret = usb_bam_client_ready(true);
+ if (ret) {
+ pr_err("%s: usb_bam_client_ready failed: err:%d\n",
__func__, ret);
- return;
+ return;
+ }
}
pr_debug("%s: Connect workqueue done", __func__);
@@ -262,6 +314,7 @@
port->port_num = portno;
INIT_WORK(&port->connect_w, bam2bam_data_connect_work);
+ INIT_WORK(&port->disconnect_w, bam2bam_data_disconnect_work);
/* data ch */
d = &port->data_ch;
@@ -276,6 +329,7 @@
void bam_data_disconnect(struct data_port *gr, u8 port_num)
{
struct bam_data_port *port;
+ struct bam_data_ch_info *d;
pr_debug("dev:%p port#%d\n", gr, port_num);
@@ -285,7 +339,7 @@
}
if (!gr) {
- pr_err("mbim data port is null\n");
+ pr_err("data port is null\n");
return;
}
@@ -303,12 +357,19 @@
port->port_usb = 0;
}
- if (usb_bam_client_ready(false))
- pr_err("%s: usb_bam_client_ready failed\n", __func__);
+ d = &port->data_ch;
+ if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA)
+		queue_work(bam_data_wq, &port->disconnect_w);
+ else {
+ if (usb_bam_client_ready(false)) {
+ pr_err("%s: usb_bam_client_ready failed\n",
+ __func__);
+ }
+ }
}
int bam_data_connect(struct data_port *gr, u8 port_num,
- u8 connection_idx)
+ enum transport_type trans, u8 connection_idx, enum function_type func)
{
struct bam_data_port *port;
struct bam_data_ch_info *d;
@@ -322,7 +383,7 @@
}
if (!gr) {
- pr_err("mbim data port is null\n");
+ pr_err("data port is null\n");
return -ENODEV;
}
@@ -349,6 +410,16 @@
d->connection_idx = connection_idx;
+ d->trans = trans;
+
+ if (trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
+ d->ipa_params.src_pipe = &(d->src_pipe_idx);
+ d->ipa_params.dst_pipe = &(d->dst_pipe_idx);
+ d->ipa_params.idx = connection_idx;
+ }
+
+ d->func_type = func;
+
queue_work(bam_data_wq, &port->connect_w);
return 0;
diff --git a/drivers/usb/gadget/u_bam_data.h b/drivers/usb/gadget/u_bam_data.h
new file mode 100644
index 0000000..71a01b9
--- /dev/null
+++ b/drivers/usb/gadget/u_bam_data.h
@@ -0,0 +1,41 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __U_BAM_DATA_H
+#define __U_BAM_DATA_H
+
+#include <mach/usb_gadget_xport.h>
+
+enum function_type {
+ USB_FUNC_ECM,
+ USB_FUNC_MBIM,
+ USB_FUNC_RNDIS,
+};
+
+struct data_port {
+ struct usb_composite_dev *cdev;
+ struct usb_ep *in;
+ struct usb_ep *out;
+};
+
+void bam_data_disconnect(struct data_port *gr, u8 port_num);
+
+int bam_data_connect(struct data_port *gr, u8 port_num,
+ enum transport_type trans, u8 connection_idx, enum function_type func);
+
+int bam_data_setup(unsigned int no_bam2bam_port);
+
+void bam_data_suspend(u8 port_num);
+
+void bam_data_resume(u8 port_num);
+
+#endif /* __U_BAM_DATA_H */
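The widened bam_data_connect() signature means a function driver now picks the
transport and the framing type explicitly at connect time. A minimal sketch,
assuming a caller that already owns a struct data_port; the mbim_start_ipa()
wrapper and the two index macros are hypothetical:

	#include "u_bam_data.h"

	#define MBIM_PORT_NUM	0	/* hypothetical port index */
	#define MBIM_CONN_IDX	0	/* hypothetical BAM connection index */

	static int mbim_start_ipa(struct data_port *mbim_port)
	{
		/* MBIM framing over the BAM2BAM IPA transport */
		return bam_data_connect(mbim_port, MBIM_PORT_NUM,
					USB_GADGET_XPORT_BAM2BAM_IPA,
					MBIM_CONN_IDX, USB_FUNC_MBIM);
	}

On teardown, bam_data_disconnect(mbim_port, MBIM_PORT_NUM) queues the IPA
disconnect work added above.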
diff --git a/drivers/usb/gadget/u_qc_ether.c b/drivers/usb/gadget/u_qc_ether.c
index ce0a12e..e10ec25 100644
--- a/drivers/usb/gadget/u_qc_ether.c
+++ b/drivers/usb/gadget/u_qc_ether.c
@@ -4,7 +4,7 @@
* Copyright (C) 2003-2005,2008 David Brownell
* Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
* Copyright (C) 2008 Nokia Corporation
- * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
@@ -62,7 +62,7 @@
* or updating its backlink port_usb->ioport
*/
spinlock_t lock;
- struct qc_gether *port_usb;
+ struct qc_gether *port_usb;
struct net_device *net;
struct usb_gadget *gadget;
@@ -235,6 +235,14 @@
.name = "gadget",
};
+void gether_qc_get_macs(u8 dev_mac[ETH_ALEN], u8 host_mac[ETH_ALEN])
+{
+ if (get_qc_ether_addr(qc_dev_addr, dev_mac))
+ pr_debug("using random dev_mac ethernet address\n");
+ if (get_qc_ether_addr(qc_host_addr, host_mac))
+ pr_debug("using random host_mac ethernet address\n");
+}
+
/**
* gether_qc_setup - initialize one ethernet-over-usb link
* @g: gadget to associated with these links
@@ -320,6 +328,7 @@
/**
* gether_qc_cleanup_name - remove Ethernet-over-USB device
+ * @netname: name for network device (for example, "usb")
* Context: may sleep
*
* This is called to free all resources allocated by @gether_qc_setup().
@@ -343,6 +352,7 @@
* is active
* @link: the USB link, set up with endpoints, descriptors matching
* current device speed, and any framing wrapper(s) set up.
+ * @netname: name for network device (for example, "usb")
* Context: irqs blocked
*
* This is called to let the network layer know the connection
@@ -391,6 +401,7 @@
* gether_qc_disconnect_name - notify network layer that USB
* link is inactive
* @link: the USB link, on which gether_connect() was called
+ * @netname: name for network device (for example, "usb")
* Context: irqs blocked
*
* This is called to let the network layer know the connection
diff --git a/drivers/usb/gadget/u_qc_ether.h b/drivers/usb/gadget/u_qc_ether.h
index 29193e0..25562da 100644
--- a/drivers/usb/gadget/u_qc_ether.h
+++ b/drivers/usb/gadget/u_qc_ether.h
@@ -4,7 +4,7 @@
* Copyright (C) 2003-2005,2008 David Brownell
* Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
* Copyright (C) 2008 Nokia Corporation
- * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
@@ -49,7 +49,7 @@
struct usb_function func;
/* updated by gether_{connect,disconnect} */
- struct eth_qc_dev *ioport;
+ struct eth_qc_dev *ioport;
/* endpoints handle full and/or high speeds */
struct usb_ep *in_ep;
@@ -61,10 +61,7 @@
/* hooks for added framing, as needed for RNDIS and EEM. */
u32 header_len;
- /* NCM requires fixed size bundles */
- bool is_fixed;
- u32 fixed_out_len;
- u32 fixed_in_len;
+
struct sk_buff *(*wrap)(struct qc_gether *port,
struct sk_buff *skb);
int (*unwrap)(struct qc_gether *port,
@@ -89,10 +86,14 @@
void gether_qc_disconnect_name(struct qc_gether *link, const char *netname);
/* each configuration may bind one instance of an ethernet link */
-int ecm_qc_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]);
+int ecm_qc_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
+ char *xport_name);
int
rndis_qc_bind_config_vendor(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
u32 vendorID, const char *manufacturer,
u8 maxPktPerXfer);
+
+void gether_qc_get_macs(u8 dev_mac[ETH_ALEN], u8 host_mac[ETH_ALEN]);
+
#endif /* __U_QC_ETHER_H */
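gether_qc_get_macs() gives a function driver the device- and host-side MAC
addresses (random ones if none were provided) before it binds a configuration.
A minimal sketch of a bind routine using it; the "BAM2BAM_IPA" transport-name
string is illustrative:

	u8 dev_mac[ETH_ALEN];
	u8 host_mac[ETH_ALEN];

	gether_qc_get_macs(dev_mac, host_mac);
	/* hand the host-visible MAC to the ECM function */
	status = ecm_qc_bind_config(c, host_mac, "BAM2BAM_IPA");
	if (status < 0)
		return status;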
diff --git a/drivers/video/msm/mdss/mdss_mdp_overlay.c b/drivers/video/msm/mdss/mdss_mdp_overlay.c
index ddf5cfc..c1dcc18 100644
--- a/drivers/video/msm/mdss/mdss_mdp_overlay.c
+++ b/drivers/video/msm/mdss/mdss_mdp_overlay.c
@@ -628,13 +628,23 @@
mutex_lock(&mfd->ov_lock);
mutex_lock(&mfd->lock);
list_for_each_entry(pipe, &mfd->pipes_used, used_list) {
- if (pipe->params_changed || pipe->back_buf.num_planes) {
- ret = mdss_mdp_pipe_queue_data(pipe, &pipe->back_buf);
- if (IS_ERR_VALUE(ret)) {
- pr_warn("Unable to queue data for pnum=%d\n",
- pipe->num);
- mdss_mdp_overlay_free_buf(&pipe->back_buf);
- }
+ struct mdss_mdp_data *buf;
+ if (pipe->back_buf.num_planes) {
+ buf = &pipe->back_buf;
+ } else if (!pipe->params_changed) {
+ continue;
+ } else if (pipe->front_buf.num_planes) {
+ buf = &pipe->front_buf;
+ } else {
+ pr_warn("pipe queue without buffer\n");
+ buf = NULL;
+ }
+
+ ret = mdss_mdp_pipe_queue_data(pipe, buf);
+ if (IS_ERR_VALUE(ret)) {
+ pr_warn("Unable to queue data for pnum=%d\n",
+ pipe->num);
+ mdss_mdp_overlay_free_buf(buf);
}
}
diff --git a/drivers/video/msm/msm_fb.c b/drivers/video/msm/msm_fb.c
index 0715b0b..5f994a0 100644
--- a/drivers/video/msm/msm_fb.c
+++ b/drivers/video/msm/msm_fb.c
@@ -1181,6 +1181,25 @@
bpp = 4;
break;
+ case MDP_BGRA_8888:
+ fix->type = FB_TYPE_PACKED_PIXELS;
+ fix->xpanstep = 1;
+ fix->ypanstep = 1;
+ var->vmode = FB_VMODE_NONINTERLACED;
+ var->blue.offset = 0;
+ var->green.offset = 8;
+ var->red.offset = 16;
+ var->blue.length = 8;
+ var->green.length = 8;
+ var->red.length = 8;
+ var->blue.msb_right = 0;
+ var->green.msb_right = 0;
+ var->red.msb_right = 0;
+ var->transp.offset = 24;
+ var->transp.length = 8;
+ bpp = 4;
+ break;
+
case MDP_YCRYCB_H2V1:
/* ToDo: need to check TV-Out YUV422i framebuffer format */
/* we might need to create new type define */
@@ -1900,7 +1920,9 @@
break;
case 32:
- if (var->transp.offset == 24)
+ if ((var->transp.offset == 24) && (var->blue.offset == 0))
+ mfd->fb_imgType = MDP_BGRA_8888;
+ else if (var->transp.offset == 24)
mfd->fb_imgType = MDP_ARGB_8888;
else
mfd->fb_imgType = MDP_RGBA_8888;
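With both checks in place, a 32bpp mode that puts alpha at bit 24 and blue in
the low byte is read back as MDP_BGRA_8888; alpha at bit 24 with any other
blue offset still maps to MDP_ARGB_8888. A minimal userspace sketch, assuming
fd is an already-open framebuffer device:

	struct fb_var_screeninfo var;

	if (ioctl(fd, FBIOGET_VSCREENINFO, &var) < 0)
		return -1;
	var.bits_per_pixel = 32;
	var.transp.offset = 24;	/* alpha in the top byte */
	var.blue.offset = 0;	/* blue in the low byte -> BGRA */
	if (ioctl(fd, FBIOPUT_VSCREENINFO, &var) < 0)
		return -1;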
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 6c26a3d..5ab7183 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -57,6 +57,7 @@
/* Idle State Flags */
#define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */
+#define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */
#define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000)
@@ -100,6 +101,12 @@
struct list_head device_list;
struct kobject kobj;
struct completion kobj_unregister;
+
+#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
+ int safe_state_index;
+ cpumask_t coupled_cpus;
+ struct cpuidle_coupled *coupled;
+#endif
};
DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
@@ -176,6 +183,10 @@
#endif
+#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
+void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a);
+#endif
+
/******************************
* CPUIDLE GOVERNOR INTERFACE *
******************************/
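A platform opts into coupled idle by flagging the shared state and telling the
core which CPUs must enter it together, before registering the device. A
minimal sketch, assuming CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is set and drv/dev
are the platform's cpuidle driver and per-CPU device:

	/* state 1 may only be entered by all coupled CPUs together */
	drv->states[1].flags |= CPUIDLE_FLAG_COUPLED;

	/* state 0 is safe to wait in while partner CPUs catch up */
	dev->safe_state_index = 0;
	cpumask_copy(&dev->coupled_cpus, cpu_online_mask);

	ret = cpuidle_register_device(dev);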
diff --git a/include/linux/qpnp/qpnp-adc.h b/include/linux/qpnp/qpnp-adc.h
index 1849cee..05d75ce 100644
--- a/include/linux/qpnp/qpnp-adc.h
+++ b/include/linux/qpnp/qpnp-adc.h
@@ -1203,20 +1203,15 @@
static inline int32_t qpnp_adc_scale_therm_pu1(int32_t adc_code,
const struct qpnp_adc_properties *adc_prop,
const struct qpnp_vadc_chan_properties *chan_prop,
- struct qpnp_vadc_result *chan_rslt);
+ struct qpnp_vadc_result *chan_rslt)
{ return -ENXIO; }
static inline int32_t qpnp_adc_scale_therm_pu2(int32_t adc_code,
const struct qpnp_adc_properties *adc_prop,
const struct qpnp_vadc_chan_properties *chan_prop,
- struct qpnp_vadc_result *chan_rslt);
+ struct qpnp_vadc_result *chan_rslt)
{ return -ENXIO; }
static inline int32_t qpnp_vadc_is_ready(void)
{ return -ENXIO; }
-static inline int32_t qpnp_adc_scale_default(int32_t adc_code,
- const struct qpnp_adc_properties *adc_prop,
- const struct qpnp_adc_chan_properties *chan_prop,
- struct qpnp_adc_chan_result *chan_rslt)
-{ return -ENXIO; }
static inline int32_t qpnp_get_vadc_gain_and_offset(
struct qpnp_vadc_linear_graph *param,
enum qpnp_adc_calib_type calib_type)
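The semicolons removed above had turned each stub into a bare declaration
followed by an orphaned body, which fails to compile when the driver is
configured out. The corrected pattern, sketched with a hypothetical stub:

	/* wrong: a declaration, then an orphaned body */
	static inline int32_t qpnp_foo(void);
	{ return -ENXIO; }

	/* right: a single inline definition */
	static inline int32_t qpnp_foo(void)
	{ return -ENXIO; }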
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index f5dfe0c..08b5ae7 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -857,6 +857,7 @@
* correctness of the format string and va_list arguments.
* - 'K' For a kernel pointer that should be hidden from unprivileged users
* - 'NF' For a netdev_features_t
+ * - 'a' For a phys_addr_t type and its derivative types (passed by reference)
*
* Note: The difference between 'S' and 'F' is that on ia64 and ppc64
* function pointers are really function descriptors, which contain a
@@ -941,6 +942,12 @@
return netdev_feature_string(buf, end, ptr, spec);
}
break;
+ case 'a':
+ spec.flags |= SPECIAL | SMALL | ZEROPAD;
+ spec.field_width = sizeof(phys_addr_t) * 2 + 2;
+ spec.base = 16;
+ return number(buf, end,
+ (unsigned long long) *((phys_addr_t *)ptr), spec);
}
spec.flags |= SMALL;
if (spec.field_width == -1) {
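Callers print a phys_addr_t by passing its address, so one specifier covers
32- and 64-bit physical addresses without casting. A minimal sketch; the
variable and value are illustrative:

	phys_addr_t base = 0x8fe00000;

	pr_info("reserved region starts at %pa\n", &base);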
diff --git a/scripts/build-all.py b/scripts/build-all.py
index f5048e0..4789af7 100755
--- a/scripts/build-all.py
+++ b/scripts/build-all.py
@@ -1,6 +1,6 @@
#! /usr/bin/env python
-# Copyright (c) 2009-2011, The Linux Foundation. All rights reserved.
+# Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
@@ -88,7 +88,6 @@
r'[fm]sm[0-9]*_defconfig',
r'apq*_defconfig',
r'qsd*_defconfig',
- r'omap2*_defconfig',
)
for p in arch_pats:
for n in glob.glob('arch/arm/configs/' + p):