Merge "msm: Add device tree support for audio drivers" into msm-3.4
diff --git a/Documentation/devicetree/bindings/arm/msm/tz-log.txt b/Documentation/devicetree/bindings/arm/msm/tz-log.txt
new file mode 100644
index 0000000..6928611
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/tz-log.txt
@@ -0,0 +1,16 @@
+* TZLOG (Trust Zone Log)
+
+The tz_log driver is a platform device driver that exposes a debugfs
+interface for accessing and displaying diagnostic information
+related to secure code (Trustzone/QSEE).
+
+Required properties:
+- compatible : Should be "qcom,tz-log"
+- reg : Offset and size of the register set for the device
+
+Example:
+
+ qcom,tz-log@fe805720 {
+ compatible = "qcom,tz-log";
+ reg = <0xfe805720 0x1000>;
+ };
diff --git a/arch/arm/configs/msm8660-perf_defconfig b/arch/arm/configs/msm8660-perf_defconfig
index fe30dc8..9f4eee3 100644
--- a/arch/arm/configs/msm8660-perf_defconfig
+++ b/arch/arm/configs/msm8660-perf_defconfig
@@ -68,6 +68,7 @@
CONFIG_MSM_PIL_QDSP6V3=y
CONFIG_MSM_PIL_TZAPPS=y
CONFIG_MSM_PIL_DSPS=y
+CONFIG_MSM_PIL_VIDC=y
CONFIG_MSM_SUBSYSTEM_RESTART=y
CONFIG_MSM_TZ_LOG=y
CONFIG_MSM_RPM_LOG=y
diff --git a/arch/arm/configs/msm8660_defconfig b/arch/arm/configs/msm8660_defconfig
index 45339ee..678ece5 100644
--- a/arch/arm/configs/msm8660_defconfig
+++ b/arch/arm/configs/msm8660_defconfig
@@ -67,6 +67,7 @@
CONFIG_MSM_PIL_QDSP6V3=y
CONFIG_MSM_PIL_TZAPPS=y
CONFIG_MSM_PIL_DSPS=y
+CONFIG_MSM_PIL_VIDC=y
CONFIG_MSM_SUBSYSTEM_RESTART=y
CONFIG_MSM_TZ_LOG=y
CONFIG_MSM_RPM_LOG=y
diff --git a/arch/arm/mach-msm/board-8960-regulator.c b/arch/arm/mach-msm/board-8960-regulator.c
index 6bd1b7d..2664d6b 100644
--- a/arch/arm/mach-msm/board-8960-regulator.c
+++ b/arch/arm/mach-msm/board-8960-regulator.c
@@ -246,7 +246,7 @@
REGULATOR_SUPPLY("ext_3p3v", NULL),
REGULATOR_SUPPLY("vdd_ana", "3-005b"),
REGULATOR_SUPPLY("vdd_lvds_3p3v", "mipi_dsi.1"),
- REGULATOR_SUPPLY("mhl_ext_3p3v", "msm_otg"),
+ REGULATOR_SUPPLY("mhl_usb_hs_switch", "msm_otg"),
};
VREG_CONSUMERS(EXT_OTG_SW) = {
REGULATOR_SUPPLY("ext_otg_sw", NULL),
diff --git a/arch/arm/mach-msm/board-8960.c b/arch/arm/mach-msm/board-8960.c
index 6fd2b4d..251c1de 100644
--- a/arch/arm/mach-msm/board-8960.c
+++ b/arch/arm/mach-msm/board-8960.c
@@ -1292,6 +1292,7 @@
.soft_reset_inverted = 1,
.peripheral_platform_device = NULL,
.ramdump_timeout_ms = 600000,
+ .no_powerdown_after_ramdumps = 1,
};
#define MSM_TSIF0_PHYS (0x18200000)
diff --git a/arch/arm/mach-msm/devices-8064.c b/arch/arm/mach-msm/devices-8064.c
index 472a87e..33b411a 100644
--- a/arch/arm/mach-msm/devices-8064.c
+++ b/arch/arm/mach-msm/devices-8064.c
@@ -1192,6 +1192,7 @@
.disable_dmx = 0,
.disable_fullhd = 0,
.cont_mode_dpb_count = 18,
+ .fw_addr = 0x9fe00000,
};
struct platform_device apq8064_msm_device_vidc = {
diff --git a/arch/arm/mach-msm/devices-8930.c b/arch/arm/mach-msm/devices-8930.c
index 6ea8d7b..aba467b 100644
--- a/arch/arm/mach-msm/devices-8930.c
+++ b/arch/arm/mach-msm/devices-8930.c
@@ -695,6 +695,7 @@
#endif
.disable_dmx = 1,
.disable_fullhd = 0,
+ .fw_addr = 0x9fe00000,
};
struct platform_device apq8930_msm_device_vidc = {
diff --git a/arch/arm/mach-msm/devices-8960.c b/arch/arm/mach-msm/devices-8960.c
index 3522e80..8b7e097 100644
--- a/arch/arm/mach-msm/devices-8960.c
+++ b/arch/arm/mach-msm/devices-8960.c
@@ -729,6 +729,7 @@
.disable_dmx = 0,
.disable_fullhd = 0,
.cont_mode_dpb_count = 18,
+ .fw_addr = 0x9fe00000,
};
struct platform_device msm_device_vidc = {
diff --git a/arch/arm/mach-msm/devices-msm8x60.c b/arch/arm/mach-msm/devices-msm8x60.c
index 8bc455e..f36b4ec 100644
--- a/arch/arm/mach-msm/devices-msm8x60.c
+++ b/arch/arm/mach-msm/devices-msm8x60.c
@@ -2286,15 +2286,18 @@
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
.memtype = ION_CP_MM_HEAP_ID,
.enable_ion = 1,
- .cp_enabled = 0,
+ .cp_enabled = 1,
+ .secure_wb_heap = 1,
#else
.memtype = MEMTYPE_SMI_KERNEL,
.enable_ion = 0,
+ .secure_wb_heap = 0,
#endif
.disable_dmx = 0,
.disable_fullhd = 0,
.cont_mode_dpb_count = 8,
.disable_turbo = 1,
+ .fw_addr = 0x38000000,
};
struct platform_device msm_device_vidc = {
diff --git a/arch/arm/mach-msm/include/mach/board.h b/arch/arm/mach-msm/include/mach/board.h
index ef0b517..1aa3814 100644
--- a/arch/arm/mach-msm/include/mach/board.h
+++ b/arch/arm/mach-msm/include/mach/board.h
@@ -519,11 +519,13 @@
int disable_dmx;
int disable_fullhd;
u32 cp_enabled;
+ u32 secure_wb_heap;
#ifdef CONFIG_MSM_BUS_SCALING
struct msm_bus_scale_pdata *vidc_bus_client_pdata;
#endif
int cont_mode_dpb_count;
int disable_turbo;
+ unsigned long fw_addr;
};
struct vcap_platform_data {
diff --git a/arch/arm/mach-msm/perf_event_msm_krait_l2.c b/arch/arm/mach-msm/perf_event_msm_krait_l2.c
index 5f76a92..3635572 100644
--- a/arch/arm/mach-msm/perf_event_msm_krait_l2.c
+++ b/arch/arm/mach-msm/perf_event_msm_krait_l2.c
@@ -19,7 +19,10 @@
#include <mach/msm-krait-l2-accessors.h>
#define MAX_L2_PERIOD ((1ULL << 32) - 1)
-#define MAX_KRAIT_L2_CTRS 5
+#define MAX_KRAIT_L2_CTRS 10
+
+#define PMCR_NUM_EV_SHIFT 11
+#define PMCR_NUM_EV_MASK 0x1f
#define L2_EVT_MASK 0xfffff
@@ -29,7 +32,6 @@
#define L2PMCCNTCR 0x408
#define L2PMCCNTSR 0x40A
#define L2CYCLE_CTR_BIT 31
-#define L2CYCLE_CTR_EVENT_IDX 4
#define L2CYCLE_CTR_RAW_CODE 0xfe
#define L2PMOVSR 0x406
@@ -109,6 +111,9 @@
/* L2 slave port traffic filtering */
static u32 l2_slv_filter_prefix = 0x000f0010;
+static int total_l2_ctrs;
+static int l2_cycle_ctr_idx;
+
static u32 pmu_type;
static struct arm_pmu krait_l2_pmu;
@@ -203,7 +208,7 @@
static void enable_intenset(u32 idx)
{
- if (idx == L2CYCLE_CTR_EVENT_IDX)
+ if (idx == l2_cycle_ctr_idx)
set_l2_indirect_reg(L2PMINTENSET, 1 << L2CYCLE_CTR_BIT);
else
set_l2_indirect_reg(L2PMINTENSET, 1 << idx);
@@ -211,7 +216,7 @@
static void disable_intenclr(u32 idx)
{
- if (idx == L2CYCLE_CTR_EVENT_IDX)
+ if (idx == l2_cycle_ctr_idx)
set_l2_indirect_reg(L2PMINTENCLR, 1 << L2CYCLE_CTR_BIT);
else
set_l2_indirect_reg(L2PMINTENCLR, 1 << idx);
@@ -219,7 +224,7 @@
static void enable_counter(u32 idx)
{
- if (idx == L2CYCLE_CTR_EVENT_IDX)
+ if (idx == l2_cycle_ctr_idx)
set_l2_indirect_reg(L2PMCNTENSET, 1 << L2CYCLE_CTR_BIT);
else
set_l2_indirect_reg(L2PMCNTENSET, 1 << idx);
@@ -227,7 +232,7 @@
static void disable_counter(u32 idx)
{
- if (idx == L2CYCLE_CTR_EVENT_IDX)
+ if (idx == l2_cycle_ctr_idx)
set_l2_indirect_reg(L2PMCNTENCLR, 1 << L2CYCLE_CTR_BIT);
else
set_l2_indirect_reg(L2PMCNTENCLR, 1 << idx);
@@ -238,7 +243,7 @@
u32 val;
u32 counter_reg = (idx * 16) + IA_L2PMXEVCNTR_BASE;
- if (idx == L2CYCLE_CTR_EVENT_IDX)
+ if (idx == l2_cycle_ctr_idx)
val = get_l2_indirect_reg(L2PMCCNTR);
else
val = get_l2_indirect_reg(counter_reg);
@@ -250,7 +255,7 @@
{
u32 counter_reg = (idx * 16) + IA_L2PMXEVCNTR_BASE;
- if (idx == L2CYCLE_CTR_EVENT_IDX)
+ if (idx == l2_cycle_ctr_idx)
set_l2_indirect_reg(L2PMCCNTR, val);
else
set_l2_indirect_reg(counter_reg, val);
@@ -330,11 +335,11 @@
int ctr = 0;
if (hwc->config_base == L2CYCLE_CTR_RAW_CODE) {
- if (!test_and_set_bit(L2CYCLE_CTR_EVENT_IDX, cpuc->used_mask))
- return L2CYCLE_CTR_EVENT_IDX;
+ if (!test_and_set_bit(l2_cycle_ctr_idx, cpuc->used_mask))
+ return l2_cycle_ctr_idx;
}
- for (ctr = 0; ctr < MAX_KRAIT_L2_CTRS - 1; ctr++) {
+ for (ctr = 0; ctr < total_l2_ctrs - 1; ctr++) {
if (!test_and_set_bit(ctr, cpuc->used_mask))
return ctr;
}
@@ -389,7 +394,7 @@
bitp = __ffs(pmovsr);
if (bitp == L2CYCLE_CTR_BIT)
- idx = L2CYCLE_CTR_EVENT_IDX;
+ idx = l2_cycle_ctr_idx;
else
idx = bitp;
@@ -488,6 +493,19 @@
return 1;
}
+int get_num_events(void)
+{
+ int val;
+
+ val = get_l2_indirect_reg(L2PMCR);
+
+ /*
+ * Read bits 15:11 of the L2PMCR and add 1
+ * for the cycle counter.
+ */
+ return ((val >> PMCR_NUM_EV_SHIFT) & PMCR_NUM_EV_MASK) + 1;
+}
+
static struct arm_pmu krait_l2_pmu = {
.id = ARM_PERF_PMU_ID_KRAIT_L2,
.type = ARM_PMU_DEVICE_L2CC,
@@ -505,7 +523,6 @@
.map_event = krait_l2_map_event,
.max_period = MAX_L2_PERIOD,
.get_hw_events = krait_l2_get_hw_events,
- .num_events = MAX_KRAIT_L2_CTRS,
.test_set_event_constraints = msm_l2_test_set_ev_constraint,
.clear_event_constraints = msm_l2_clear_ev_constraint,
.pmu.attr_groups = msm_l2_pmu_attr_grps,
@@ -533,6 +550,21 @@
/* Reset all ctrs */
set_l2_indirect_reg(L2PMCR, L2PMCR_RESET_ALL);
+ /* Get num of counters in the L2cc PMU. */
+ total_l2_ctrs = get_num_events();
+ krait_l2_pmu.num_events = total_l2_ctrs;
+
+ pr_info("Detected %d counters on the L2CC PMU.\n",
+ total_l2_ctrs);
+
+ /*
+ * The L2 cycle counter index in the used_mask
+ * bit stream is always after the other counters.
+ * Counter indexes begin from 0 to keep it consistent
+ * with the h/w.
+ */
+ l2_cycle_ctr_idx = total_l2_ctrs - 1;
+
/* Avoid spurious interrupt if any */
get_reset_pmovsr();
diff --git a/arch/arm/mach-msm/perf_event_msm_l2.c b/arch/arm/mach-msm/perf_event_msm_l2.c
index 5a5bf57..aae2552 100644
--- a/arch/arm/mach-msm/perf_event_msm_l2.c
+++ b/arch/arm/mach-msm/perf_event_msm_l2.c
@@ -16,9 +16,9 @@
#include <linux/spinlock.h>
-#define MAX_SCORPION_L2_CTRS 5
+#define MAX_SCORPION_L2_CTRS 10
+
#define SCORPION_L2CYCLE_CTR_BIT 31
-#define SCORPION_L2CYCLE_CTR_EVENT_IDX 4
#define SCORPION_L2CYCLE_CTR_RAW_CODE 0xfe
#define SCORPIONL2_PMNC_E (1 << 0) /* Enable all counters */
#define SCORPION_L2_EVT_PREFIX 3
@@ -29,6 +29,8 @@
#define L2_EVT_PREFIX_SHIFT 16
#define L2_SLAVE_EVT_PREFIX 4
+#define PMCR_NUM_EV_SHIFT 11
+#define PMCR_NUM_EV_MASK 0x1f
/*
* The L2 PMU is shared between all CPU's, so protect
@@ -70,6 +72,9 @@
NULL,
};
+static u32 total_l2_ctrs;
+static u32 l2_cycle_ctr_idx;
+
static u32 pmu_type;
static struct arm_pmu scorpion_l2_pmu;
@@ -508,7 +513,7 @@
static void scorpion_l2_enable_intenset(u32 idx)
{
- if (idx == SCORPION_L2CYCLE_CTR_EVENT_IDX) {
+ if (idx == l2_cycle_ctr_idx) {
asm volatile ("mcr p15, 3, %0, c15, c5, 1" : : "r"
(1 << SCORPION_L2CYCLE_CTR_BIT));
} else {
@@ -518,7 +523,7 @@
static void scorpion_l2_disable_intenclr(u32 idx)
{
- if (idx == SCORPION_L2CYCLE_CTR_EVENT_IDX) {
+ if (idx == l2_cycle_ctr_idx) {
asm volatile ("mcr p15, 3, %0, c15, c5, 0" : : "r"
(1 << SCORPION_L2CYCLE_CTR_BIT));
} else {
@@ -528,7 +533,7 @@
static void scorpion_l2_enable_counter(u32 idx)
{
- if (idx == SCORPION_L2CYCLE_CTR_EVENT_IDX) {
+ if (idx == l2_cycle_ctr_idx) {
asm volatile ("mcr p15, 3, %0, c15, c4, 3" : : "r"
(1 << SCORPION_L2CYCLE_CTR_BIT));
} else {
@@ -538,7 +543,7 @@
static void scorpion_l2_disable_counter(u32 idx)
{
- if (idx == SCORPION_L2CYCLE_CTR_EVENT_IDX) {
+ if (idx == l2_cycle_ctr_idx) {
asm volatile ("mcr p15, 3, %0, c15, c4, 2" : : "r"
(1 << SCORPION_L2CYCLE_CTR_BIT));
} else {
@@ -551,7 +556,7 @@
u32 val;
unsigned long iflags;
- if (idx == SCORPION_L2CYCLE_CTR_EVENT_IDX) {
+ if (idx == l2_cycle_ctr_idx) {
asm volatile ("mrc p15, 3, %0, c15, c4, 5" : "=r" (val));
} else {
raw_spin_lock_irqsave(&scorpion_l2_pmu_hw_events.pmu_lock,
@@ -571,7 +576,7 @@
{
unsigned long iflags;
- if (idx == SCORPION_L2CYCLE_CTR_EVENT_IDX) {
+ if (idx == l2_cycle_ctr_idx) {
asm volatile ("mcr p15, 3, %0, c15, c4, 5" : : "r" (val));
} else {
raw_spin_lock_irqsave(&scorpion_l2_pmu_hw_events.pmu_lock,
@@ -662,12 +667,12 @@
int ctr = 0;
if (hwc->config_base == SCORPION_L2CYCLE_CTR_RAW_CODE) {
- if (!test_and_set_bit(SCORPION_L2CYCLE_CTR_EVENT_IDX,
+ if (!test_and_set_bit(l2_cycle_ctr_idx,
cpuc->used_mask))
- return SCORPION_L2CYCLE_CTR_EVENT_IDX;
+ return l2_cycle_ctr_idx;
}
- for (ctr = 0; ctr < MAX_SCORPION_L2_CTRS - 1; ctr++) {
+ for (ctr = 0; ctr < total_l2_ctrs - 1; ctr++) {
if (!test_and_set_bit(ctr, cpuc->used_mask))
return ctr;
}
@@ -726,7 +731,7 @@
bitp = __ffs(pmovsr);
if (bitp == SCORPION_L2CYCLE_CTR_BIT)
- idx = SCORPION_L2CYCLE_CTR_EVENT_IDX;
+ idx = l2_cycle_ctr_idx;
else
idx = bitp;
@@ -834,6 +839,18 @@
return 1;
}
+static int get_num_events(void)
+{
+ int val;
+
+ val = scorpion_l2_pmnc_read();
+ /*
+ * Read bits 15:11 of the L2PMCR and add 1
+ * for the cycle counter.
+ */
+ return ((val >> PMCR_NUM_EV_SHIFT) & PMCR_NUM_EV_MASK) + 1;
+}
+
static struct arm_pmu scorpion_l2_pmu = {
.id = ARM_PERF_PMU_ID_SCORPIONMP_L2,
.type = ARM_PMU_DEVICE_L2CC,
@@ -851,7 +868,6 @@
.map_event = scorpion_l2_map_event,
.max_period = (1LLU << 32) - 1,
.get_hw_events = scorpion_l2_get_hw_events,
- .num_events = MAX_SCORPION_L2_CTRS,
.test_set_event_constraints = msm_l2_test_set_ev_constraint,
.clear_event_constraints = msm_l2_clear_ev_constraint,
.pmu.attr_groups = msm_l2_pmu_attr_grps,
@@ -879,6 +895,20 @@
/* Avoid spurious interrupt if any */
scorpion_l2_get_reset_pmovsr();
+ total_l2_ctrs = get_num_events();
+ scorpion_l2_pmu.num_events = total_l2_ctrs;
+
+ pr_info("Detected %d counters on the L2CC PMU.\n",
+ total_l2_ctrs);
+
+ /*
+ * The L2 cycle counter index in the used_mask
+ * bit stream is always after the other counters.
+ * Counter indexes begin from 0 to keep it consistent
+ * with the h/w.
+ */
+ l2_cycle_ctr_idx = total_l2_ctrs - 1;
+
return platform_driver_register(&scorpion_l2_pmu_driver);
}
device_initcall(register_scorpion_l2_pmu_driver);
diff --git a/arch/arm/mach-msm/peripheral-loader.c b/arch/arm/mach-msm/peripheral-loader.c
index bfbf4bc..16c21f7 100644
--- a/arch/arm/mach-msm/peripheral-loader.c
+++ b/arch/arm/mach-msm/peripheral-loader.c
@@ -226,7 +226,7 @@
static int segment_is_loadable(const struct elf32_phdr *p)
{
- return (p->p_type & PT_LOAD) && !segment_is_hash(p->p_flags);
+ return (p->p_type == PT_LOAD) && !segment_is_hash(p->p_flags);
}
/* Sychronize request_firmware() with suspend */
diff --git a/arch/arm/mach-msm/pm-8x60.c b/arch/arm/mach-msm/pm-8x60.c
index b74ebfb..595484e 100644
--- a/arch/arm/mach-msm/pm-8x60.c
+++ b/arch/arm/mach-msm/pm-8x60.c
@@ -1040,8 +1040,6 @@
msm_pm_mode_sysfs_add();
msm_pm_add_stats(enable_stats, ARRAY_SIZE(enable_stats));
- msm_spm_allow_x_cpu_set_vdd(false);
-
suspend_set_ops(&msm_pm_ops);
msm_pm_qtimer_available();
msm_cpuidle_init();
diff --git a/arch/arm/mach-msm/spm.c b/arch/arm/mach-msm/spm.c
index 4654fba..3d90678 100644
--- a/arch/arm/mach-msm/spm.c
+++ b/arch/arm/mach-msm/spm.c
@@ -72,8 +72,6 @@
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct msm_spm_device, msm_spm_devices);
-static atomic_t msm_spm_set_vdd_x_cpu_allowed = ATOMIC_INIT(1);
-
/******************************************************************************
* Internal helper functions
*****************************************************************************/
@@ -189,20 +187,9 @@
int msm_spm_set_vdd(unsigned int cpu, unsigned int vlevel)
{
- unsigned long flags;
struct msm_spm_device *dev;
uint32_t timeout_us;
- local_irq_save(flags);
-
- if (!atomic_read(&msm_spm_set_vdd_x_cpu_allowed) &&
- unlikely(smp_processor_id() != cpu)) {
- if (msm_spm_debug_mask & MSM_SPM_DEBUG_VCTL)
- pr_info("%s: attempting to set vdd of cpu %u from "
- "cpu %u\n", __func__, cpu, smp_processor_id());
- goto set_vdd_x_cpu_bail;
- }
-
dev = &per_cpu(msm_spm_devices, cpu);
if (msm_spm_debug_mask & MSM_SPM_DEBUG_VCTL)
@@ -239,15 +226,12 @@
pr_info("%s: cpu %u done, remaining timeout %uus\n",
__func__, cpu, timeout_us);
- local_irq_restore(flags);
return 0;
set_vdd_bail:
pr_err("%s: cpu %u failed, remaining timeout %uus, vlevel 0x%x\n",
__func__, cpu, timeout_us, msm_spm_get_sts_curr_pmic_data(dev));
-set_vdd_x_cpu_bail:
- local_irq_restore(flags);
return -EIO;
}
@@ -263,11 +247,6 @@
mb();
}
-void msm_spm_allow_x_cpu_set_vdd(bool allowed)
-{
- atomic_set(&msm_spm_set_vdd_x_cpu_allowed, allowed ? 1 : 0);
-}
-
int __init msm_spm_init(struct msm_spm_platform_data *data, int nr_devs)
{
unsigned int cpu;
diff --git a/arch/arm/mach-msm/spm.h b/arch/arm/mach-msm/spm.h
index 154303b..e81e335 100644
--- a/arch/arm/mach-msm/spm.h
+++ b/arch/arm/mach-msm/spm.h
@@ -146,16 +146,9 @@
*/
int msm_spm_turn_on_cpu_rail(unsigned int cpu);
-
/* Internal low power management specific functions */
/**
- * msm_spm_allow_x_cpu_set_vdd(): Turn on/off cross calling to set voltage
- * @allowed: boolean to indicate on/off.
- */
-void msm_spm_allow_x_cpu_set_vdd(bool allowed);
-
-/**
* msm_spm_reinit(): Reinitialize SPM registers
*/
void msm_spm_reinit(void);
@@ -251,11 +244,6 @@
/* empty */
}
-static inline void msm_spm_allow_x_cpu_set_vdd(bool allowed)
-{
- /* empty */
-}
-
static inline int msm_spm_turn_on_cpu_rail(unsigned int cpu)
{
return -ENOSYS;
diff --git a/arch/arm/mach-msm/spm_devices.c b/arch/arm/mach-msm/spm_devices.c
index 6e81be6..079a3ac 100644
--- a/arch/arm/mach-msm/spm_devices.c
+++ b/arch/arm/mach-msm/spm_devices.c
@@ -40,31 +40,15 @@
static struct msm_spm_device msm_spm_l2_device;
static DEFINE_PER_CPU_SHARED_ALIGNED(struct msm_spm_device, msm_cpu_spm_device);
-static atomic_t msm_spm_set_vdd_x_cpu_allowed = ATOMIC_INIT(1);
-
-void msm_spm_allow_x_cpu_set_vdd(bool allowed)
-{
- atomic_set(&msm_spm_set_vdd_x_cpu_allowed, allowed ? 1 : 0);
-}
-EXPORT_SYMBOL(msm_spm_allow_x_cpu_set_vdd);
int msm_spm_set_vdd(unsigned int cpu, unsigned int vlevel)
{
- unsigned long flags;
struct msm_spm_device *dev;
int ret = -EIO;
- local_irq_save(flags);
- if (!atomic_read(&msm_spm_set_vdd_x_cpu_allowed) &&
- unlikely(smp_processor_id() != cpu)) {
- goto set_vdd_x_cpu_bail;
- }
-
dev = &per_cpu(msm_cpu_spm_device, cpu);
ret = msm_spm_drv_set_vdd(&dev->reg_data, vlevel);
-set_vdd_x_cpu_bail:
- local_irq_restore(flags);
return ret;
}
EXPORT_SYMBOL(msm_spm_set_vdd);
diff --git a/arch/arm/mach-msm/subsystem_restart.c b/arch/arm/mach-msm/subsystem_restart.c
index c98a672..e630e31 100644
--- a/arch/arm/mach-msm/subsystem_restart.c
+++ b/arch/arm/mach-msm/subsystem_restart.c
@@ -92,6 +92,9 @@
/* MSM 8960 restart ordering info */
static const char * const order_8960[] = {"modem", "lpass"};
+/* SGLTE restart ordering info */
+static const char * const order_8960_sglte[] = {"external_modem",
+ "modem"};
static struct subsys_soc_restart_order restart_orders_8960_one = {
.subsystem_list = order_8960,
@@ -99,9 +102,19 @@
.subsys_ptrs = {[ARRAY_SIZE(order_8960)] = NULL}
};
+static struct subsys_soc_restart_order restart_orders_8960_fusion_sglte = {
+ .subsystem_list = order_8960_sglte,
+ .count = ARRAY_SIZE(order_8960_sglte),
+ .subsys_ptrs = {[ARRAY_SIZE(order_8960_sglte)] = NULL}
+ };
+
static struct subsys_soc_restart_order *restart_orders_8960[] = {
&restart_orders_8960_one,
-};
+ };
+
+static struct subsys_soc_restart_order *restart_orders_8960_sglte[] = {
+ &restart_orders_8960_fusion_sglte,
+ };
/* These will be assigned to one of the sets above after
* runtime SoC identification.
@@ -557,8 +570,18 @@
if (cpu_is_msm8960() || cpu_is_msm8930() || cpu_is_msm9615() ||
cpu_is_apq8064()) {
- restart_orders = restart_orders_8960;
- n_restart_orders = ARRAY_SIZE(restart_orders_8960);
+ if (socinfo_get_platform_subtype() == PLATFORM_SUBTYPE_SGLTE) {
+ restart_orders = restart_orders_8960_sglte;
+ n_restart_orders =
+ ARRAY_SIZE(restart_orders_8960_sglte);
+ } else {
+ restart_orders = restart_orders_8960;
+ n_restart_orders = ARRAY_SIZE(restart_orders_8960);
+ }
+ for (i = 0; i < n_restart_orders; i++) {
+ mutex_init(&restart_orders[i]->powerup_lock);
+ mutex_init(&restart_orders[i]->shutdown_lock);
+ }
}
if (restart_orders == NULL || n_restart_orders < 1) {
diff --git a/arch/arm/mach-msm/tz_log.c b/arch/arm/mach-msm/tz_log.c
index 7426bb2..db797cd 100644
--- a/arch/arm/mach-msm/tz_log.c
+++ b/arch/arm/mach-msm/tz_log.c
@@ -536,12 +536,19 @@
return 0;
}
+static struct of_device_id tzlog_match[] = {
+ { .compatible = "qcom,tz-log",
+ },
+ {}
+};
+
static struct platform_driver tz_log_driver = {
.probe = tz_log_probe,
.remove = __devexit_p(tz_log_remove),
.driver = {
.name = "tz_log",
.owner = THIS_MODULE,
+ .of_match_table = tzlog_match,
},
};
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index b280183..ad0ec48 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -374,9 +374,19 @@
KGSL_IOMMU_SETSTATE_NOP_OFFSET);
sizedwords += (cmds - &link[0]);
- if (sizedwords)
- adreno_ringbuffer_issuecmds(device,
- KGSL_CMD_FLAGS_PMODE, &link[0], sizedwords);
+ if (sizedwords) {
+ unsigned int ts;
+ /*
+ * add an interrupt at the end of commands so that the smmu
+ * disable clock off function will get called
+ */
+ *cmds++ = cp_type3_packet(CP_INTERRUPT, 1);
+ *cmds++ = CP_INT_CNTL__RB_INT_MASK;
+ sizedwords += 2;
+ ts = adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_PMODE,
+ &link[0], sizedwords);
+ kgsl_mmu_disable_clk_on_ts(&device->mmu, ts, true);
+ }
done:
if (num_iommu_units)
kfree(reg_map_array);
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 3d46221..347a57d 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -591,7 +591,7 @@
return timestamp;
}
-void
+unsigned int
adreno_ringbuffer_issuecmds(struct kgsl_device *device,
unsigned int flags,
unsigned int *cmds,
@@ -601,8 +601,9 @@
struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
if (device->state & KGSL_STATE_HUNG)
- return;
- adreno_ringbuffer_addcmds(rb, NULL, flags, cmds, sizedwords);
+ return kgsl_readtimestamp(device, KGSL_MEMSTORE_GLOBAL,
+ KGSL_TIMESTAMP_RETIRED);
+ return adreno_ringbuffer_addcmds(rb, NULL, flags, cmds, sizedwords);
}
static bool _parse_ibs(struct kgsl_device_private *dev_priv, uint gpuaddr,
diff --git a/drivers/gpu/msm/adreno_ringbuffer.h b/drivers/gpu/msm/adreno_ringbuffer.h
index ae2e4c7..ebea4ed 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.h
+++ b/drivers/gpu/msm/adreno_ringbuffer.h
@@ -103,7 +103,7 @@
void adreno_ringbuffer_close(struct adreno_ringbuffer *rb);
-void adreno_ringbuffer_issuecmds(struct kgsl_device *device,
+unsigned int adreno_ringbuffer_issuecmds(struct kgsl_device *device,
unsigned int flags,
unsigned int *cmdaddr,
int sizedwords);
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 5883f08..c597b42 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -62,9 +62,9 @@
* @returns - 0 on success or error code on failure
*/
-static int kgsl_add_event(struct kgsl_device *device, u32 id, u32 ts,
+int kgsl_add_event(struct kgsl_device *device, u32 id, u32 ts,
void (*cb)(struct kgsl_device *, void *, u32, u32), void *priv,
- struct kgsl_device_private *owner)
+ void *owner)
{
struct kgsl_event *event;
struct list_head *n;
@@ -122,6 +122,7 @@
queue_work(device->work_queue, &device->ts_expired_ws);
return 0;
}
+EXPORT_SYMBOL(kgsl_add_event);
/**
* kgsl_cancel_events_ctxt - Cancel all events for a context
@@ -162,8 +163,8 @@
* @owner - driver instance that owns the events to cancel
*
*/
-static void kgsl_cancel_events(struct kgsl_device *device,
- struct kgsl_device_private *owner)
+void kgsl_cancel_events(struct kgsl_device *device,
+ void *owner)
{
struct kgsl_event *event, *event_tmp;
unsigned int id, cur;
@@ -189,6 +190,7 @@
kfree(event);
}
}
+EXPORT_SYMBOL(kgsl_cancel_events);
/* kgsl_get_mem_entry - get the mem_entry structure for the specified object
* @ptbase - the pagetable base of the object
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index b67f460..f367166 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -179,6 +179,13 @@
struct kgsl_process_private *private, unsigned int gpuaddr,
size_t size);
+int kgsl_add_event(struct kgsl_device *device, u32 id, u32 ts,
+ void (*cb)(struct kgsl_device *, void *, u32, u32), void *priv,
+ void *owner);
+
+void kgsl_cancel_events(struct kgsl_device *device,
+ void *owner);
+
extern const struct dev_pm_ops kgsl_pm_ops;
struct early_suspend;
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index 932c995..4524668 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -125,7 +125,7 @@
void (*func)(struct kgsl_device *, void *, u32, u32);
void *priv;
struct list_head list;
- struct kgsl_device_private *owner;
+ void *owner;
};
diff --git a/drivers/gpu/msm/kgsl_gpummu.c b/drivers/gpu/msm/kgsl_gpummu.c
index 429d035..d9fe3c6 100644
--- a/drivers/gpu/msm/kgsl_gpummu.c
+++ b/drivers/gpu/msm/kgsl_gpummu.c
@@ -729,7 +729,7 @@
.mmu_pagefault = kgsl_gpummu_pagefault,
.mmu_get_current_ptbase = kgsl_gpummu_get_current_ptbase,
.mmu_enable_clk = NULL,
- .mmu_disable_clk = NULL,
+ .mmu_disable_clk_on_ts = NULL,
.mmu_get_hwpagetable_asid = NULL,
.mmu_get_pt_lsb = NULL,
.mmu_get_reg_map_desc = NULL,
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index d20cf7e..8d66eaa 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -130,6 +130,89 @@
}
/*
+ * kgsl_iommu_disable_clk_event - An event function that is executed when
+ * the required timestamp is reached. It disables the IOMMU clocks if
+ * the timestamp on which the clocks can be disabled has expired.
+ * @device - The kgsl device pointer
+ * @data - The data passed during event creation, it is the MMU pointer
+ * @id - Context ID, should always be KGSL_MEMSTORE_GLOBAL
+ * @ts - The current timestamp that has expired for the device
+ *
+ * Disables IOMMU clocks if timestamp has expired
+ * Return - void
+ */
+static void kgsl_iommu_clk_disable_event(struct kgsl_device *device, void *data,
+ unsigned int id, unsigned int ts)
+{
+ struct kgsl_mmu *mmu = data;
+ struct kgsl_iommu *iommu = mmu->priv;
+
+ if (!iommu->clk_event_queued) {
+ if (0 > timestamp_cmp(ts, iommu->iommu_last_cmd_ts))
+ KGSL_DRV_ERR(device,
+ "IOMMU disable clock event being cancelled, "
+ "iommu_last_cmd_ts: %x, retired ts: %x\n",
+ iommu->iommu_last_cmd_ts, ts);
+ return;
+ }
+
+ if (0 <= timestamp_cmp(ts, iommu->iommu_last_cmd_ts)) {
+ kgsl_iommu_disable_clk(mmu);
+ iommu->clk_event_queued = false;
+ } else {
+ /* add new event to fire when ts is reached, this can happen
+ * if we queued an event and someone requested the clocks to
+		 * be disabled on a later timestamp */
+ if (kgsl_add_event(device, id, iommu->iommu_last_cmd_ts,
+ kgsl_iommu_clk_disable_event, mmu, mmu)) {
+ KGSL_DRV_ERR(device,
+ "Failed to add IOMMU disable clk event\n");
+ iommu->clk_event_queued = false;
+ }
+ }
+}
+
+/*
+ * kgsl_iommu_disable_clk_on_ts - Sets up event to disable IOMMU clocks
+ * @mmu - The kgsl MMU pointer
+ * @ts - Timestamp on which the clocks should be disabled
+ * @ts_valid - Indicates whether ts parameter is valid, if this parameter
+ * is false then it means that the calling function wants to disable the
+ * IOMMU clocks immediately without waiting for any timestamp
+ *
+ * Creates an event to disable the IOMMU clocks on timestamp and if event
+ * already exists then updates the timestamp of disabling the IOMMU clocks
+ * with the passed in ts if it is greater than the current value at which
+ * the clocks will be disabled
+ * Return - void
+ */
+static void
+kgsl_iommu_disable_clk_on_ts(struct kgsl_mmu *mmu, unsigned int ts,
+ bool ts_valid)
+{
+ struct kgsl_iommu *iommu = mmu->priv;
+
+ if (iommu->clk_event_queued) {
+ if (ts_valid && (0 <
+ timestamp_cmp(ts, iommu->iommu_last_cmd_ts)))
+ iommu->iommu_last_cmd_ts = ts;
+ } else {
+ if (ts_valid) {
+ iommu->iommu_last_cmd_ts = ts;
+ iommu->clk_event_queued = true;
+ if (kgsl_add_event(mmu->device, KGSL_MEMSTORE_GLOBAL,
+ ts, kgsl_iommu_clk_disable_event, mmu, mmu)) {
+ KGSL_DRV_ERR(mmu->device,
+ "Failed to add IOMMU disable clk event\n");
+ iommu->clk_event_queued = false;
+ }
+ } else {
+ kgsl_iommu_disable_clk(mmu);
+ }
+ }
+}
+
+/*
* kgsl_iommu_enable_clk - Enable iommu clocks
* @mmu - Pointer to mmu structure
* @ctx_id - The context bank whose clocks are to be turned on
@@ -751,12 +834,12 @@
KGSL_IOMMU_CONTEXT_USER,
CONTEXTIDR);
- kgsl_iommu_disable_clk(mmu);
+ kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
mmu->flags |= KGSL_FLAGS_STARTED;
done:
if (status) {
- kgsl_iommu_disable_clk(mmu);
+ kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
kgsl_detach_pagetable_iommu_domain(mmu);
}
return status;
@@ -827,6 +910,7 @@
static void kgsl_iommu_stop(struct kgsl_mmu *mmu)
{
+ struct kgsl_iommu *iommu = mmu->priv;
/*
* stop device mmu
*
@@ -841,6 +925,11 @@
mmu->flags &= ~KGSL_FLAGS_STARTED;
}
+
+ /* switch off MMU clocks and cancel any events it has queued */
+ iommu->clk_event_queued = false;
+ kgsl_cancel_events(mmu->device, mmu);
+ kgsl_iommu_disable_clk(mmu);
}
static int kgsl_iommu_close(struct kgsl_mmu *mmu)
@@ -883,7 +972,7 @@
pt_base = readl_relaxed(iommu->iommu_units[0].reg_map.hostptr +
(KGSL_IOMMU_CONTEXT_USER << KGSL_IOMMU_CTX_SHIFT) +
KGSL_IOMMU_TTBR0);
- kgsl_iommu_disable_clk(mmu);
+ kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
return pt_base & (KGSL_IOMMU_TTBR0_PA_MASK <<
KGSL_IOMMU_TTBR0_PA_SHIFT);
}
@@ -996,7 +1085,7 @@
}
}
/* Disable smmu clock */
- kgsl_iommu_disable_clk(mmu);
+ kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
}
/*
@@ -1046,7 +1135,7 @@
.mmu_pagefault = NULL,
.mmu_get_current_ptbase = kgsl_iommu_get_current_ptbase,
.mmu_enable_clk = kgsl_iommu_enable_clk,
- .mmu_disable_clk = kgsl_iommu_disable_clk,
+ .mmu_disable_clk_on_ts = kgsl_iommu_disable_clk_on_ts,
.mmu_get_hwpagetable_asid = kgsl_iommu_get_hwpagetable_asid,
.mmu_get_pt_lsb = kgsl_iommu_get_pt_lsb,
.mmu_get_reg_map_desc = kgsl_iommu_get_reg_map_desc,
diff --git a/drivers/gpu/msm/kgsl_iommu.h b/drivers/gpu/msm/kgsl_iommu.h
index efc3d9c..354a5cf 100644
--- a/drivers/gpu/msm/kgsl_iommu.h
+++ b/drivers/gpu/msm/kgsl_iommu.h
@@ -103,6 +103,8 @@
* instance of the IOMMU driver
* @iommu_last_cmd_ts: The timestamp of last command submitted that
* aceeses iommu registers
+ * @clk_event_queued: Indicates whether an event to disable clocks
+ * is already queued or not
* @device: Pointer to kgsl device
* @asids: A bit structure indicating which id's are presently used
* @asid: Contains the initial value of IOMMU_CONTEXTIDR when a domain
@@ -113,6 +115,7 @@
struct kgsl_iommu_unit iommu_units[KGSL_IOMMU_MAX_UNITS];
unsigned int unit_count;
unsigned int iommu_last_cmd_ts;
+ bool clk_event_queued;
struct kgsl_device *device;
unsigned long *asids;
unsigned int asid;
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index 4c0c015..de53946 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -131,8 +131,8 @@
void (*mmu_pagefault) (struct kgsl_mmu *mmu);
unsigned int (*mmu_get_current_ptbase)
(struct kgsl_mmu *mmu);
- void (*mmu_disable_clk)
- (struct kgsl_mmu *mmu);
+ void (*mmu_disable_clk_on_ts)
+ (struct kgsl_mmu *mmu, uint32_t ts, bool ts_valid);
int (*mmu_enable_clk)
(struct kgsl_mmu *mmu, int ctx_id);
int (*mmu_get_hwpagetable_asid)(struct kgsl_mmu *mmu);
@@ -291,10 +291,11 @@
return 0;
}
-static inline void kgsl_mmu_disable_clk(struct kgsl_mmu *mmu)
+static inline void kgsl_mmu_disable_clk_on_ts(struct kgsl_mmu *mmu,
+ unsigned int ts, bool ts_valid)
{
- if (mmu->mmu_ops && mmu->mmu_ops->mmu_disable_clk)
- mmu->mmu_ops->mmu_disable_clk(mmu);
+ if (mmu->mmu_ops && mmu->mmu_ops->mmu_disable_clk_on_ts)
+ mmu->mmu_ops->mmu_disable_clk_on_ts(mmu, ts, ts_valid);
}
#endif /* __KGSL_MMU_H */
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 409fe40..325cd98 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -713,7 +713,6 @@
}
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_NAP);
- kgsl_mmu_disable_clk(&device->mmu);
kgsl_pwrctrl_set_state(device, KGSL_STATE_NAP);
case KGSL_STATE_NAP:
case KGSL_STATE_SLEEP:
@@ -755,7 +754,6 @@
gpu_freq);
_sleep_accounting(device);
kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
- kgsl_mmu_disable_clk(&device->mmu);
kgsl_pwrctrl_set_state(device, KGSL_STATE_SLEEP);
pm_qos_update_request(&device->pm_qos_req_dma,
PM_QOS_DEFAULT_VALUE);
@@ -888,7 +886,6 @@
/* Order pwrrail/clk sequence based upon platform */
kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
- kgsl_mmu_disable_clk(&device->mmu);
kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF);
}
EXPORT_SYMBOL(kgsl_pwrctrl_disable);
diff --git a/drivers/tty/smux_ctl.c b/drivers/tty/smux_ctl.c
index 69adbf3..2b8f028 100644
--- a/drivers/tty/smux_ctl.c
+++ b/drivers/tty/smux_ctl.c
@@ -49,15 +49,6 @@
static uint32_t smux_ctl_ch_id[] = {
SMUX_DATA_CTL_0,
- SMUX_DATA_CTL_1,
- SMUX_DATA_CTL_2,
- SMUX_DATA_CTL_3,
- SMUX_DATA_CTL_4,
- SMUX_DATA_CTL_5,
- SMUX_DATA_CTL_6,
- SMUX_DATA_CTL_7,
- SMUX_USB_RMNET_CTL_0,
- SMUX_CSVT_CTL_0
};
#define SMUX_CTL_NUM_CHANNELS ARRAY_SIZE(smux_ctl_ch_id)
@@ -78,6 +69,7 @@
uint32_t read_avail;
struct list_head rx_list;
+ int abort_wait;
wait_queue_head_t read_wait_queue;
wait_queue_head_t write_wait_queue;
@@ -359,7 +351,8 @@
r = wait_event_interruptible_timeout(
devp->write_wait_queue,
- (devp->state == SMUX_CONNECTED),
+ (devp->state == SMUX_CONNECTED ||
+ devp->abort_wait),
(5 * HZ));
if (r == 0)
r = -ETIMEDOUT;
@@ -372,6 +365,13 @@
msm_smux_close(devp->id);
return r;
+ } else if (devp->abort_wait) {
+ pr_err("%s: %s: Open command aborted\n",
+ SMUX_CTL_MODULE_NAME, __func__);
+ r = -EIO;
+ atomic_dec(&devp->ref_count);
+ msm_smux_close(devp->id);
+ return r;
} else if (devp->state != SMUX_CONNECTED) {
pr_err(SMUX_CTL_MODULE_NAME ": %s: "
"Invalid open notification\n", __func__);
@@ -440,8 +440,9 @@
if (signal_pending(current))
r = -ERESTARTSYS;
-
- if (smux_ctl_devp[dev_index]->state == SMUX_DISCONNECTED &&
+ else if (smux_ctl_devp[dev_index]->abort_wait)
+ r = -ENETRESET;
+ else if (smux_ctl_devp[dev_index]->state == SMUX_DISCONNECTED &&
smux_ctl_devp[dev_index]->is_channel_reset != 0)
r = -ENETRESET;
@@ -560,6 +561,9 @@
if (signal_pending(current))
r = -ERESTARTSYS;
+
+ else if (smux_ctl_devp[dev_index]->abort_wait)
+ r = -ENETRESET;
else if (smux_ctl_devp[dev_index]->state == SMUX_DISCONNECTED &&
smux_ctl_devp[dev_index]->is_channel_reset != 0)
r = -ENETRESET;
@@ -645,6 +649,13 @@
r = wait_event_interruptible(devp->write_wait_queue,
0 != (write_err = smux_ctl_writeable(id)));
+
+ if (-EIO == r) {
+ pr_err("%s: %s: wait_event_interruptible ret %i\n",
+ SMUX_CTL_MODULE_NAME, __func__, r);
+ return -EIO;
+ }
+
if (r < 0) {
pr_err(SMUX_CTL_MODULE_NAME " :%s: wait_event_interruptible "
"ret %i\n", __func__, r);
@@ -699,6 +710,25 @@
.unlocked_ioctl = smux_ctl_ioctl,
};
+static void smux_ctl_reset_channel(struct smux_ctl_dev *devp)
+{
+ devp->is_high_wm = 0;
+ devp->write_pending = 0;
+ devp->is_channel_reset = 0;
+ devp->state = SMUX_DISCONNECTED;
+ devp->read_avail = 0;
+
+ devp->stats.bytes_tx = 0;
+ devp->stats.bytes_rx = 0;
+ devp->stats.pkts_tx = 0;
+ devp->stats.pkts_rx = 0;
+ devp->stats.cnt_ssr = 0;
+ devp->stats.cnt_read_fail = 0;
+ devp->stats.cnt_write_fail = 0;
+ devp->stats.cnt_high_wm_hit = 0;
+ devp->abort_wait = 0;
+}
+
static int smux_ctl_probe(struct platform_device *pdev)
{
int i;
@@ -706,6 +736,27 @@
SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s Begins\n", __func__);
+ if (smux_ctl_inited) {
+ /* Already loaded once - reinitialize channels */
+ for (i = 0; i < SMUX_CTL_NUM_CHANNELS; ++i) {
+ struct smux_ctl_dev *devp = smux_ctl_devp[i];
+
+ smux_ctl_reset_channel(devp);
+
+ if (atomic_read(&devp->ref_count)) {
+ r = msm_smux_open(devp->id,
+ devp,
+ smux_ctl_notify_cb,
+ smux_ctl_get_rx_buf_cb);
+ if (r)
+ pr_err("%s: unable to reopen ch %d, ret %d\n",
+ __func__, devp->id, r);
+ }
+ }
+ return 0;
+ }
+
+ /* Create character devices */
for (i = 0; i < SMUX_CTL_NUM_CHANNELS; ++i) {
smux_ctl_devp[i] = kzalloc(sizeof(struct smux_ctl_dev),
GFP_KERNEL);
@@ -718,26 +769,13 @@
smux_ctl_devp[i]->id = smux_ctl_ch_id[i];
atomic_set(&smux_ctl_devp[i]->ref_count, 0);
- smux_ctl_devp[i]->is_high_wm = 0;
- smux_ctl_devp[i]->write_pending = 0;
- smux_ctl_devp[i]->is_channel_reset = 0;
- smux_ctl_devp[i]->state = SMUX_DISCONNECTED;
- smux_ctl_devp[i]->read_avail = 0;
-
- smux_ctl_devp[i]->stats.bytes_tx = 0;
- smux_ctl_devp[i]->stats.bytes_rx = 0;
- smux_ctl_devp[i]->stats.pkts_tx = 0;
- smux_ctl_devp[i]->stats.pkts_rx = 0;
- smux_ctl_devp[i]->stats.cnt_ssr = 0;
- smux_ctl_devp[i]->stats.cnt_read_fail = 0;
- smux_ctl_devp[i]->stats.cnt_write_fail = 0;
- smux_ctl_devp[i]->stats.cnt_high_wm_hit = 0;
mutex_init(&smux_ctl_devp[i]->dev_lock);
init_waitqueue_head(&smux_ctl_devp[i]->read_wait_queue);
init_waitqueue_head(&smux_ctl_devp[i]->write_wait_queue);
mutex_init(&smux_ctl_devp[i]->rx_lock);
INIT_LIST_HEAD(&smux_ctl_devp[i]->rx_list);
+ smux_ctl_reset_channel(smux_ctl_devp[i]);
}
r = alloc_chrdev_region(&smux_ctl_number, 0, SMUX_CTL_NUM_CHANNELS,
@@ -761,7 +799,8 @@
cdev_init(&smux_ctl_devp[i]->cdev, &smux_ctl_fops);
smux_ctl_devp[i]->cdev.owner = THIS_MODULE;
- r = cdev_add(&smux_ctl_devp[i]->cdev, (smux_ctl_number + i), 1);
+ r = cdev_add(&smux_ctl_devp[i]->cdev,
+ (smux_ctl_number + i), 1);
if (IS_ERR_VALUE(r)) {
pr_err(SMUX_CTL_MODULE_NAME ": %s: "
@@ -818,15 +857,32 @@
SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s Begins\n", __func__);
for (i = 0; i < SMUX_CTL_NUM_CHANNELS; ++i) {
- cdev_del(&smux_ctl_devp[i]->cdev);
- kfree(smux_ctl_devp[i]);
- device_destroy(smux_ctl_classp,
- MKDEV(MAJOR(smux_ctl_number), i));
- }
- class_destroy(smux_ctl_classp);
- unregister_chrdev_region(MAJOR(smux_ctl_number),
- SMUX_CTL_NUM_CHANNELS);
+ struct smux_ctl_dev *devp = smux_ctl_devp[i];
+ mutex_lock(&devp->dev_lock);
+ devp->abort_wait = 1;
+ wake_up(&devp->write_wait_queue);
+ wake_up(&devp->read_wait_queue);
+ mutex_unlock(&devp->dev_lock);
+
+ /* Empty RX queue */
+ mutex_lock(&devp->rx_lock);
+ while (!list_empty(&devp->rx_list)) {
+ struct smux_ctl_list_elem *list_elem;
+
+ list_elem = list_first_entry(
+ &devp->rx_list,
+ struct smux_ctl_list_elem,
+ list);
+ list_del(&list_elem->list);
+ kfree(list_elem->ctl_pkt.data);
+ kfree(list_elem);
+ }
+ devp->read_avail = 0;
+ mutex_unlock(&devp->rx_lock);
+ }
+
+ SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s Ends\n", __func__);
return 0;
}
@@ -841,8 +897,6 @@
static int __init smux_ctl_init(void)
{
- msm_smux_ctl_debug_mask = MSM_SMUX_CTL_DEBUG | MSM_SMUX_CTL_DUMP_BUFFER;
-
SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s Begins\n", __func__);
return platform_driver_register(&smux_ctl_driver);
}
diff --git a/drivers/usb/otg/msm_otg.c b/drivers/usb/otg/msm_otg.c
index 5b85d98..14118e7 100644
--- a/drivers/usb/otg/msm_otg.c
+++ b/drivers/usb/otg/msm_otg.c
@@ -74,7 +74,7 @@
static struct regulator *hsusb_1p8;
static struct regulator *hsusb_vddcx;
static struct regulator *vbus_otg;
-static struct regulator *mhl_analog_switch;
+static struct regulator *mhl_usb_hs_switch;
static struct power_supply *psy;
static bool aca_id_turned_on;
@@ -250,16 +250,16 @@
if (!pdata->mhl_enable)
return;
- if (!mhl_analog_switch) {
- pr_err("%s: mhl_analog_switch is NULL.\n", __func__);
+ if (!mhl_usb_hs_switch) {
+ pr_err("%s: mhl_usb_hs_switch is NULL.\n", __func__);
return;
}
if (on) {
- if (regulator_enable(mhl_analog_switch))
- pr_err("unable to enable mhl_analog_switch\n");
+ if (regulator_enable(mhl_usb_hs_switch))
+ pr_err("unable to enable mhl_usb_hs_switch\n");
} else {
- regulator_disable(mhl_analog_switch);
+ regulator_disable(mhl_usb_hs_switch);
}
}
@@ -3355,10 +3355,10 @@
}
if (pdata->mhl_enable) {
- mhl_analog_switch = devm_regulator_get(motg->phy.dev,
- "mhl_ext_3p3v");
- if (IS_ERR(mhl_analog_switch)) {
- dev_err(&pdev->dev, "Unable to get mhl_analog_switch\n");
+ mhl_usb_hs_switch = devm_regulator_get(motg->phy.dev,
+ "mhl_usb_hs_switch");
+ if (IS_ERR(mhl_usb_hs_switch)) {
+ dev_err(&pdev->dev, "Unable to get mhl_usb_hs_switch\n");
goto free_ldo_init;
}
}
diff --git a/drivers/video/msm/mdp4_overlay_dsi_cmd.c b/drivers/video/msm/mdp4_overlay_dsi_cmd.c
index 7ba4e75..8ebf8a0 100644
--- a/drivers/video/msm/mdp4_overlay_dsi_cmd.c
+++ b/drivers/video/msm/mdp4_overlay_dsi_cmd.c
@@ -652,7 +652,7 @@
{
unsigned long flag;
-
+ mdp4_iommu_attach();
/* change mdp clk */
mdp4_set_perf_level();
@@ -705,7 +705,6 @@
mdp4_overlay_update_dsi_cmd(mfd);
- mdp4_iommu_attach();
mdp4_dsi_cmd_kickoff_ui(mfd, dsi_pipe);
mdp4_iommu_unmap(dsi_pipe);
/* signal if pan function is waiting for the update completion */
diff --git a/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c b/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c
index a5171f0..291de5f 100644
--- a/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c
+++ b/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c
@@ -25,7 +25,6 @@
#include "vidc.h"
#include "vcd_res_tracker.h"
-#define PIL_FW_BASE_ADDR 0x9fe00000
#define PIL_FW_SIZE 0x200000
static unsigned int vidc_clk_table[4] = {
@@ -181,6 +180,7 @@
{
u32 alloc_size;
struct ddl_context *ddl_context;
+ unsigned long fw_addr;
int rc = 0;
DBG_PMEM("\n%s() IN: Requested alloc size(%u)", __func__, (u32)sz);
if (!addr) {
@@ -216,8 +216,9 @@
goto bail_out;
}
} else {
+ fw_addr = resource_context.vidc_platform_data->fw_addr;
addr->alloc_handle = NULL;
- addr->alloced_phys_addr = PIL_FW_BASE_ADDR;
+ addr->alloced_phys_addr = fw_addr;
addr->buffer_size = sz;
}
} else {
@@ -966,6 +967,10 @@
}
msm_ion_secure_heap(ION_HEAP(resource_context.memtype));
msm_ion_secure_heap(ION_HEAP(resource_context.cmd_mem_type));
+
+ if (resource_context.vidc_platform_data->secure_wb_heap)
+ msm_ion_secure_heap(ION_HEAP(ION_CP_WB_HEAP_ID));
+
res_trk_disable_iommu_clocks();
mutex_unlock(&resource_context.secure_lock);
}
@@ -988,6 +993,10 @@
}
msm_ion_unsecure_heap(ION_HEAP(resource_context.cmd_mem_type));
msm_ion_unsecure_heap(ION_HEAP(resource_context.memtype));
+
+ if (resource_context.vidc_platform_data->secure_wb_heap)
+ msm_ion_unsecure_heap(ION_HEAP(ION_CP_WB_HEAP_ID));
+
res_trk_disable_iommu_clocks();
mutex_unlock(&resource_context.secure_lock);
}