Merge "msm: Add proper 9625 debug_ll support"
diff --git a/Documentation/cpu-freq/governors.txt b/Documentation/cpu-freq/governors.txt
index 6aed1ce..b4ae5e6 100644
--- a/Documentation/cpu-freq/governors.txt
+++ b/Documentation/cpu-freq/governors.txt
@@ -198,57 +198,74 @@
The CPUfreq governor "interactive" is designed for latency-sensitive,
interactive workloads. This governor sets the CPU speed depending on
-usage, similar to "ondemand" and "conservative" governors. However,
-the governor is more aggressive about scaling the CPU speed up in
-response to CPU-intensive activity.
-
-Sampling the CPU load every X ms can lead to under-powering the CPU
-for X ms, leading to dropped frames, stuttering UI, etc. Instead of
-sampling the cpu at a specified rate, the interactive governor will
-check whether to scale the cpu frequency up soon after coming out of
-idle. When the cpu comes out of idle, a timer is configured to fire
-within 1-2 ticks. If the cpu is very busy between exiting idle and
-when the timer fires then we assume the cpu is underpowered and ramp
-to MAX speed.
-
-If the cpu was not sufficiently busy to immediately ramp to MAX speed,
-then governor evaluates the cpu load since the last speed adjustment,
-choosing the highest value between that longer-term load or the
-short-term load since idle exit to determine the cpu speed to ramp to.
+usage, similar to "ondemand" and "conservative" governors, but with a
+different set of configurable behaviors.
The tuneable values for this governor are:
+target_loads: CPU load values used when adjusting speed, to influence
+the current CPU load toward those values. In general, the lower the target
+load, the more often the governor will raise CPU speeds to bring load
+below the target. The format is a single target load, optionally
+followed by pairs of CPU speeds and CPU loads to target at or above
+those speeds. Colons can be used between the speeds and associated
+target loads for readability. For example:
+
+ 85 1000000:90 1700000:99
+
+targets a CPU load of 85% below 1GHz, 90% at or above 1GHz and below
+1.7GHz, and 99% at or above 1.7GHz. If speeds are specified, they must
+appear in ascending order. Higher target load values are typically
+specified for higher speeds; that is, target load values also usually
+appear in ascending order. The default is a target load of 90% for all
+speeds.
+
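+As a quick way to experiment, the example table above can be written to
+the governor's sysfs attribute at runtime (a sketch; the path assumes
+the interactive governor's usual global sysfs location):
+
+  echo "85 1000000:90 1700000:99" > \
+      /sys/devices/system/cpu/cpufreq/interactive/target_loads
+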
min_sample_time: The minimum amount of time to spend at the current
-frequency before ramping down. This is to ensure that the governor has
-seen enough historic cpu load data to determine the appropriate
-workload. Default is 80000 uS.
+frequency before ramping down. Default is 80000 uS.
hispeed_freq: An intermediate "hi speed" at which to initially ramp
when CPU load hits the value specified in go_hispeed_load. If load
stays high for the amount of time specified in above_hispeed_delay,
-then speed may be bumped higher. Default is maximum speed.
+then speed may be bumped higher. Default is the maximum speed
+allowed by the policy at governor initialization time.
-go_hispeed_load: The CPU load at which to ramp to the intermediate "hi
-speed". Default is 85%.
+go_hispeed_load: The CPU load at which to ramp to hispeed_freq.
+Default is 99%.
-above_hispeed_delay: Once speed is set to hispeed_freq, wait for this
-long before bumping speed higher in response to continued high load.
+above_hispeed_delay: When speed is at or above hispeed_freq, wait for
+this long before raising speed in response to continued high load.
Default is 20000 uS.
-timer_rate: Sample rate for reevaluating cpu load when the system is
-not idle. Default is 20000 uS.
+timer_rate: Sample rate for reevaluating CPU load when the CPU is not
+idle. A deferrable timer is used, such that the CPU will not be woken
+from idle to service this timer until something else needs to run.
+(The maximum time to allow deferring this timer when not running at
+minimum speed is configurable via timer_slack.) Default is 20000 uS.
-input_boost: If non-zero, boost speed of all CPUs to hispeed_freq on
-touchscreen activity. Default is 0.
+timer_slack: Maximum additional time to defer handling the governor
+sampling timer beyond timer_rate when running at speeds above the
+minimum. For platforms that consume additional power at idle when
+CPUs are running at speeds greater than minimum, this places an upper
+bound on how long the timer will be deferred prior to re-evaluating
+load and dropping speed. For example, if timer_rate is 20000 uS and
+timer_slack is 10000 uS, then timers will be deferred for up to 30000 uS
+(30 msec) when not at the lowest speed. A value of -1 means defer timers
+indefinitely at all speeds. Default is 80000 uS.
boost: If non-zero, immediately boost speed of all CPUs to at least
hispeed_freq until zero is written to this attribute. If zero, allow
CPU speeds to drop below hispeed_freq according to load as usual.
+Default is zero.
-boostpulse: Immediately boost speed of all CPUs to hispeed_freq for
-min_sample_time, after which speeds are allowed to drop below
+boostpulse: On each write, immediately boost speed of all CPUs to
+hispeed_freq for at least the period of time specified by
+boostpulse_duration, after which speeds are allowed to drop below
hispeed_freq according to load as usual.
+boostpulse_duration: Length of time to hold CPU speed at hispeed_freq
+on a write to boostpulse, before allowing speed to drop according to
+load as usual. Default is 80000 uS.
+
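+For example, a userspace power HAL might hold a 500 msec boost around a
+user interaction (a sketch; the paths assume the governor's usual
+global sysfs location):
+
+  echo 500000 > /sys/devices/system/cpu/cpufreq/interactive/boostpulse_duration
+  echo 1 > /sys/devices/system/cpu/cpufreq/interactive/boostpulse
+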
3. The Governor Interface in the CPUfreq Core
=============================================
diff --git a/Documentation/devicetree/bindings/cache/msm_cache_erp.txt b/Documentation/devicetree/bindings/cache/msm_cache_erp.txt
index 400b299..8d00cc2 100644
--- a/Documentation/devicetree/bindings/cache/msm_cache_erp.txt
+++ b/Documentation/devicetree/bindings/cache/msm_cache_erp.txt
@@ -7,6 +7,17 @@
- interrupt-names: Should contain the interrupt names "l1_irq" and
"l2_irq"
+Optional properties:
+- reg: A set of I/O regions to be dumped in the event of a hardware fault being
+  detected. If this property is present, the "reg-names" property must be
+ present as well.
+- reg-names: Human-readable names assigned to the I/O regions defined by the
+ "reg" property. The names can be completely arbitrary, since they are
+  intended to be read by humans during failure analysis, and because the set
+  of I/O regions of interest may vary with the overall system design. This
+  property shall only be present if the "reg" property is present, and must
+  contain as many elements as the "reg" property.
+
Example:
qcom,cache_erp {
compatible = "qcom,cache_erp";
@@ -14,3 +25,17 @@
interrupt-names = "l1_irq", "l2_irq";
};
+Example with "reg" property defined:
+ qcom,cache_erp@f9012000 {
+ reg = <0xf9012000 0x80>,
+ <0xf9089000 0x80>,
+ <0xf9099000 0x80>;
+
+ reg-names = "l2_saw",
+ "krait0_saw",
+ "krait1_saw";
+
+ compatible = "qcom,cache_erp";
+ interrupts = <1 9 0>, <0 2 0>;
+ interrupt-names = "l1_irq", "l2_irq";
+ };
diff --git a/Documentation/devicetree/bindings/fb/mdss-mdp.txt b/Documentation/devicetree/bindings/fb/mdss-mdp.txt
index bb19768..a3f3a06 100644
--- a/Documentation/devicetree/bindings/fb/mdss-mdp.txt
+++ b/Documentation/devicetree/bindings/fb/mdss-mdp.txt
@@ -118,6 +118,9 @@
Subnode properties:
- compatible : Must be "qcom,mdss-fb"
- cell-index : Index representing frame buffer
+- qcom,mdss-mixer-swap: A boolean property that indicates whether the mixer
+  muxes need to be swapped based on the target panel.
+ By default the property is not defined.
@@ -162,6 +165,7 @@
mdss_fb0: qcom,mdss_fb_primary {
cell-index = <0>;
compatible = "qcom,mdss-fb";
+ qcom,mdss-mixer-swap;
};
};
diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
index 65de56f..383e50d 100644
--- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
+++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
@@ -449,6 +449,10 @@
- qcom,prim-auxpcm-gpio-sync : GPIO on which Primary AUXPCM SYNC signal is coming.
- qcom,prim-auxpcm-gpio-din : GPIO on which Primary AUXPCM DIN signal is coming.
- qcom,prim-auxpcm-gpio-dout : GPIO on which Primary AUXPCM DOUT signal is coming.
+- qcom,prim-auxpcm-gpio-set : Set of GPIO lines used for the Primary AUXPCM
+  port. Possible values:
+ prim-gpio-prim : Primary AUXPCM shares GPIOs with Primary MI2S
+ prim-gpio-tert : Primary AUXPCM shares GPIOs with Tertiary MI2S
- qcom,sec-auxpcm-gpio-clk : GPIO on which Secondary AUXPCM clk signal is coming.
- qcom,sec-auxpcm-gpio-sync : GPIO on which Secondary AUXPCM SYNC signal is coming.
- qcom,sec-auxpcm-gpio-din : GPIO on which Secondary AUXPCM DIN signal is coming.
@@ -503,6 +507,7 @@
qcom,prim-auxpcm-gpio-sync = <&msmgpio 66 0>;
qcom,prim-auxpcm-gpio-din = <&msmgpio 67 0>;
qcom,prim-auxpcm-gpio-dout = <&msmgpio 68 0>;
+ qcom,prim-auxpcm-gpio-set = "prim-gpio-prim";
qcom,sec-auxpcm-gpio-clk = <&msmgpio 79 0>;
qcom,sec-auxpcm-gpio-sync = <&msmgpio 80 0>;
qcom,sec-auxpcm-gpio-din = <&msmgpio 81 0>;
diff --git a/arch/arm/boot/dts/msm8974-liquid.dtsi b/arch/arm/boot/dts/msm8974-liquid.dtsi
index a458869..d9acb81 100644
--- a/arch/arm/boot/dts/msm8974-liquid.dtsi
+++ b/arch/arm/boot/dts/msm8974-liquid.dtsi
@@ -347,6 +347,12 @@
qcom,ext-spk-amp-gpio = <&pm8841_mpps 1 0>;
qcom,dock-plug-det-irq = <&pm8841_mpps 2 0>;
qcom,hdmi-audio-rx;
+
+ qcom,prim-auxpcm-gpio-clk = <&msmgpio 74 0>;
+ qcom,prim-auxpcm-gpio-sync = <&msmgpio 75 0>;
+ qcom,prim-auxpcm-gpio-din = <&msmgpio 76 0>;
+ qcom,prim-auxpcm-gpio-dout = <&msmgpio 77 0>;
+ qcom,prim-auxpcm-gpio-set = "prim-gpio-tert";
};
hsic_hub {
diff --git a/arch/arm/boot/dts/msm8974.dtsi b/arch/arm/boot/dts/msm8974.dtsi
index 3227370..9b8353b 100644
--- a/arch/arm/boot/dts/msm8974.dtsi
+++ b/arch/arm/boot/dts/msm8974.dtsi
@@ -691,6 +691,7 @@
qcom,prim-auxpcm-gpio-sync = <&msmgpio 66 0>;
qcom,prim-auxpcm-gpio-din = <&msmgpio 67 0>;
qcom,prim-auxpcm-gpio-dout = <&msmgpio 68 0>;
+ qcom,prim-auxpcm-gpio-set = "prim-gpio-prim";
qcom,sec-auxpcm-gpio-clk = <&msmgpio 79 0>;
qcom,sec-auxpcm-gpio-sync = <&msmgpio 80 0>;
qcom,sec-auxpcm-gpio-din = <&msmgpio 81 0>;
@@ -1239,7 +1240,27 @@
qcom,firmware-name = "venus";
};
- qcom,cache_erp {
+ qcom,cache_erp@f9012000 {
+ reg = <0xf9012000 0x80>,
+ <0xf9089000 0x80>,
+ <0xf9099000 0x80>,
+ <0xf90a9000 0x80>,
+ <0xf90b9000 0x80>,
+ <0xf9088000 0x40>,
+ <0xf9098000 0x40>,
+ <0xf90a8000 0x40>,
+ <0xf90b8000 0x40>;
+
+ reg-names = "l2_saw",
+ "krait0_saw",
+ "krait1_saw",
+ "krait2_saw",
+ "krait3_saw",
+ "krait0_acs",
+ "krait1_acs",
+ "krait2_acs",
+ "krait3_acs";
+
compatible = "qcom,cache_erp";
interrupts = <1 9 0>, <0 2 0>;
interrupt-names = "l1_irq", "l2_irq";
diff --git a/arch/arm/configs/msm8974-perf_defconfig b/arch/arm/configs/msm8974-perf_defconfig
index 23244dc..59cafd1 100644
--- a/arch/arm/configs/msm8974-perf_defconfig
+++ b/arch/arm/configs/msm8974-perf_defconfig
@@ -350,6 +350,7 @@
CONFIG_ION=y
CONFIG_ION_MSM=y
CONFIG_MSM_KGSL=y
+CONFIG_KGSL_PER_PROCESS_PAGE_TABLE=y
CONFIG_FB=y
CONFIG_FB_MSM=y
# CONFIG_FB_MSM_BACKLIGHT is not set
diff --git a/arch/arm/configs/msm8974_defconfig b/arch/arm/configs/msm8974_defconfig
index df1837f..fd8a639 100644
--- a/arch/arm/configs/msm8974_defconfig
+++ b/arch/arm/configs/msm8974_defconfig
@@ -356,6 +356,7 @@
CONFIG_ION=y
CONFIG_ION_MSM=y
CONFIG_MSM_KGSL=y
+CONFIG_KGSL_PER_PROCESS_PAGE_TABLE=y
CONFIG_FB=y
CONFIG_FB_MSM=y
# CONFIG_FB_MSM_BACKLIGHT is not set
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index 5ca6d2b..b06ed1c 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -437,6 +437,7 @@
select CPU_FREQ
select CPU_FREQ_GOV_USERSPACE
select CPU_FREQ_GOV_ONDEMAND
+ select CPU_FREQ_GOV_POWERSAVE
select MSM_PIL
select MSM_RUN_QUEUE_STATS
select ARM_HAS_SG_CHAIN
diff --git a/arch/arm/mach-msm/acpuclock-cortex.c b/arch/arm/mach-msm/acpuclock-cortex.c
index 74ca145..47bf27a 100644
--- a/arch/arm/mach-msm/acpuclock-cortex.c
+++ b/arch/arm/mach-msm/acpuclock-cortex.c
@@ -129,12 +129,26 @@
pr_warn("acpu rcg didn't update its configuration\n");
}
-/*
- * This function can be called in both atomic and nonatomic context.
- * Since regulator APIS can sleep, we cannot always use the clk prepare
- * unprepare API.
- */
-static int set_speed(struct clkctl_acpu_speed *tgt_s, bool atomic)
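+/*
+ * Atomic-context variant: only the non-sleeping clk_enable()/clk_disable()
+ * calls are used, so the source clocks must already have been prepared.
+ * Reprogramming the ACPU PLL may sleep, so it is not allowed here (see the
+ * WARN below).
+ */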
+static int set_speed_atomic(struct clkctl_acpu_speed *tgt_s)
+{
+ struct clkctl_acpu_speed *strt_s = priv->current_speed;
+ struct clk *strt = priv->src_clocks[strt_s->src].clk;
+ struct clk *tgt = priv->src_clocks[tgt_s->src].clk;
+ int rc = 0;
+
+ WARN(strt_s->src == ACPUPLL && tgt_s->src == ACPUPLL,
+ "can't reprogram ACPUPLL during atomic context\n");
+ rc = clk_enable(tgt);
+ if (rc)
+ return rc;
+
+ select_clk_source_div(priv, tgt_s);
+ clk_disable(strt);
+
+ return rc;
+}
+
+static int set_speed(struct clkctl_acpu_speed *tgt_s)
{
int rc = 0;
unsigned int tgt_freq_hz = tgt_s->khz * 1000;
@@ -148,19 +162,13 @@
select_clk_source_div(priv, cxo_s);
/* Re-program acpu pll */
- if (atomic)
- clk_disable(tgt);
- else
- clk_disable_unprepare(tgt);
+ clk_disable_unprepare(tgt);
rc = clk_set_rate(tgt, tgt_freq_hz);
if (rc)
pr_err("Failed to set ACPU PLL to %u\n", tgt_freq_hz);
- if (atomic)
- BUG_ON(clk_enable(tgt));
- else
- BUG_ON(clk_prepare_enable(tgt));
+ BUG_ON(clk_prepare_enable(tgt));
/* Switch back to acpu pll */
select_clk_source_div(priv, tgt_s);
@@ -172,10 +180,7 @@
return rc;
}
- if (atomic)
- rc = clk_enable(tgt);
- else
- rc = clk_prepare_enable(tgt);
+ rc = clk_prepare_enable(tgt);
if (rc) {
pr_err("ACPU PLL enable failed\n");
@@ -184,16 +189,10 @@
select_clk_source_div(priv, tgt_s);
- if (atomic)
- clk_disable(strt);
- else
- clk_disable_unprepare(strt);
+ clk_disable_unprepare(strt);
} else {
- if (atomic)
- rc = clk_enable(tgt);
- else
- rc = clk_prepare_enable(tgt);
+ rc = clk_prepare_enable(tgt);
if (rc) {
pr_err("%s enable failed\n",
@@ -203,10 +202,7 @@
select_clk_source_div(priv, tgt_s);
- if (atomic)
- clk_disable(strt);
- else
- clk_disable_unprepare(strt);
+ clk_disable_unprepare(strt);
}
@@ -250,9 +246,9 @@
-	/* Switch CPU speed. Flag indicates atomic context */
+	/* Switch CPU speed; CPUFREQ/INIT requests may sleep, others are atomic. */
if (reason == SETRATE_CPUFREQ || reason == SETRATE_INIT)
- rc = set_speed(tgt_s, false);
+ rc = set_speed(tgt_s);
else
- rc = set_speed(tgt_s, true);
+ rc = set_speed_atomic(tgt_s);
if (rc)
goto out;
diff --git a/arch/arm/mach-msm/cache_erp.c b/arch/arm/mach-msm/cache_erp.c
index ddea91c..f52bc28 100644
--- a/arch/arm/mach-msm/cache_erp.c
+++ b/arch/arm/mach-msm/cache_erp.c
@@ -123,11 +123,18 @@
unsigned int mplxrexnok;
};
+struct msm_erp_dump_region {
+ struct resource *res;
+ void __iomem *va;
+};
+
static DEFINE_PER_CPU(struct msm_l1_err_stats, msm_l1_erp_stats);
static struct msm_l2_err_stats msm_l2_erp_stats;
static int l1_erp_irq, l2_erp_irq;
static struct proc_dir_entry *procfs_entry;
+static int num_dump_regions;
+static struct msm_erp_dump_region *dump_regions;
#ifdef CONFIG_MSM_L1_ERR_LOG
static struct proc_dir_entry *procfs_log_entry;
@@ -211,6 +218,22 @@
return len;
}
+static int msm_erp_dump_regions(void)
+{
+ int i = 0;
+ struct msm_erp_dump_region *r;
+
+ for (i = 0; i < num_dump_regions; i++) {
+ r = &dump_regions[i];
+
+ pr_alert("%s %pR:\n", r->res->name, r->res);
+ print_hex_dump(KERN_ALERT, "", DUMP_PREFIX_OFFSET, 32, 4, r->va,
+ resource_size(r->res), 0);
+ }
+
+ return 0;
+}
+
#ifdef CONFIG_MSM_L1_ERR_LOG
static int proc_read_log(char *page, char **start, off_t off, int count,
int *eof, void *data)
@@ -267,6 +290,7 @@
pr_alert("\tCESR = 0x%08x\n", cesr);
pr_alert("\tCPU speed = %lu\n", acpuclk_get_rate(cpu));
pr_alert("\tMIDR = 0x%08x\n", read_cpuid_id());
+ msm_erp_dump_regions();
}
if (cesr & CESR_DCTPE) {
@@ -425,6 +449,9 @@
if (port_error && print_alert)
ERP_PORT_ERR("L2 master port error detected");
+ if (soft_error && print_alert)
+ msm_erp_dump_regions();
+
if (soft_error && !unrecoverable)
ERP_1BIT_ERR("L2 single-bit error detected");
@@ -464,6 +491,37 @@
.notifier_call = cache_erp_cpu_callback,
};
+static int msm_erp_read_dump_regions(struct platform_device *pdev)
+{
+ int i;
+ struct device_node *np = pdev->dev.of_node;
+ struct resource *res;
+
+ num_dump_regions = of_property_count_strings(np, "reg-names");
+
+ if (num_dump_regions <= 0) {
+ num_dump_regions = 0;
+ return 0; /* Not an error - this is an optional property */
+ }
+
+ dump_regions = devm_kzalloc(&pdev->dev,
+ sizeof(*dump_regions) * num_dump_regions,
+ GFP_KERNEL);
+ if (!dump_regions)
+ return -ENOMEM;
+
+ for (i = 0; i < num_dump_regions; i++) {
+		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+		if (!res)
+			return -EINVAL;
+
+		dump_regions[i].res = res;
+ dump_regions[i].va = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!dump_regions[i].va)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
static int msm_cache_erp_probe(struct platform_device *pdev)
{
struct resource *r;
@@ -511,6 +569,11 @@
goto fail_l2;
}
+	ret = msm_erp_read_dump_regions(pdev);
+ if (ret)
+ goto fail_l2;
+
get_online_cpus();
register_hotcpu_notifier(&cache_erp_cpu_notifier);
for_each_cpu(cpu, cpu_online_mask)
diff --git a/arch/arm/mach-msm/clock-8226.c b/arch/arm/mach-msm/clock-8226.c
index 6d76c41..cd0d337 100644
--- a/arch/arm/mach-msm/clock-8226.c
+++ b/arch/arm/mach-msm/clock-8226.c
@@ -1709,13 +1709,19 @@
F_MMSS( 100000000, gpll0, 6, 0, 0),
F_MMSS( 109090000, gpll0, 5.5, 0, 0),
F_MMSS( 133330000, gpll0, 4.5, 0, 0),
+ F_MMSS( 150000000, gpll0, 4, 0, 0),
F_MMSS( 200000000, gpll0, 3, 0, 0),
F_MMSS( 228570000, mmpll0_pll, 3.5, 0, 0),
F_MMSS( 266670000, mmpll0_pll, 3, 0, 0),
F_MMSS( 320000000, mmpll0_pll, 2.5, 0, 0),
+ F_MMSS( 400000000, mmpll0_pll, 2, 0, 0),
F_END
};
+static unsigned long camss_vfe_vfe0_fmax_v2[VDD_DIG_NUM] = {
+ 150000000, 320000000, 400000000,
+};
+
static struct rcg_clk vfe0_clk_src = {
.cmd_rcgr_reg = VFE0_CMD_RCGR,
.set_rate = set_rate_hid,
@@ -1972,11 +1978,17 @@
static struct clk_freq_tbl ftbl_camss_vfe_cpp_clk[] = {
F_MMSS( 133330000, gpll0, 4.5, 0, 0),
+ F_MMSS( 150000000, gpll0, 4, 0, 0),
F_MMSS( 266670000, mmpll0_pll, 3, 0, 0),
F_MMSS( 320000000, mmpll0_pll, 2.5, 0, 0),
+ F_MMSS( 400000000, mmpll0_pll, 2, 0, 0),
F_END
};
+static unsigned long camss_vfe_cpp_fmax_v2[VDD_DIG_NUM] = {
+ 150000000, 320000000, 400000000,
+};
+
static struct rcg_clk cpp_clk_src = {
.cmd_rcgr_reg = CPP_CMD_RCGR,
.set_rate = set_rate_hid,
@@ -2810,8 +2822,8 @@
static DEFINE_CLK_VOTER(pnoc_sps_clk, &pnoc_clk.c, LONG_MAX);
-static DEFINE_CLK_VOTER(qseecom_ce1_clk_src, &ce1_clk_src.c, LONG_MAX);
-static DEFINE_CLK_VOTER(scm_ce1_clk_src, &ce1_clk_src.c, LONG_MAX);
+static DEFINE_CLK_VOTER(qseecom_ce1_clk_src, &ce1_clk_src.c, 100000000);
+static DEFINE_CLK_VOTER(scm_ce1_clk_src, &ce1_clk_src.c, 100000000);
static DEFINE_CLK_BRANCH_VOTER(cxo_otg_clk, &xo.c);
static DEFINE_CLK_BRANCH_VOTER(cxo_pil_lpass_clk, &xo.c);
@@ -3545,6 +3557,12 @@
reg_init();
+ /* v2 specific changes */
+ if (SOCINFO_VERSION_MAJOR(socinfo_get_version()) == 2) {
+ cpp_clk_src.c.fmax = camss_vfe_cpp_fmax_v2;
+ vfe0_clk_src.c.fmax = camss_vfe_vfe0_fmax_v2;
+ }
+
/*
* MDSS needs the ahb clock and needs to init before we register the
* lookup table.
diff --git a/arch/arm/mach-msm/remote_spinlock.c b/arch/arm/mach-msm/remote_spinlock.c
index 94923a0..62e3e05 100644
--- a/arch/arm/mach-msm/remote_spinlock.c
+++ b/arch/arm/mach-msm/remote_spinlock.c
@@ -143,6 +143,7 @@
}
/* end dekkers implementation ----------------------------------------------- */
+#ifndef CONFIG_THUMB2_KERNEL
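+/*
+ * The implementation below relies on the deprecated SWP instruction, which
+ * has no Thumb-2 encoding, so it is compiled out for Thumb-2 kernels.
+ */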
/* swp implementation ------------------------------------------------------- */
static void __raw_remote_swp_spin_lock(raw_remote_spinlock_t *lock)
{
@@ -194,6 +195,7 @@
: "cc");
}
/* end swp implementation --------------------------------------------------- */
+#endif
/* ldrex implementation ----------------------------------------------------- */
static char *ldrex_compatible_string = "qcom,ipc-spinlock-ldrex";
@@ -431,6 +433,7 @@
current_ops.owner = __raw_remote_dek_spin_owner;
is_hw_lock_type = 0;
break;
+#ifndef CONFIG_THUMB2_KERNEL
case SWP_MODE:
current_ops.lock = __raw_remote_swp_spin_lock;
current_ops.unlock = __raw_remote_swp_spin_unlock;
@@ -439,6 +442,7 @@
current_ops.owner = __raw_remote_gen_spin_owner;
is_hw_lock_type = 0;
break;
+#endif
case LDREX_MODE:
current_ops.lock = __raw_remote_ex_spin_lock;
current_ops.unlock = __raw_remote_ex_spin_unlock;
diff --git a/drivers/coresight/coresight-csr.c b/drivers/coresight/coresight-csr.c
index 988d1c9..1c2ab25 100644
--- a/drivers/coresight/coresight-csr.c
+++ b/drivers/coresight/coresight-csr.c
@@ -102,7 +102,7 @@
CSR_LOCK(drvdata);
}
-EXPORT_SYMBOL_GPL(msm_qdss_csr_enable_bam_to_usb);
+EXPORT_SYMBOL(msm_qdss_csr_enable_bam_to_usb);
void msm_qdss_csr_disable_bam_to_usb(void)
{
@@ -117,7 +117,7 @@
CSR_LOCK(drvdata);
}
-EXPORT_SYMBOL_GPL(msm_qdss_csr_disable_bam_to_usb);
+EXPORT_SYMBOL(msm_qdss_csr_disable_bam_to_usb);
void msm_qdss_csr_disable_flush(void)
{
@@ -132,7 +132,7 @@
CSR_LOCK(drvdata);
}
-EXPORT_SYMBOL_GPL(msm_qdss_csr_disable_flush);
+EXPORT_SYMBOL(msm_qdss_csr_disable_flush);
static int __devinit csr_probe(struct platform_device *pdev)
{
diff --git a/drivers/coresight/coresight-stm.c b/drivers/coresight/coresight-stm.c
index 87cf63a..7d4dabe 100644
--- a/drivers/coresight/coresight-stm.c
+++ b/drivers/coresight/coresight-stm.c
@@ -595,7 +595,7 @@
return __stm_trace(options, entity_id, proto_id, data, size);
}
-EXPORT_SYMBOL_GPL(stm_trace);
+EXPORT_SYMBOL(stm_trace);
static ssize_t stm_write(struct file *file, const char __user *data,
size_t size, loff_t *ppos)
diff --git a/drivers/coresight/coresight.c b/drivers/coresight/coresight.c
index aef3d26..e237fb7 100644
--- a/drivers/coresight/coresight.c
+++ b/drivers/coresight/coresight.c
@@ -368,7 +368,7 @@
pr_err("coresight: enable failed\n");
return ret;
}
-EXPORT_SYMBOL_GPL(coresight_enable);
+EXPORT_SYMBOL(coresight_enable);
void coresight_disable(struct coresight_device *csdev)
{
@@ -391,7 +391,7 @@
out:
up(&coresight_mutex);
}
-EXPORT_SYMBOL_GPL(coresight_disable);
+EXPORT_SYMBOL(coresight_disable);
void coresight_abort(void)
{
@@ -415,7 +415,7 @@
out:
up(&coresight_mutex);
}
-EXPORT_SYMBOL_GPL(coresight_abort);
+EXPORT_SYMBOL(coresight_abort);
static ssize_t coresight_show_type(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -681,7 +681,7 @@
err_kzalloc_csdev:
return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(coresight_register);
+EXPORT_SYMBOL(coresight_register);
void coresight_unregister(struct coresight_device *csdev)
{
@@ -693,7 +693,7 @@
put_device(&csdev->dev);
}
}
-EXPORT_SYMBOL_GPL(coresight_unregister);
+EXPORT_SYMBOL(coresight_unregister);
static int __init coresight_init(void)
{
diff --git a/drivers/coresight/of_coresight.c b/drivers/coresight/of_coresight.c
index 1eccd09..8b8c0d62 100644
--- a/drivers/coresight/of_coresight.c
+++ b/drivers/coresight/of_coresight.c
@@ -97,7 +97,7 @@
"coresight-default-sink");
return pdata;
}
-EXPORT_SYMBOL_GPL(of_get_coresight_platform_data);
+EXPORT_SYMBOL(of_get_coresight_platform_data);
struct coresight_cti_data *of_get_coresight_cti_data(
struct device *dev, struct device_node *node)
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c
index 63cdc68..7d1952c 100644
--- a/drivers/cpufreq/cpufreq_interactive.c
+++ b/drivers/cpufreq/cpufreq_interactive.c
@@ -20,97 +20,94 @@
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
-#include <linux/mutex.h>
+#include <linux/moduleparam.h>
+#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
-#include <linux/mutex.h>
#include <linux/slab.h>
-#include <linux/input.h>
#include <asm/cputime.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>
-static atomic_t active_count = ATOMIC_INIT(0);
+static int active_count;
struct cpufreq_interactive_cpuinfo {
struct timer_list cpu_timer;
- int timer_idlecancel;
+ struct timer_list cpu_slack_timer;
+ spinlock_t load_lock; /* protects the next 4 fields */
u64 time_in_idle;
- u64 idle_exit_time;
- u64 timer_run_time;
- int idling;
- u64 target_set_time;
- u64 target_set_time_in_idle;
+ u64 time_in_idle_timestamp;
+ u64 cputime_speedadj;
+ u64 cputime_speedadj_timestamp;
struct cpufreq_policy *policy;
struct cpufreq_frequency_table *freq_table;
unsigned int target_freq;
unsigned int floor_freq;
u64 floor_validate_time;
u64 hispeed_validate_time;
+ struct rw_semaphore enable_sem;
int governor_enabled;
};
static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);
-/* Workqueues handle frequency scaling */
-static struct task_struct *up_task;
-static struct workqueue_struct *down_wq;
-static struct work_struct freq_scale_down_work;
-static cpumask_t up_cpumask;
-static spinlock_t up_cpumask_lock;
-static cpumask_t down_cpumask;
-static spinlock_t down_cpumask_lock;
-static struct mutex set_speed_lock;
+/* realtime thread handles frequency scaling */
+static struct task_struct *speedchange_task;
+static cpumask_t speedchange_cpumask;
+static spinlock_t speedchange_cpumask_lock;
+static struct mutex gov_lock;
/* Hi speed to bump to from lo speed when load burst (default max) */
-static u64 hispeed_freq;
+static unsigned int hispeed_freq;
/* Go to hi speed when CPU load at or above this value. */
-#define DEFAULT_GO_HISPEED_LOAD 85
-static unsigned long go_hispeed_load;
+#define DEFAULT_GO_HISPEED_LOAD 99
+static unsigned long go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
+
+/* Target load. Lower values result in higher CPU speeds. */
+#define DEFAULT_TARGET_LOAD 90
+static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
+static spinlock_t target_loads_lock;
+static unsigned int *target_loads = default_target_loads;
+static int ntarget_loads = ARRAY_SIZE(default_target_loads);
/*
* The minimum amount of time to spend at a frequency before we can ramp down.
*/
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
-static unsigned long min_sample_time;
+static unsigned long min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
/*
* The sample rate of the timer used to increase frequency
*/
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
-static unsigned long timer_rate;
+static unsigned long timer_rate = DEFAULT_TIMER_RATE;
/*
* Wait this long before raising speed above hispeed, by default a single
* timer interval.
*/
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
-static unsigned long above_hispeed_delay_val;
+static unsigned long above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;
-/*
- * Boost pulse to hispeed on touchscreen input.
- */
-
-static int input_boost_val;
-
-struct cpufreq_interactive_inputopen {
- struct input_handle *handle;
- struct work_struct inputopen_work;
-};
-
-static struct cpufreq_interactive_inputopen inputopen;
-
-/*
- * Non-zero means longer-term speed boost active.
- */
-
+/* Non-zero means indefinite speed boost active */
static int boost_val;
+/* Duration of a boost pulse in usecs */
+static int boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
+/* End time of boost pulse in ktime converted to usecs */
+static u64 boostpulse_endtime;
+
+/*
+ * Max additional time to wait in idle, beyond timer_rate, at speeds above
+ * minimum before wakeup to reduce speed, or -1 if unnecessary.
+ */
+#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
+static int timer_slack_val = DEFAULT_TIMER_SLACK;
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
unsigned int event);
@@ -125,104 +122,210 @@
.owner = THIS_MODULE,
};
-static void cpufreq_interactive_timer(unsigned long data)
+static void cpufreq_interactive_timer_resched(
+ struct cpufreq_interactive_cpuinfo *pcpu)
{
- unsigned int delta_idle;
- unsigned int delta_time;
- int cpu_load;
- int load_since_change;
- u64 time_in_idle;
- u64 idle_exit_time;
- struct cpufreq_interactive_cpuinfo *pcpu =
- &per_cpu(cpuinfo, data);
- u64 now_idle;
- unsigned int new_freq;
- unsigned int index;
+ unsigned long expires = jiffies + usecs_to_jiffies(timer_rate);
unsigned long flags;
- smp_rmb();
+ mod_timer_pinned(&pcpu->cpu_timer, expires);
+ if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
+ expires += usecs_to_jiffies(timer_slack_val);
+ mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
+ }
+ spin_lock_irqsave(&pcpu->load_lock, flags);
+ pcpu->time_in_idle =
+ get_cpu_idle_time_us(smp_processor_id(),
+ &pcpu->time_in_idle_timestamp);
+ pcpu->cputime_speedadj = 0;
+ pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
+ spin_unlock_irqrestore(&pcpu->load_lock, flags);
+}
+
+static unsigned int freq_to_targetload(unsigned int freq)
+{
+ int i;
+ unsigned int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&target_loads_lock, flags);
+
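+	/*
+	 * target_loads[] interleaves target loads (even indices) with the
+	 * speeds they apply from (odd indices), e.g. {85, 1000000, 90,
+	 * 1700000, 99} for "85 1000000:90 1700000:99" in the documentation.
+	 */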
+ for (i = 0; i < ntarget_loads - 1 && freq >= target_loads[i+1]; i += 2)
+ ;
+
+ ret = target_loads[i];
+ spin_unlock_irqrestore(&target_loads_lock, flags);
+ return ret;
+}
+
+/*
+ * If increasing frequencies never map to a lower target load, then
+ * choose_freq() will find the minimum frequency that does not exceed its
+ * target load, given the current load.
+ */
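+
+/*
+ * Worked example (hypothetical 300/600/900 MHz table, flat target load 90):
+ * at cur = 600000 kHz and 99% load, loadadjfreq = 99 * 600000 = 59400000,
+ * so the first pass picks the lowest table frequency >= 59400000 / 90 =
+ * 660000, i.e. 900000 kHz; the next pass re-evaluates at that speed and
+ * the loop exits once the chosen frequency stops changing.
+ */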
+
+static unsigned int choose_freq(
+ struct cpufreq_interactive_cpuinfo *pcpu, unsigned int loadadjfreq)
+{
+ unsigned int freq = pcpu->policy->cur;
+ unsigned int prevfreq, freqmin, freqmax;
+ unsigned int tl;
+ int index;
+
+ freqmin = 0;
+ freqmax = UINT_MAX;
+
+ do {
+ prevfreq = freq;
+ tl = freq_to_targetload(freq);
+
+ /*
+ * Find the lowest frequency where the computed load is less
+ * than or equal to the target load.
+ */
+
+ cpufreq_frequency_table_target(
+ pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
+ CPUFREQ_RELATION_L, &index);
+ freq = pcpu->freq_table[index].frequency;
+
+ if (freq > prevfreq) {
+ /* The previous frequency is too low. */
+ freqmin = prevfreq;
+
+ if (freq >= freqmax) {
+ /*
+ * Find the highest frequency that is less
+ * than freqmax.
+ */
+ cpufreq_frequency_table_target(
+ pcpu->policy, pcpu->freq_table,
+ freqmax - 1, CPUFREQ_RELATION_H,
+ &index);
+ freq = pcpu->freq_table[index].frequency;
+
+ if (freq == freqmin) {
+ /*
+ * The first frequency below freqmax
+ * has already been found to be too
+ * low. freqmax is the lowest speed
+ * we found that is fast enough.
+ */
+ freq = freqmax;
+ break;
+ }
+ }
+ } else if (freq < prevfreq) {
+ /* The previous frequency is high enough. */
+ freqmax = prevfreq;
+
+ if (freq <= freqmin) {
+ /*
+ * Find the lowest frequency that is higher
+ * than freqmin.
+ */
+ cpufreq_frequency_table_target(
+ pcpu->policy, pcpu->freq_table,
+ freqmin + 1, CPUFREQ_RELATION_L,
+ &index);
+ freq = pcpu->freq_table[index].frequency;
+
+ /*
+ * If freqmax is the first frequency above
+ * freqmin then we have already found that
+ * this speed is fast enough.
+ */
+ if (freq == freqmax)
+ break;
+ }
+ }
+
+ /* If same frequency chosen as previous then done. */
+ } while (freq != prevfreq);
+
+ return freq;
+}
+
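+/*
+ * Accumulate (active time * current frequency) into cputime_speedadj; the
+ * sampling timer later divides this sum by elapsed wall time to obtain a
+ * frequency-weighted average load (loadadjfreq).
+ */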
+static u64 update_load(int cpu)
+{
+ struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
+ u64 now;
+ u64 now_idle;
+ unsigned int delta_idle;
+ unsigned int delta_time;
+ u64 active_time;
+
+ now_idle = get_cpu_idle_time_us(cpu, &now);
+ delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
+ delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);
+ active_time = delta_time - delta_idle;
+ pcpu->cputime_speedadj += active_time * pcpu->policy->cur;
+
+ pcpu->time_in_idle = now_idle;
+ pcpu->time_in_idle_timestamp = now;
+ return now;
+}
+
+static void cpufreq_interactive_timer(unsigned long data)
+{
+ u64 now;
+ unsigned int delta_time;
+ u64 cputime_speedadj;
+ int cpu_load;
+ struct cpufreq_interactive_cpuinfo *pcpu =
+ &per_cpu(cpuinfo, data);
+ unsigned int new_freq;
+ unsigned int loadadjfreq;
+ unsigned int index;
+ unsigned long flags;
+ bool boosted;
+
+ if (!down_read_trylock(&pcpu->enable_sem))
+ return;
if (!pcpu->governor_enabled)
goto exit;
- /*
- * Once pcpu->timer_run_time is updated to >= pcpu->idle_exit_time,
- * this lets idle exit know the current idle time sample has
- * been processed, and idle exit can generate a new sample and
- * re-arm the timer. This prevents a concurrent idle
- * exit on that CPU from writing a new set of info at the same time
- * the timer function runs (the timer function can't use that info
- * until more time passes).
- */
- time_in_idle = pcpu->time_in_idle;
- idle_exit_time = pcpu->idle_exit_time;
- now_idle = get_cpu_idle_time_us(data, &pcpu->timer_run_time);
- smp_wmb();
+ spin_lock_irqsave(&pcpu->load_lock, flags);
+ now = update_load(data);
+ delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
+ cputime_speedadj = pcpu->cputime_speedadj;
+ spin_unlock_irqrestore(&pcpu->load_lock, flags);
- /* If we raced with cancelling a timer, skip. */
- if (!idle_exit_time)
- goto exit;
-
- delta_idle = (unsigned int)(now_idle - time_in_idle);
- delta_time = (unsigned int)(pcpu->timer_run_time - idle_exit_time);
-
- /*
- * If timer ran less than 1ms after short-term sample started, retry.
- */
- if (delta_time < 1000)
+ if (WARN_ON_ONCE(!delta_time))
goto rearm;
- if (delta_idle > delta_time)
- cpu_load = 0;
- else
- cpu_load = 100 * (delta_time - delta_idle) / delta_time;
+ do_div(cputime_speedadj, delta_time);
+ loadadjfreq = (unsigned int)cputime_speedadj * 100;
+ cpu_load = loadadjfreq / pcpu->target_freq;
+ boosted = boost_val || now < boostpulse_endtime;
- delta_idle = (unsigned int)(now_idle - pcpu->target_set_time_in_idle);
- delta_time = (unsigned int)(pcpu->timer_run_time -
- pcpu->target_set_time);
-
- if ((delta_time == 0) || (delta_idle > delta_time))
- load_since_change = 0;
- else
- load_since_change =
- 100 * (delta_time - delta_idle) / delta_time;
-
- /*
- * Choose greater of short-term load (since last idle timer
- * started or timer function re-armed itself) or long-term load
- * (since last frequency change).
- */
- if (load_since_change > cpu_load)
- cpu_load = load_since_change;
-
- if (cpu_load >= go_hispeed_load || boost_val) {
- if (pcpu->target_freq <= pcpu->policy->min) {
+ if (cpu_load >= go_hispeed_load || boosted) {
+ if (pcpu->target_freq < hispeed_freq) {
new_freq = hispeed_freq;
} else {
- new_freq = pcpu->policy->max * cpu_load / 100;
+ new_freq = choose_freq(pcpu, loadadjfreq);
if (new_freq < hispeed_freq)
new_freq = hispeed_freq;
-
- if (pcpu->target_freq == hispeed_freq &&
- new_freq > hispeed_freq &&
- pcpu->timer_run_time - pcpu->hispeed_validate_time
- < above_hispeed_delay_val) {
- trace_cpufreq_interactive_notyet(data, cpu_load,
- pcpu->target_freq,
- new_freq);
- goto rearm;
- }
}
} else {
- new_freq = pcpu->policy->max * cpu_load / 100;
+ new_freq = choose_freq(pcpu, loadadjfreq);
}
- if (new_freq <= hispeed_freq)
- pcpu->hispeed_validate_time = pcpu->timer_run_time;
+ if (pcpu->target_freq >= hispeed_freq &&
+ new_freq > pcpu->target_freq &&
+ now - pcpu->hispeed_validate_time < above_hispeed_delay_val) {
+ trace_cpufreq_interactive_notyet(
+ data, cpu_load, pcpu->target_freq,
+ pcpu->policy->cur, new_freq);
+ goto rearm;
+ }
+
+ pcpu->hispeed_validate_time = now;
if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
- new_freq, CPUFREQ_RELATION_H,
+ new_freq, CPUFREQ_RELATION_L,
&index)) {
pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
(int) data);
@@ -236,41 +339,42 @@
* floor frequency for the minimum sample time since last validated.
*/
if (new_freq < pcpu->floor_freq) {
- if (pcpu->timer_run_time - pcpu->floor_validate_time
- < min_sample_time) {
- trace_cpufreq_interactive_notyet(data, cpu_load,
- pcpu->target_freq, new_freq);
+ if (now - pcpu->floor_validate_time < min_sample_time) {
+ trace_cpufreq_interactive_notyet(
+ data, cpu_load, pcpu->target_freq,
+ pcpu->policy->cur, new_freq);
goto rearm;
}
}
- pcpu->floor_freq = new_freq;
- pcpu->floor_validate_time = pcpu->timer_run_time;
+ /*
+ * Update the timestamp for checking whether speed has been held at
+ * or above the selected frequency for a minimum of min_sample_time,
+ * if not boosted to hispeed_freq. If boosted to hispeed_freq then we
+ * allow the speed to drop as soon as the boostpulse duration expires
+ * (or the indefinite boost is turned off).
+ */
+
+ if (!boosted || new_freq > hispeed_freq) {
+ pcpu->floor_freq = new_freq;
+ pcpu->floor_validate_time = now;
+ }
if (pcpu->target_freq == new_freq) {
- trace_cpufreq_interactive_already(data, cpu_load,
- pcpu->target_freq, new_freq);
+ trace_cpufreq_interactive_already(
+ data, cpu_load, pcpu->target_freq,
+ pcpu->policy->cur, new_freq);
goto rearm_if_notmax;
}
trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
- new_freq);
- pcpu->target_set_time_in_idle = now_idle;
- pcpu->target_set_time = pcpu->timer_run_time;
+ pcpu->policy->cur, new_freq);
- if (new_freq < pcpu->target_freq) {
- pcpu->target_freq = new_freq;
- spin_lock_irqsave(&down_cpumask_lock, flags);
- cpumask_set_cpu(data, &down_cpumask);
- spin_unlock_irqrestore(&down_cpumask_lock, flags);
- queue_work(down_wq, &freq_scale_down_work);
- } else {
- pcpu->target_freq = new_freq;
- spin_lock_irqsave(&up_cpumask_lock, flags);
- cpumask_set_cpu(data, &up_cpumask);
- spin_unlock_irqrestore(&up_cpumask_lock, flags);
- wake_up_process(up_task);
- }
+ pcpu->target_freq = new_freq;
+ spin_lock_irqsave(&speedchange_cpumask_lock, flags);
+ cpumask_set_cpu(data, &speedchange_cpumask);
+ spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
+ wake_up_process(speedchange_task);
rearm_if_notmax:
/*
@@ -281,28 +385,11 @@
goto exit;
rearm:
- if (!timer_pending(&pcpu->cpu_timer)) {
- /*
- * If already at min: if that CPU is idle, don't set timer.
- * Else cancel the timer if that CPU goes idle. We don't
- * need to re-evaluate speed until the next idle exit.
- */
- if (pcpu->target_freq == pcpu->policy->min) {
- smp_rmb();
-
- if (pcpu->idling)
- goto exit;
-
- pcpu->timer_idlecancel = 1;
- }
-
- pcpu->time_in_idle = get_cpu_idle_time_us(
- data, &pcpu->idle_exit_time);
- mod_timer(&pcpu->cpu_timer,
- jiffies + usecs_to_jiffies(timer_rate));
- }
+ if (!timer_pending(&pcpu->cpu_timer))
+ cpufreq_interactive_timer_resched(pcpu);
exit:
+ up_read(&pcpu->enable_sem);
return;
}
@@ -312,15 +399,16 @@
&per_cpu(cpuinfo, smp_processor_id());
int pending;
- if (!pcpu->governor_enabled)
+ if (!down_read_trylock(&pcpu->enable_sem))
return;
+ if (!pcpu->governor_enabled) {
+ up_read(&pcpu->enable_sem);
+ return;
+ }
- pcpu->idling = 1;
- smp_wmb();
pending = timer_pending(&pcpu->cpu_timer);
if (pcpu->target_freq != pcpu->policy->min) {
-#ifdef CONFIG_SMP
/*
* Entering idle while not at lowest speed. On some
* platforms this can hold the other CPU(s) at that speed
@@ -329,33 +417,11 @@
* min indefinitely. This should probably be a quirk of
* the CPUFreq driver.
*/
- if (!pending) {
- pcpu->time_in_idle = get_cpu_idle_time_us(
- smp_processor_id(), &pcpu->idle_exit_time);
- pcpu->timer_idlecancel = 0;
- mod_timer(&pcpu->cpu_timer,
- jiffies + usecs_to_jiffies(timer_rate));
- }
-#endif
- } else {
- /*
- * If at min speed and entering idle after load has
- * already been evaluated, and a timer has been set just in
- * case the CPU suddenly goes busy, cancel that timer. The
- * CPU didn't go busy; we'll recheck things upon idle exit.
- */
- if (pending && pcpu->timer_idlecancel) {
- del_timer(&pcpu->cpu_timer);
- /*
- * Ensure last timer run time is after current idle
- * sample start time, so next idle exit will always
- * start a new idle sampling period.
- */
- pcpu->idle_exit_time = 0;
- pcpu->timer_idlecancel = 0;
- }
+ if (!pending)
+ cpufreq_interactive_timer_resched(pcpu);
}
+ up_read(&pcpu->enable_sem);
}
static void cpufreq_interactive_idle_end(void)
@@ -363,34 +429,26 @@
struct cpufreq_interactive_cpuinfo *pcpu =
&per_cpu(cpuinfo, smp_processor_id());
- pcpu->idling = 0;
- smp_wmb();
-
- /*
- * Arm the timer for 1-2 ticks later if not already, and if the timer
- * function has already processed the previous load sampling
- * interval. (If the timer is not pending but has not processed
- * the previous interval, it is probably racing with us on another
- * CPU. Let it compute load based on the previous sample and then
- * re-arm the timer for another interval when it's done, rather
- * than updating the interval start time to be "now", which doesn't
- * give the timer function enough time to make a decision on this
- * run.)
- */
- if (timer_pending(&pcpu->cpu_timer) == 0 &&
- pcpu->timer_run_time >= pcpu->idle_exit_time &&
- pcpu->governor_enabled) {
- pcpu->time_in_idle =
- get_cpu_idle_time_us(smp_processor_id(),
- &pcpu->idle_exit_time);
- pcpu->timer_idlecancel = 0;
- mod_timer(&pcpu->cpu_timer,
- jiffies + usecs_to_jiffies(timer_rate));
+ if (!down_read_trylock(&pcpu->enable_sem))
+ return;
+ if (!pcpu->governor_enabled) {
+ up_read(&pcpu->enable_sem);
+ return;
}
+ /* Arm the timer for 1-2 ticks later if not already. */
+ if (!timer_pending(&pcpu->cpu_timer)) {
+ cpufreq_interactive_timer_resched(pcpu);
+ } else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
+ del_timer(&pcpu->cpu_timer);
+ del_timer(&pcpu->cpu_slack_timer);
+ cpufreq_interactive_timer(smp_processor_id());
+ }
+
+ up_read(&pcpu->enable_sem);
}
-static int cpufreq_interactive_up_task(void *data)
+static int cpufreq_interactive_speedchange_task(void *data)
{
unsigned int cpu;
cpumask_t tmp_mask;
@@ -399,34 +457,35 @@
while (1) {
set_current_state(TASK_INTERRUPTIBLE);
- spin_lock_irqsave(&up_cpumask_lock, flags);
+ spin_lock_irqsave(&speedchange_cpumask_lock, flags);
- if (cpumask_empty(&up_cpumask)) {
- spin_unlock_irqrestore(&up_cpumask_lock, flags);
+ if (cpumask_empty(&speedchange_cpumask)) {
+ spin_unlock_irqrestore(&speedchange_cpumask_lock,
+ flags);
schedule();
if (kthread_should_stop())
break;
- spin_lock_irqsave(&up_cpumask_lock, flags);
+ spin_lock_irqsave(&speedchange_cpumask_lock, flags);
}
set_current_state(TASK_RUNNING);
- tmp_mask = up_cpumask;
- cpumask_clear(&up_cpumask);
- spin_unlock_irqrestore(&up_cpumask_lock, flags);
+ tmp_mask = speedchange_cpumask;
+ cpumask_clear(&speedchange_cpumask);
+ spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
for_each_cpu(cpu, &tmp_mask) {
unsigned int j;
unsigned int max_freq = 0;
pcpu = &per_cpu(cpuinfo, cpu);
- smp_rmb();
-
- if (!pcpu->governor_enabled)
+ if (!down_read_trylock(&pcpu->enable_sem))
continue;
-
- mutex_lock(&set_speed_lock);
+ if (!pcpu->governor_enabled) {
+ up_read(&pcpu->enable_sem);
+ continue;
+ }
for_each_cpu(j, pcpu->policy->cpus) {
struct cpufreq_interactive_cpuinfo *pjcpu =
@@ -440,57 +499,17 @@
__cpufreq_driver_target(pcpu->policy,
max_freq,
CPUFREQ_RELATION_H);
- mutex_unlock(&set_speed_lock);
- trace_cpufreq_interactive_up(cpu, pcpu->target_freq,
+ trace_cpufreq_interactive_setspeed(cpu,
+ pcpu->target_freq,
pcpu->policy->cur);
+
+ up_read(&pcpu->enable_sem);
}
}
return 0;
}
-static void cpufreq_interactive_freq_down(struct work_struct *work)
-{
- unsigned int cpu;
- cpumask_t tmp_mask;
- unsigned long flags;
- struct cpufreq_interactive_cpuinfo *pcpu;
-
- spin_lock_irqsave(&down_cpumask_lock, flags);
- tmp_mask = down_cpumask;
- cpumask_clear(&down_cpumask);
- spin_unlock_irqrestore(&down_cpumask_lock, flags);
-
- for_each_cpu(cpu, &tmp_mask) {
- unsigned int j;
- unsigned int max_freq = 0;
-
- pcpu = &per_cpu(cpuinfo, cpu);
- smp_rmb();
-
- if (!pcpu->governor_enabled)
- continue;
-
- mutex_lock(&set_speed_lock);
-
- for_each_cpu(j, pcpu->policy->cpus) {
- struct cpufreq_interactive_cpuinfo *pjcpu =
- &per_cpu(cpuinfo, j);
-
- if (pjcpu->target_freq > max_freq)
- max_freq = pjcpu->target_freq;
- }
-
- if (max_freq != pcpu->policy->cur)
- __cpufreq_driver_target(pcpu->policy, max_freq,
- CPUFREQ_RELATION_H);
-
- mutex_unlock(&set_speed_lock);
- trace_cpufreq_interactive_down(cpu, pcpu->target_freq,
- pcpu->policy->cur);
- }
-}
-
static void cpufreq_interactive_boost(void)
{
int i;
@@ -498,17 +517,16 @@
unsigned long flags;
struct cpufreq_interactive_cpuinfo *pcpu;
- spin_lock_irqsave(&up_cpumask_lock, flags);
+ spin_lock_irqsave(&speedchange_cpumask_lock, flags);
for_each_online_cpu(i) {
pcpu = &per_cpu(cpuinfo, i);
if (pcpu->target_freq < hispeed_freq) {
pcpu->target_freq = hispeed_freq;
- cpumask_set_cpu(i, &up_cpumask);
- pcpu->target_set_time_in_idle =
- get_cpu_idle_time_us(i, &pcpu->target_set_time);
- pcpu->hispeed_validate_time = pcpu->target_set_time;
+ cpumask_set_cpu(i, &speedchange_cpumask);
+ pcpu->hispeed_validate_time =
+ ktime_to_us(ktime_get());
anyboost = 1;
}
@@ -521,106 +539,126 @@
pcpu->floor_validate_time = ktime_to_us(ktime_get());
}
- spin_unlock_irqrestore(&up_cpumask_lock, flags);
+ spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
if (anyboost)
- wake_up_process(up_task);
+ wake_up_process(speedchange_task);
}
-/*
- * Pulsed boost on input event raises CPUs to hispeed_freq and lets
- * usual algorithm of min_sample_time decide when to allow speed
- * to drop.
- */
-
-static void cpufreq_interactive_input_event(struct input_handle *handle,
- unsigned int type,
- unsigned int code, int value)
+static int cpufreq_interactive_notifier(
+ struct notifier_block *nb, unsigned long val, void *data)
{
- if (input_boost_val && type == EV_SYN && code == SYN_REPORT) {
- trace_cpufreq_interactive_boost("input");
- cpufreq_interactive_boost();
+ struct cpufreq_freqs *freq = data;
+ struct cpufreq_interactive_cpuinfo *pcpu;
+ int cpu;
+ unsigned long flags;
+
+ if (val == CPUFREQ_POSTCHANGE) {
+ pcpu = &per_cpu(cpuinfo, freq->cpu);
+ if (!down_read_trylock(&pcpu->enable_sem))
+ return 0;
+ if (!pcpu->governor_enabled) {
+ up_read(&pcpu->enable_sem);
+ return 0;
+ }
+
+ for_each_cpu(cpu, pcpu->policy->cpus) {
+ struct cpufreq_interactive_cpuinfo *pjcpu =
+ &per_cpu(cpuinfo, cpu);
+ spin_lock_irqsave(&pjcpu->load_lock, flags);
+ update_load(cpu);
+ spin_unlock_irqrestore(&pjcpu->load_lock, flags);
+ }
+
+ up_read(&pcpu->enable_sem);
}
-}
-
-static void cpufreq_interactive_input_open(struct work_struct *w)
-{
- struct cpufreq_interactive_inputopen *io =
- container_of(w, struct cpufreq_interactive_inputopen,
- inputopen_work);
- int error;
-
- error = input_open_device(io->handle);
- if (error)
- input_unregister_handle(io->handle);
-}
-
-static int cpufreq_interactive_input_connect(struct input_handler *handler,
- struct input_dev *dev,
- const struct input_device_id *id)
-{
- struct input_handle *handle;
- int error;
-
- pr_info("%s: connect to %s\n", __func__, dev->name);
- handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
- if (!handle)
- return -ENOMEM;
-
- handle->dev = dev;
- handle->handler = handler;
- handle->name = "cpufreq_interactive";
-
- error = input_register_handle(handle);
- if (error)
- goto err;
-
- inputopen.handle = handle;
- queue_work(down_wq, &inputopen.inputopen_work);
return 0;
-err:
- kfree(handle);
- return error;
}
-static void cpufreq_interactive_input_disconnect(struct input_handle *handle)
+static struct notifier_block cpufreq_notifier_block = {
+ .notifier_call = cpufreq_interactive_notifier,
+};
+
+static ssize_t show_target_loads(
+ struct kobject *kobj, struct attribute *attr, char *buf)
{
- input_close_device(handle);
- input_unregister_handle(handle);
- kfree(handle);
+ int i;
+ ssize_t ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&target_loads_lock, flags);
+
+ for (i = 0; i < ntarget_loads; i++)
+ ret += sprintf(buf + ret, "%u%s", target_loads[i],
+ i & 0x1 ? ":" : " ");
+
+ ret += sprintf(buf + ret, "\n");
+ spin_unlock_irqrestore(&target_loads_lock, flags);
+ return ret;
}
-static const struct input_device_id cpufreq_interactive_ids[] = {
- {
- .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
- INPUT_DEVICE_ID_MATCH_ABSBIT,
- .evbit = { BIT_MASK(EV_ABS) },
- .absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
- BIT_MASK(ABS_MT_POSITION_X) |
- BIT_MASK(ABS_MT_POSITION_Y) },
- }, /* multi-touch touchscreen */
- {
- .flags = INPUT_DEVICE_ID_MATCH_KEYBIT |
- INPUT_DEVICE_ID_MATCH_ABSBIT,
- .keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
- .absbit = { [BIT_WORD(ABS_X)] =
- BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
- }, /* touchpad */
- { },
-};
+static ssize_t store_target_loads(
+ struct kobject *kobj, struct attribute *attr, const char *buf,
+ size_t count)
+{
+ int ret;
+ const char *cp;
+ unsigned int *new_target_loads = NULL;
+ int ntokens = 1;
+ int i;
+ unsigned long flags;
-static struct input_handler cpufreq_interactive_input_handler = {
- .event = cpufreq_interactive_input_event,
- .connect = cpufreq_interactive_input_connect,
- .disconnect = cpufreq_interactive_input_disconnect,
- .name = "cpufreq_interactive",
- .id_table = cpufreq_interactive_ids,
-};
+ cp = buf;
+ while ((cp = strpbrk(cp + 1, " :")))
+ ntokens++;
+
+ if (!(ntokens & 0x1))
+ goto err_inval;
+
+ new_target_loads = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
+ if (!new_target_loads) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ cp = buf;
+ i = 0;
+ while (i < ntokens) {
+ if (sscanf(cp, "%u", &new_target_loads[i++]) != 1)
+ goto err_inval;
+
+ cp = strpbrk(cp, " :");
+ if (!cp)
+ break;
+ cp++;
+ }
+
+ if (i != ntokens)
+ goto err_inval;
+
+ spin_lock_irqsave(&target_loads_lock, flags);
+ if (target_loads != default_target_loads)
+ kfree(target_loads);
+ target_loads = new_target_loads;
+ ntarget_loads = ntokens;
+ spin_unlock_irqrestore(&target_loads_lock, flags);
+ return count;
+
+err_inval:
+ ret = -EINVAL;
+err:
+ kfree(new_target_loads);
+ return ret;
+}
+
+static struct global_attr target_loads_attr =
+ __ATTR(target_loads, S_IRUGO | S_IWUSR,
+ show_target_loads, store_target_loads);
static ssize_t show_hispeed_freq(struct kobject *kobj,
struct attribute *attr, char *buf)
{
- return sprintf(buf, "%llu\n", hispeed_freq);
+ return sprintf(buf, "%u\n", hispeed_freq);
}
static ssize_t store_hispeed_freq(struct kobject *kobj,
@@ -628,9 +666,9 @@
size_t count)
{
int ret;
- u64 val;
+	unsigned long val;
- ret = strict_strtoull(buf, 0, &val);
+ ret = strict_strtoul(buf, 0, &val);
if (ret < 0)
return ret;
hispeed_freq = val;
@@ -729,26 +767,28 @@
static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
show_timer_rate, store_timer_rate);
-static ssize_t show_input_boost(struct kobject *kobj, struct attribute *attr,
- char *buf)
+static ssize_t show_timer_slack(
+ struct kobject *kobj, struct attribute *attr, char *buf)
{
- return sprintf(buf, "%u\n", input_boost_val);
+ return sprintf(buf, "%d\n", timer_slack_val);
}
-static ssize_t store_input_boost(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t count)
+static ssize_t store_timer_slack(
+ struct kobject *kobj, struct attribute *attr, const char *buf,
+ size_t count)
{
int ret;
-	unsigned long val;
+	long val;
- ret = strict_strtoul(buf, 0, &val);
+ ret = kstrtol(buf, 10, &val);
if (ret < 0)
return ret;
- input_boost_val = val;
+
+ timer_slack_val = val;
return count;
}
-define_one_global_rw(input_boost);
+define_one_global_rw(timer_slack);
static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
char *buf)
@@ -790,6 +830,7 @@
if (ret < 0)
return ret;
+ boostpulse_endtime = ktime_to_us(ktime_get()) + boostpulse_duration_val;
trace_cpufreq_interactive_boost("pulse");
cpufreq_interactive_boost();
return count;
@@ -798,15 +839,40 @@
static struct global_attr boostpulse =
__ATTR(boostpulse, 0200, NULL, store_boostpulse);
+static ssize_t show_boostpulse_duration(
+ struct kobject *kobj, struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", boostpulse_duration_val);
+}
+
+static ssize_t store_boostpulse_duration(
+ struct kobject *kobj, struct attribute *attr, const char *buf,
+ size_t count)
+{
+ int ret;
+ unsigned long val;
+
+ ret = kstrtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ boostpulse_duration_val = val;
+ return count;
+}
+
+define_one_global_rw(boostpulse_duration);
+
static struct attribute *interactive_attributes[] = {
+ &target_loads_attr.attr,
&hispeed_freq_attr.attr,
&go_hispeed_load_attr.attr,
&above_hispeed_delay.attr,
&min_sample_time_attr.attr,
&timer_rate_attr.attr,
- &input_boost.attr,
+ &timer_slack.attr,
&boost.attr,
&boostpulse.attr,
+ &boostpulse_duration.attr,
NULL,
};
@@ -815,102 +881,6 @@
.name = "interactive",
};
-static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
- unsigned int event)
-{
- int rc;
- unsigned int j;
- struct cpufreq_interactive_cpuinfo *pcpu;
- struct cpufreq_frequency_table *freq_table;
-
- switch (event) {
- case CPUFREQ_GOV_START:
- if (!cpu_online(policy->cpu))
- return -EINVAL;
-
- freq_table =
- cpufreq_frequency_get_table(policy->cpu);
-
- for_each_cpu(j, policy->cpus) {
- pcpu = &per_cpu(cpuinfo, j);
- pcpu->policy = policy;
- pcpu->target_freq = policy->cur;
- pcpu->freq_table = freq_table;
- pcpu->target_set_time_in_idle =
- get_cpu_idle_time_us(j,
- &pcpu->target_set_time);
- pcpu->floor_freq = pcpu->target_freq;
- pcpu->floor_validate_time =
- pcpu->target_set_time;
- pcpu->hispeed_validate_time =
- pcpu->target_set_time;
- pcpu->governor_enabled = 1;
- pcpu->idle_exit_time = pcpu->target_set_time;
- mod_timer(&pcpu->cpu_timer,
- jiffies + usecs_to_jiffies(timer_rate));
- smp_wmb();
- }
-
- if (!hispeed_freq)
- hispeed_freq = policy->max;
-
- /*
- * Do not register the idle hook and create sysfs
- * entries if we have already done so.
- */
- if (atomic_inc_return(&active_count) > 1)
- return 0;
-
- rc = sysfs_create_group(cpufreq_global_kobject,
- &interactive_attr_group);
- if (rc)
- return rc;
-
- rc = input_register_handler(&cpufreq_interactive_input_handler);
- if (rc)
- pr_warn("%s: failed to register input handler\n",
- __func__);
-
- break;
-
- case CPUFREQ_GOV_STOP:
- for_each_cpu(j, policy->cpus) {
- pcpu = &per_cpu(cpuinfo, j);
- pcpu->governor_enabled = 0;
- smp_wmb();
- del_timer_sync(&pcpu->cpu_timer);
-
- /*
- * Reset idle exit time since we may cancel the timer
- * before it can run after the last idle exit time,
- * to avoid tripping the check in idle exit for a timer
- * that is trying to run.
- */
- pcpu->idle_exit_time = 0;
- }
-
- flush_work(&freq_scale_down_work);
- if (atomic_dec_return(&active_count) > 0)
- return 0;
-
- input_unregister_handler(&cpufreq_interactive_input_handler);
- sysfs_remove_group(cpufreq_global_kobject,
- &interactive_attr_group);
-
- break;
-
- case CPUFREQ_GOV_LIMITS:
- if (policy->max < policy->cur)
- __cpufreq_driver_target(policy,
- policy->max, CPUFREQ_RELATION_H);
- else if (policy->min > policy->cur)
- __cpufreq_driver_target(policy,
- policy->min, CPUFREQ_RELATION_L);
- break;
- }
- return 0;
-}
-
static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
unsigned long val,
void *data)
@@ -931,57 +901,148 @@
.notifier_call = cpufreq_interactive_idle_notifier,
};
+static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
+ unsigned int event)
+{
+ int rc;
+ unsigned int j;
+ struct cpufreq_interactive_cpuinfo *pcpu;
+ struct cpufreq_frequency_table *freq_table;
+
+ switch (event) {
+ case CPUFREQ_GOV_START:
+ if (!cpu_online(policy->cpu))
+ return -EINVAL;
+
+ mutex_lock(&gov_lock);
+
+ freq_table =
+ cpufreq_frequency_get_table(policy->cpu);
+ if (!hispeed_freq)
+ hispeed_freq = policy->max;
+
+ for_each_cpu(j, policy->cpus) {
+ unsigned long expires;
+
+ pcpu = &per_cpu(cpuinfo, j);
+ pcpu->policy = policy;
+ pcpu->target_freq = policy->cur;
+ pcpu->freq_table = freq_table;
+ pcpu->floor_freq = pcpu->target_freq;
+ pcpu->floor_validate_time =
+ ktime_to_us(ktime_get());
+ pcpu->hispeed_validate_time =
+ pcpu->floor_validate_time;
+ down_write(&pcpu->enable_sem);
+ expires = jiffies + usecs_to_jiffies(timer_rate);
+ pcpu->cpu_timer.expires = expires;
+ add_timer_on(&pcpu->cpu_timer, j);
+ if (timer_slack_val >= 0) {
+ expires += usecs_to_jiffies(timer_slack_val);
+ pcpu->cpu_slack_timer.expires = expires;
+ add_timer_on(&pcpu->cpu_slack_timer, j);
+ }
+ pcpu->governor_enabled = 1;
+ up_write(&pcpu->enable_sem);
+ }
+
+ /*
+ * Do not register the idle hook and create sysfs
+ * entries if we have already done so.
+ */
+ if (++active_count > 1) {
+ mutex_unlock(&gov_lock);
+ return 0;
+ }
+
+ rc = sysfs_create_group(cpufreq_global_kobject,
+ &interactive_attr_group);
+ if (rc) {
+ mutex_unlock(&gov_lock);
+ return rc;
+ }
+
+ idle_notifier_register(&cpufreq_interactive_idle_nb);
+ cpufreq_register_notifier(
+ &cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
+ mutex_unlock(&gov_lock);
+ break;
+
+ case CPUFREQ_GOV_STOP:
+ mutex_lock(&gov_lock);
+ for_each_cpu(j, policy->cpus) {
+ pcpu = &per_cpu(cpuinfo, j);
+ down_write(&pcpu->enable_sem);
+ pcpu->governor_enabled = 0;
+ del_timer_sync(&pcpu->cpu_timer);
+ del_timer_sync(&pcpu->cpu_slack_timer);
+ up_write(&pcpu->enable_sem);
+ }
+
+ if (--active_count > 0) {
+ mutex_unlock(&gov_lock);
+ return 0;
+ }
+
+ cpufreq_unregister_notifier(
+ &cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
+ idle_notifier_unregister(&cpufreq_interactive_idle_nb);
+ sysfs_remove_group(cpufreq_global_kobject,
+ &interactive_attr_group);
+ mutex_unlock(&gov_lock);
+
+ break;
+
+ case CPUFREQ_GOV_LIMITS:
+ if (policy->max < policy->cur)
+ __cpufreq_driver_target(policy,
+ policy->max, CPUFREQ_RELATION_H);
+ else if (policy->min > policy->cur)
+ __cpufreq_driver_target(policy,
+ policy->min, CPUFREQ_RELATION_L);
+ break;
+ }
+ return 0;
+}
+
+static void cpufreq_interactive_nop_timer(unsigned long data)
+{
+}
+
static int __init cpufreq_interactive_init(void)
{
unsigned int i;
struct cpufreq_interactive_cpuinfo *pcpu;
struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
- go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
- min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
- above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;
- timer_rate = DEFAULT_TIMER_RATE;
-
/* Initialize per-cpu timers */
for_each_possible_cpu(i) {
pcpu = &per_cpu(cpuinfo, i);
- init_timer(&pcpu->cpu_timer);
+ init_timer_deferrable(&pcpu->cpu_timer);
pcpu->cpu_timer.function = cpufreq_interactive_timer;
pcpu->cpu_timer.data = i;
+ init_timer(&pcpu->cpu_slack_timer);
+ pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
+ spin_lock_init(&pcpu->load_lock);
+ init_rwsem(&pcpu->enable_sem);
}
- up_task = kthread_create(cpufreq_interactive_up_task, NULL,
- "kinteractiveup");
- if (IS_ERR(up_task))
- return PTR_ERR(up_task);
+ spin_lock_init(&target_loads_lock);
+ spin_lock_init(&speedchange_cpumask_lock);
+ mutex_init(&gov_lock);
+ speedchange_task =
+ kthread_create(cpufreq_interactive_speedchange_task, NULL,
+ "cfinteractive");
+ if (IS_ERR(speedchange_task))
+ return PTR_ERR(speedchange_task);
- sched_setscheduler_nocheck(up_task, SCHED_FIFO, &param);
- get_task_struct(up_task);
+ sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
+ get_task_struct(speedchange_task);
- /* No rescuer thread, bind to CPU queuing the work for possibly
- warm cache (probably doesn't matter much). */
- down_wq = alloc_workqueue("knteractive_down", 0, 1);
+ /* NB: wake up so the thread does not look hung to the freezer */
+ wake_up_process(speedchange_task);
- if (!down_wq)
- goto err_freeuptask;
-
- INIT_WORK(&freq_scale_down_work,
- cpufreq_interactive_freq_down);
-
- spin_lock_init(&up_cpumask_lock);
- spin_lock_init(&down_cpumask_lock);
- mutex_init(&set_speed_lock);
-
- /* Kick the kthread to idle */
- wake_up_process(up_task);
-
- idle_notifier_register(&cpufreq_interactive_idle_nb);
- INIT_WORK(&inputopen.inputopen_work, cpufreq_interactive_input_open);
return cpufreq_register_governor(&cpufreq_gov_interactive);
-
-err_freeuptask:
- put_task_struct(up_task);
- return -ENOMEM;
}
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
@@ -993,9 +1054,8 @@
static void __exit cpufreq_interactive_exit(void)
{
cpufreq_unregister_governor(&cpufreq_gov_interactive);
- kthread_stop(up_task);
- put_task_struct(up_task);
- destroy_workqueue(down_wq);
+ kthread_stop(speedchange_task);
+ put_task_struct(speedchange_task);
}
module_exit(cpufreq_interactive_exit);
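
The start/stop paths above guard the governor's one-time global state (the sysfs attribute group and the idle/transition notifiers) with the active_count refcount taken under gov_lock, so only the first policy to start performs registration and only the last to stop tears it down. A minimal userspace sketch of that pattern, with a pthread mutex standing in for gov_lock and illustrative stand-ins for the registration calls:

```c
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t gov_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int active_count;

static void register_globals(void)   { puts("register sysfs + notifiers"); }
static void unregister_globals(void) { puts("unregister sysfs + notifiers"); }

static void gov_start(void)
{
	pthread_mutex_lock(&gov_lock);
	/* Only the first policy to start does the global setup. */
	if (++active_count == 1)
		register_globals();
	pthread_mutex_unlock(&gov_lock);
}

static void gov_stop(void)
{
	pthread_mutex_lock(&gov_lock);
	/* Only the last policy to stop tears the global state down. */
	if (--active_count == 0)
		unregister_globals();
	pthread_mutex_unlock(&gov_lock);
}

int main(void)
{
	gov_start();	/* registers */
	gov_start();	/* no-op: already active */
	gov_stop();	/* no-op: one user remains */
	gov_stop();	/* unregisters */
	return 0;
}
```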
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 9a2e901..60bab32 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -531,6 +531,7 @@
result = adreno_dev->gpudev->irq_handler(adreno_dev);
+ device->pwrctrl.irq_last = 1;
if (device->requested_state == KGSL_STATE_NONE) {
if (device->pwrctrl.nap_allowed == true) {
kgsl_pwrctrl_request_state(device, KGSL_STATE_NAP);
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 479f9b8..61ea916 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -1122,6 +1122,7 @@
ret = 0;
done:
+ device->pwrctrl.irq_last = 0;
kgsl_trace_issueibcmds(device, context ? context->id : 0, ibdesc,
numibs, *timestamp, flags, ret,
drawctxt ? drawctxt->type : 0);
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index ef22c41..b124257 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -18,6 +18,7 @@
#include <mach/msm_iomap.h>
#include <mach/msm_bus.h>
#include <linux/ktime.h>
+#include <linux/delay.h>
#include "kgsl.h"
#include "kgsl_pwrscale.h"
@@ -33,6 +34,16 @@
#define UPDATE_BUSY_VAL 1000000
#define UPDATE_BUSY 50
+/*
+ * Expected delay for post-interrupt processing on A3xx.
+ * The actual delay may be longer; gradually increase the
+ * delay to compensate. If the GPU isn't done by the max
+ * delay, it's working on something other than just the
+ * final command sequence, so stop waiting for it to be idle.
+ */
+#define INIT_UDELAY 200
+#define MAX_UDELAY 2000
+
struct clk_pair {
const char *name;
uint map;
@@ -1069,6 +1080,8 @@
*/
void kgsl_idle_check(struct work_struct *work)
{
+ int delay = INIT_UDELAY;
+ int requested_state;
struct kgsl_device *device = container_of(work, struct kgsl_device,
idle_check_ws);
WARN_ON(device == NULL);
@@ -1081,10 +1094,32 @@
if (device->state == KGSL_STATE_ACTIVE
|| device->state == KGSL_STATE_NAP) {
- if (device->active_cnt > 0 || kgsl_pwrctrl_sleep(device) != 0) {
+ /*
+ * If no user is explicitly trying to use the GPU
+ * (active_cnt is zero), then loop with increasing delay,
+ * waiting for the GPU to become idle.
+ */
+ while (!device->active_cnt && delay < MAX_UDELAY) {
+ requested_state = device->requested_state;
+ if (!kgsl_pwrctrl_sleep(device))
+ break;
+ /*
+ * If no new commands have been issued since the
+ * last interrupt, stay in this loop waiting for
+ * the GPU to become idle.
+ */
+ if (!device->pwrctrl.irq_last)
+ break;
+ kgsl_pwrctrl_request_state(device, requested_state);
+ mutex_unlock(&device->mutex);
+ udelay(delay);
+ delay *= 2;
+ mutex_lock(&device->mutex);
+ }
- kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
+ if (device->state == KGSL_STATE_ACTIVE) {
mod_timer(&device->idle_timer,
jiffies +
device->pwrctrl.interval_timeout);
@@ -1098,6 +1133,8 @@
kgsl_pwrctrl_busy_time(device, true);
device->pwrctrl.clk_stats.no_nap_cnt = 0;
}
+ } else {
+ device->pwrctrl.irq_last = 0;
}
} else if (device->state & (KGSL_STATE_HUNG |
KGSL_STATE_DUMP_AND_FT)) {
@@ -1484,10 +1521,10 @@
(device->state == KGSL_STATE_ACTIVE &&
device->requested_state == KGSL_STATE_NONE)) {
kgsl_pwrctrl_request_state(device, KGSL_STATE_NAP);
- if (kgsl_pwrctrl_sleep(device) != 0)
- mod_timer(&device->idle_timer,
- jiffies
- + device->pwrctrl.interval_timeout);
+ if (kgsl_pwrctrl_sleep(device) && device->pwrctrl.irq_last) {
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_NAP);
+ queue_work(device->work_queue, &device->idle_check_ws);
+ }
}
device->active_cnt--;
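
kgsl_idle_check() above polls for idleness with a bounded exponential backoff: start at INIT_UDELAY, double after each failed attempt, and give up once the delay reaches MAX_UDELAY. A runnable userspace sketch of the same loop shape; gpu_is_idle() is a hypothetical stub, and the kernel version additionally drops the device mutex around the delay and breaks out early when new commands clear pwrctrl.irq_last:

```c
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define INIT_UDELAY 200
#define MAX_UDELAY  2000

static int polls;
static bool gpu_is_idle(void)
{
	return ++polls >= 3;	/* stub: report idle on the third poll */
}

int main(void)
{
	int delay = INIT_UDELAY;

	while (delay < MAX_UDELAY) {
		if (gpu_is_idle()) {
			printf("idle after %d polls, delay now %d us\n",
			       polls, delay);
			return 0;
		}
		usleep(delay);		/* udelay(delay) in the driver */
		delay *= 2;		/* 200 -> 400 -> 800 -> 1600 */
	}
	puts("still busy at max delay; stop waiting for idle");
	return 0;
}
```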
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h
index bb45829..b3e8702 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.h
+++ b/drivers/gpu/msm/kgsl_pwrctrl.h
@@ -91,6 +91,7 @@
struct pm_qos_request pm_qos_req_dma;
unsigned int pm_qos_latency;
unsigned int step_mul;
+ unsigned int irq_last;
};
void kgsl_pwrctrl_irq(struct kgsl_device *device, int state);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index d880016..1ee9c67 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -2394,6 +2394,7 @@
if (inst->capability.capability_set) {
if (msm_vp8_low_tier &&
+ inst->core->hfi_type == VIDC_HFI_VENUS &&
inst->fmts[OUTPUT_PORT]->fourcc == V4L2_PIX_FMT_VP8) {
capability->width.max = DEFAULT_WIDTH;
capability->height.max = DEFAULT_HEIGHT;
diff --git a/drivers/media/platform/msm/wfd/wfd-ioctl.c b/drivers/media/platform/msm/wfd/wfd-ioctl.c
index af3cd69..b1b1980 100644
--- a/drivers/media/platform/msm/wfd/wfd-ioctl.c
+++ b/drivers/media/platform/msm/wfd/wfd-ioctl.c
@@ -1006,6 +1006,7 @@
spin_unlock_irqrestore(&inst->inst_lock, flags);
WFD_MSG_DBG("Calling videobuf_streamoff\n");
vb2_streamoff(&inst->vid_bufq, i);
+ wake_up(&inst->event_handler.wait);
return 0;
}
static int wfdioc_dqbuf(struct file *filp, void *fh,
@@ -1545,14 +1546,22 @@
unsigned int wfd_poll(struct file *filp, struct poll_table_struct *pt)
{
struct wfd_inst *inst = file_to_inst(filp);
- unsigned int flags = 0;
+ unsigned int poll_flags = 0;
+ unsigned long flags;
+ bool streamoff = false;
poll_wait(filp, &inst->event_handler.wait, pt);
- if (v4l2_event_pending(&inst->event_handler))
- flags |= POLLPRI;
+ spin_lock_irqsave(&inst->inst_lock, flags);
+ streamoff = inst->streamoff;
+ spin_unlock_irqrestore(&inst->inst_lock, flags);
- return flags;
+ if (v4l2_event_pending(&inst->event_handler))
+ poll_flags |= POLLPRI;
+ if (streamoff)
+ poll_flags |= POLLERR;
+
+ return poll_flags;
}
static const struct v4l2_file_operations g_wfd_fops = {
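
wfd_poll() now snapshots the streamoff flag under inst_lock and only then builds the poll mask, so a poller blocked across wfdioc_streamoff() can be woken and handed POLLERR. A small userspace model of that snapshot-then-compute pattern, with illustrative names:

```c
#include <poll.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_spinlock_t inst_lock;
static bool inst_streamoff;

static unsigned int wfd_poll_flags(bool event_pending)
{
	unsigned int poll_flags = 0;
	bool streamoff;

	pthread_spin_lock(&inst_lock);
	streamoff = inst_streamoff;	/* snapshot under the lock */
	pthread_spin_unlock(&inst_lock);

	if (event_pending)
		poll_flags |= POLLPRI;
	if (streamoff)			/* signal pollers blocked across streamoff */
		poll_flags |= POLLERR;
	return poll_flags;
}

int main(void)
{
	pthread_spin_init(&inst_lock, PTHREAD_PROCESS_PRIVATE);
	inst_streamoff = true;
	printf("poll mask: %#x\n", wfd_poll_flags(false));	/* POLLERR */
	pthread_spin_destroy(&inst_lock);
	return 0;
}
```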
diff --git a/drivers/power/qpnp-charger.c b/drivers/power/qpnp-charger.c
index 34adb6a..6a2ce8d 100644
--- a/drivers/power/qpnp-charger.c
+++ b/drivers/power/qpnp-charger.c
@@ -89,6 +89,7 @@
#define BUCK_VCHG_OV 0x77
#define BUCK_TEST_SMBC_MODES 0xE6
#define SEC_ACCESS 0xD0
+#define BAT_IF_VREF_BAT_THM_CTRL 0x4A
#define REG_OFFSET_PERP_SUBTYPE 0x05
/* SMBB peripheral subtype values */
@@ -123,6 +124,8 @@
#define CHGR_ON_BAT_FORCE_BIT BIT(0)
#define USB_VALID_DEB_20MS 0x03
#define BUCK_VBAT_REG_NODE_SEL_BIT BIT(0)
+#define VREF_BATT_THERM_FORCE_ON 0xC0
+#define VREF_BAT_THM_ENABLED_FSM 0x80
/* Interrupt definitions */
/* smbb_chg_interrupts */
@@ -1756,6 +1759,15 @@
case SMBB_BAT_IF_SUBTYPE:
case SMBBP_BAT_IF_SUBTYPE:
case SMBCL_BAT_IF_SUBTYPE:
+ /* Force on VREF_BAT_THM */
+ rc = qpnp_chg_masked_write(chip,
+ chip->bat_if_base + BAT_IF_VREF_BAT_THM_CTRL,
+ VREF_BATT_THERM_FORCE_ON,
+ VREF_BATT_THERM_FORCE_ON, 1);
+ if (rc) {
+ pr_debug("failed to force on VREF_BAT_THM rc=%d\n", rc);
+ return rc;
+ }
break;
case SMBB_USB_CHGPTH_SUBTYPE:
case SMBBP_USB_CHGPTH_SUBTYPE:
@@ -2203,6 +2215,41 @@
return 0;
}
+static int qpnp_chg_resume(struct device *dev)
+{
+ struct qpnp_chg_chip *chip = dev_get_drvdata(dev);
+ int rc = 0;
+
+ rc = qpnp_chg_masked_write(chip,
+ chip->bat_if_base + BAT_IF_VREF_BAT_THM_CTRL,
+ VREF_BATT_THERM_FORCE_ON,
+ VREF_BATT_THERM_FORCE_ON, 1);
+ if (rc)
+ pr_debug("failed to force on VREF_BAT_THM rc=%d\n", rc);
+
+ return rc;
+}
+
+static int qpnp_chg_suspend(struct device *dev)
+{
+ struct qpnp_chg_chip *chip = dev_get_drvdata(dev);
+ int rc = 0;
+
+ rc = qpnp_chg_masked_write(chip,
+ chip->bat_if_base + BAT_IF_VREF_BAT_THM_CTRL,
+ VREF_BATT_THERM_FORCE_ON,
+ VREF_BAT_THM_ENABLED_FSM, 1);
+ if (rc)
+ pr_debug("failed to enable FSM ctrl VREF_BAT_THM rc=%d\n", rc);
+
+ return rc;
+}
+
+static const struct dev_pm_ops qpnp_bms_pm_ops = {
+ .resume = qpnp_chg_resume,
+ .suspend = qpnp_chg_suspend,
+};
+
static struct spmi_driver qpnp_charger_driver = {
.probe = qpnp_charger_probe,
.remove = __devexit_p(qpnp_charger_remove),
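
The suspend/resume callbacks above are thin wrappers around a masked register write: only the bits selected by the mask change, so resume writes 0xC0 (force VREF_BAT_THM on) and suspend writes 0x80 (hand the reference back to the FSM) into BAT_IF_VREF_BAT_THM_CTRL without disturbing the register's other bits. A userspace model of that read-modify-write, using the constants from the patch; the starting register value is made up:

```c
#include <stdint.h>
#include <stdio.h>

#define VREF_BATT_THERM_FORCE_ON 0xC0
#define VREF_BAT_THM_ENABLED_FSM 0x80

static uint8_t masked_write(uint8_t reg, uint8_t mask, uint8_t val)
{
	return (uint8_t)((reg & ~mask) | (val & mask));
}

int main(void)
{
	uint8_t reg = 0x2A;	/* hypothetical other control bits */

	/* resume: force VREF_BAT_THM on */
	reg = masked_write(reg, VREF_BATT_THERM_FORCE_ON,
			   VREF_BATT_THERM_FORCE_ON);
	printf("resume:  0x%02X\n", reg);	/* 0xEA */

	/* suspend: hand the reference back to the FSM */
	reg = masked_write(reg, VREF_BATT_THERM_FORCE_ON,
			   VREF_BAT_THM_ENABLED_FSM);
	printf("suspend: 0x%02X\n", reg);	/* 0xAA */
	return 0;
}
```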
diff --git a/drivers/thermal/msm8974-tsens.c b/drivers/thermal/msm8974-tsens.c
index 95378c5..991cf2e 100644
--- a/drivers/thermal/msm8974-tsens.c
+++ b/drivers/thermal/msm8974-tsens.c
@@ -333,6 +333,8 @@
else
degc = num/den;
+ pr_debug("raw_code:0x%x, sensor_num:%d, degc:%d\n",
+ adc_code, idx, degc);
return degc;
}
@@ -345,6 +347,8 @@
code = TSENS_THRESHOLD_MAX_CODE;
else if (code < TSENS_THRESHOLD_MIN_CODE)
code = TSENS_THRESHOLD_MIN_CODE;
+ pr_debug("raw_code:0x%x, sensor_num:%d, degc:%d\n",
+ code, idx, degc);
return code;
}
@@ -729,6 +733,7 @@
tsens_calibration_mode = (calib_data[0] & TSENS_8X10_TSENS_CAL_SEL)
>> TSENS_8X10_CAL_SEL_SHIFT;
+ pr_debug("calib mode scheme:%x\n", tsens_calibration_mode);
if ((tsens_calibration_mode == TSENS_TWO_POINT_CALIB) ||
(tsens_calibration_mode == TSENS_ONE_POINT_CALIB_OPTION_2)) {
@@ -783,6 +788,9 @@
int32_t num = 0, den = 0;
tmdev->sensor[i].calib_data_point2 = calib_tsens_point2_data[i];
tmdev->sensor[i].calib_data_point1 = calib_tsens_point1_data[i];
+ pr_debug("sensor:%d - calib_data_point1:0x%x, calib_data_point2:0x%x\n",
+ i, tmdev->sensor[i].calib_data_point1,
+ tmdev->sensor[i].calib_data_point2);
if (tsens_calibration_mode == TSENS_TWO_POINT_CALIB) {
/* slope (m) = adc_code2 - adc_code1 (y2 - y1)/
temp_120_degc - temp_30_degc (x2 - x1) */
@@ -827,6 +835,7 @@
tsens_calibration_mode = (calib_data[5] & TSENS_8X26_TSENS_CAL_SEL)
>> TSENS_8X26_CAL_SEL_SHIFT;
+ pr_debug("calib mode scheme:%x\n", tsens_calibration_mode);
if ((tsens_calibration_mode == TSENS_TWO_POINT_CALIB) ||
(tsens_calibration_mode == TSENS_ONE_POINT_CALIB_OPTION_2)) {
@@ -936,6 +945,9 @@
int32_t num = 0, den = 0;
tmdev->sensor[i].calib_data_point2 = calib_tsens_point2_data[i];
tmdev->sensor[i].calib_data_point1 = calib_tsens_point1_data[i];
+ pr_debug("sensor:%d - calib_data_point1:0x%x, calib_data_point2:0x%x\n",
+ i, tmdev->sensor[i].calib_data_point1,
+ tmdev->sensor[i].calib_data_point2);
if (tsens_calibration_mode == TSENS_TWO_POINT_CALIB) {
/* slope (m) = adc_code2 - adc_code1 (y2 - y1)/
temp_120_degc - temp_30_degc (x2 - x1) */
@@ -976,11 +988,14 @@
TSENS_EEPROM_REDUNDANCY_SEL(tmdev->tsens_calib_addr));
calib_redun_sel = calib_redun_sel & TSENS_QFPROM_BACKUP_REDUN_SEL;
calib_redun_sel >>= TSENS_QFPROM_BACKUP_REDUN_SHIFT;
+ pr_debug("calib_redun_sel:%x\n", calib_redun_sel);
- for (i = 0; i < TSENS_MAIN_CALIB_ADDR_RANGE; i++)
+ for (i = 0; i < TSENS_MAIN_CALIB_ADDR_RANGE; i++) {
calib_data[i] = readl_relaxed(
(TSENS_EEPROM(tmdev->tsens_calib_addr))
+ (i * TSENS_SN_ADDR_OFFSET));
+ pr_debug("calib raw data row%d:0x%x\n", i, calib_data[i]);
+ }
if (calib_redun_sel == TSENS_QFPROM_BACKUP_SEL) {
tsens_calibration_mode = (calib_data[4] & TSENS_CAL_SEL_0_1)
@@ -988,6 +1003,7 @@
temp = (calib_data[5] & TSENS_CAL_SEL_2)
>> TSENS_CAL_SEL_SHIFT_2;
tsens_calibration_mode |= temp;
+ pr_debug("backup calib mode:%x\n", calib_redun_sel);
for (i = 0; i < TSENS_BACKUP_CALIB_ADDR_RANGE; i++)
calib_data_backup[i] = readl_relaxed(
@@ -1073,6 +1089,7 @@
temp = (calib_data[3] & TSENS_CAL_SEL_2)
>> TSENS_CAL_SEL_SHIFT_2;
tsens_calibration_mode |= temp;
+ pr_debug("calib mode scheme:%x\n", tsens_calibration_mode);
if ((tsens_calibration_mode == TSENS_ONE_POINT_CALIB) ||
(tsens_calibration_mode ==
TSENS_ONE_POINT_CALIB_OPTION_2) ||
@@ -1184,6 +1201,7 @@
if ((tsens_calibration_mode == TSENS_ONE_POINT_CALIB_OPTION_2) ||
(tsens_calibration_mode == TSENS_TWO_POINT_CALIB)) {
+ pr_debug("one point calibration calculation\n");
calib_tsens_point1_data[0] =
((((tsens_base1_data) + tsens0_point1) << 2) |
TSENS_BIT_APPEND);
@@ -1261,6 +1279,9 @@
int32_t num = 0, den = 0;
tmdev->sensor[i].calib_data_point2 = calib_tsens_point2_data[i];
tmdev->sensor[i].calib_data_point1 = calib_tsens_point1_data[i];
+ pr_debug("sensor:%d - calib_data_point1:0x%x, calib_data_point2:0x%x\n",
+ i, tmdev->sensor[i].calib_data_point1,
+ tmdev->sensor[i].calib_data_point2);
if (tsens_calibration_mode == TSENS_TWO_POINT_CALIB) {
/* slope (m) = adc_code2 - adc_code1 (y2 - y1)/
temp_120_degc - temp_30_degc (x2 - x1) */
@@ -1273,6 +1294,7 @@
tmdev->sensor[i].offset = (tmdev->sensor[i].calib_data_point1 *
tmdev->tsens_factor) - (TSENS_CAL_DEGC_POINT1 *
tmdev->sensor[i].slope_mul_tsens_factor);
+ pr_debug("offset:%d\n", tmdev->sensor[i].offset);
INIT_WORK(&tmdev->sensor[i].work, notify_uspace_tsens_fn);
tmdev->prev_reading_avail = false;
}
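
For reference alongside the new calibration debug output, the arithmetic being traced is a straight two-point linear fit: the in-code comments give slope (m) = (y2 - y1)/(x2 - x1) across the 30 degC and 120 degC calibration points, and the offset line computes point1 * factor - 30 degC * slope. A self-contained worked example with made-up ADC codes and factor; the final conversion back to degrees is sketched under that same linear model:

```c
#include <stdio.h>

#define TSENS_CAL_DEGC_POINT1 30
#define TSENS_CAL_DEGC_POINT2 120
#define TSENS_FACTOR          1000

int main(void)
{
	int point1 = 500, point2 = 780;	/* hypothetical calib ADC codes */

	/* slope (m) = (y2 - y1) / (x2 - x1), kept scaled by the factor */
	int slope = ((point2 - point1) * TSENS_FACTOR) /
		    (TSENS_CAL_DEGC_POINT2 - TSENS_CAL_DEGC_POINT1);
	/* offset = point1 * factor - 30 degC * slope, as in the driver */
	int offset = point1 * TSENS_FACTOR -
		     TSENS_CAL_DEGC_POINT1 * slope;

	int code = 640;			/* a reading between the points */
	int degc = (code * TSENS_FACTOR - offset) / slope;

	printf("slope=%d offset=%d -> code %d is about %d degC\n",
	       slope, offset, code, degc);
	return 0;
}
```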
diff --git a/drivers/video/msm/mdss/mdss_mdp.h b/drivers/video/msm/mdss/mdss_mdp.h
index 6597329..6018e6f 100644
--- a/drivers/video/msm/mdss/mdss_mdp.h
+++ b/drivers/video/msm/mdss/mdss_mdp.h
@@ -122,6 +122,7 @@
u32 opmode;
u32 flush_bits;
+ bool is_video_mode;
u32 play_cnt;
u32 vsync_cnt;
u32 underrun_cnt;
@@ -327,6 +328,7 @@
struct list_head overlay_list;
struct list_head pipes_used;
struct list_head pipes_cleanup;
+ bool mixer_swap;
};
#define is_vig_pipe(_pipe_id_) ((_pipe_id_) <= MDSS_MDP_SSPP_VIG2)
diff --git a/drivers/video/msm/mdss/mdss_mdp_ctl.c b/drivers/video/msm/mdss/mdss_mdp_ctl.c
index 5ec215d..d9b1f0c 100644
--- a/drivers/video/msm/mdss/mdss_mdp_ctl.c
+++ b/drivers/video/msm/mdss/mdss_mdp_ctl.c
@@ -651,15 +651,18 @@
}
ctl->mfd = mfd;
ctl->panel_data = pdata;
+ ctl->is_video_mode = false;
switch (pdata->panel_info.type) {
case EDP_PANEL:
+ ctl->is_video_mode = true;
ctl->intf_num = MDSS_MDP_INTF0;
ctl->intf_type = MDSS_INTF_EDP;
ctl->opmode = MDSS_MDP_CTL_OP_VIDEO_MODE;
ctl->start_fnc = mdss_mdp_video_start;
break;
case MIPI_VIDEO_PANEL:
+ ctl->is_video_mode = true;
if (pdata->panel_info.pdest == DISPLAY_1)
ctl->intf_num = MDSS_MDP_INTF1;
else
@@ -678,6 +681,7 @@
ctl->start_fnc = mdss_mdp_cmd_start;
break;
case DTV_PANEL:
+ ctl->is_video_mode = true;
ctl->intf_num = MDSS_MDP_INTF3;
ctl->intf_type = MDSS_INTF_HDMI;
ctl->opmode = MDSS_MDP_CTL_OP_VIDEO_MODE;
@@ -1193,16 +1197,19 @@
struct mdss_mdp_mixer *mdss_mdp_mixer_get(struct mdss_mdp_ctl *ctl, int mux)
{
struct mdss_mdp_mixer *mixer = NULL;
+ struct mdss_overlay_private *mdp5_data;
if (!ctl)
return NULL;
+ mdp5_data = mfd_to_mdp5_data(ctl->mfd);
switch (mux) {
case MDSS_MDP_MIXER_MUX_DEFAULT:
case MDSS_MDP_MIXER_MUX_LEFT:
- mixer = ctl->mixer_left;
+ mixer = mdp5_data->mixer_swap ?
+ ctl->mixer_right : ctl->mixer_left;
break;
case MDSS_MDP_MIXER_MUX_RIGHT:
- mixer = ctl->mixer_right;
+ mixer = mdp5_data->mixer_swap ?
+ ctl->mixer_left : ctl->mixer_right;
break;
}
diff --git a/drivers/video/msm/mdss/mdss_mdp_overlay.c b/drivers/video/msm/mdss/mdss_mdp_overlay.c
index 97704bf..024b73f 100644
--- a/drivers/video/msm/mdss/mdss_mdp_overlay.c
+++ b/drivers/video/msm/mdss/mdss_mdp_overlay.c
@@ -38,6 +38,7 @@
static atomic_t ov_active_panels = ATOMIC_INIT(0);
static int mdss_mdp_overlay_free_fb_pipe(struct msm_fb_data_type *mfd);
+static int mdss_mdp_overlay_fb_parse_dt(struct msm_fb_data_type *mfd);
static int mdss_mdp_overlay_get(struct msm_fb_data_type *mfd,
struct mdp_overlay *req)
@@ -599,6 +600,7 @@
list_move(&pipe->cleanup_list, &destroy_pipes);
mdss_mdp_overlay_free_buf(&pipe->back_buf);
mdss_mdp_overlay_free_buf(&pipe->front_buf);
+ pipe->mfd = NULL;
}
list_for_each_entry(pipe, &mdp5_data->pipes_used, used_list) {
@@ -1003,6 +1005,41 @@
return ret;
}
+static int mdss_mdp_overlay_force_cleanup(struct msm_fb_data_type *mfd)
+{
+ struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+ struct mdss_mdp_ctl *ctl = mdp5_data->ctl;
+ int ret;
+
+ pr_debug("forcing cleanup to unset dma pipes on fb%d\n", mfd->index);
+
+ /*
+ * Video mode panels require the layer to be unstaged and a
+ * vsync to pass before the buffer can be released.
+ */
+ if (ctl && ctl->is_video_mode) {
+ ret = mdss_mdp_display_commit(ctl, NULL);
+ if (!IS_ERR_VALUE(ret))
+ mdss_mdp_display_wait4comp(ctl);
+ }
+
+ ret = mdss_mdp_overlay_cleanup(mfd);
+
+ return ret;
+}
+
+static void mdss_mdp_overlay_force_dma_cleanup(struct mdss_data_type *mdata)
+{
+ struct mdss_mdp_pipe *pipe;
+ int i;
+
+ for (i = 0; i < mdata->ndma_pipes; i++) {
+ pipe = mdata->dma_pipes + i;
+ if (atomic_read(&pipe->ref_cnt) && pipe->mfd)
+ mdss_mdp_overlay_force_cleanup(pipe->mfd);
+ }
+}
+
static int mdss_mdp_overlay_play(struct msm_fb_data_type *mfd,
struct msmfb_overlay_data *req)
{
@@ -1027,6 +1064,8 @@
}
if (req->id & MDSS_MDP_ROT_SESSION_MASK) {
+ mdss_mdp_overlay_force_dma_cleanup(mfd_to_mdata(mfd));
+
ret = mdss_mdp_overlay_rotate(mfd, req);
} else if (req->id == BORDERFILL_NDX) {
pr_debug("borderfill enable\n");
@@ -1951,6 +1990,10 @@
}
mfd->mdp.private1 = mdp5_data;
+ rc = mdss_mdp_overlay_fb_parse_dt(mfd);
+ if (rc)
+ return rc;
+
rc = sysfs_create_group(&dev->kobj, &vsync_fs_attr_group);
if (rc) {
pr_err("vsync sysfs group creation failed, ret=%d\n", rc);
@@ -1972,3 +2015,18 @@
kfree(mdp5_data);
return rc;
}
+
+static int mdss_mdp_overlay_fb_parse_dt(struct msm_fb_data_type *mfd)
+{
+ struct platform_device *pdev = mfd->pdev;
+ struct mdss_overlay_private *mdp5_mdata = mfd_to_mdp5_data(mfd);
+
+ mdp5_mdata->mixer_swap = of_property_read_bool(pdev->dev.of_node,
+ "qcom,mdss-mixer-swap");
+ if (mdp5_mdata->mixer_swap) {
+ pr_info("mixer swap is enabled for fb device=%s\n",
+ pdev->name);
+ }
+
+ return 0;
+}
diff --git a/include/trace/events/cpufreq_interactive.h b/include/trace/events/cpufreq_interactive.h
index ea83664..951e6ca 100644
--- a/include/trace/events/cpufreq_interactive.h
+++ b/include/trace/events/cpufreq_interactive.h
@@ -28,13 +28,7 @@
__entry->actualfreq)
);
-DEFINE_EVENT(set, cpufreq_interactive_up,
- TP_PROTO(u32 cpu_id, unsigned long targfreq,
- unsigned long actualfreq),
- TP_ARGS(cpu_id, targfreq, actualfreq)
-);
-
-DEFINE_EVENT(set, cpufreq_interactive_down,
+DEFINE_EVENT(set, cpufreq_interactive_setspeed,
TP_PROTO(u32 cpu_id, unsigned long targfreq,
unsigned long actualfreq),
TP_ARGS(cpu_id, targfreq, actualfreq)
@@ -42,44 +36,50 @@
DECLARE_EVENT_CLASS(loadeval,
TP_PROTO(unsigned long cpu_id, unsigned long load,
- unsigned long curfreq, unsigned long targfreq),
- TP_ARGS(cpu_id, load, curfreq, targfreq),
+ unsigned long curtarg, unsigned long curactual,
+ unsigned long newtarg),
+ TP_ARGS(cpu_id, load, curtarg, curactual, newtarg),
TP_STRUCT__entry(
__field(unsigned long, cpu_id )
__field(unsigned long, load )
- __field(unsigned long, curfreq )
- __field(unsigned long, targfreq )
+ __field(unsigned long, curtarg )
+ __field(unsigned long, curactual )
+ __field(unsigned long, newtarg )
),
TP_fast_assign(
__entry->cpu_id = cpu_id;
__entry->load = load;
- __entry->curfreq = curfreq;
- __entry->targfreq = targfreq;
+ __entry->curtarg = curtarg;
+ __entry->curactual = curactual;
+ __entry->newtarg = newtarg;
),
- TP_printk("cpu=%lu load=%lu cur=%lu targ=%lu",
- __entry->cpu_id, __entry->load, __entry->curfreq,
- __entry->targfreq)
+ TP_printk("cpu=%lu load=%lu cur=%lu actual=%lu targ=%lu",
+ __entry->cpu_id, __entry->load, __entry->curtarg,
+ __entry->curactual, __entry->newtarg)
);
DEFINE_EVENT(loadeval, cpufreq_interactive_target,
TP_PROTO(unsigned long cpu_id, unsigned long load,
- unsigned long curfreq, unsigned long targfreq),
- TP_ARGS(cpu_id, load, curfreq, targfreq)
+ unsigned long curtarg, unsigned long curactual,
+ unsigned long newtarg),
+ TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
);
DEFINE_EVENT(loadeval, cpufreq_interactive_already,
TP_PROTO(unsigned long cpu_id, unsigned long load,
- unsigned long curfreq, unsigned long targfreq),
- TP_ARGS(cpu_id, load, curfreq, targfreq)
+ unsigned long curtarg, unsigned long curactual,
+ unsigned long newtarg),
+ TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
);
DEFINE_EVENT(loadeval, cpufreq_interactive_notyet,
TP_PROTO(unsigned long cpu_id, unsigned long load,
- unsigned long curfreq, unsigned long targfreq),
- TP_ARGS(cpu_id, load, curfreq, targfreq)
+ unsigned long curtarg, unsigned long curactual,
+ unsigned long newtarg),
+ TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
);
TRACE_EVENT(cpufreq_interactive_boost,
diff --git a/sound/soc/codecs/wcd9304-tables.c b/sound/soc/codecs/wcd9304-tables.c
index 83c0c1d..7ec0152 100644
--- a/sound/soc/codecs/wcd9304-tables.c
+++ b/sound/soc/codecs/wcd9304-tables.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -235,8 +235,24 @@
[SITAR_A_CDC_RX1_B4_CTL] = SITAR_A_CDC_RX1_B4_CTL__POR,
[SITAR_A_CDC_RX1_B5_CTL] = SITAR_A_CDC_RX1_B5_CTL__POR,
[SITAR_A_CDC_RX1_B6_CTL] = SITAR_A_CDC_RX1_B6_CTL__POR,
+ [SITAR_A_CDC_RX2_B1_CTL] = SITAR_A_CDC_RX2_B1_CTL__POR,
+ [SITAR_A_CDC_RX2_B2_CTL] = SITAR_A_CDC_RX2_B2_CTL__POR,
+ [SITAR_A_CDC_RX2_B3_CTL] = SITAR_A_CDC_RX2_B3_CTL__POR,
+ [SITAR_A_CDC_RX2_B4_CTL] = SITAR_A_CDC_RX2_B4_CTL__POR,
+ [SITAR_A_CDC_RX2_B5_CTL] = SITAR_A_CDC_RX2_B5_CTL__POR,
+ [SITAR_A_CDC_RX2_B6_CTL] = SITAR_A_CDC_RX2_B6_CTL__POR,
+ [SITAR_A_CDC_RX3_B1_CTL] = SITAR_A_CDC_RX3_B1_CTL__POR,
+ [SITAR_A_CDC_RX3_B2_CTL] = SITAR_A_CDC_RX3_B2_CTL__POR,
+ [SITAR_A_CDC_RX3_B3_CTL] = SITAR_A_CDC_RX3_B3_CTL__POR,
+ [SITAR_A_CDC_RX3_B4_CTL] = SITAR_A_CDC_RX3_B4_CTL__POR,
+ [SITAR_A_CDC_RX3_B5_CTL] = SITAR_A_CDC_RX3_B5_CTL__POR,
+ [SITAR_A_CDC_RX3_B6_CTL] = SITAR_A_CDC_RX3_B6_CTL__POR,
[SITAR_A_CDC_RX1_VOL_CTL_B1_CTL] = SITAR_A_CDC_RX1_VOL_CTL_B1_CTL__POR,
[SITAR_A_CDC_RX1_VOL_CTL_B2_CTL] = SITAR_A_CDC_RX1_VOL_CTL_B2_CTL__POR,
+ [SITAR_A_CDC_RX2_VOL_CTL_B1_CTL] = SITAR_A_CDC_RX2_VOL_CTL_B1_CTL__POR,
+ [SITAR_A_CDC_RX2_VOL_CTL_B2_CTL] = SITAR_A_CDC_RX2_VOL_CTL_B2_CTL__POR,
+ [SITAR_A_CDC_RX3_VOL_CTL_B1_CTL] = SITAR_A_CDC_RX3_VOL_CTL_B1_CTL__POR,
+ [SITAR_A_CDC_RX3_VOL_CTL_B2_CTL] = SITAR_A_CDC_RX3_VOL_CTL_B2_CTL__POR,
[SITAR_A_CDC_CLK_ANC_RESET_CTL] = SITAR_A_CDC_CLK_ANC_RESET_CTL__POR,
[SITAR_A_CDC_CLK_RX_RESET_CTL] = SITAR_A_CDC_CLK_RX_RESET_CTL__POR,
[SITAR_A_CDC_CLK_TX_RESET_B1_CTL] =
@@ -322,6 +338,15 @@
[SITAR_A_CDC_COMP1_SHUT_DOWN_STATUS] =
SITAR_A_CDC_COMP1_SHUT_DOWN_STATUS__POR,
[SITAR_A_CDC_COMP1_FS_CFG] = SITAR_A_CDC_COMP1_FS_CFG__POR,
+ [SITAR_A_CDC_COMP2_B1_CTL] = SITAR_A_CDC_COMP2_B1_CTL__POR,
+ [SITAR_A_CDC_COMP2_B2_CTL] = SITAR_A_CDC_COMP2_B2_CTL__POR,
+ [SITAR_A_CDC_COMP2_B3_CTL] = SITAR_A_CDC_COMP2_B3_CTL__POR,
+ [SITAR_A_CDC_COMP2_B4_CTL] = SITAR_A_CDC_COMP2_B4_CTL__POR,
+ [SITAR_A_CDC_COMP2_B5_CTL] = SITAR_A_CDC_COMP2_B5_CTL__POR,
+ [SITAR_A_CDC_COMP2_B6_CTL] = SITAR_A_CDC_COMP2_B6_CTL__POR,
+ [SITAR_A_CDC_COMP2_SHUT_DOWN_STATUS] =
+ SITAR_A_CDC_COMP2_SHUT_DOWN_STATUS__POR,
+ [SITAR_A_CDC_COMP2_FS_CFG] = SITAR_A_CDC_COMP2_FS_CFG__POR,
[SITAR_A_CDC_CONN_RX1_B1_CTL] = SITAR_A_CDC_CONN_RX1_B1_CTL__POR,
[SITAR_A_CDC_CONN_RX1_B2_CTL] = SITAR_A_CDC_CONN_RX1_B2_CTL__POR,
[SITAR_A_CDC_CONN_RX1_B3_CTL] = SITAR_A_CDC_CONN_RX1_B3_CTL__POR,
diff --git a/sound/soc/codecs/wcd9304.c b/sound/soc/codecs/wcd9304.c
index f5f4e23..616f8d5 100644
--- a/sound/soc/codecs/wcd9304.c
+++ b/sound/soc/codecs/wcd9304.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -81,6 +81,11 @@
#define SITAR_OCP_ATTEMPT 1
+#define COMP_DIGITAL_DB_GAIN_APPLY(a, b) \
+ (((a) <= 0) ? ((a) - (b)) : (a))
+/* The wait time value comes from the codec HW specification */
+#define COMP_BRINGUP_WAIT_TIME 3000
+
#define SITAR_MCLK_RATE_12288KHZ 12288000
#define SITAR_MCLK_RATE_9600KHZ 9600000
@@ -148,6 +153,22 @@
BAND_MAX,
};
+enum {
+ COMPANDER_1 = 0,
+ COMPANDER_2,
+ COMPANDER_MAX,
+};
+
+enum {
+ COMPANDER_FS_8KHZ = 0,
+ COMPANDER_FS_16KHZ,
+ COMPANDER_FS_32KHZ,
+ COMPANDER_FS_48KHZ,
+ COMPANDER_FS_96KHZ,
+ COMPANDER_FS_192KHZ,
+ COMPANDER_FS_MAX,
+};
+
/* Flags to track PA and DAC state.
* PA and DAC should be tracked separately as AUXPGA loopback requires
* only PA to be turned on without DAC being on. */
@@ -158,6 +179,33 @@
SITAR_HPHR_DAC_OFF_ACK
};
+struct comp_sample_dependent_params {
+ u32 peak_det_timeout;
+ u32 rms_meter_div_fact;
+ u32 rms_meter_resamp_fact;
+};
+
+struct comp_dgtl_gain_offset {
+ u8 whole_db_gain;
+ u8 half_db_gain;
+};
+
+static const struct comp_dgtl_gain_offset comp_dgtl_gain[] = {
+ {0, 0},
+ {1, 1},
+ {3, 0},
+ {4, 1},
+ {6, 0},
+ {7, 1},
+ {9, 0},
+ {10, 1},
+ {12, 0},
+ {13, 1},
+ {15, 0},
+ {16, 1},
+ {18, 0},
+};
+
/* Data used by MBHC */
struct mbhc_internal_cal_data {
u16 dce_z;
@@ -273,6 +321,11 @@
/* num of slim ports required */
struct wcd9xxx_codec_dai_data dai[NUM_CODEC_DAIS];
+ /*compander*/
+ int comp_enabled[COMPANDER_MAX];
+ u32 comp_fs[COMPANDER_MAX];
+ u8 comp_gain_offset[NUM_INTERPOLATORS];
+
/* Currently, only used for mbhc purpose, to protect
* concurrent execution of mbhc threaded irq handlers and
* kill race between DAPM and MBHC.But can serve as a
@@ -295,6 +348,47 @@
struct sitar_priv *debug_sitar_priv;
#endif
+static const int comp_rx_path[] = {
+ COMPANDER_2,
+ COMPANDER_1,
+ COMPANDER_1,
+ COMPANDER_MAX,
+};
+
+static const struct comp_sample_dependent_params
+ comp_samp_params[COMPANDER_FS_MAX] = {
+ {
+ .peak_det_timeout = 0x6,
+ .rms_meter_div_fact = 0x9 << 4,
+ .rms_meter_resamp_fact = 0x06,
+ },
+ {
+ .peak_det_timeout = 0x7,
+ .rms_meter_div_fact = 0xA << 4,
+ .rms_meter_resamp_fact = 0x0C,
+ },
+ {
+ .peak_det_timeout = 0x8,
+ .rms_meter_div_fact = 0xB << 4,
+ .rms_meter_resamp_fact = 0x30,
+ },
+ {
+ .peak_det_timeout = 0x9,
+ .rms_meter_div_fact = 0xB << 4,
+ .rms_meter_resamp_fact = 0x28,
+ },
+ {
+ .peak_det_timeout = 0xA,
+ .rms_meter_div_fact = 0xC << 4,
+ .rms_meter_resamp_fact = 0x50,
+ },
+ {
+ .peak_det_timeout = 0xB,
+ .rms_meter_div_fact = 0xC << 4,
+ .rms_meter_resamp_fact = 0x50,
+ },
+};
+
static int sitar_get_anc_slot(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -539,6 +633,268 @@
return 0;
}
+static int sitar_compander_gain_offset(
+ struct snd_soc_codec *codec, u32 enable,
+ unsigned int pa_reg, unsigned int vol_reg,
+ int mask, int event,
+ struct comp_dgtl_gain_offset *gain_offset,
+ int index)
+{
+ unsigned int pa_gain = snd_soc_read(codec, pa_reg);
+ unsigned int digital_vol = snd_soc_read(codec, vol_reg);
+ int pa_mode = pa_gain & mask;
+ struct sitar_priv *sitar = snd_soc_codec_get_drvdata(codec);
+
+ pr_debug("%s: pa_gain(0x%x=0x%x)digital_vol(0x%x=0x%x)event(0x%x) index(%d)\n",
+ __func__, pa_reg, pa_gain, vol_reg, digital_vol, event, index);
+ if (((pa_gain & 0xF) + 1) > ARRAY_SIZE(comp_dgtl_gain) ||
+ (index >= ARRAY_SIZE(sitar->comp_gain_offset))) {
+ pr_err("%s: Out of array boundary\n", __func__);
+ return -EINVAL;
+ }
+
+ if (SND_SOC_DAPM_EVENT_ON(event) && (enable != 0)) {
+ gain_offset->whole_db_gain = COMP_DIGITAL_DB_GAIN_APPLY(
+ (digital_vol - comp_dgtl_gain[pa_gain & 0xF].whole_db_gain),
+ comp_dgtl_gain[pa_gain & 0xF].half_db_gain);
+ pr_debug("%s: listed whole_db_gain:0x%x, adjusted whole_db_gain:0x%x\n",
+ __func__, comp_dgtl_gain[pa_gain & 0xF].whole_db_gain,
+ gain_offset->whole_db_gain);
+ gain_offset->half_db_gain =
+ comp_dgtl_gain[pa_gain & 0xF].half_db_gain;
+ sitar->comp_gain_offset[index] = digital_vol -
+ gain_offset->whole_db_gain;
+ }
+ if (SND_SOC_DAPM_EVENT_OFF(event) && (pa_mode == 0)) {
+ gain_offset->whole_db_gain = digital_vol +
+ sitar->comp_gain_offset[index];
+ pr_debug("%s: listed whole_db_gain:0x%x, adjusted whole_db_gain:0x%x\n",
+ __func__, comp_dgtl_gain[pa_gain & 0xF].whole_db_gain,
+ gain_offset->whole_db_gain);
+ gain_offset->half_db_gain = 0;
+ }
+
+ pr_debug("%s: half_db_gain(%d)whole_db_gain(0x%x)comp_gain_offset[%d](%d)\n",
+ __func__, gain_offset->half_db_gain,
+ gain_offset->whole_db_gain, index,
+ sitar->comp_gain_offset[index]);
+ return 0;
+}
+
+static int sitar_config_gain_compander(
+ struct snd_soc_codec *codec,
+ u32 compander, u32 enable, int event)
+{
+ int value = 0;
+ int mask = 1 << 4;
+ struct comp_dgtl_gain_offset gain_offset = {0, 0};
+ if (compander >= COMPANDER_MAX) {
+ pr_err("%s: Error, invalid compander channel\n", __func__);
+ return -EINVAL;
+ }
+
+ if ((enable == 0) || SND_SOC_DAPM_EVENT_OFF(event))
+ value = 1 << 4;
+
+ if (compander == COMPANDER_1) {
+ sitar_compander_gain_offset(codec, enable,
+ SITAR_A_RX_HPH_L_GAIN,
+ SITAR_A_CDC_RX2_VOL_CTL_B2_CTL,
+ mask, event, &gain_offset, 1);
+ snd_soc_update_bits(codec, SITAR_A_RX_HPH_L_GAIN, mask, value);
+ snd_soc_update_bits(codec, SITAR_A_CDC_RX2_VOL_CTL_B2_CTL,
+ 0xFF, gain_offset.whole_db_gain);
+ snd_soc_update_bits(codec, SITAR_A_CDC_RX2_B6_CTL,
+ 0x02, gain_offset.half_db_gain);
+ sitar_compander_gain_offset(codec, enable,
+ SITAR_A_RX_HPH_R_GAIN,
+ SITAR_A_CDC_RX3_VOL_CTL_B2_CTL,
+ mask, event, &gain_offset, 2);
+ snd_soc_update_bits(codec, SITAR_A_RX_HPH_R_GAIN, mask, value);
+ snd_soc_update_bits(codec, SITAR_A_CDC_RX3_VOL_CTL_B2_CTL,
+ 0xFF, gain_offset.whole_db_gain);
+ snd_soc_update_bits(codec, SITAR_A_CDC_RX3_B6_CTL,
+ 0x02, gain_offset.half_db_gain);
+ } else if (compander == COMPANDER_2) {
+ sitar_compander_gain_offset(codec, enable,
+ SITAR_A_RX_LINE_1_GAIN,
+ SITAR_A_CDC_RX1_VOL_CTL_B2_CTL,
+ mask, event, &gain_offset, 0);
+ snd_soc_update_bits(codec, SITAR_A_RX_LINE_1_GAIN, mask, value);
+ snd_soc_update_bits(codec, SITAR_A_RX_LINE_2_GAIN, mask, value);
+ snd_soc_update_bits(codec, SITAR_A_CDC_RX1_VOL_CTL_B2_CTL,
+ 0xFF, gain_offset.whole_db_gain);
+ snd_soc_update_bits(codec, SITAR_A_CDC_RX1_B6_CTL,
+ 0x02, gain_offset.half_db_gain);
+ }
+ return 0;
+}
+
+static int sitar_get_compander(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ int comp = ((struct soc_multi_mixer_control *)
+ kcontrol->private_value)->shift;
+ struct sitar_priv *sitar = snd_soc_codec_get_drvdata(codec);
+
+ ucontrol->value.integer.value[0] = sitar->comp_enabled[comp];
+
+ return 0;
+}
+
+static int sitar_set_compander(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct sitar_priv *sitar = snd_soc_codec_get_drvdata(codec);
+ int comp = ((struct soc_multi_mixer_control *)
+ kcontrol->private_value)->shift;
+ int value = ucontrol->value.integer.value[0];
+
+ pr_debug("%s: compander #%d enable %d\n",
+ __func__, comp + 1, value);
+ if (value == sitar->comp_enabled[comp]) {
+ pr_debug("%s: compander #%d enable %d no change\n",
+ __func__, comp + 1, value);
+ return 0;
+ }
+ sitar->comp_enabled[comp] = value;
+ return 0;
+}
+
+static int sitar_config_compander(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+ struct sitar_priv *sitar = snd_soc_codec_get_drvdata(codec);
+ u32 rate = sitar->comp_fs[w->shift];
+ u32 value;
+
+ pr_debug("%s: compander #%d enable %d event %d widget name %s\n",
+ __func__, w->shift + 1,
+ sitar->comp_enabled[w->shift], event, w->name);
+ if (sitar->comp_enabled[w->shift] == 0)
+ goto rtn;
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ /* Update compander sample rate */
+ snd_soc_update_bits(codec, SITAR_A_CDC_COMP1_FS_CFG +
+ w->shift * 8, 0x07, rate);
+ /* Enable compander clock */
+ snd_soc_update_bits(codec,
+ SITAR_A_CDC_CLK_RX_B2_CTL,
+ 1 << w->shift,
+ 1 << w->shift);
+ /* Toggle compander reset bits */
+ snd_soc_update_bits(codec,
+ SITAR_A_CDC_CLK_OTHR_RESET_CTL,
+ 1 << w->shift,
+ 1 << w->shift);
+ snd_soc_update_bits(codec,
+ SITAR_A_CDC_CLK_OTHR_RESET_CTL,
+ 1 << w->shift, 0);
+ sitar_config_gain_compander(codec, w->shift, 1, event);
+ /* Compander enable -> 0x370/0x378 */
+ snd_soc_update_bits(codec, SITAR_A_CDC_COMP1_B1_CTL +
+ w->shift * 8, 0x03, 0x03);
+ /* Update the RMS meter resampling */
+ snd_soc_update_bits(codec,
+ SITAR_A_CDC_COMP1_B3_CTL +
+ w->shift * 8, 0xFF, 0x01);
+ snd_soc_update_bits(codec,
+ SITAR_A_CDC_COMP1_B2_CTL +
+ w->shift * 8, 0xF0, 0x50);
+ usleep_range(COMP_BRINGUP_WAIT_TIME, COMP_BRINGUP_WAIT_TIME);
+ break;
+ case SND_SOC_DAPM_POST_PMU:
+ snd_soc_update_bits(codec,
+ SITAR_A_CDC_CLSG_CTL,
+ 0x11, 0x00);
+ if (w->shift == COMPANDER_1)
+ value = 0x22;
+ else
+ value = 0x11;
+ snd_soc_write(codec,
+ SITAR_A_CDC_CONN_CLSG_CTL, value);
+
+ snd_soc_update_bits(codec, SITAR_A_CDC_COMP1_B2_CTL +
+ w->shift * 8, 0x0F,
+ comp_samp_params[rate].peak_det_timeout);
+ snd_soc_update_bits(codec, SITAR_A_CDC_COMP1_B2_CTL +
+ w->shift * 8, 0xF0,
+ comp_samp_params[rate].rms_meter_div_fact);
+ snd_soc_update_bits(codec, SITAR_A_CDC_COMP1_B3_CTL +
+ w->shift * 8, 0xFF,
+ comp_samp_params[rate].rms_meter_resamp_fact);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ snd_soc_update_bits(codec, SITAR_A_CDC_COMP1_B1_CTL +
+ w->shift * 8, 0x03, 0x00);
+ /* Toggle compander reset bits */
+ snd_soc_update_bits(codec,
+ SITAR_A_CDC_CLK_OTHR_RESET_CTL,
+ 1 << w->shift,
+ 1 << w->shift);
+ snd_soc_update_bits(codec,
+ SITAR_A_CDC_CLK_OTHR_RESET_CTL,
+ 1 << w->shift, 0);
+ /* Disable compander clock */
+ snd_soc_update_bits(codec,
+ SITAR_A_CDC_CLK_RX_B2_CTL,
+ 1 << w->shift,
+ 0);
+ /* Restore the gain */
+ sitar_config_gain_compander(codec, w->shift,
+ sitar->comp_enabled[w->shift],
+ event);
+ snd_soc_update_bits(codec,
+ SITAR_A_CDC_CLSG_CTL,
+ 0x11, 0x11);
+ snd_soc_write(codec,
+ SITAR_A_CDC_CONN_CLSG_CTL, 0x14);
+ break;
+ }
+rtn:
+ return 0;
+}
+
+static int sitar_codec_dem_input_selection(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+ struct sitar_priv *sitar = snd_soc_codec_get_drvdata(codec);
+ pr_debug("%s: compander#1->enable(%d) compander#2->enable(%d) reg(0x%x = 0x%x) event(%d)\n",
+ __func__, sitar->comp_enabled[COMPANDER_1],
+ sitar->comp_enabled[COMPANDER_2],
+ SITAR_A_CDC_RX1_B6_CTL + w->shift * 8,
+ snd_soc_read(codec, SITAR_A_CDC_RX1_B6_CTL + w->shift * 8),
+ event);
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ if (sitar->comp_enabled[COMPANDER_1] ||
+ sitar->comp_enabled[COMPANDER_2])
+ snd_soc_update_bits(codec,
+ SITAR_A_CDC_RX1_B6_CTL +
+ w->shift * 8,
+ 1 << 5, 0);
+ else
+ snd_soc_update_bits(codec,
+ SITAR_A_CDC_RX1_B6_CTL +
+ w->shift * 8,
+ 1 << 5, 0x20);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ snd_soc_update_bits(codec,
+ SITAR_A_CDC_RX1_B6_CTL + w->shift * 8,
+ 1 << 5, 0);
+ break;
+ }
+ return 0;
+}
+
static const char * const sitar_ear_pa_gain_text[] = {"POS_6_DB",
"POS_2_DB", "NEG_2P5_DB", "NEG_12_DB"};
@@ -652,6 +1008,10 @@
sitar_get_iir_band_audio_mixer, sitar_put_iir_band_audio_mixer),
SOC_SINGLE_MULTI_EXT("IIR2 Band5", IIR2, BAND5, 255, 0, 5,
sitar_get_iir_band_audio_mixer, sitar_put_iir_band_audio_mixer),
+ SOC_SINGLE_EXT("COMP1 Switch", SND_SOC_NOPM, COMPANDER_1, 1, 0,
+ sitar_get_compander, sitar_set_compander),
+ SOC_SINGLE_EXT("COMP2 Switch", SND_SOC_NOPM, COMPANDER_2, 1, 0,
+ sitar_get_compander, sitar_set_compander),
};
static const char *rx_mix1_text[] = {
@@ -1267,9 +1627,14 @@
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_codec *codec = w->codec;
+ struct sitar_priv *sitar = snd_soc_codec_get_drvdata(codec);
u16 lineout_gain_reg;
- pr_debug("%s %d %s\n", __func__, event, w->name);
+ pr_debug("%s %d %s comp2 enable %d\n", __func__, event, w->name,
+ sitar->comp_enabled[COMPANDER_2]);
+
+ if (sitar->comp_enabled[COMPANDER_2])
+ goto rtn;
switch (w->shift) {
case 0:
@@ -1311,6 +1676,7 @@
snd_soc_update_bits(codec, lineout_gain_reg, 0x10, 0x00);
break;
}
+rtn:
return 0;
}
@@ -2010,16 +2376,22 @@
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_codec *codec = w->codec;
+ struct sitar_priv *sitar = snd_soc_codec_get_drvdata(codec);
- pr_debug("%s %s %d\n", __func__, w->name, event);
+ pr_debug("%s %s %d comp#1 enable %d\n", __func__,
+ w->name, event, sitar->comp_enabled[COMPANDER_1]);
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
if (w->reg == SITAR_A_RX_HPH_L_DAC_CTL) {
- snd_soc_update_bits(codec, SITAR_A_CDC_CONN_CLSG_CTL,
- 0x30, 0x20);
- snd_soc_update_bits(codec, SITAR_A_CDC_CONN_CLSG_CTL,
- 0x0C, 0x08);
+ if (!sitar->comp_enabled[COMPANDER_1]) {
+ snd_soc_update_bits(codec,
+ SITAR_A_CDC_CONN_CLSG_CTL,
+ 0x30, 0x20);
+ snd_soc_update_bits(codec,
+ SITAR_A_CDC_CONN_CLSG_CTL,
+ 0x0C, 0x08);
+ }
}
snd_soc_update_bits(codec, w->reg, 0x40, 0x40);
break;
@@ -2338,9 +2710,15 @@
SND_SOC_DAPM_MUX("DAC4 MUX", SND_SOC_NOPM, 0, 0,
&rx_dac4_mux),
- SND_SOC_DAPM_MIXER("RX1 CHAIN", SITAR_A_CDC_RX1_B6_CTL, 5, 0, NULL, 0),
- SND_SOC_DAPM_MIXER("RX2 CHAIN", SITAR_A_CDC_RX2_B6_CTL, 5, 0, NULL, 0),
- SND_SOC_DAPM_MIXER("RX3 CHAIN", SITAR_A_CDC_RX3_B6_CTL, 5, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER_E("RX1 CHAIN", SND_SOC_NOPM, 0, 0, NULL,
+ 0, sitar_codec_dem_input_selection,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
+ SND_SOC_DAPM_MIXER_E("RX2 CHAIN", SND_SOC_NOPM, 1, 0, NULL,
+ 0, sitar_codec_dem_input_selection,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
+ SND_SOC_DAPM_MIXER_E("RX3 CHAIN", SND_SOC_NOPM, 2, 0, NULL,
+ 0, sitar_codec_dem_input_selection,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
SND_SOC_DAPM_MUX("RX1 MIX1 INP1", SND_SOC_NOPM, 0, 0,
&rx_mix1_inp1_mux),
@@ -2463,6 +2841,13 @@
sitar_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU |
SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY("COMP1_CLK", SND_SOC_NOPM, COMPANDER_1, 0,
+ sitar_config_compander, SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY("COMP2_CLK", SND_SOC_NOPM, COMPANDER_2, 0,
+ sitar_config_compander, SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
/* Sidetone */
SND_SOC_DAPM_MUX("IIR1 INP1 MUX", SND_SOC_NOPM, 0, 0, &iir1_inp1_mux),
SND_SOC_DAPM_PGA("IIR1", SITAR_A_CDC_CLK_SD_CTL, 0, 0, NULL, 0),
@@ -2592,6 +2977,10 @@
{"SLIM RX3", NULL, "SLIM RX3 MUX"},
{"SLIM RX4", NULL, "SLIM RX4 MUX"},
+ {"RX1 MIX1", NULL, "COMP2_CLK"},
+ {"RX2 MIX1", NULL, "COMP1_CLK"},
+ {"RX3 MIX1", NULL, "COMP1_CLK"},
+
/* Slimbus port 5 is non functional in Sitar 1.0 */
{"RX1 MIX1 INP1", "RX1", "SLIM RX1"},
{"RX1 MIX1 INP1", "RX2", "SLIM RX2"},
@@ -2733,6 +3122,10 @@
if (reg == SITAR_A_CDC_RX1_VOL_CTL_B2_CTL + (8 * i))
return 1;
}
+
+ if ((reg == SITAR_A_CDC_COMP1_SHUT_DOWN_STATUS) ||
+ (reg == SITAR_A_CDC_COMP2_SHUT_DOWN_STATUS))
+ return 1;
return 0;
}
@@ -3191,6 +3584,7 @@
struct snd_soc_codec *codec = dai->codec;
struct sitar_priv *sitar = snd_soc_codec_get_drvdata(dai->codec);
u8 path, shift;
+ u32 compander_fs;
u16 tx_fs_reg, rx_fs_reg;
u8 tx_fs_rate, rx_fs_rate, rx_state, tx_state;
@@ -3200,18 +3594,32 @@
case 8000:
tx_fs_rate = 0x00;
rx_fs_rate = 0x00;
+ compander_fs = COMPANDER_FS_8KHZ;
break;
case 16000:
tx_fs_rate = 0x01;
rx_fs_rate = 0x20;
+ compander_fs = COMPANDER_FS_16KHZ;
break;
case 32000:
tx_fs_rate = 0x02;
rx_fs_rate = 0x40;
+ compander_fs = COMPANDER_FS_32KHZ;
break;
case 48000:
tx_fs_rate = 0x03;
rx_fs_rate = 0x60;
+ compander_fs = COMPANDER_FS_48KHZ;
+ break;
+ case 96000:
+ tx_fs_rate = 0x04;
+ rx_fs_rate = 0x80;
+ compander_fs = COMPANDER_FS_96KHZ;
+ break;
+ case 192000:
+ tx_fs_rate = 0x05;
+ rx_fs_rate = 0xa0;
+ compander_fs = COMPANDER_FS_192KHZ;
break;
default:
pr_err("%s: Invalid sampling rate %d\n", __func__,
@@ -3285,6 +3693,9 @@
+ (BITS_PER_REG*(path-1));
snd_soc_update_bits(codec, rx_fs_reg,
0xE0, rx_fs_rate);
+ if (comp_rx_path[shift] < COMPANDER_MAX)
+ sitar->comp_fs[comp_rx_path[shift]]
+ = compander_fs;
}
}
if (sitar->intf_type == WCD9XXX_INTERFACE_TYPE_I2C) {
@@ -5484,6 +5895,11 @@
if (sitar->intf_type == WCD9XXX_INTERFACE_TYPE_I2C)
sitar_i2c_codec_init_reg(codec);
+ for (i = 0; i < COMPANDER_MAX; i++) {
+ sitar->comp_enabled[i] = 0;
+ sitar->comp_fs[i] = COMPANDER_FS_48KHZ;
+ }
+
ret = sitar_handle_pdata(sitar);
if (IS_ERR_VALUE(ret)) {
pr_err("%s: bad pdata\n", __func__);
diff --git a/sound/soc/msm/msm8974.c b/sound/soc/msm/msm8974.c
index 778dc10..7acb294 100644
--- a/sound/soc/msm/msm8974.c
+++ b/sound/soc/msm/msm8974.c
@@ -2271,6 +2271,7 @@
struct snd_soc_card *card = &snd_soc_card_msm8974;
struct msm8974_asoc_mach_data *pdata;
int ret;
+ const char *auxpcm_pri_gpio_set = NULL;
if (!pdev->dev.of_node) {
dev_err(&pdev->dev, "No platform supplied from device tree\n");
@@ -2396,7 +2397,24 @@
goto err;
}
- lpaif_pri_muxsel_virt_addr = ioremap(LPAIF_PRI_MODE_MUXSEL, 4);
+ ret = of_property_read_string(pdev->dev.of_node,
+ "qcom,prim-auxpcm-gpio-set", &auxpcm_pri_gpio_set);
+ if (ret) {
+ dev_err(&pdev->dev, "Looking up %s property in node %s failed",
+ "qcom,prim-auxpcm-gpio-set",
+ pdev->dev.of_node->full_name);
+ goto err;
+ }
+ if (!strcmp(auxpcm_pri_gpio_set, "prim-gpio-prim")) {
+ lpaif_pri_muxsel_virt_addr = ioremap(LPAIF_PRI_MODE_MUXSEL, 4);
+ } else if (!strcmp(auxpcm_pri_gpio_set, "prim-gpio-tert")) {
+ lpaif_pri_muxsel_virt_addr = ioremap(LPAIF_TER_MODE_MUXSEL, 4);
+ } else {
+ dev_err(&pdev->dev, "Invalid value %s for AUXPCM GPIO set\n",
+ auxpcm_pri_gpio_set);
+ ret = -EINVAL;
+ goto err;
+ }
if (lpaif_pri_muxsel_virt_addr == NULL) {
pr_err("%s Pri muxsel virt addr is null\n", __func__);
ret = -EINVAL;